/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"


STATIC int
xfs_inobt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_inobt_mnr[level != 0];
}

STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}

STATIC void
xfs_inobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);

	agi->agi_root = nptr->s;
	be32_add_cpu(&agi->agi_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}

STATIC void
xfs_finobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);

	agi->agi_free_root = nptr->s;
	be32_add_cpu(&agi->agi_free_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp,
			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
}

STATIC int
xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	xfs_alloc_arg_t		args;		/* block allocation args */
	int			error;		/* error return value */
	xfs_agblock_t		sbno = be32_to_cpu(start->s);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;

	error = xfs_alloc_vextent(&args);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}
	if (args.fsbno == NULLFSBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);

	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
	*stat = 1;
	return 0;
}

STATIC int
xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	return xfs_free_extent(cur->bc_tp,
			XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1);
}

STATIC int
xfs_inobt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_inobt_mxr[level != 0];
}

STATIC void
xfs_inobt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->inobt.ir_startino = rec->inobt.ir_startino;
}

STATIC void
xfs_inobt_init_rec_from_key(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = key->inobt.ir_startino;
}

STATIC void
xfs_inobt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		rec->inobt.ir_u.sp.ir_holemask =
					cpu_to_be16(cur->bc_rec.i.ir_holemask);
		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec->inobt.ir_u.f.ir_freecount =
					cpu_to_be32(cur->bc_rec.i.ir_freecount);
	}
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}
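
/*
 * For reference, a sketch of the two on-disk layouts the function above
 * chooses between, paraphrased from the xfs_inobt_rec definition in
 * xfs_format.h (not part of this file). The ir_u union overlays:
 *
 *	struct { __be32 ir_freecount; } f;		(non-sparse)
 *	struct { __be16 ir_holemask;
 *		 __u8   ir_count;
 *		 __u8   ir_freecount; } sp;		(sparse inodes)
 *
 * Both layouts occupy the same four bytes between ir_startino and ir_free,
 * which is why ir_holemask/ir_count are "not supported on-disk" unless the
 * sparse inode feature is enabled.
 */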

/*
 * initial value of ptr for lookup
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_root;
}

STATIC void
xfs_finobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));
	ptr->s = agi->agi_free_root;
}

STATIC __int64_t
xfs_inobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (__int64_t)be32_to_cpu(key->inobt.ir_startino) -
			  cur->bc_rec.i.ir_startino;
}

static int
xfs_inobt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	unsigned int		level;

	/*
	 * During growfs operations, we can't verify the exact owner as the
	 * perag is not fully initialised and hence not attached to the buffer.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agi information will not yet have been initialised
	 * from the on disk AGI. We don't currently use any of this information,
	 * but beware of the landmine (i.e. need to check pag->pagi_init) if we
	 * ever do.
	 */
	switch (block->bb_magic) {
	case cpu_to_be32(XFS_IBT_CRC_MAGIC):
	case cpu_to_be32(XFS_FIBT_CRC_MAGIC):
		if (!xfs_btree_sblock_v5hdr_verify(bp))
			return false;
		/* fall through */
	case cpu_to_be32(XFS_IBT_MAGIC):
	case cpu_to_be32(XFS_FIBT_MAGIC):
		break;
	default:
		return false;
	}

	/* level verification */
	level = be16_to_cpu(block->bb_level);
	if (level >= mp->m_in_maxlevels)
		return false;

	return xfs_btree_sblock_verify(bp, mp->m_inobt_mxr[level != 0]);
}

static void
xfs_inobt_read_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_buf_ioerror(bp, -EFSBADCRC);
	else if (!xfs_inobt_verify(bp))
		xfs_buf_ioerror(bp, -EFSCORRUPTED);

	if (bp->b_error) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp);
	}
}

static void
xfs_inobt_write_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_inobt_verify(bp)) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_inobt_buf_ops = {
	.name = "xfs_inobt",
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
};
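
/*
 * Note on how these ops are used (descriptive, beyond the original comments):
 * the buffer cache invokes ->verify_read from I/O completion after a block is
 * read from disk, and ->verify_write immediately before a buffer is written
 * back, so a corrupt inobt block is caught both before its contents are used
 * and before bad in-memory state can reach the disk.
 */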

#if defined(DEBUG) || defined(XFS_WARN)
STATIC int
xfs_inobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->inobt.ir_startino) <
		be32_to_cpu(k2->inobt.ir_startino);
}

STATIC int
xfs_inobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
		be32_to_cpu(r2->inobt.ir_startino);
}
#endif	/* DEBUG */

static const struct xfs_btree_ops xfs_inobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_inobt_set_root,
	.alloc_block		= xfs_inobt_alloc_block,
	.free_block		= xfs_inobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_rec_from_key	= xfs_inobt_init_rec_from_key,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
#if defined(DEBUG) || defined(XFS_WARN)
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
#endif
};

static const struct xfs_btree_ops xfs_finobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_finobt_set_root,
	.alloc_block		= xfs_inobt_alloc_block,
	.free_block		= xfs_inobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_rec_from_key	= xfs_inobt_init_rec_from_key,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
#if defined(DEBUG) || defined(XFS_WARN)
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
#endif
};

/*
 * Allocate a new inode btree cursor.
 */
struct xfs_btree_cur *				/* new inode btree cursor */
xfs_inobt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agi structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* ialloc or free ino btree */
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
	struct xfs_btree_cur	*cur;

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	if (btnum == XFS_BTNUM_INO) {
		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
		cur->bc_ops = &xfs_inobt_ops;
	} else {
		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
		cur->bc_ops = &xfs_finobt_ops;
	}

	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}
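
/*
 * Illustrative usage (a sketch, not from the original file): a caller that
 * already holds the AGI buffer typically builds a cursor, walks the tree via
 * the generic btree code, and tears the cursor down again, e.g.:
 *
 *	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
 *	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &stat);
 *	...
 *	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 *
 * xfs_inobt_lookup() lives in xfs_ialloc.c; a free inode btree walk passes
 * XFS_BTNUM_FINO instead, so the cursor picks up xfs_finobt_ops above.
 */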

/*
 * Calculate number of records in an inobt btree block.
 */
int
xfs_inobt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_INOBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_inobt_rec_t);
	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
}
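
/*
 * Worked example (illustrative numbers, not from the original source): on a
 * 4096-byte-block v5 filesystem the short-form btree block header
 * (XFS_BTREE_SBLOCK_CRC_LEN) takes 56 bytes, leaving 4040 bytes of payload:
 * 4040 / 16 = 252 records per leaf block, and 4040 / (4 + 4) = 505
 * key/pointer pairs per node block. Without CRCs the header is 16 bytes,
 * giving 255 and 510 respectively.
 */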

/*
 * Convert the inode record holemask to an inode allocation bitmap. The inode
 * allocation bitmap is inode granularity and specifies whether an inode is
 * physically allocated on disk (not whether the inode is considered allocated
 * or free by the fs).
 *
 * A bit value of 1 means the inode is allocated, a value of 0 means it is
 * free.
 */
uint64_t
xfs_inobt_irec_to_allocmask(
	struct xfs_inobt_rec_incore	*rec)
{
	uint64_t			bitmap = 0;
	uint64_t			inodespbit;
	int				nextbit;
	uint				allocbitmap;

	/*
	 * The holemask has 16 bits for a 64-inode record. Therefore each
	 * holemask bit represents multiple inodes. Create a mask of bits to
	 * set in the allocmask for each holemask bit.
	 */
	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

	/*
	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
	 * anything beyond the 16 holemask bits since this casts to a larger
	 * type.
	 */
	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

	/*
	 * allocbitmap is the inverted holemask so every set bit represents
	 * allocated inodes. To expand from 16-bit holemask granularity to
	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
	 * bitmap for every holemask bit set. For example, with 64 inodes per
	 * record each holemask bit covers 4 inodes, so a holemask of 0xff00
	 * (upper half sparse) expands to an allocation bitmap of
	 * 0x00000000ffffffff (lower 32 inodes physically allocated).
	 */
	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
	while (nextbit != -1) {
		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

		bitmap |= (inodespbit <<
			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));

		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
	}

	return bitmap;
}

#if defined(DEBUG) || defined(XFS_WARN)
/*
 * Verify that an in-core inode record has a valid inode count.
 */
int
xfs_inobt_rec_check_count(
	struct xfs_mount		*mp,
	struct xfs_inobt_rec_incore	*rec)
{
	int				inocount = 0;
	int				nextbit = 0;
	uint64_t			allocbmap;
	int				wordsz;

	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
	allocbmap = xfs_inobt_irec_to_allocmask(rec);

	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
	while (nextbit != -1) {
		inocount++;
		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
				       nextbit + 1);
	}

	if (inocount != rec->ir_count)
		return -EFSCORRUPTED;

	return 0;
}
#endif	/* DEBUG */