/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_btree.h"
#include "xfs_trace.h"

/*
 * Return non-zero if @ino is one of the filesystem's private metadata
 * inodes: the realtime bitmap and summary inodes, and — when the
 * superblock says quota support is enabled — the user/group quota
 * inodes.  These are never reported through bulkstat.
 */
STATIC int
xfs_internal_inum(
	xfs_mount_t	*mp,
	xfs_ino_t	ino)
{
	return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
		(xfs_sb_version_hasquota(&mp->m_sb) &&
		 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino)));
}

/*
 * Return stat information for one inode.
 * Return 0 if ok, else errno.
 *
 * Looks the inode up with XFS_IGET_UNTRUSTED (the inode number came from
 * userspace and must be fully validated), fills a kernel-side xfs_bstat
 * from the incore inode, then hands it to @formatter to copy out to the
 * user buffer.  *stat is set to BULKSTAT_RV_DIDONE only if the formatter
 * succeeded; on any failure it stays BULKSTAT_RV_NOTHING.
 */
int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	xfs_ino_t		ino,		/* inode to get data for */
	void __user		*buffer,	/* buffer to place output in */
	int			ubsize,		/* size of buffer */
	bulkstat_one_fmt_pf	formatter,	/* formatter, copy to user */
	int			*ubused,	/* bytes used by me */
	int			*stat)		/* BULKSTAT_RV_... */
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct inode		*inode;
	struct xfs_bstat	*buf;		/* return buffer */
	int			error = 0;	/* error value */

	*stat = BULKSTAT_RV_NOTHING;

	/* internal metadata inodes are not visible to userspace */
	if (!buffer || xfs_internal_inum(mp, ino))
		return XFS_ERROR(EINVAL);

	/* KM_MAYFAIL: a transient allocation failure here is reportable */
	buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
	if (!buf)
		return XFS_ERROR(ENOMEM);

	error = xfs_iget(mp, NULL, ino,
			 XFS_IGET_UNTRUSTED, XFS_ILOCK_SHARED, &ip);
	if (error) {
		*stat = BULKSTAT_RV_NOTHING;
		goto out_free;
	}

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);

	dic = &ip->i_d;
	inode = VFS_I(ip);

	/* xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_nlink = dic->di_nlink;
	buf->bs_projid = dic->di_projid;
	buf->bs_ino = ino;
	buf->bs_mode = dic->di_mode;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;

	/*
	 * We need to read the timestamps from the Linux inode because
	 * the VFS keeps writing directly into the inode structure instead
	 * of telling us about the updates.
	 */
	buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
	buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
	buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
	buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
	buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
	buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;

	buf->bs_xflags = xfs_ip2xflags(ip);
	/* di_extsize is in filesystem blocks; report it in bytes */
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	buf->bs_gen = dic->di_gen;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);

	/* rdev/blksize/blocks depend on the data fork format */
	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		/* include delalloc blocks not yet on disk */
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	/* drop the inode before copying to userspace (formatter may fault) */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);

	error = formatter(buffer, ubsize, ubused, buf);

	if (!error)
		*stat = BULKSTAT_RV_DIDONE;

 out_free:
	kmem_free(buf);
	return error;
}

/* Return 0 on success or positive error */
/*
 * Default bulkstat formatter: copy one xfs_bstat to the user buffer.
 * ENOMEM if the remaining user buffer can't hold a full record,
 * EFAULT if the copy-out faults; on success reports the bytes consumed
 * through *ubused (when the caller asked for it).
 */
STATIC int
xfs_bulkstat_one_fmt(
	void			__user *ubuffer,
	int			ubsize,
	int			*ubused,
	const xfs_bstat_t	*buffer)
{
	if (ubsize < sizeof(*buffer))
		return XFS_ERROR(ENOMEM);
	if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
		return XFS_ERROR(EFAULT);
	if (ubused)
		*ubused = sizeof(*buffer);
	return 0;
}

/*
 * Convenience wrapper binding xfs_bulkstat_one_int to the default
 * formatter above.  Matches the bulkstat_one_pf signature expected by
 * xfs_bulkstat().
 */
int
xfs_bulkstat_one(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	int		*ubused,	/* bytes used by me */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt, ubused, stat);
}

/* true while the user buffer can still hold one more stat record */
#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 *
 * Two-phase algorithm, repeated per allocation group:
 *  1) walk the inode allocation btree and gather inode chunk records
 *     into irbuf (issuing cluster read-ahead as we go), with the AGI
 *     buffer and btree cursor held;
 *  2) drop those locks, then format each allocated inode in the
 *     gathered chunks into the user's buffer via @formatter.
 * *lastinop is both the resume cookie on entry and the continuation
 * point written back on exit; *done is set when the whole filesystem
 * has been walked.
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	*lastinop,	/* last inode returned */
	int		*ubcountp,	/* size of buffer/count returned */
	bulkstat_one_pf	formatter,	/* func that'd fill a single buf */
	size_t		statstruct_size, /* sizeof struct filling */
	char		__user *ubuffer, /* buffer with inode stats */
	int		*done)		/* 1 if there are more stats to get */
{
	xfs_agblock_t	agbno=0;/* allocation group block number */
	xfs_buf_t	*agbp;	/* agi header buffer */
	xfs_agi_t	*agi;	/* agi header data */
	xfs_agino_t	agino;	/* inode # in allocation group */
	xfs_agnumber_t	agno;	/* allocation group number */
	xfs_daddr_t	bno;	/* inode cluster start daddr */
	int		chunkidx; /* current index into inode chunk */
	int		clustidx; /* current index into inode cluster */
	xfs_btree_cur_t	*cur;	/* btree cursor for ialloc btree */
	int		end_of_ag; /* set if we've seen the ag end */
	int		error;	/* error code */
	int		fmterror;/* bulkstat formatter result */
	int		i;	/* loop index */
	int		icount;	/* count of inodes good in irbuf */
	size_t		irbsize; /* size of irec buffer in bytes */
	xfs_ino_t	ino;	/* inode number (filesystem) */
	xfs_inobt_rec_incore_t *irbp;	/* current irec buffer pointer */
	xfs_inobt_rec_incore_t *irbuf;	/* start of irec buffer */
	xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */
	xfs_ino_t	lastino; /* last inode number returned */
	int		nbcluster; /* # of blocks in a cluster */
	int		nicluster; /* # of inodes in a cluster */
	int		nimask;	/* mask for inode clusters */
	int		nirbuf;	/* size of irbuf */
	int		rval;	/* return value error code */
	int		tmp;	/* result value from btree calls */
	int		ubcount; /* size of user's buffer */
	int		ubleft;	/* bytes left in user's buffer */
	char		__user *ubufp;	/* pointer into user's buffer */
	int		ubelem;	/* spaces used in user's buffer */
	int		ubused;	/* bytes used by formatter */
	xfs_buf_t	*bp;	/* ptr to on-disk inode cluster buf */

	/*
	 * Get the last inode value, see if there's nothing to do.
	 *
	 * A cookie that doesn't round-trip through AGNO/AGINO conversion
	 * is past the end of the filesystem: report "done" rather than
	 * an error so a caller iterating to the end terminates cleanly.
	 */
	ino = (xfs_ino_t)*lastinop;
	lastino = ino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	if (agno >= mp->m_sb.sb_agcount ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}
	if (!ubcountp || *ubcountp <= 0) {
		return EINVAL;
	}
	ubcount = *ubcountp; /* statstruct's */
	ubleft = ubcount * statstruct_size; /* bytes */
	*ubcountp = ubelem = 0;
	*done = 0;
	fmterror = 0;
	ubufp = ubuffer;
	/*
	 * Geometry of an inode cluster: if a filesystem block holds at
	 * least a whole cluster, a cluster is one block's worth of
	 * inodes; otherwise it spans several blocks.
	 */
	nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
		mp->m_sb.sb_inopblock :
		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
	nimask = ~(nicluster - 1);
	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
	/* grab as much as we can get, but at least one page */
	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!irbuf)
		return ENOMEM;

	nirbuf = irbsize / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
	rval = 0;
	while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
		cond_resched();
		bp = NULL;
		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		if (error) {
			/*
			 * Skip this allocation group and go to the next one.
			 */
			agno++;
			agino = 0;
			continue;
		}
		agi = XFS_BUF_TO_AGI(agbp);
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
		irbp = irbuf;
		irbufend = irbuf + nirbuf;
		end_of_ag = 0;
		/*
		 * If we're returning in the middle of an allocation group,
		 * we need to get the remainder of the chunk we're in.
		 */
		if (agino > 0) {
			xfs_inobt_rec_incore_t r;

			/*
			 * Lookup the inode chunk that this inode lives in.
			 */
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE,
						 &tmp);
			if (!error &&	/* no I/O error */
			    tmp &&	/* lookup succeeded */
			    /* got the record, should always work */
			    !(error = xfs_inobt_get_rec(cur, &r, &i)) &&
			    i == 1 &&
			    /* this is the right chunk */
			    agino < r.ir_startino + XFS_INODES_PER_CHUNK &&
			    /* lastino was not last in chunk */
			    (chunkidx = agino - r.ir_startino + 1) <
				    XFS_INODES_PER_CHUNK &&
			    /* there are some left allocated */
			    xfs_inobt_maskn(chunkidx,
				    XFS_INODES_PER_CHUNK - chunkidx) &
				    ~r.ir_free) {
				/*
				 * Grab the chunk record.  Mark all the
				 * uninteresting inodes (because they're
				 * before our start point) free.
				 */
				for (i = 0; i < chunkidx; i++) {
					if (XFS_INOBT_MASK(i) & ~r.ir_free)
						r.ir_freecount++;
				}
				r.ir_free |= xfs_inobt_maskn(0, chunkidx);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				agino = r.ir_startino + XFS_INODES_PER_CHUNK;
				icount = XFS_INODES_PER_CHUNK - r.ir_freecount;
			} else {
				/*
				 * If any of those tests failed, bump the
				 * inode number (just in case).
				 */
				agino++;
				icount = 0;
			}
			/*
			 * In any case, increment to the next record.
			 */
			if (!error)
				error = xfs_btree_increment(cur, 0, &tmp);
		} else {
			/*
			 * Start of ag.  Lookup the first inode chunk.
			 */
			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
			icount = 0;
		}
		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			xfs_inobt_rec_incore_t r;

			/*
			 * Loop as long as we're unable to read the
			 * inode btree.
			 */
			while (error) {
				agino += XFS_INODES_PER_CHUNK;
				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
						be32_to_cpu(agi->agi_length))
					break;
				error = xfs_inobt_lookup(cur, agino,
							 XFS_LOOKUP_GE, &tmp);
				cond_resched();
			}
			/*
			 * If ran off the end of the ag either with an error,
			 * or the normal way, set end and stop collecting.
			 */
			if (error) {
				end_of_ag = 1;
				break;
			}

			error = xfs_inobt_get_rec(cur, &r, &i);
			if (error || i == 0) {
				end_of_ag = 1;
				break;
			}

			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
				/*
				 * Loop over all clusters in the next chunk.
				 * Do a readahead if there are any allocated
				 * inodes in that cluster.
				 */
				agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
				for (chunkidx = 0;
				     chunkidx < XFS_INODES_PER_CHUNK;
				     chunkidx += nicluster,
				     agbno += nbcluster) {
					if (xfs_inobt_maskn(chunkidx, nicluster)
					    & ~r.ir_free)
						xfs_btree_reada_bufs(mp, agno,
							agbno, nbcluster);
				}
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
			}
			/*
			 * Set agino to after this chunk and bump the cursor.
			 */
			agino = r.ir_startino + XFS_INODES_PER_CHUNK;
			error = xfs_btree_increment(cur, 0, &tmp);
			cond_resched();
		}
		/*
		 * Drop the btree buffers and the agi buffer.
		 * We can't hold any of the locks these represent
		 * when calling iget.
		 */
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		/*
		 * Now format all the good inodes into the user's buffer.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
			/*
			 * Now process this chunk of inodes.
			 */
			for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
			     XFS_BULKSTAT_UBLEFT(ubleft) &&
				irbp->ir_freecount < XFS_INODES_PER_CHUNK;
			     chunkidx++, clustidx++, agino++) {
				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
				/*
				 * Recompute agbno if this is the
				 * first inode of the cluster.
				 *
				 * Careful with clustidx.   There can be
				 * multiple clusters per chunk, a single
				 * cluster per chunk or a cluster that has
				 * inodes represented from several different
				 * chunks (if blocksize is large).
				 *
				 * Because of this, the starting clustidx is
				 * initialized to zero in this loop but must
				 * later be reset after reading in the cluster
				 * buffer.
				 */
				if ((chunkidx & (nicluster - 1)) == 0) {
					agbno = XFS_AGINO_TO_AGBNO(mp,
							irbp->ir_startino) +
						((chunkidx & nimask) >>
						 mp->m_sb.sb_inopblog);
				}
				ino = XFS_AGINO_TO_INO(mp, agno, agino);
				/*
				 * NOTE(review): bno is computed here but never
				 * read afterwards in this function, and bp is
				 * only ever NULL at the release below — both
				 * look like leftovers from a previous direct
				 * cluster-read implementation; candidates for
				 * removal (confirm no side effects first).
				 */
				bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
				/*
				 * Skip if this inode is free.
				 */
				if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
					lastino = ino;
					continue;
				}
				/*
				 * Count used inodes as free so we can tell
				 * when the chunk is used up.
				 */
				irbp->ir_freecount++;

				/*
				 * Get the inode and fill in a single buffer.
				 */
				ubused = statstruct_size;
				error = formatter(mp, ino, ubufp, ubleft,
						  &ubused, &fmterror);
				if (fmterror == BULKSTAT_RV_NOTHING) {
					/*
					 * ENOENT/EINVAL just mean this inode
					 * went away or is internal: skip it
					 * and keep going.  Anything else
					 * aborts the walk.
					 */
					if (error && error != ENOENT &&
						error != EINVAL) {
						ubleft = 0;
						rval = error;
						break;
					}
					lastino = ino;
					continue;
				}
				if (fmterror == BULKSTAT_RV_GIVEUP) {
					ubleft = 0;
					ASSERT(error);
					rval = error;
					break;
				}
				if (ubufp)
					ubufp += ubused;
				ubleft -= ubused;
				ubelem++;
				lastino = ino;
			}

			cond_resched();
		}

		if (bp)
			xfs_buf_relse(bp);

		/*
		 * Set up for the next loop iteration.
		 */
		if (XFS_BULKSTAT_UBLEFT(ubleft)) {
			if (end_of_ag) {
				agno++;
				agino = 0;
			} else
				agino = XFS_INO_TO_AGINO(mp, lastino);
		} else
			break;
	}
	/*
	 * Done, we're either out of filesystem or space to put the data.
	 */
	kmem_free_large(irbuf);
	*ubcountp = ubelem;
	/*
	 * Found some inodes, return them now and return the error next time.
	 */
	if (ubelem)
		rval = 0;
	if (agno >= mp->m_sb.sb_agcount) {
		/*
		 * If we ran out of filesystem, mark lastino as off
		 * the end of the filesystem, so the next call
		 * will return immediately.
		 */
		*lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
		*done = 1;
	} else
		*lastinop = (xfs_ino_t)lastino;

	return rval;
}

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 * Special case for non-sequential one inode bulkstat.
 */
int					/* error status */
xfs_bulkstat_single(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	*lastinop,	/* inode to return */
	char		__user *buffer,	/* buffer with inode stats */
	int		*done)		/* 1 if there are more stats to get */
{
	int		count;	/* count value for bulkstat call */
	int		error;	/* return value */
	xfs_ino_t	ino;	/* filesystem inode number */
	int		res;	/* result from bs1 */

	/*
	 * note that requesting valid inode numbers which are not allocated
	 * to inodes will most likely cause xfs_itobp to generate warning
	 * messages about bad magic numbers. This is ok. The fact that
	 * the inode isn't actually an inode is handled by the
	 * error check below. Done this way to make the usual case faster
	 * at the expense of the error case.
	 */

	ino = (xfs_ino_t)*lastinop;
	/* fast path: stat exactly the requested inode (no ubused needed) */
	error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 0, &res);
	if (error) {
		/*
		 * Special case way failed, do it the "long" way
		 * to see if that works.
		 *
		 * Back up the cookie by one so the full walk starts at the
		 * requested inode, then check that the walk actually
		 * returned that inode and not some later one.
		 */
		(*lastinop)--;
		count = 1;
		if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
				sizeof(xfs_bstat_t), buffer, done))
			return error;
		if (count == 0 || (xfs_ino_t)*lastinop != ino)
			/* map corruption to EINVAL for userspace */
			return error == EFSCORRUPTED ?
				XFS_ERROR(EINVAL) : error;
		else
			return 0;
	}
	*done = 0;
	return 0;
}

/*
 * Default inumbers formatter: copy @count xfs_inogrp records to the
 * user buffer and report the byte count through *written.
 *
 * NOTE(review): this returns a negative errno (-EFAULT) while the
 * neighbouring formatters return positive XFS_ERROR() codes — looks
 * inconsistent; confirm which sign convention the callers expect.
 */
int
xfs_inumbers_fmt(
	void			__user *ubuffer, /* buffer to write to */
	const xfs_inogrp_t	*buffer,	/* buffer to read from */
	long			count,		/* # of elements to read */
	long			*written)	/* # of bytes written */
{
	if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
		return -EFAULT;
	*written = count * sizeof(*buffer);
	return 0;
}

/*
 * Return inode number table for the filesystem.
 *
 * Walks the inode allocation btrees across allocation groups, batching
 * one xfs_inogrp per inode chunk into a kernel page-sized buffer and
 * flushing it to userspace via @formatter whenever it fills.  *lastino
 * is the resume cookie; *count is the caller's capacity on entry and
 * the number of records returned on exit.
 */
int					/* error status */
xfs_inumbers(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	*lastino,	/* last inode returned */
	int		*count,		/* size of buffer/count returned */
	void		__user *ubuffer,/* buffer with inode descriptions */
	inumbers_fmt_pf	formatter)
{
	xfs_buf_t	*agbp;
	xfs_agino_t	agino;
	xfs_agnumber_t	agno;
	int		bcount;
	xfs_inogrp_t	*buffer;
	int		bufidx;
	xfs_btree_cur_t	*cur;
	int		error;
	xfs_inobt_rec_incore_t r;
	int		i;
	xfs_ino_t	ino;
	int		left;
	int		tmp;

	ino = (xfs_ino_t)*lastino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	left = *count;
	*count = 0;
	/* batch at most a page's worth of records per copy-out */
	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
	error = bufidx = 0;
	cur = NULL;
	agbp = NULL;
	while (left > 0 && agno < mp->m_sb.sb_agcount) {
		/* (re)acquire AGI buffer and cursor after any release below */
		if (agbp == NULL) {
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			if (error) {
				/*
				 * If we can't read the AGI of this ag,
				 * then just skip to the next one.
				 */
				ASSERT(cur == NULL);
				agbp = NULL;
				agno++;
				agino = 0;
				continue;
			}
			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
						 &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * Move up the last inode in the current
				 * chunk.  The lookup_ge will always get
				 * us the first inode in the next chunk.
				 */
				agino += XFS_INODES_PER_CHUNK - 1;
				continue;
			}
		}
		error = xfs_inobt_get_rec(cur, &r, &i);
		if (error || i == 0) {
			/* end of this AG: release and advance to the next */
			xfs_buf_relse(agbp);
			agbp = NULL;
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			cur = NULL;
			agno++;
			agino = 0;
			continue;
		}
		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino =
			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
		buffer[bufidx].xi_alloccount =
			XFS_INODES_PER_CHUNK - r.ir_freecount;
		buffer[bufidx].xi_allocmask = ~r.ir_free;
		bufidx++;
		left--;
		/* flush a full batch to the user buffer */
		if (bufidx == bcount) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written)) {
				error = XFS_ERROR(EFAULT);
				break;
			}
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (left) {
			error = xfs_btree_increment(cur, 0, &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * The agino value has already been bumped.
				 * Just try to skip up to it.
				 */
				agino += XFS_INODES_PER_CHUNK;
				continue;
			}
		}
	}
	/* flush the final partial batch and publish the resume cookie */
	if (!error) {
		if (bufidx) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written))
				error = XFS_ERROR(EFAULT);
			else
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}
	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);
	return error;
}