/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_btree.h"

#ifndef HAVE_USERACC
#define useracc(ubuffer, size, flags, foo)	(0)
#define unuseracc(ubuffer, size, flags)
#endif
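
/*
 * On kernels without useracc(), the stubs above turn the user-buffer
 * lock/unlock calls in xfs_bulkstat() into no-ops: useracc() then
 * always reports success and unuseracc() expands to nothing.
 */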

STATIC int
xfs_bulkstat_one_iget(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	xfs_daddr_t	bno,		/* starting bno of inode cluster */
	xfs_bstat_t	*buf,		/* return buffer */
	int		*stat)		/* BULKSTAT_RV_... */
{
	xfs_dinode_core_t *dic;		/* dinode core info pointer */
	xfs_inode_t	*ip;		/* incore inode pointer */
	int		error;

	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, bno);
	if (error) {
		*stat = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip != NULL);
	ASSERT(ip->i_blkno != (xfs_daddr_t)0);
	if (ip->i_d.di_mode == 0) {
		*stat = BULKSTAT_RV_NOTHING;
		error = XFS_ERROR(ENOENT);
		goto out_iput;
	}

	dic = &ip->i_d;

	/*
	 * xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_nlink = dic->di_nlink;
	buf->bs_projid = dic->di_projid;
	buf->bs_ino = ino;
	buf->bs_mode = dic->di_mode;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;
	buf->bs_atime.tv_sec = dic->di_atime.t_sec;
	buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
	buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
	buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
	buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
	buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	buf->bs_gen = dic->di_gen;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;

	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}

 out_iput:
	xfs_iput(ip, XFS_ILOCK_SHARED);
	return error;
}
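
/*
 * The iget path above works from the in-core inode, whose fields are
 * already in CPU byte order.  xfs_bulkstat_one_dinode() below works
 * from the raw on-disk inode instead, so every field is decoded with
 * INT_GET(..., ARCH_CONVERT) from the on-disk (big-endian) format.
 */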

STATIC int
xfs_bulkstat_one_dinode(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	xfs_dinode_t	*dip,		/* on-disk inode pointer */
	xfs_bstat_t	*buf)		/* return buffer */
{
	xfs_dinode_core_t *dic;		/* dinode core info pointer */

	dic = &dip->di_core;

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format.  We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (INT_GET(dic->di_version, ARCH_CONVERT) == XFS_DINODE_VERSION_1) {
		buf->bs_nlink = INT_GET(dic->di_onlink, ARCH_CONVERT);
		buf->bs_projid = 0;
	} else {
		buf->bs_nlink = INT_GET(dic->di_nlink, ARCH_CONVERT);
		buf->bs_projid = INT_GET(dic->di_projid, ARCH_CONVERT);
	}

	buf->bs_ino = ino;
	buf->bs_mode = INT_GET(dic->di_mode, ARCH_CONVERT);
	buf->bs_uid = INT_GET(dic->di_uid, ARCH_CONVERT);
	buf->bs_gid = INT_GET(dic->di_gid, ARCH_CONVERT);
	buf->bs_size = INT_GET(dic->di_size, ARCH_CONVERT);
	buf->bs_atime.tv_sec = INT_GET(dic->di_atime.t_sec, ARCH_CONVERT);
	buf->bs_atime.tv_nsec = INT_GET(dic->di_atime.t_nsec, ARCH_CONVERT);
	buf->bs_mtime.tv_sec = INT_GET(dic->di_mtime.t_sec, ARCH_CONVERT);
	buf->bs_mtime.tv_nsec = INT_GET(dic->di_mtime.t_nsec, ARCH_CONVERT);
	buf->bs_ctime.tv_sec = INT_GET(dic->di_ctime.t_sec, ARCH_CONVERT);
	buf->bs_ctime.tv_nsec = INT_GET(dic->di_ctime.t_nsec, ARCH_CONVERT);
	buf->bs_xflags = xfs_dic2xflags(dic);
	buf->bs_extsize = INT_GET(dic->di_extsize, ARCH_CONVERT) <<
		mp->m_sb.sb_blocklog;
	buf->bs_extents = INT_GET(dic->di_nextents, ARCH_CONVERT);
	buf->bs_gen = INT_GET(dic->di_gen, ARCH_CONVERT);
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = INT_GET(dic->di_dmevmask, ARCH_CONVERT);
	buf->bs_dmstate = INT_GET(dic->di_dmstate, ARCH_CONVERT);
	buf->bs_aextents = INT_GET(dic->di_anextents, ARCH_CONVERT);

	switch (INT_GET(dic->di_format, ARCH_CONVERT)) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = INT_GET(dip->di_u.di_dev, ARCH_CONVERT);
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = INT_GET(dic->di_nblocks, ARCH_CONVERT);
		break;
	}

	return 0;
}
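
/*
 * Formatter result convention, as used by xfs_bulkstat() below: a
 * formatter sets *stat to BULKSTAT_RV_NOTHING when it copied nothing
 * for this inode (the walk skips it), BULKSTAT_RV_DIDONE when it
 * filled in a buffer, or BULKSTAT_RV_GIVEUP when the whole walk
 * should be aborted with the formatter's error.
 */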

/*
 * Return stat information for one inode.
 * Return 0 if ok, else errno.
 */
int					/* error status */
xfs_bulkstat_one(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	void		*private_data,	/* my private data */
	xfs_daddr_t	bno,		/* starting bno of inode cluster */
	int		*ubused,	/* bytes used by me */
	void		*dibuff,	/* on-disk inode buffer */
	int		*stat)		/* BULKSTAT_RV_... */
{
	xfs_bstat_t	*buf;		/* return buffer */
	int		error = 0;	/* error value */
	xfs_dinode_t	*dip;		/* on-disk inode pointer */

	dip = (xfs_dinode_t *)dibuff;

	if (!buffer || ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
	    (XFS_SB_VERSION_HASQUOTA(&mp->m_sb) &&
	     (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino))) {
		*stat = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}
	if (ubsize < sizeof(*buf)) {
		*stat = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(ENOMEM);
	}

	buf = kmem_alloc(sizeof(*buf), KM_SLEEP);

	if (dip == NULL) {
		/*
		 * We're not being passed a pointer to a dinode.  This happens
		 * if BULKSTAT_FG_IGET is selected.  Do the iget.
		 */
		error = xfs_bulkstat_one_iget(mp, ino, bno, buf, stat);
		if (error)
			goto out_free;
	} else {
		xfs_bulkstat_one_dinode(mp, ino, dip, buf);
	}

	if (copy_to_user(buffer, buf, sizeof(*buf))) {
		*stat = BULKSTAT_RV_NOTHING;
		error = EFAULT;
		goto out_free;
	}

	*stat = BULKSTAT_RV_DIDONE;
	if (ubused)
		*ubused = sizeof(*buf);

 out_free:
	kmem_free(buf, sizeof(*buf));
	return error;
}

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	*lastinop,	/* last inode returned */
	int		*ubcountp,	/* size of buffer/count returned */
	bulkstat_one_pf	formatter,	/* func that'd fill a single buf */
	void		*private_data,	/* private data for formatter */
	size_t		statstruct_size, /* sizeof struct filling */
	char		__user *ubuffer, /* buffer with inode stats */
	int		flags,		/* defined in xfs_itable.h */
	int		*done)		/* 1 if there are no more stats to get */
{
	xfs_agblock_t	agbno = 0;	/* allocation group block number */
	xfs_buf_t	*agbp;		/* agi header buffer */
	xfs_agi_t	*agi;		/* agi header data */
	xfs_agino_t	agino;		/* inode # in allocation group */
	xfs_agnumber_t	agno;		/* allocation group number */
	xfs_daddr_t	bno;		/* inode cluster start daddr */
	int		chunkidx;	/* current index into inode chunk */
	int		clustidx;	/* current index into inode cluster */
	xfs_btree_cur_t	*cur;		/* btree cursor for ialloc btree */
	int		end_of_ag;	/* set if we've seen the ag end */
	int		error;		/* error code */
	int		fmterror;	/* bulkstat formatter result */
	__int32_t	gcnt;		/* current btree rec's count */
	xfs_inofree_t	gfree;		/* current btree rec's free mask */
	xfs_agino_t	gino;		/* current btree rec's start inode */
	int		i;		/* loop index */
	int		icount;		/* count of inodes good in irbuf */
	xfs_ino_t	ino;		/* inode number (filesystem) */
	xfs_inobt_rec_t	*irbp;		/* current irec buffer pointer */
	xfs_inobt_rec_t	*irbuf;		/* start of irec buffer */
	xfs_inobt_rec_t	*irbufend;	/* end of good irec buffer entries */
	xfs_ino_t	lastino = 0;	/* last inode number returned */
	int		nbcluster;	/* # of blocks in a cluster */
	int		nicluster;	/* # of inodes in a cluster */
	int		nimask;		/* mask for inode clusters */
	int		nirbuf;		/* size of irbuf */
	int		rval;		/* return value error code */
	int		tmp;		/* result value from btree calls */
	int		ubcount;	/* size of user's buffer */
	int		ubleft;		/* bytes left in user's buffer */
	char		__user *ubufp;	/* pointer into user's buffer */
	int		ubelem;		/* spaces used in user's buffer */
	int		ubused;		/* bytes used by formatter */
	xfs_buf_t	*bp;		/* ptr to on-disk inode cluster buf */
	xfs_dinode_t	*dip;		/* ptr into bp for specific inode */
	xfs_inode_t	*ip;		/* ptr to in-core inode struct */

	/*
	 * Get the last inode value, see if there's nothing to do.
	 */
	ino = (xfs_ino_t)*lastinop;
	dip = NULL;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	if (agno >= mp->m_sb.sb_agcount ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}
	ubcount = *ubcountp; /* statstruct's */
	ubleft = ubcount * statstruct_size; /* bytes */
	*ubcountp = ubelem = 0;
	*done = 0;
	fmterror = 0;
	ubufp = ubuffer;
	nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
		mp->m_sb.sb_inopblock :
		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
	nimask = ~(nicluster - 1);
	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
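	/*
	 * Worked example of the cluster geometry above (hypothetical
	 * numbers, for illustration only): with 4k blocks, 256-byte
	 * inodes and an 8k inode cluster, sb_blocksize (4096) is less
	 * than XFS_INODE_CLUSTER_SIZE (8192), so nicluster =
	 * 8192 >> 8 = 32 inodes per cluster, nimask = ~31, and
	 * nbcluster = 32 >> sb_inopblog = 32 >> 4 = 2 blocks.
	 */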
	/*
	 * Lock down the user's buffer.  If a buffer was not sent, as in
	 * the case where the disk quota code calls here, we skip this.
	 */
	if (ubuffer &&
	    (error = useracc(ubuffer, ubcount * statstruct_size,
			(B_READ|B_PHYS), NULL))) {
		return error;
	}
	/*
	 * Allocate a page-sized buffer for inode btree records.
	 * We could try allocating something smaller, but for normal
	 * calls we'll always (potentially) need the whole page.
	 */
	irbuf = kmem_alloc(NBPC, KM_SLEEP);
	nirbuf = NBPC / sizeof(*irbuf);
	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
	rval = 0;
	while (ubleft >= statstruct_size && agno < mp->m_sb.sb_agcount) {
		bp = NULL;
		down_read(&mp->m_peraglock);
		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		up_read(&mp->m_peraglock);
		if (error) {
			/*
			 * Skip this allocation group and go to the next one.
			 */
			agno++;
			agino = 0;
			continue;
		}
		agi = XFS_BUF_TO_AGI(agbp);
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_btree_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO,
			(xfs_inode_t *)0, 0);
		irbp = irbuf;
		irbufend = irbuf + nirbuf;
		end_of_ag = 0;
		/*
		 * If we're returning in the middle of an allocation group,
		 * we need to get the remainder of the chunk we're in.
		 */
		if (agino > 0) {
			/*
			 * Lookup the inode chunk that this inode lives in.
			 */
			error = xfs_inobt_lookup_le(cur, agino, 0, 0, &tmp);
			if (!error &&		/* no I/O error */
			    tmp &&		/* lookup succeeded */
					/* got the record, should always work */
			    !(error = xfs_inobt_get_rec(cur, &gino, &gcnt,
				    &gfree, &i)) &&
			    i == 1 &&
					/* this is the right chunk */
			    agino < gino + XFS_INODES_PER_CHUNK &&
					/* lastino was not last in chunk */
			    (chunkidx = agino - gino + 1) <
				    XFS_INODES_PER_CHUNK &&
					/* there are some left allocated */
			    XFS_INOBT_MASKN(chunkidx,
				    XFS_INODES_PER_CHUNK - chunkidx) & ~gfree) {
				/*
				 * Grab the chunk record.  Mark all the
				 * uninteresting inodes (because they're
				 * before our start point) free.
				 */
				for (i = 0; i < chunkidx; i++) {
					if (XFS_INOBT_MASK(i) & ~gfree)
						gcnt++;
				}
				gfree |= XFS_INOBT_MASKN(0, chunkidx);
				INT_SET(irbp->ir_startino, ARCH_CONVERT, gino);
				INT_SET(irbp->ir_freecount, ARCH_CONVERT, gcnt);
				INT_SET(irbp->ir_free, ARCH_CONVERT, gfree);
				irbp++;
				agino = gino + XFS_INODES_PER_CHUNK;
				icount = XFS_INODES_PER_CHUNK - gcnt;
			} else {
				/*
				 * If any of those tests failed, bump the
				 * inode number (just in case).
				 */
				agino++;
				icount = 0;
			}
			/*
			 * In any case, increment to the next record.
			 */
			if (!error)
				error = xfs_inobt_increment(cur, 0, &tmp);
		} else {
			/*
			 * Start of ag.  Lookup the first inode chunk.
			 */
			error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &tmp);
			icount = 0;
		}
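		/*
		 * Illustration of the partial-chunk test above
		 * (XFS_INODES_PER_CHUNK is 64): if lastino sat at chunk
		 * offset 4, then chunkidx = 5 and XFS_INOBT_MASKN(5, 59)
		 * covers chunk offsets 5..63; ANDed with ~gfree it is
		 * nonzero exactly when at least one of the remaining
		 * inodes is allocated.
		 */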
		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			/*
			 * Loop as long as we're unable to read the
			 * inode btree.
			 */
			while (error) {
				agino += XFS_INODES_PER_CHUNK;
				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
						be32_to_cpu(agi->agi_length))
					break;
				error = xfs_inobt_lookup_ge(cur, agino, 0, 0,
						&tmp);
			}
			/*
			 * If we ran off the end of the ag either with an
			 * error, or the normal way, set end and stop
			 * collecting.
			 */
			if (error ||
			    (error = xfs_inobt_get_rec(cur, &gino, &gcnt,
				    &gfree, &i)) ||
			    i == 0) {
				end_of_ag = 1;
				break;
			}
			/*
			 * If this chunk has any allocated inodes, save it.
			 */
			if (gcnt < XFS_INODES_PER_CHUNK) {
				INT_SET(irbp->ir_startino, ARCH_CONVERT, gino);
				INT_SET(irbp->ir_freecount, ARCH_CONVERT, gcnt);
				INT_SET(irbp->ir_free, ARCH_CONVERT, gfree);
				irbp++;
				icount += XFS_INODES_PER_CHUNK - gcnt;
			}
			/*
			 * Set agino to after this chunk and bump the cursor.
			 */
			agino = gino + XFS_INODES_PER_CHUNK;
			error = xfs_inobt_increment(cur, 0, &tmp);
		}
		/*
		 * Drop the btree buffers and the agi buffer.
		 * We can't hold any of the locks these represent
		 * when calling iget.
		 */
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		/*
		 * Now format all the good inodes into the user's buffer.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && ubleft >= statstruct_size; irbp++) {
			/*
			 * Read-ahead the next chunk's worth of inodes.
			 */
			if (&irbp[1] < irbufend) {
				/*
				 * Loop over all clusters in the next chunk.
				 * Do a readahead if there are any allocated
				 * inodes in that cluster.
				 */
				for (agbno = XFS_AGINO_TO_AGBNO(mp,
						INT_GET(irbp[1].ir_startino,
							ARCH_CONVERT)),
				     chunkidx = 0;
				     chunkidx < XFS_INODES_PER_CHUNK;
				     chunkidx += nicluster,
				     agbno += nbcluster) {
					if (XFS_INOBT_MASKN(chunkidx,
							nicluster) &
					    ~(INT_GET(irbp[1].ir_free,
							ARCH_CONVERT)))
						xfs_btree_reada_bufs(mp, agno,
							agbno, nbcluster);
				}
			}
			/*
			 * Now process this chunk of inodes.
			 */
			for (agino = INT_GET(irbp->ir_startino, ARCH_CONVERT),
			     chunkidx = 0, clustidx = 0;
			     ubleft > 0 &&
			     INT_GET(irbp->ir_freecount, ARCH_CONVERT) <
					XFS_INODES_PER_CHUNK;
			     chunkidx++, clustidx++, agino++) {
				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
				/*
				 * Recompute agbno if this is the
				 * first inode of the cluster.
				 *
				 * Careful with clustidx.  There can be
				 * multiple clusters per chunk, a single
				 * cluster per chunk or a cluster that has
				 * inodes represented from several different
				 * chunks (if blocksize is large).
				 *
				 * Because of this, the starting clustidx is
				 * initialized to zero in this loop but must
				 * later be reset after reading in the cluster
				 * buffer.
				 */
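				/*
				 * For illustration, with the hypothetical
				 * geometry from the example above (nicluster
				 * = 32, 64 inodes per chunk): chunk offsets
				 * 0 and 32 start new clusters, and
				 * (chunkidx & nimask) >> sb_inopblog turns
				 * the chunk offset into a block offset
				 * within the chunk.
				 */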
				if ((chunkidx & (nicluster - 1)) == 0) {
					agbno = XFS_AGINO_TO_AGBNO(mp,
							INT_GET(irbp->ir_startino,
								ARCH_CONVERT)) +
						((chunkidx & nimask) >>
						 mp->m_sb.sb_inopblog);

					if (flags & BULKSTAT_FG_QUICK) {
						ino = XFS_AGINO_TO_INO(mp, agno,
								       agino);
						bno = XFS_AGB_TO_DADDR(mp, agno,
								       agbno);

						/*
						 * Get the inode cluster buffer
						 */
						ASSERT(xfs_inode_zone != NULL);
						ip = kmem_zone_zalloc(xfs_inode_zone,
								      KM_SLEEP);
						ip->i_ino = ino;
						ip->i_mount = mp;
						if (bp)
							xfs_buf_relse(bp);
						error = xfs_itobp(mp, NULL, ip,
								&dip, &bp, bno);
						if (!error)
							clustidx = ip->i_boffset /
								mp->m_sb.sb_inodesize;
						kmem_zone_free(xfs_inode_zone, ip);
						if (XFS_TEST_ERROR(error != 0,
								mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK,
								XFS_RANDOM_BULKSTAT_READ_CHUNK)) {
							bp = NULL;
							break;
						}
					}
				}
				/*
				 * Skip if this inode is free.
				 */
				if (XFS_INOBT_MASK(chunkidx) &
				    INT_GET(irbp->ir_free, ARCH_CONVERT))
					continue;
				/*
				 * Count used inodes as free so we can tell
				 * when the chunk is used up.
				 */
				INT_MOD(irbp->ir_freecount, ARCH_CONVERT, +1);
				ino = XFS_AGINO_TO_INO(mp, agno, agino);
				bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
				if (flags & BULKSTAT_FG_QUICK) {
					dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					      (clustidx << mp->m_sb.sb_inodelog));

					if (INT_GET(dip->di_core.di_magic,
							ARCH_CONVERT)
						    != XFS_DINODE_MAGIC ||
					    !XFS_DINODE_GOOD_VERSION(
						    INT_GET(dip->di_core.di_version,
							    ARCH_CONVERT)))
						continue;
				}

				/*
				 * Get the inode and fill in a single buffer.
				 * BULKSTAT_FG_QUICK uses dip to fill it in.
				 * BULKSTAT_FG_IGET uses igets.
				 * See: xfs_bulkstat_one & xfs_dm_bulkstat_one.
				 * This is also used to count inodes/blks, etc
				 * in xfs_qm_quotacheck.
				 */
				ubused = statstruct_size;
				error = formatter(mp, ino, ubufp,
						ubleft, private_data,
						bno, &ubused, dip, &fmterror);
				if (fmterror == BULKSTAT_RV_NOTHING) {
					if (error == ENOMEM)
						ubleft = 0;
					continue;
				}
				if (fmterror == BULKSTAT_RV_GIVEUP) {
					ubleft = 0;
					ASSERT(error);
					rval = error;
					break;
				}
				if (ubufp)
					ubufp += ubused;
				ubleft -= ubused;
				ubelem++;
				lastino = ino;
			}
		}

		if (bp)
			xfs_buf_relse(bp);

		/*
		 * Set up for the next loop iteration.
		 */
		if (ubleft > 0) {
			if (end_of_ag) {
				agno++;
				agino = 0;
			} else
				agino = XFS_INO_TO_AGINO(mp, lastino);
		} else
			break;
	}
	/*
	 * Done: we're either out of filesystem or out of space to put
	 * the data.
	 */
	kmem_free(irbuf, NBPC);
	if (ubuffer)
		unuseracc(ubuffer, ubcount * statstruct_size, (B_READ|B_PHYS));
	*ubcountp = ubelem;
	if (agno >= mp->m_sb.sb_agcount) {
		/*
		 * If we ran out of filesystem, mark lastino as off
		 * the end of the filesystem, so the next call
		 * will return immediately.
		 */
		*lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
		*done = 1;
	} else
		*lastinop = (xfs_ino_t)lastino;

	return rval;
}
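
/*
 * Illustrative caller loop for xfs_bulkstat() (a sketch, not part of
 * this file; the real callers include the bulkstat ioctl paths and
 * the quota code):
 *
 *	xfs_ino_t	lastino = 0;
 *	int		count, done = 0;
 *
 *	while (!done) {
 *		count = <number of xfs_bstat_t's that fit in ubuffer>;
 *		if (xfs_bulkstat(mp, &lastino, &count, xfs_bulkstat_one,
 *				NULL, sizeof(xfs_bstat_t), ubuffer,
 *				BULKSTAT_FG_IGET, &done))
 *			break;
 *		<consume "count" records from ubuffer>;
 *	}
 */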

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 * Special case for non-sequential one inode bulkstat.
 */
int					/* error status */
xfs_bulkstat_single(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	*lastinop,	/* inode to return */
	char		__user *buffer,	/* buffer with inode stats */
	int		*done)		/* 1 if there are no more stats to get */
{
	int		count;	/* count value for bulkstat call */
	int		error;	/* return value */
	xfs_ino_t	ino;	/* filesystem inode number */
	int		res;	/* result from bs1 */

	/*
	 * Note that requesting valid inode numbers which are not allocated
	 * to inodes will most likely cause xfs_itobp to generate warning
	 * messages about bad magic numbers.  This is ok.  The fact that
	 * the inode isn't actually an inode is handled by the
	 * error check below.  Done this way to make the usual case faster
	 * at the expense of the error case.
	 */

	ino = (xfs_ino_t)*lastinop;
	error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t),
				 NULL, 0, NULL, NULL, &res);
	if (error) {
		/*
		 * Special case way failed, do it the "long" way
		 * to see if that works.
		 */
		(*lastinop)--;
		count = 1;
		if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
				NULL, sizeof(xfs_bstat_t), buffer,
				BULKSTAT_FG_IGET, done))
			return error;
		if (count == 0 || (xfs_ino_t)*lastinop != ino)
			return error == EFSCORRUPTED ?
				XFS_ERROR(EINVAL) : error;
		else
			return 0;
	}
	*done = 0;
	return 0;
}
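
/*
 * Illustration of what xfs_inumbers() below returns (hypothetical
 * numbers): for an inode chunk starting at inode 64 in which only the
 * inodes at chunk offsets 0, 1 and 5 are allocated, the xfs_inogrp_t
 * entry has xi_startino = 64, xi_alloccount = 3 and
 * xi_allocmask = 0x23.
 */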

/*
 * Return inode number table for the filesystem.
 */
int					/* error status */
xfs_inumbers(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	*lastino,	/* last inode returned */
	int		*count,		/* size of buffer/count returned */
	xfs_inogrp_t	__user *ubuffer)/* buffer with inode descriptions */
{
	xfs_buf_t	*agbp;
	xfs_agino_t	agino;
	xfs_agnumber_t	agno;
	int		bcount;
	xfs_inogrp_t	*buffer;
	int		bufidx;
	xfs_btree_cur_t	*cur;
	int		error;
	__int32_t	gcnt;
	xfs_inofree_t	gfree;
	xfs_agino_t	gino;
	int		i;
	xfs_ino_t	ino;
	int		left;
	int		tmp;

	ino = (xfs_ino_t)*lastino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	left = *count;
	*count = 0;
	bcount = MIN(left, (int)(NBPP / sizeof(*buffer)));
	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
	error = bufidx = 0;
	cur = NULL;
	agbp = NULL;
	while (left > 0 && agno < mp->m_sb.sb_agcount) {
		if (agbp == NULL) {
			down_read(&mp->m_peraglock);
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			up_read(&mp->m_peraglock);
			if (error) {
				/*
				 * If we can't read the AGI of this ag,
				 * then just skip to the next one.
				 */
				ASSERT(cur == NULL);
				agbp = NULL;
				agno++;
				agino = 0;
				continue;
			}
			cur = xfs_btree_init_cursor(mp, NULL, agbp, agno,
				XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
			error = xfs_inobt_lookup_ge(cur, agino, 0, 0, &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * Move up to the last inode in the current
				 * chunk.  The lookup_ge will always get
				 * us the first inode in the next chunk.
				 */
				agino += XFS_INODES_PER_CHUNK - 1;
				continue;
			}
		}
		if ((error = xfs_inobt_get_rec(cur, &gino, &gcnt, &gfree,
				&i)) ||
		    i == 0) {
			xfs_buf_relse(agbp);
			agbp = NULL;
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			cur = NULL;
			agno++;
			agino = 0;
			continue;
		}
		agino = gino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino = XFS_AGINO_TO_INO(mp, agno, gino);
		buffer[bufidx].xi_alloccount = XFS_INODES_PER_CHUNK - gcnt;
		buffer[bufidx].xi_allocmask = ~gfree;
		bufidx++;
		left--;
		if (bufidx == bcount) {
			if (copy_to_user(ubuffer, buffer,
					bufidx * sizeof(*buffer))) {
				error = XFS_ERROR(EFAULT);
				break;
			}
			ubuffer += bufidx;
			*count += bufidx;
			bufidx = 0;
		}
		if (left) {
			error = xfs_inobt_increment(cur, 0, &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * The agino value has already been bumped.
				 * Just try to skip up to it.
				 */
				agino += XFS_INODES_PER_CHUNK;
				continue;
			}
		}
	}
	if (!error) {
		if (bufidx) {
			if (copy_to_user(ubuffer, buffer,
					bufidx * sizeof(*buffer)))
				error = XFS_ERROR(EFAULT);
			else
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}
	kmem_free(buffer, bcount * sizeof(*buffer));
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);
	return error;
}