// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

/*
 * Return stat information for one inode.
 * Return 0 if ok, else negative errno.  *stat reports BULKSTAT_RV_DIDONE on
 * success and BULKSTAT_RV_NOTHING when no data was produced for this inode.
 */
int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	xfs_ino_t		ino,		/* inode to get data for */
	void __user		*buffer,	/* buffer to place output in */
	int			ubsize,		/* size of buffer */
	bulkstat_one_fmt_pf	formatter,	/* formatter, copy to user */
	int			*ubused,	/* bytes used by me */
	int			*stat)		/* BULKSTAT_RV_... */
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct inode		*inode;
	struct xfs_bstat	*buf;		/* return buffer */
	int			error = 0;	/* error value */

	*stat = BULKSTAT_RV_NOTHING;

	if (!buffer || xfs_internal_inum(mp, ino))
		return -EINVAL;

	/* KM_MAYFAIL: report allocation failure rather than retry forever. */
	buf = kmem_zalloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
	if (!buf)
		return -ENOMEM;

	/*
	 * The inode number may come straight from userspace, so look it up
	 * UNTRUSTED (verified against the inode btrees), and use DONTCACHE so
	 * this single-use inode doesn't linger in the inode cache.
	 */
	error = xfs_iget(mp, NULL, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error)
		goto out_free;

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);
	inode = VFS_I(ip);

	dic = &ip->i_d;

	/*
	 * xfs_iget returns the following without needing further change.
	 */
	buf->bs_projid_lo = dic->di_projid_lo;
	buf->bs_projid_hi = dic->di_projid_hi;
	buf->bs_ino = ino;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;

	/* Link count, timestamps and mode come from the VFS inode. */
	buf->bs_nlink = inode->i_nlink;
	buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
	buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
	buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
	buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
	buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
	buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;
	buf->bs_gen = inode->i_generation;
	buf->bs_mode = inode->i_mode;

	buf->bs_xflags = xfs_ip2xflags(ip);
	/* di_extsize is in filesystem blocks; userspace expects bytes. */
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);

	/* The CoW extent size hint only exists on v3 inodes. */
	if (dic->di_version == 3) {
		if (dic->di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
			buf->bs_cowextsize = dic->di_cowextsize <<
					mp->m_sb.sb_blocklog;
	}

	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		/* Special file: the data fork holds the device number. */
		buf->bs_rdev = sysv_encode_dev(inode->i_rdev);
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
		/* Inline data lives inside the inode: no blocks consumed. */
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		/* Include delalloc reservations alongside mapped blocks. */
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	/*
	 * Drop the inode lock and reference before the formatter copies to
	 * userspace: the copy may fault and sleep.
	 */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);

	error = formatter(buffer, ubsize, ubused, buf);
	if (!error)
		*stat = BULKSTAT_RV_DIDONE;

 out_free:
	kmem_free(buf);
	return error;
}

/*
 * Default formatter: copy one xfs_bstat structure straight to the user
 * buffer.  Return 0 on success or negative errno.
 * (Comment previously claimed "positive error"; the code returns -ENOMEM /
 * -EFAULT, i.e. negative errnos.)
 */
STATIC int
xfs_bulkstat_one_fmt(
	void			__user *ubuffer,
	int			ubsize,
	int			*ubused,
	const xfs_bstat_t	*buffer)
{
	if (ubsize < sizeof(*buffer))
		return -ENOMEM;
	if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
		return -EFAULT;
	if (ubused)
		*ubused = sizeof(*buffer);
	return 0;
}

/*
 * Return stat information for one inode using the default (copy-to-user)
 * formatter.  Thin wrapper around xfs_bulkstat_one_int().
 */
int
xfs_bulkstat_one(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void __user	*buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	int		*ubused,	/* bytes used by me */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt, ubused, stat);
}

/*
 * Loop over all clusters in a chunk for a given incore inode allocation btree
 * record.  Do a readahead if there are any allocated inodes in that cluster.
 */
STATIC void
xfs_bulkstat_ichunk_ra(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*irec)
{
	xfs_agblock_t			agbno;
	struct blk_plug			plug;
	int				blks_per_cluster;
	int				inodes_per_cluster;
	int				i;	/* inode chunk index */

	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;

	/* Plug the queue so the readahead I/O gets submitted as one batch. */
	blk_start_plug(&plug);
	for (i = 0; i < XFS_INODES_PER_CHUNK;
	     i += inodes_per_cluster, agbno += blks_per_cluster) {
		/* Skip clusters whose inodes are all free. */
		if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) {
			xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster,
					     &xfs_inode_buf_ops);
		}
	}
	blk_finish_plug(&plug);
}

/*
 * Lookup the inode chunk that the given inode lives in and then get the record
 * if we found the chunk. If the inode was not the last in the chunk and there
 * are some left allocated, update the data for the pointed-to record as well as
 * return the count of grabbed inodes.
194 */ 195 STATIC int 196 xfs_bulkstat_grab_ichunk( 197 struct xfs_btree_cur *cur, /* btree cursor */ 198 xfs_agino_t agino, /* starting inode of chunk */ 199 int *icount,/* return # of inodes grabbed */ 200 struct xfs_inobt_rec_incore *irec) /* btree record */ 201 { 202 int idx; /* index into inode chunk */ 203 int stat; 204 int error = 0; 205 206 /* Lookup the inode chunk that this inode lives in */ 207 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat); 208 if (error) 209 return error; 210 if (!stat) { 211 *icount = 0; 212 return error; 213 } 214 215 /* Get the record, should always work */ 216 error = xfs_inobt_get_rec(cur, irec, &stat); 217 if (error) 218 return error; 219 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1); 220 221 /* Check if the record contains the inode in request */ 222 if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) { 223 *icount = 0; 224 return 0; 225 } 226 227 idx = agino - irec->ir_startino + 1; 228 if (idx < XFS_INODES_PER_CHUNK && 229 (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) { 230 int i; 231 232 /* We got a right chunk with some left inodes allocated at it. 233 * Grab the chunk record. Mark all the uninteresting inodes 234 * free -- because they're before our start point. 235 */ 236 for (i = 0; i < idx; i++) { 237 if (XFS_INOBT_MASK(i) & ~irec->ir_free) 238 irec->ir_freecount++; 239 } 240 241 irec->ir_free |= xfs_inobt_maskn(0, idx); 242 *icount = irec->ir_count - irec->ir_freecount; 243 } 244 245 return 0; 246 } 247 248 #define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size) 249 250 struct xfs_bulkstat_agichunk { 251 char __user **ac_ubuffer;/* pointer into user's buffer */ 252 int ac_ubleft; /* bytes left in user's buffer */ 253 int ac_ubelem; /* spaces used in user's buffer */ 254 }; 255 256 /* 257 * Process inodes in chunk with a pointer to a formatter function 258 * that will iget the inode and fill in the appropriate structure. 
259 */ 260 static int 261 xfs_bulkstat_ag_ichunk( 262 struct xfs_mount *mp, 263 xfs_agnumber_t agno, 264 struct xfs_inobt_rec_incore *irbp, 265 bulkstat_one_pf formatter, 266 size_t statstruct_size, 267 struct xfs_bulkstat_agichunk *acp, 268 xfs_agino_t *last_agino) 269 { 270 char __user **ubufp = acp->ac_ubuffer; 271 int chunkidx; 272 int error = 0; 273 xfs_agino_t agino = irbp->ir_startino; 274 275 for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK; 276 chunkidx++, agino++) { 277 int fmterror; 278 int ubused; 279 280 /* inode won't fit in buffer, we are done */ 281 if (acp->ac_ubleft < statstruct_size) 282 break; 283 284 /* Skip if this inode is free */ 285 if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) 286 continue; 287 288 /* Get the inode and fill in a single buffer */ 289 ubused = statstruct_size; 290 error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino), 291 *ubufp, acp->ac_ubleft, &ubused, &fmterror); 292 293 if (fmterror == BULKSTAT_RV_GIVEUP || 294 (error && error != -ENOENT && error != -EINVAL)) { 295 acp->ac_ubleft = 0; 296 ASSERT(error); 297 break; 298 } 299 300 /* be careful not to leak error if at end of chunk */ 301 if (fmterror == BULKSTAT_RV_NOTHING || error) { 302 error = 0; 303 continue; 304 } 305 306 *ubufp += ubused; 307 acp->ac_ubleft -= ubused; 308 acp->ac_ubelem++; 309 } 310 311 /* 312 * Post-update *last_agino. At this point, agino will always point one 313 * inode past the last inode we processed successfully. Hence we 314 * substract that inode when setting the *last_agino cursor so that we 315 * return the correct cookie to userspace. On the next bulkstat call, 316 * the inode under the lastino cookie will be skipped as we have already 317 * processed it here. 318 */ 319 *last_agino = agino - 1; 320 321 return error; 322 } 323 324 /* 325 * Return stat information in bulk (by-inode) for the filesystem. 
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* last inode returned */
	int			*ubcountp, /* size of buffer/count returned */
	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
	size_t			statstruct_size, /* sizeof struct filling */
	char			__user *ubuffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
{
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_agino_t		agino;	/* inode # in allocation group */
	xfs_agnumber_t		agno;	/* allocation group number */
	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	int			nirbuf;	/* size of irbuf */
	int			ubcount; /* size of user's buffer */
	struct xfs_bulkstat_agichunk ac;
	int			error = 0;

	/*
	 * Get the last inode value, see if there's nothing to do.  Reject a
	 * cookie that decodes to an AG past the end of the filesystem or that
	 * doesn't round-trip through the agno/agino split (corrupt cookie).
	 */
	agno = XFS_INO_TO_AGNO(mp, *lastinop);
	agino = XFS_INO_TO_AGINO(mp, *lastinop);
	if (agno >= mp->m_sb.sb_agcount ||
	    *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}

	ubcount = *ubcountp; /* statstruct's */
	ac.ac_ubuffer = &ubuffer;
	ac.ac_ubleft = ubcount * statstruct_size; /* bytes */;
	ac.ac_ubelem = 0;

	*ubcountp = 0;
	*done = 0;

	/* Scratch buffer for batching inobt records outside btree locks. */
	irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP);
	if (!irbuf)
		return -ENOMEM;
	nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 * Each iteration has two phases: gather inobt records into irbuf
	 * while holding the AGI/btree buffers, then drop those buffers and
	 * format the gathered inodes into the user's buffer.
	 */
	while (agno < mp->m_sb.sb_agcount) {
		struct xfs_inobt_rec_incore *irbp = irbuf;
		struct xfs_inobt_rec_incore *irbufend = irbuf + nirbuf;
		bool	end_of_ag = false;
		int	icount = 0;
		int	stat;

		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		if (error)
			break;
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
					    XFS_BTNUM_INO);
		if (agino > 0) {
			/*
			 * In the middle of an allocation group, we need to get
			 * the remainder of the chunk we're in.
			 */
			struct xfs_inobt_rec_incore	r;

			error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
			if (error)
				goto del_cursor;
			if (icount) {
				irbp->ir_startino = r.ir_startino;
				irbp->ir_holemask = r.ir_holemask;
				irbp->ir_count = r.ir_count;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
			}
			/* Increment to the next record */
			error = xfs_btree_increment(cur, 0, &stat);
		} else {
			/* Start of ag.  Lookup the first inode chunk */
			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
		}
		if (error || stat == 0) {
			end_of_ag = true;
			goto del_cursor;
		}

		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			struct xfs_inobt_rec_incore	r;

			error = xfs_inobt_get_rec(cur, &r, &stat);
			if (error || stat == 0) {
				end_of_ag = true;
				goto del_cursor;
			}

			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (r.ir_freecount < r.ir_count) {
				xfs_bulkstat_ichunk_ra(mp, agno, &r);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_holemask = r.ir_holemask;
				irbp->ir_count = r.ir_count;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				icount += r.ir_count - r.ir_freecount;
			}
			error = xfs_btree_increment(cur, 0, &stat);
			if (error || stat == 0) {
				end_of_ag = true;
				goto del_cursor;
			}
			cond_resched();
		}

		/*
		 * Drop the btree buffers and the agi buffer as we can't hold any
		 * of the locks these represent when calling iget. If there is a
		 * pending error, then we are done.
		 */
del_cursor:
		xfs_btree_del_cursor(cur, error ?
					  XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		if (error)
			break;
		/*
		 * Now format all the good inodes into the user's buffer. The
		 * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
		 * for the next loop iteration.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && ac.ac_ubleft >= statstruct_size;
		     irbp++) {
			error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
					formatter, statstruct_size, &ac,
					&agino);
			if (error)
				break;

			cond_resched();
		}

		/*
		 * If we've run out of space or had a formatting error, we
		 * are now done
		 */
		if (ac.ac_ubleft < statstruct_size || error)
			break;

		/* Exhausted this AG: restart the walk at the next one. */
		if (end_of_ag) {
			agno++;
			agino = 0;
		}
	}
	/*
	 * Done, we're either out of filesystem or space to put the data.
	 */
	kmem_free(irbuf);
	*ubcountp = ac.ac_ubelem;

	/*
	 * We found some inodes, so clear the error status and return them.
	 * The lastino pointer will point directly at the inode that triggered
	 * any error that occurred, so on the next call the error will be
	 * triggered again and propagated to userspace as there will be no
	 * formatted inodes in the buffer.
	 */
	if (ac.ac_ubelem)
		error = 0;

	/*
	 * If we ran out of filesystem, lastino will point off the end of
	 * the filesystem so the next call will return immediately.
	 */
	*lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
	if (agno >= mp->m_sb.sb_agcount)
		*done = 1;

	return error;
}

/*
 * Copy a batch of xfs_inogrp records to userspace, reporting the number of
 * bytes written.  Returns 0 on success or negative errno.
 */
int
xfs_inumbers_fmt(
	void			__user *ubuffer, /* buffer to write to */
	const struct xfs_inogrp	*buffer,	/* buffer to read from */
	long			count,		/* # of elements to read */
	long			*written)	/* # of bytes written */
{
	if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
		return -EFAULT;
	*written = count * sizeof(*buffer);
	return 0;
}

/*
 * Return inode number table for the filesystem.  One xfs_inogrp per inode
 * chunk; *lastino is a resume cookie analogous to bulkstat's.
 */
int					/* error status */
xfs_inumbers(
	struct xfs_mount	*mp,/* mount point for filesystem */
	xfs_ino_t		*lastino,/* last inode returned */
	int			*count,/* size of buffer/count returned */
	void			__user *ubuffer,/* buffer with inode descriptions */
	inumbers_fmt_pf		formatter)
{
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, *lastino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, *lastino);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_buf		*agbp = NULL;
	struct xfs_inogrp	*buffer;
	int			bcount;
	int			left = *count;
	int			bufidx = 0;
	int			error = 0;

	*count = 0;
	/* Reject corrupt or past-the-end cookies, as bulkstat does. */
	if (agno >= mp->m_sb.sb_agcount ||
	    *lastino != XFS_AGINO_TO_INO(mp, agno, agino))
		return error;

	/* Batch records into a kernel buffer, flushed to userspace when full. */
	bcount = min(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
	do {
		struct xfs_inobt_rec_incore	r;
		int				stat;

		/* No cursor yet (first pass, or we just finished an AG). */
		if (!agbp) {
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			if (error)
				break;

			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
						    XFS_BTNUM_INO);
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
						 &stat);
			if (error)
				break;
			if (!stat)
				goto next_ag;
		}

		error = xfs_inobt_get_rec(cur, &r, &stat);
		if (error)
			break;
		if (!stat)
			goto next_ag;

		/* Advance the cookie to the last inode of this chunk. */
		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino =
			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
		buffer[bufidx].xi_alloccount = r.ir_count - r.ir_freecount;
		buffer[bufidx].xi_allocmask = ~r.ir_free;
		/* Kernel batch full: push it out to the user's buffer. */
		if (++bufidx == bcount) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (error)
				break;
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (!--left)
			break;

		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			break;
		if (stat)
			continue;

		/* Ran off the end of this AG: release and move to the next. */
next_ag:
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		cur = NULL;
		xfs_buf_relse(agbp);
		agbp = NULL;
		agino = 0;
		agno++;
	} while (agno < mp->m_sb.sb_agcount);

	/* Flush any partial batch and publish the resume cookie on success. */
	if (!error) {
		if (bufidx) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (!error)
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}

	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);

	return error;
}