// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_iwalk.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_icache.h"
#include "xfs_health.h"
#include "xfs_trans.h"

/*
 * Bulk Stat
 * =========
 *
 * Use the inode walking functions to fill out struct xfs_bulkstat for every
 * allocated inode, then pass the stat information to some externally provided
 * iteration function.
 */

struct xfs_bstat_chunk {
        bulkstat_one_fmt_pf     formatter;
        struct xfs_ibulk        *breq;
        struct xfs_bulkstat     *buf;
};

/*
 * Fill out the bulkstat info for a single inode and report it somewhere.
 *
 * bc->breq->lastino is effectively the inode cursor as we walk through the
 * filesystem.  Therefore, we update it any time we need to move the cursor
 * forward, regardless of whether or not we're sending any bstat information
 * back to userspace.  If the inode is internal metadata or has been freed
 * out from under us, we simply keep going.
 *
 * However, if any other type of error happens we want to stop right where we
 * are so that userspace will call back with the exact number of the bad inode
 * and we can send back an error code.
 *
 * Note that if the formatter tells us there's no space left in the buffer we
 * move the cursor forward and abort the walk.
 */
STATIC int
xfs_bulkstat_one_int(
        struct xfs_mount        *mp,
        struct mnt_idmap        *idmap,
        struct xfs_trans        *tp,
        xfs_ino_t               ino,
        struct xfs_bstat_chunk  *bc)
{
        struct user_namespace   *sb_userns = mp->m_super->s_user_ns;
        struct xfs_inode        *ip;            /* incore inode pointer */
        struct inode            *inode;
        struct xfs_bulkstat     *buf = bc->buf;
        xfs_extnum_t            nextents;
        int                     error = -EINVAL;
        vfsuid_t                vfsuid;
        vfsgid_t                vfsgid;

        if (xfs_internal_inum(mp, ino))
                goto out_advance;

        error = xfs_iget(mp, tp, ino,
                         (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
                         XFS_ILOCK_SHARED, &ip);
        if (error == -ENOENT || error == -EINVAL)
                goto out_advance;
        if (error)
                goto out;

        if (xfs_inode_unlinked_incomplete(ip)) {
                error = xfs_inode_reload_unlinked_bucket(tp, ip);
                if (error) {
                        xfs_iunlock(ip, XFS_ILOCK_SHARED);
                        xfs_irele(ip);
                        return error;
                }
        }

        ASSERT(ip != NULL);
        ASSERT(ip->i_imap.im_blkno != 0);
        inode = VFS_I(ip);
        vfsuid = i_uid_into_vfsuid(idmap, inode);
        vfsgid = i_gid_into_vfsgid(idmap, inode);

        /*
         * xfs_iget returns the following without needing further change.
         */
        buf->bs_projectid = ip->i_projid;
        buf->bs_ino = ino;
        buf->bs_uid = from_kuid(sb_userns, vfsuid_into_kuid(vfsuid));
        buf->bs_gid = from_kgid(sb_userns, vfsgid_into_kgid(vfsgid));
        buf->bs_size = ip->i_disk_size;

        buf->bs_nlink = inode->i_nlink;
        buf->bs_atime = inode->i_atime.tv_sec;
        buf->bs_atime_nsec = inode->i_atime.tv_nsec;
        buf->bs_mtime = inode->i_mtime.tv_sec;
        buf->bs_mtime_nsec = inode->i_mtime.tv_nsec;
        buf->bs_ctime = inode_get_ctime(inode).tv_sec;
        buf->bs_ctime_nsec = inode_get_ctime(inode).tv_nsec;
        buf->bs_gen = inode->i_generation;
        buf->bs_mode = inode->i_mode;

        buf->bs_xflags = xfs_ip2xflags(ip);
        buf->bs_extsize_blks = ip->i_extsize;

        nextents = xfs_ifork_nextents(&ip->i_df);
        if (!(bc->breq->flags & XFS_IBULK_NREXT64))
                buf->bs_extents = min(nextents, XFS_MAX_EXTCNT_DATA_FORK_SMALL);
        else
                buf->bs_extents64 = nextents;

        xfs_bulkstat_health(ip, buf);
        buf->bs_aextents = xfs_ifork_nextents(&ip->i_af);
        buf->bs_forkoff = xfs_inode_fork_boff(ip);
        buf->bs_version = XFS_BULKSTAT_VERSION_V5;

        if (xfs_has_v3inodes(mp)) {
                buf->bs_btime = ip->i_crtime.tv_sec;
                buf->bs_btime_nsec = ip->i_crtime.tv_nsec;
                if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
                        buf->bs_cowextsize_blks = ip->i_cowextsize;
        }

        switch (ip->i_df.if_format) {
        case XFS_DINODE_FMT_DEV:
                buf->bs_rdev = sysv_encode_dev(inode->i_rdev);
                buf->bs_blksize = BLKDEV_IOSIZE;
                buf->bs_blocks = 0;
                break;
        case XFS_DINODE_FMT_LOCAL:
                buf->bs_rdev = 0;
                buf->bs_blksize = mp->m_sb.sb_blocksize;
                buf->bs_blocks = 0;
                break;
        case XFS_DINODE_FMT_EXTENTS:
        case XFS_DINODE_FMT_BTREE:
                buf->bs_rdev = 0;
                buf->bs_blksize = mp->m_sb.sb_blocksize;
                buf->bs_blocks = ip->i_nblocks + ip->i_delayed_blks;
                break;
        }
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        xfs_irele(ip);

        error = bc->formatter(bc->breq, buf);
        if (error == -ECANCELED)
                goto out_advance;
        if (error)
                goto out;

out_advance:
        /*
         * Advance the cursor to the inode that comes after the one we just
         * looked at.  We want the caller to move along if the bulkstat
         * information was copied successfully; if we tried to grab the inode
         * but it's no longer allocated; or if it's internal metadata.
         */
        bc->breq->startino = ino + 1;
out:
        return error;
}
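
/*
 * For illustration only: a minimal sketch of a formatter callback in the
 * style of the ones the ioctl layer passes in here.  The helper
 * xfs_ibulk_advance() (declared in xfs_itable.h) bumps breq->ocount and
 * the output pointer, and returns -ECANCELED once icount records have
 * been written, which is what makes the walk above stop cleanly.  The
 * function name below is hypothetical, not actual kernel code:
 *
 *      static int
 *      example_bulkstat_fmt(
 *              struct xfs_ibulk                *breq,
 *              const struct xfs_bulkstat       *bstat)
 *      {
 *              if (copy_to_user(breq->ubuffer, bstat, sizeof(*bstat)))
 *                      return -EFAULT;
 *              return xfs_ibulk_advance(breq, sizeof(struct xfs_bulkstat));
 *      }
 */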

/* Bulkstat a single inode. */
int
xfs_bulkstat_one(
        struct xfs_ibulk        *breq,
        bulkstat_one_fmt_pf     formatter)
{
        struct xfs_bstat_chunk  bc = {
                .formatter      = formatter,
                .breq           = breq,
        };
        struct xfs_trans        *tp;
        int                     error;

        if (breq->idmap != &nop_mnt_idmap) {
                xfs_warn_ratelimited(breq->mp,
                        "bulkstat not supported inside of idmapped mounts.");
                return -EINVAL;
        }

        ASSERT(breq->icount == 1);

        bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat), KM_MAYFAIL);
        if (!bc.buf)
                return -ENOMEM;

        /*
         * Grab an empty transaction so that we can use its recursive buffer
         * locking abilities to detect cycles in the inobt without deadlocking.
         */
        error = xfs_trans_alloc_empty(breq->mp, &tp);
        if (error)
                goto out;

        error = xfs_bulkstat_one_int(breq->mp, breq->idmap, tp,
                        breq->startino, &bc);
        xfs_trans_cancel(tp);
out:
        kmem_free(bc.buf);

        /*
         * If we reported one inode to userspace then we abort because we hit
         * the end of the buffer.  Don't leak that back to userspace.
         */
        if (error == -ECANCELED)
                error = 0;

        return error;
}

static int
xfs_bulkstat_iwalk(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_ino_t               ino,
        void                    *data)
{
        struct xfs_bstat_chunk  *bc = data;
        int                     error;

        error = xfs_bulkstat_one_int(mp, bc->breq->idmap, tp, ino, data);
        /* bulkstat just skips over missing inodes */
        if (error == -ENOENT || error == -EINVAL)
                return 0;
        return error;
}

/*
 * Check the incoming lastino parameter.
 *
 * We allow any inode value that could map to physical space inside the
 * filesystem because if there are no inodes there, bulkstat moves on to the
 * next chunk.  In other words, the magic agino value of zero takes us to the
 * first chunk in the AG, and an agino value past the end of the AG takes us
 * to the first chunk in the next AG.
 *
 * Therefore we can end early if the requested inode is beyond the end of the
 * filesystem or doesn't map properly.
 */
static inline bool
xfs_bulkstat_already_done(
        struct xfs_mount        *mp,
        xfs_ino_t               startino)
{
        xfs_agnumber_t          agno = XFS_INO_TO_AGNO(mp, startino);
        xfs_agino_t             agino = XFS_INO_TO_AGINO(mp, startino);

        return agno >= mp->m_sb.sb_agcount ||
               startino != XFS_AGINO_TO_INO(mp, agno, agino);
}
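
/*
 * Worked example, assuming a filesystem with sb_agcount == 4: a startino
 * whose encoded AG number is 7 fails the first check, and a startino with
 * stray high bits that are truncated by the 32-bit agno/agino split fails
 * the round-trip check, because XFS_AGINO_TO_INO() cannot reconstruct the
 * original value.  Either way the caller returns 0 with ocount == 0,
 * telling userspace the walk is complete.
 */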

/* Return stat information in bulk (by-inode) for the filesystem. */
int
xfs_bulkstat(
        struct xfs_ibulk        *breq,
        bulkstat_one_fmt_pf     formatter)
{
        struct xfs_bstat_chunk  bc = {
                .formatter      = formatter,
                .breq           = breq,
        };
        struct xfs_trans        *tp;
        unsigned int            iwalk_flags = 0;
        int                     error;

        if (breq->idmap != &nop_mnt_idmap) {
                xfs_warn_ratelimited(breq->mp,
                        "bulkstat not supported inside of idmapped mounts.");
                return -EINVAL;
        }
        if (xfs_bulkstat_already_done(breq->mp, breq->startino))
                return 0;

        bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat), KM_MAYFAIL);
        if (!bc.buf)
                return -ENOMEM;

        /*
         * Grab an empty transaction so that we can use its recursive buffer
         * locking abilities to detect cycles in the inobt without deadlocking.
         */
        error = xfs_trans_alloc_empty(breq->mp, &tp);
        if (error)
                goto out;

        if (breq->flags & XFS_IBULK_SAME_AG)
                iwalk_flags |= XFS_IWALK_SAME_AG;

        error = xfs_iwalk(breq->mp, tp, breq->startino, iwalk_flags,
                        xfs_bulkstat_iwalk, breq->icount, &bc);
        xfs_trans_cancel(tp);
out:
        kmem_free(bc.buf);

        /*
         * We found some inodes, so clear the error status and return them.
         * The lastino pointer will point directly at the inode that triggered
         * any error that occurred, so on the next call the error will be
         * triggered again and propagated to userspace as there will be no
         * formatted inodes in the buffer.
         */
        if (breq->ocount > 0)
                error = 0;

        return error;
}
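
/*
 * A rough userspace sketch of driving this interface through the v5
 * XFS_IOC_BULKSTAT ioctl (struct xfs_bulkstat_req and friends come from
 * the uapi header xfs_fs.h); buffer sizing and error handling are elided,
 * and the batch size of 64 is arbitrary:
 *
 *      struct xfs_bulkstat_req *req;
 *      uint32_t                i;
 *
 *      req = calloc(1, sizeof(*req) + 64 * sizeof(struct xfs_bulkstat));
 *      req->hdr.icount = 64;
 *      req->hdr.ino = 0;       // start at the first inode
 *      while (!ioctl(fd, XFS_IOC_BULKSTAT, req) && req->hdr.ocount > 0) {
 *              // hdr.ino comes back advanced to breq->startino, so the
 *              // next call resumes where this one left off.
 *              for (i = 0; i < req->hdr.ocount; i++)
 *                      printf("%llu\n",
 *                             (unsigned long long)req->bulkstat[i].bs_ino);
 *      }
 */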

/* Convert bulkstat (v5) to bstat (v1). */
void
xfs_bulkstat_to_bstat(
        struct xfs_mount                *mp,
        struct xfs_bstat                *bs1,
        const struct xfs_bulkstat       *bstat)
{
        /* memset is needed here because of padding holes in the structure. */
        memset(bs1, 0, sizeof(struct xfs_bstat));
        bs1->bs_ino = bstat->bs_ino;
        bs1->bs_mode = bstat->bs_mode;
        bs1->bs_nlink = bstat->bs_nlink;
        bs1->bs_uid = bstat->bs_uid;
        bs1->bs_gid = bstat->bs_gid;
        bs1->bs_rdev = bstat->bs_rdev;
        bs1->bs_blksize = bstat->bs_blksize;
        bs1->bs_size = bstat->bs_size;
        bs1->bs_atime.tv_sec = bstat->bs_atime;
        bs1->bs_mtime.tv_sec = bstat->bs_mtime;
        bs1->bs_ctime.tv_sec = bstat->bs_ctime;
        bs1->bs_atime.tv_nsec = bstat->bs_atime_nsec;
        bs1->bs_mtime.tv_nsec = bstat->bs_mtime_nsec;
        bs1->bs_ctime.tv_nsec = bstat->bs_ctime_nsec;
        bs1->bs_blocks = bstat->bs_blocks;
        bs1->bs_xflags = bstat->bs_xflags;
        bs1->bs_extsize = XFS_FSB_TO_B(mp, bstat->bs_extsize_blks);
        bs1->bs_extents = bstat->bs_extents;
        bs1->bs_gen = bstat->bs_gen;
        bs1->bs_projid_lo = bstat->bs_projectid & 0xFFFF;
        bs1->bs_forkoff = bstat->bs_forkoff;
        bs1->bs_projid_hi = bstat->bs_projectid >> 16;
        bs1->bs_sick = bstat->bs_sick;
        bs1->bs_checked = bstat->bs_checked;
        bs1->bs_cowextsize = XFS_FSB_TO_B(mp, bstat->bs_cowextsize_blks);
        bs1->bs_dmevmask = 0;
        bs1->bs_dmstate = 0;
        bs1->bs_aextents = bstat->bs_aextents;
}

struct xfs_inumbers_chunk {
        inumbers_fmt_pf         formatter;
        struct xfs_ibulk        *breq;
};

/*
 * INUMBERS
 * ========
 * This is how we export inode btree records to userspace, so that XFS tools
 * can figure out where inodes are allocated.
 */

/*
 * Format the inode group structure and report it somewhere.
 *
 * Similar to xfs_bulkstat_one_int, lastino is the inode cursor as we walk
 * through the filesystem so we move it forward unless there was a runtime
 * error.  If the formatter tells us the buffer is now full we also move the
 * cursor forward and abort the walk.
 */
STATIC int
xfs_inumbers_walk(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_agnumber_t          agno,
        const struct xfs_inobt_rec_incore *irec,
        void                    *data)
{
        struct xfs_inumbers     inogrp = {
                .xi_startino    = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino),
                .xi_alloccount  = irec->ir_count - irec->ir_freecount,
                .xi_allocmask   = ~irec->ir_free,
                .xi_version     = XFS_INUMBERS_VERSION_V5,
        };
        struct xfs_inumbers_chunk *ic = data;
        int                     error;

        error = ic->formatter(ic->breq, &inogrp);
        if (error && error != -ECANCELED)
                return error;

        ic->breq->startino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino) +
                        XFS_INODES_PER_CHUNK;
        return error;
}
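
/*
 * Worked example with illustrative values: for an inobt record in AG 0
 * with ir_startino = 128, ir_count = 64 and ir_freecount = 62, where only
 * the first two inodes are in use, ir_free has bits 2..63 set, so the
 * record is exported as xi_startino = 128, xi_alloccount = 2 and
 * xi_allocmask = ~ir_free = 0x3.  Userspace tests bit i of xi_allocmask
 * to decide whether inode xi_startino + i is allocated.
 */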

/*
 * Return inode number table for the filesystem.
 */
int
xfs_inumbers(
        struct xfs_ibulk        *breq,
        inumbers_fmt_pf         formatter)
{
        struct xfs_inumbers_chunk ic = {
                .formatter      = formatter,
                .breq           = breq,
        };
        struct xfs_trans        *tp;
        int                     error = 0;

        if (xfs_bulkstat_already_done(breq->mp, breq->startino))
                return 0;

        /*
         * Grab an empty transaction so that we can use its recursive buffer
         * locking abilities to detect cycles in the inobt without deadlocking.
         */
        error = xfs_trans_alloc_empty(breq->mp, &tp);
        if (error)
                goto out;

        error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->flags,
                        xfs_inumbers_walk, breq->icount, &ic);
        xfs_trans_cancel(tp);
out:

        /*
         * We found some inode groups, so clear the error status and return
         * them.  The lastino pointer will point directly at the inode that
         * triggered any error that occurred, so on the next call the error
         * will be triggered again and propagated to userspace as there will
         * be no formatted inode groups in the buffer.
         */
        if (breq->ocount > 0)
                error = 0;

        return error;
}

/* Convert an inumbers (v5) struct to an inogrp (v1) struct. */
void
xfs_inumbers_to_inogrp(
        struct xfs_inogrp               *ig1,
        const struct xfs_inumbers       *ig)
{
        /* memset is needed here because of padding holes in the structure. */
        memset(ig1, 0, sizeof(struct xfs_inogrp));
        ig1->xi_startino = ig->xi_startino;
        ig1->xi_alloccount = ig->xi_alloccount;
        ig1->xi_allocmask = ig->xi_allocmask;
}
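
/*
 * A rough userspace sketch of consuming these records through the v5
 * XFS_IOC_INUMBERS ioctl (struct xfs_inumbers_req comes from the uapi
 * header xfs_fs.h); as above, sizing and error handling are elided and
 * the batch size is arbitrary.  This is the pattern XFS tools use to
 * find allocated inode chunks before bulkstatting them:
 *
 *      struct xfs_inumbers_req *req;
 *      uint32_t                i;
 *
 *      req = calloc(1, sizeof(*req) + 16 * sizeof(struct xfs_inumbers));
 *      req->hdr.icount = 16;
 *      req->hdr.ino = 0;
 *      while (!ioctl(fd, XFS_IOC_INUMBERS, req) && req->hdr.ocount > 0) {
 *              for (i = 0; i < req->hdr.ocount; i++)
 *                      printf("chunk at %llu: %u allocated\n",
 *                             (unsigned long long)req->inumbers[i].xi_startino,
 *                             req->inumbers[i].xi_alloccount);
 *      }
 */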