// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_bit.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_bmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/quota.h"

/* Convert a scrub type code to a DQ flag, or return 0 if error. */
xfs_dqtype_t
xchk_quota_to_dqtype(
	struct xfs_scrub	*sc)
{
	switch (sc->sm->sm_type) {
	case XFS_SCRUB_TYPE_UQUOTA:
		return XFS_DQTYPE_USER;
	case XFS_SCRUB_TYPE_GQUOTA:
		return XFS_DQTYPE_GROUP;
	case XFS_SCRUB_TYPE_PQUOTA:
		return XFS_DQTYPE_PROJ;
	default:
		return 0;
	}
}

/* Set us up to scrub a quota. */
int
xchk_setup_quota(
	struct xfs_scrub	*sc)
{
	xfs_dqtype_t		dqtype;
	int			error;

	if (!XFS_IS_QUOTA_ON(sc->mp))
		return -ENOENT;

	dqtype = xchk_quota_to_dqtype(sc);
	if (dqtype == 0)
		return -EINVAL;

	if (!xfs_this_quota_on(sc->mp, dqtype))
		return -ENOENT;

	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	error = xchk_setup_fs(sc);
	if (error)
		return error;

	error = xchk_install_live_inode(sc, xfs_quota_inode(sc->mp, dqtype));
	if (error)
		return error;

	xchk_ilock(sc, XFS_ILOCK_EXCL);
	return 0;
}

/* Quotas. */

struct xchk_quota_info {
	struct xfs_scrub	*sc;
	xfs_dqid_t		last_id;
};

/* There's a written block backing this dquot, right? */
STATIC int
xchk_quota_item_bmap(
	struct xfs_scrub	*sc,
	struct xfs_dquot	*dq,
	xfs_fileoff_t		offset)
{
	struct xfs_bmbt_irec	irec;
	struct xfs_mount	*mp = sc->mp;
	int			nmaps = 1;
	int			error;

	if (!xfs_verify_fileoff(mp, offset)) {
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
		return 0;
	}

	if (dq->q_fileoffset != offset) {
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
		return 0;
	}

	error = xfs_bmapi_read(sc->ip, offset, 1, &irec, &nmaps, 0);
	if (error)
		return error;

	if (nmaps != 1) {
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
		return 0;
	}

	if (!xfs_verify_fsbno(mp, irec.br_startblock))
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
	if (XFS_FSB_TO_DADDR(mp, irec.br_startblock) != dq->q_blkno)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
	if (!xfs_bmap_is_written_extent(&irec))
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	return 0;
}

/* Complain if a quota timer is incorrectly set. */
static inline void
xchk_quota_item_timer(
	struct xfs_scrub		*sc,
	xfs_fileoff_t			offset,
	const struct xfs_dquot_res	*res)
{
	if ((res->softlimit && res->count > res->softlimit) ||
	    (res->hardlimit && res->count > res->hardlimit)) {
		if (!res->timer)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
	} else {
		if (res->timer)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
	}
}
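
/*
 * For example, with a block soft limit of 100, a hard limit of 200, and a
 * usage count of 150, the soft limit has been exceeded and the grace timer
 * must be running; with usage at 90 (under both limits), the timer must be
 * clear.  Either mismatch is flagged as corruption by the helper above.
 */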

/* Scrub the fields in an individual quota item. */
STATIC int
xchk_quota_item(
	struct xchk_quota_info	*sqi,
	struct xfs_dquot	*dq)
{
	struct xfs_scrub	*sc = sqi->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	xfs_fileoff_t		offset;
	xfs_ino_t		fs_icount;
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	/*
	 * We want to validate the bmap record for the storage backing this
	 * dquot, so we need to lock the dquot and the quota file.  For quota
	 * operations, the locking order is first the ILOCK and then the
	 * dquot.
	 */
	xchk_ilock(sc, XFS_ILOCK_SHARED);
	mutex_lock(&dq->q_qlock);

	/*
	 * Except for the root dquot, the actual dquot we got must either have
	 * the same or higher id as we saw before.
	 */
	offset = dq->q_id / qi->qi_dqperchunk;
	if (dq->q_id && dq->q_id <= sqi->last_id)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	sqi->last_id = dq->q_id;

	error = xchk_quota_item_bmap(sc, dq, offset);
	xchk_iunlock(sc, XFS_ILOCK_SHARED);
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, offset, &error))
		return error;

	/*
	 * Warn if the hard limits are larger than the fs.  Administrators
	 * can do this, though in production this seems suspect, which is why
	 * we flag it for review.
	 *
	 * Complain about corruption if the soft limit is greater than the
	 * hard limit.
	 */
	if (dq->q_blk.hardlimit > mp->m_sb.sb_dblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_blk.softlimit > dq->q_blk.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (dq->q_ino.hardlimit > M_IGEO(mp)->maxicount)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_ino.softlimit > dq->q_ino.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (dq->q_rtb.hardlimit > mp->m_sb.sb_rblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_rtb.softlimit > dq->q_rtb.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/* Check the resource counts. */
	fs_icount = percpu_counter_sum(&mp->m_icount);

	/*
	 * Check that usage doesn't exceed physical limits.  However, on
	 * a reflink filesystem we're allowed to exceed physical space
	 * if there are no quota limits.
	 */
	if (xfs_has_reflink(mp)) {
		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
			xchk_fblock_set_warning(sc, XFS_DATA_FORK,
					offset);
		if (mp->m_sb.sb_rblocks < dq->q_rtb.count)
			xchk_fblock_set_warning(sc, XFS_DATA_FORK,
					offset);
	} else {
		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					offset);
		if (mp->m_sb.sb_rblocks < dq->q_rtb.count)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					offset);
	}
	if (dq->q_ino.count > fs_icount)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/*
	 * We can violate the hard limits if the admin suddenly sets a
	 * lower limit than the actual usage.  However, we flag it for
	 * admin review.
	 */
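	/* The ID 0 dquot holds the default limits, which are not enforced. */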
	if (dq->q_id == 0)
		goto out;

	if (dq->q_blk.hardlimit != 0 &&
	    dq->q_blk.count > dq->q_blk.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	if (dq->q_ino.hardlimit != 0 &&
	    dq->q_ino.count > dq->q_ino.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	if (dq->q_rtb.hardlimit != 0 &&
	    dq->q_rtb.count > dq->q_rtb.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	xchk_quota_item_timer(sc, offset, &dq->q_blk);
	xchk_quota_item_timer(sc, offset, &dq->q_ino);
	xchk_quota_item_timer(sc, offset, &dq->q_rtb);

out:
	mutex_unlock(&dq->q_qlock);
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}

/* Check the quota's data fork. */
STATIC int
xchk_quota_data_fork(
	struct xfs_scrub	*sc)
{
	struct xfs_bmbt_irec	irec = { 0 };
	struct xfs_iext_cursor	icur;
	struct xfs_quotainfo	*qi = sc->mp->m_quotainfo;
	struct xfs_ifork	*ifp;
	xfs_fileoff_t		max_dqid_off;
	int			error = 0;

	/* Invoke the fork scrubber. */
	error = xchk_metadata_inode_forks(sc);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Check for data fork problems that apply only to quota files. */
	max_dqid_off = XFS_DQ_ID_MAX / qi->qi_dqperchunk;
	ifp = xfs_ifork_ptr(sc->ip, XFS_DATA_FORK);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error))
			break;

		/*
		 * delalloc/unwritten extents or blocks mapped above the
		 * highest quota id shouldn't happen.
		 */
		if (!xfs_bmap_is_written_extent(&irec) ||
		    irec.br_startoff > max_dqid_off ||
		    irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					irec.br_startoff);
			break;
		}
	}

	return error;
}

/* Scrub all of a quota type's items. */
int
xchk_quota(
	struct xfs_scrub	*sc)
{
	struct xchk_dqiter	cursor = { };
	struct xchk_quota_info	sqi = { .sc = sc };
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct xfs_dquot	*dq;
	xfs_dqtype_t		dqtype;
	int			error = 0;

	dqtype = xchk_quota_to_dqtype(sc);

	/* Look for problem extents. */
	error = xchk_quota_data_fork(sc);
	if (error)
		goto out;
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/*
	 * Check all the quota items.  Now that we've checked the quota inode
	 * data fork we have to drop ILOCK_EXCL to use the regular dquot
	 * functions.
	 */
	xchk_iunlock(sc, sc->ilock_flags);

	/* Now look for things that the quota verifiers won't complain about. */
	xchk_dqiter_init(&cursor, sc, dqtype);
	while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
		error = xchk_quota_item(&sqi, dq);
		xfs_qm_dqrele(dq);
		if (error)
			break;
	}
	if (error == -ECANCELED)
		error = 0;
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK,
			sqi.last_id * qi->qi_dqperchunk, &error))
		goto out;

out:
	return error;
}