// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_bit.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_bmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/quota.h"

/* Convert a scrub type code to a DQ type, or return 0 if unrecognized. */
xfs_dqtype_t
xchk_quota_to_dqtype(
	struct xfs_scrub	*sc)
{
	switch (sc->sm->sm_type) {
	case XFS_SCRUB_TYPE_UQUOTA:
		return XFS_DQTYPE_USER;
	case XFS_SCRUB_TYPE_GQUOTA:
		return XFS_DQTYPE_GROUP;
	case XFS_SCRUB_TYPE_PQUOTA:
		return XFS_DQTYPE_PROJ;
	default:
		return 0;
	}
}

/* Set us up to scrub a quota. */
int
xchk_setup_quota(
	struct xfs_scrub	*sc)
{
	xfs_dqtype_t		dqtype;
	int			error;

	if (!XFS_IS_QUOTA_ON(sc->mp))
		return -ENOENT;

	dqtype = xchk_quota_to_dqtype(sc);
	if (dqtype == 0)
		return -EINVAL;

	if (!xfs_this_quota_on(sc->mp, dqtype))
		return -ENOENT;

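	/*
	 * Enable the intent drain hooks if this scrub type will need to
	 * wait for deferred intent chains to finish.
	 */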
	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	error = xchk_setup_fs(sc);
	if (error)
		return error;

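	/* Attach the quota inode to the scrub context and ILOCK it. */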
	error = xchk_install_live_inode(sc, xfs_quota_inode(sc->mp, dqtype));
	if (error)
		return error;

	xchk_ilock(sc, XFS_ILOCK_EXCL);
	return 0;
}

/* Quotas. */

struct xchk_quota_info {
	struct xfs_scrub	*sc;
	xfs_dqid_t		last_id;
};

/* There's a written block backing this dquot, right? */
STATIC int
xchk_quota_item_bmap(
	struct xfs_scrub	*sc,
	struct xfs_dquot	*dq,
	xfs_fileoff_t		offset)
{
	struct xfs_bmbt_irec	irec;
	struct xfs_mount	*mp = sc->mp;
	int			nmaps = 1;
	int			error;

	if (!xfs_verify_fileoff(mp, offset)) {
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
		return 0;
	}

	if (dq->q_fileoffset != offset) {
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
		return 0;
	}

	error = xfs_bmapi_read(sc->ip, offset, 1, &irec, &nmaps, 0);
	if (error)
		return error;

	if (nmaps != 1) {
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
		return 0;
	}

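	/*
	 * The mapping must be a valid, written block at the same disk address
	 * from which this dquot was read.
	 */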
	if (!xfs_verify_fsbno(mp, irec.br_startblock))
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
	if (XFS_FSB_TO_DADDR(mp, irec.br_startblock) != dq->q_blkno)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
	if (!xfs_bmap_is_written_extent(&irec))
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	return 0;
}

/* Complain if a quota timer is incorrectly set. */
static inline void
xchk_quota_item_timer(
	struct xfs_scrub		*sc,
	xfs_fileoff_t			offset,
	const struct xfs_dquot_res	*res)
{
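	/* A timer should be running if and only if usage exceeds a limit. */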
	if ((res->softlimit && res->count > res->softlimit) ||
	    (res->hardlimit && res->count > res->hardlimit)) {
		if (!res->timer)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
	} else {
		if (res->timer)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
	}
}

/* Scrub the fields in an individual quota item. */
STATIC int
xchk_quota_item(
	struct xchk_quota_info	*sqi,
	struct xfs_dquot	*dq)
{
	struct xfs_scrub	*sc = sqi->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	xfs_fileoff_t		offset;
	xfs_ino_t		fs_icount;
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	/*
	 * We want to validate the bmap record for the storage backing this
	 * dquot, so we need to lock the dquot and the quota file.  For quota
	 * operations, the locking order is first the ILOCK and then the
	 * dquot.  However, dqiterate gave us a locked dquot, so drop the
	 * dquot lock to get the ILOCK.
	 */
	xfs_dqunlock(dq);
	xchk_ilock(sc, XFS_ILOCK_SHARED);
	xfs_dqlock(dq);

	/*
	 * Except for the root dquot, the dquot we got must have a higher id
	 * than the one we saw before.
	 */
	offset = dq->q_id / qi->qi_dqperchunk;
	if (dq->q_id && dq->q_id <= sqi->last_id)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	sqi->last_id = dq->q_id;

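	/* Check the dquot's backing mapping, then drop the ILOCK. */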
	error = xchk_quota_item_bmap(sc, dq, offset);
	xchk_iunlock(sc, XFS_ILOCK_SHARED);
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, offset, &error))
		return error;

	/*
	 * Warn if the hard limits are larger than the fs.
	 * Administrators can do this, though in production this seems
	 * suspect, which is why we flag it for review.
	 *
	 * Complain about corruption if the soft limit is greater than
	 * the hard limit.
	 */
	if (dq->q_blk.hardlimit > mp->m_sb.sb_dblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_blk.softlimit > dq->q_blk.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (dq->q_ino.hardlimit > M_IGEO(mp)->maxicount)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_ino.softlimit > dq->q_ino.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (dq->q_rtb.hardlimit > mp->m_sb.sb_rblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_rtb.softlimit > dq->q_rtb.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/* Check the resource counts. */
	fs_icount = percpu_counter_sum(&mp->m_icount);

	/*
	 * Check that usage doesn't exceed physical limits.  However, on
	 * a reflink filesystem we're allowed to exceed physical space
	 * if there are no quota limits.
	 */
	if (xfs_has_reflink(mp)) {
		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
			xchk_fblock_set_warning(sc, XFS_DATA_FORK,
					offset);
	} else {
		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					offset);
	}
	if (dq->q_ino.count > fs_icount || dq->q_rtb.count > mp->m_sb.sb_rblocks)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/*
	 * We can violate the hard limits if the admin suddenly sets a
	 * lower limit than the actual usage.  However, we flag it for
	 * admin review.
	 */
	if (dq->q_id == 0)
		goto out;

	if (dq->q_blk.hardlimit != 0 &&
	    dq->q_blk.count > dq->q_blk.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	if (dq->q_ino.hardlimit != 0 &&
	    dq->q_ino.count > dq->q_ino.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	if (dq->q_rtb.hardlimit != 0 &&
	    dq->q_rtb.count > dq->q_rtb.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	xchk_quota_item_timer(sc, offset, &dq->q_blk);
	xchk_quota_item_timer(sc, offset, &dq->q_ino);
	xchk_quota_item_timer(sc, offset, &dq->q_rtb);

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}

/* Check the quota's data fork. */
STATIC int
xchk_quota_data_fork(
	struct xfs_scrub	*sc)
{
	struct xfs_bmbt_irec	irec = { 0 };
	struct xfs_iext_cursor	icur;
	struct xfs_quotainfo	*qi = sc->mp->m_quotainfo;
	struct xfs_ifork	*ifp;
	xfs_fileoff_t		max_dqid_off;
	int			error = 0;

	/* Invoke the fork scrubber. */
	error = xchk_metadata_inode_forks(sc);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Check for data fork problems that apply only to quota files. */
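	/* File offset of the block that would hold the highest possible dquot id. */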
	max_dqid_off = XFS_DQ_ID_MAX / qi->qi_dqperchunk;
	ifp = xfs_ifork_ptr(sc->ip, XFS_DATA_FORK);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error))
			break;

		/*
		 * delalloc/unwritten extents or blocks mapped above the
		 * highest quota id shouldn't happen.
		 */
		if (!xfs_bmap_is_written_extent(&irec) ||
		    irec.br_startoff > max_dqid_off ||
		    irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					irec.br_startoff);
			break;
		}
	}

	return error;
}

/* Scrub all of a quota type's items. */
int
xchk_quota(
	struct xfs_scrub	*sc)
{
	struct xchk_dqiter	cursor = { };
	struct xchk_quota_info	sqi = { .sc = sc };
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct xfs_dquot	*dq;
	xfs_dqtype_t		dqtype;
	int			error = 0;

	dqtype = xchk_quota_to_dqtype(sc);

	/* Look for problem extents. */
	error = xchk_quota_data_fork(sc);
	if (error)
		goto out;
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/*
	 * Check all the quota items.  Now that we've checked the quota inode
	 * data fork we have to drop ILOCK_EXCL to use the regular dquot
	 * functions.
	 */
	xchk_iunlock(sc, sc->ilock_flags);

	/* Now look for things that the quota verifiers won't complain about. */
	xchk_dqiter_init(&cursor, sc, dqtype);
	while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
		error = xchk_quota_item(&sqi, dq);
		xfs_qm_dqput(dq);
		if (error)
			break;
	}
	if (error == -ECANCELED)
		error = 0;
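	/* Charge any remaining iterator error to the last dquot's file offset. */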
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK,
			sqi.last_id * qi->qi_dqperchunk, &error))
		goto out;

out:
	return error;
}