xref: /linux/fs/xfs/scrub/quotacheck_repair.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2020-2024 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_iwalk.h"
#include "xfs_ialloc.h"
#include "xfs_sb.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/repair.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
#include "scrub/iscan.h"
#include "scrub/quota.h"
#include "scrub/quotacheck.h"
#include "scrub/trace.h"

/*
 * Live Quotacheck Repair
 * ======================
 *
 * Use the live quota counter information that we collected to replace the
 * counter values in the incore dquots.  A scrub->repair cycle should have left
 * the live data and hooks active, so this is safe so long as we make sure the
 * dquot is locked.
 */

/* Commit new counters to a dquot. */
static int
xqcheck_commit_dquot(
	struct xqcheck		*xqc,
	xfs_dqtype_t		dqtype,
	struct xfs_dquot	*dq)
{
	struct xqcheck_dquot	xcdq;
	struct xfarray		*counts = xqcheck_counters_for(xqc, dqtype);
	int64_t			delta;
	bool			dirty = false;
	int			error = 0;

	/* Unlock the dquot just long enough to allocate a transaction. */
	xfs_dqunlock(dq);
	error = xchk_trans_alloc(xqc->sc, 0);
	xfs_dqlock(dq);
	if (error)
		return error;

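	/* Attach the locked dquot to the transaction so we can log it. */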
	xfs_trans_dqjoin(xqc->sc->tp, dq);

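	/*
	 * Bail out if the live scan was aborted; the shadow counters are no
	 * longer trustworthy.
	 */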
	if (xchk_iscan_aborted(&xqc->iscan)) {
		error = -ECANCELED;
		goto out_cancel;
	}

	mutex_lock(&xqc->lock);
	error = xfarray_load_sparse(counts, dq->q_id, &xcdq);
	if (error)
		goto out_unlock;

	/* Adjust counters as needed. */
	delta = (int64_t)xcdq.icount - dq->q_ino.count;
	if (delta) {
		dq->q_ino.reserved += delta;
		dq->q_ino.count += delta;
		dirty = true;
	}

	delta = (int64_t)xcdq.bcount - dq->q_blk.count;
	if (delta) {
		dq->q_blk.reserved += delta;
		dq->q_blk.count += delta;
		dirty = true;
	}

	delta = (int64_t)xcdq.rtbcount - dq->q_rtb.count;
	if (delta) {
		dq->q_rtb.reserved += delta;
		dq->q_rtb.count += delta;
		dirty = true;
	}

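	/*
	 * Mark the shadow dquot so that the second pass in
	 * xqcheck_commit_dqtype knows the repair already handled this id.
	 */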
	xcdq.flags |= (XQCHECK_DQUOT_REPAIR_SCANNED | XQCHECK_DQUOT_WRITTEN);
	error = xfarray_store(counts, dq->q_id, &xcdq);
	if (error == -EFBIG) {
		/*
		 * EFBIG means we tried to store data at too high a byte offset
		 * in the sparse array.  IOWs, we cannot complete the repair
		 * and must cancel the whole operation.  This should never
		 * happen, but we need to catch it anyway.
		 */
		error = -ECANCELED;
	}
	mutex_unlock(&xqc->lock);
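	/* Cancel the update on error or if nothing actually changed. */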
	if (error || !dirty)
		goto out_cancel;

	trace_xrep_quotacheck_dquot(xqc->sc->mp, dq->q_type, dq->q_id);

	/* Commit the dirty dquot to disk. */
	dq->q_flags |= XFS_DQFLAG_DIRTY;
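	/* Dquot zero holds the default limits; it has no timers to adjust. */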
	if (dq->q_id)
		xfs_qm_adjust_dqtimers(dq);
	xfs_trans_log_dquot(xqc->sc->tp, dq);

	/*
	 * Transaction commit unlocks the dquot, so we must re-lock it so that
	 * the caller can put the reference (which requires a locked dquot).
	 */
	error = xrep_trans_commit(xqc->sc);
	xfs_dqlock(dq);
	return error;

out_unlock:
	mutex_unlock(&xqc->lock);
out_cancel:
	xchk_trans_cancel(xqc->sc);

	/* Re-lock the dquot so the caller can put the reference. */
	xfs_dqlock(dq);
	return error;
}

/* Commit new quota counters for a particular quota type. */
STATIC int
xqcheck_commit_dqtype(
	struct xqcheck		*xqc,
	unsigned int		dqtype)
{
	struct xchk_dqiter	cursor = { };
	struct xqcheck_dquot	xcdq;
	struct xfs_scrub	*sc = xqc->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfarray		*counts = xqcheck_counters_for(xqc, dqtype);
	struct xfs_dquot	*dq;
	xfarray_idx_t		cur = XFARRAY_CURSOR_INIT;
	int			error;

	/*
	 * Update the counters of every dquot that the quota file knows about.
	 */
	xchk_dqiter_init(&cursor, sc, dqtype);
	while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
		error = xqcheck_commit_dquot(xqc, dqtype, dq);
		xfs_qm_dqput(dq);
		if (error)
			break;
	}
	if (error)
		return error;

	/*
	 * Make a second pass to deal with the dquots that we know about but
	 * the quota file previously did not know about.
	 */
	mutex_lock(&xqc->lock);
	while ((error = xfarray_iter(counts, &cur, &xcdq)) == 1) {
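		/*
		 * The iterator has already advanced the cursor past the
		 * record it just loaded, so this record's dquot id is cur - 1.
		 */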
		xfs_dqid_t	id = cur - 1;

		if (xcdq.flags & XQCHECK_DQUOT_REPAIR_SCANNED)
			continue;

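		/*
		 * Drop the lock before getting the dquot and committing;
		 * xqcheck_commit_dquot takes it again internally.
		 */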
		mutex_unlock(&xqc->lock);

		/*
		 * Grab the dquot, allowing for dquot block allocation in a
		 * separate transaction.  We committed the scrub transaction
		 * in a previous step, so we will not be creating nested
		 * transactions here.
		 */
		error = xfs_qm_dqget(mp, id, dqtype, true, &dq);
		if (error)
			return error;

		error = xqcheck_commit_dquot(xqc, dqtype, dq);
		xfs_qm_dqput(dq);
		if (error)
			return error;

		mutex_lock(&xqc->lock);
	}
	mutex_unlock(&xqc->lock);

	return error;
}

/* Figure out quota CHKD flags for the running quota types. */
static inline unsigned int
xqcheck_chkd_flags(
	struct xfs_mount	*mp)
{
	unsigned int		ret = 0;

	if (XFS_IS_UQUOTA_ON(mp))
		ret |= XFS_UQUOTA_CHKD;
	if (XFS_IS_GQUOTA_ON(mp))
		ret |= XFS_GQUOTA_CHKD;
	if (XFS_IS_PQUOTA_ON(mp))
		ret |= XFS_PQUOTA_CHKD;
	return ret;
}

/* Commit the new dquot counters. */
int
xrep_quotacheck(
	struct xfs_scrub	*sc)
{
	struct xqcheck		*xqc = sc->buf;
	unsigned int		qflags = xqcheck_chkd_flags(sc->mp);
	int			error;

	/*
	 * Clear the CHKD flag for the running quota types and commit the scrub
	 * transaction so that we can allocate new quota block mappings if we
	 * have to.  If we crash after this point, the sb still has the CHKD
	 * flags cleared, so mount quotacheck will fix all of this up.
	 */
	xrep_update_qflags(sc, qflags, 0);
	error = xrep_trans_commit(sc);
	if (error)
		return error;

	/* Commit the new counters to the dquots. */
	if (xqc->ucounts) {
		error = xqcheck_commit_dqtype(xqc, XFS_DQTYPE_USER);
		if (error)
			return error;
	}
	if (xqc->gcounts) {
		error = xqcheck_commit_dqtype(xqc, XFS_DQTYPE_GROUP);
		if (error)
			return error;
	}
	if (xqc->pcounts) {
		error = xqcheck_commit_dqtype(xqc, XFS_DQTYPE_PROJ);
		if (error)
			return error;
	}

	/* Set the CHKD flags now that we've fixed quota counts. */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		return error;

	xrep_update_qflags(sc, 0, qflags);
	return xrep_trans_commit(sc);
}