xref: /linux/fs/xfs/scrub/quota.c (revision a8b70ccf10e38775785d9cb12ead916474549f99)
1 /*
2  * Copyright (C) 2017 Oracle.  All Rights Reserved.
3  *
4  * Author: Darrick J. Wong <darrick.wong@oracle.com>
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version 2
9  * of the License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it would be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write the Free Software Foundation,
18  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
19  */
20 #include "xfs.h"
21 #include "xfs_fs.h"
22 #include "xfs_shared.h"
23 #include "xfs_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_mount.h"
26 #include "xfs_defer.h"
27 #include "xfs_btree.h"
28 #include "xfs_bit.h"
29 #include "xfs_log_format.h"
30 #include "xfs_trans.h"
31 #include "xfs_sb.h"
32 #include "xfs_inode.h"
33 #include "xfs_inode_fork.h"
34 #include "xfs_alloc.h"
35 #include "xfs_bmap.h"
36 #include "xfs_quota.h"
37 #include "xfs_qm.h"
38 #include "xfs_dquot.h"
39 #include "xfs_dquot_item.h"
40 #include "scrub/xfs_scrub.h"
41 #include "scrub/scrub.h"
42 #include "scrub/common.h"
43 #include "scrub/trace.h"
44 
45 /* Convert a scrub type code to a DQ flag, or return 0 if error. */
46 static inline uint
47 xfs_scrub_quota_to_dqtype(
48 	struct xfs_scrub_context	*sc)
49 {
50 	switch (sc->sm->sm_type) {
51 	case XFS_SCRUB_TYPE_UQUOTA:
52 		return XFS_DQ_USER;
53 	case XFS_SCRUB_TYPE_GQUOTA:
54 		return XFS_DQ_GROUP;
55 	case XFS_SCRUB_TYPE_PQUOTA:
56 		return XFS_DQ_PROJ;
57 	default:
58 		return 0;
59 	}
60 }
61 
62 /* Set us up to scrub a quota. */
63 int
64 xfs_scrub_setup_quota(
65 	struct xfs_scrub_context	*sc,
66 	struct xfs_inode		*ip)
67 {
68 	uint				dqtype;
69 
70 	dqtype = xfs_scrub_quota_to_dqtype(sc);
71 	if (dqtype == 0)
72 		return -EINVAL;
73 	if (!xfs_this_quota_on(sc->mp, dqtype))
74 		return -ENOENT;
75 	return 0;
76 }
77 
78 /* Quotas. */
79 
/*
 * Scrub the fields in an individual quota item.
 *
 * @sc:     scrub context; problems are flagged against sc's data fork.
 * @dqtype: the XFS_DQ_* type we asked for.
 * @dq:     the incore dquot that xfs_qm_dqget(DQNEXT) actually returned.
 * @id:     the dquot id we asked for (may be lower than dq's real id).
 *
 * All problems are reported against the quota file's data fork at the
 * file offset of the dquot block, computed from the *requested* id.
 */
STATIC void
xfs_scrub_quota_item(
	struct xfs_scrub_context	*sc,
	uint				dqtype,
	struct xfs_dquot		*dq,
	xfs_dqid_t			id)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_disk_dquot		*d = &dq->q_core;
	struct xfs_quotainfo		*qi = mp->m_quotainfo;
	xfs_fileoff_t			offset;
	unsigned long long		bsoft;
	unsigned long long		isoft;
	unsigned long long		rsoft;
	unsigned long long		bhard;
	unsigned long long		ihard;
	unsigned long long		rhard;
	unsigned long long		bcount;
	unsigned long long		icount;
	unsigned long long		rcount;
	xfs_ino_t			fs_icount;

	/* File offset of the block holding the requested dquot id. */
	offset = id / qi->qi_dqperchunk;

	/*
	 * We fed $id and DQNEXT into the xfs_qm_dqget call, which means
	 * that the actual dquot we got must either have the same id or
	 * the next higher id.
	 */
	if (id > be32_to_cpu(d->d_id))
		xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/* Did we get the dquot type we wanted? */
	if (dqtype != (d->d_flags & XFS_DQ_ALLTYPES))
		xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/* On-disk padding must be zeroed. */
	if (d->d_pad0 != cpu_to_be32(0) || d->d_pad != cpu_to_be16(0))
		xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/* Check the limits. */
	bhard = be64_to_cpu(d->d_blk_hardlimit);
	ihard = be64_to_cpu(d->d_ino_hardlimit);
	rhard = be64_to_cpu(d->d_rtb_hardlimit);

	bsoft = be64_to_cpu(d->d_blk_softlimit);
	isoft = be64_to_cpu(d->d_ino_softlimit);
	rsoft = be64_to_cpu(d->d_rtb_softlimit);

	/*
	 * Warn if the hard limits are larger than the fs.
	 * Administrators can do this, though in production this seems
	 * suspect, which is why we flag it for review.
	 *
	 * Complain about corruption if the soft limit is greater than
	 * the hard limit.
	 */
	if (bhard > mp->m_sb.sb_dblocks)
		xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (bsoft > bhard)
		xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (ihard > mp->m_maxicount)
		xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (isoft > ihard)
		xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (rhard > mp->m_sb.sb_rblocks)
		xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (rsoft > rhard)
		xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/* Check the resource counts. */
	bcount = be64_to_cpu(d->d_bcount);
	icount = be64_to_cpu(d->d_icount);
	rcount = be64_to_cpu(d->d_rtbcount);
	/* Summed lazily; only an approximate upper bound on inode usage. */
	fs_icount = percpu_counter_sum(&mp->m_icount);

	/*
	 * Check that usage doesn't exceed physical limits.  However, on
	 * a reflink filesystem we're allowed to exceed physical space
	 * if there are no quota limits.
	 */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		/* Shared blocks can legitimately push bcount past sb_dblocks. */
		if (mp->m_sb.sb_dblocks < bcount)
			xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK,
					offset);
	} else {
		if (mp->m_sb.sb_dblocks < bcount)
			xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
					offset);
	}
	if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks)
		xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/*
	 * We can violate the hard limits if the admin suddenly sets a
	 * lower limit than the actual usage.  However, we flag it for
	 * admin review.  Dquot 0 holds the default limits and is exempt.
	 */
	if (id != 0 && bhard != 0 && bcount > bhard)
		xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (id != 0 && ihard != 0 && icount > ihard)
		xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (id != 0 && rhard != 0 && rcount > rhard)
		xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
}
187 
/*
 * Scrub all of a quota type's items.
 *
 * Lock order: qi_quotaofflock first (so quotaoff can't race with us),
 * then the quota inode's ILOCK for the extent-map walk.  The ILOCK is
 * dropped before iterating dquots because xfs_qm_dqget takes it itself.
 *
 * Returns -ENOENT if this quota type isn't running, otherwise the usual
 * scrub error/0 convention with problems flagged in sc->sm.
 */
int
xfs_scrub_quota(
	struct xfs_scrub_context	*sc)
{
	struct xfs_bmbt_irec		irec = { 0 };
	struct xfs_mount		*mp = sc->mp;
	struct xfs_inode		*ip;
	struct xfs_quotainfo		*qi = mp->m_quotainfo;
	struct xfs_dquot		*dq;
	xfs_fileoff_t			max_dqid_off;
	xfs_fileoff_t			off = 0;
	xfs_dqid_t			id = 0;
	uint				dqtype;
	int				nimaps;
	int				error = 0;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return -ENOENT;

	/* Hold off quotaoff while we scan; recheck our type under the lock. */
	mutex_lock(&qi->qi_quotaofflock);
	dqtype = xfs_scrub_quota_to_dqtype(sc);
	if (!xfs_this_quota_on(sc->mp, dqtype)) {
		error = -ENOENT;
		goto out_unlock_quota;
	}

	/* Attach to the quota inode and set sc->ip so that reporting works. */
	ip = xfs_quota_inode(sc->mp, dqtype);
	sc->ip = ip;

	/* Look for problem extents. */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	/* A quota file must never be a realtime file. */
	if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
		xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
		goto out_unlock_inode;
	}
	/* Highest file offset that any valid dquot id could map to. */
	max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
	while (1) {
		if (xfs_scrub_should_terminate(sc, &error))
			break;

		/* Resume the walk just past the previous mapping. */
		off = irec.br_startoff + irec.br_blockcount;
		nimaps = 1;
		error = xfs_bmapi_read(ip, off, -1, &irec, &nimaps,
				XFS_BMAPI_ENTIRE);
		if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, off,
				&error))
			goto out_unlock_inode;
		if (!nimaps)
			break;
		/* Holes are fine; skip to the next mapping. */
		if (irec.br_startblock == HOLESTARTBLOCK)
			continue;

		/* Check the extent record doesn't point to crap. */
		if (irec.br_startblock + irec.br_blockcount <=
		    irec.br_startblock)
			xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
					irec.br_startoff);
		if (!xfs_verify_fsbno(mp, irec.br_startblock) ||
		    !xfs_verify_fsbno(mp, irec.br_startblock +
					irec.br_blockcount - 1))
			xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
					irec.br_startoff);

		/*
		 * Unwritten extents or blocks mapped above the highest
		 * quota id shouldn't happen.
		 */
		if (isnullstartblock(irec.br_startblock) ||
		    irec.br_startoff > max_dqid_off ||
		    irec.br_startoff + irec.br_blockcount > max_dqid_off + 1)
			xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, off);
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/* Don't bother checking items if the map is already corrupt. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Check all the quota items. */
	while (id < ((xfs_dqid_t)-1ULL)) {
		if (xfs_scrub_should_terminate(sc, &error))
			break;

		/* DQNEXT returns the dquot with the lowest id >= $id. */
		error = xfs_qm_dqget(mp, NULL, id, dqtype, XFS_QMOPT_DQNEXT,
				&dq);
		if (error == -ENOENT)
			break;
		if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK,
				id * qi->qi_dqperchunk, &error))
			break;

		xfs_scrub_quota_item(sc, dqtype, dq, id);

		/* Advance past the dquot we just checked; stop on wraparound. */
		id = be32_to_cpu(dq->q_core.d_id) + 1;
		xfs_qm_dqput(dq);
		if (!id)
			break;
	}

out:
	/* We set sc->ip earlier, so make sure we clear it now. */
	sc->ip = NULL;
out_unlock_quota:
	mutex_unlock(&qi->qi_quotaofflock);
	return error;

out_unlock_inode:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	goto out;
}
298