xref: /linux/fs/xfs/scrub/quota_repair.c (revision 151ebcf0797b1a3ba53c8843dc21748c80e098c7)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2018-2023 Oracle.  All Rights Reserved.
4  * Author: Darrick J. Wong <djwong@kernel.org>
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_mount.h"
12 #include "xfs_defer.h"
13 #include "xfs_btree.h"
14 #include "xfs_bit.h"
15 #include "xfs_format.h"
16 #include "xfs_log_format.h"
17 #include "xfs_trans.h"
18 #include "xfs_sb.h"
19 #include "xfs_inode.h"
20 #include "xfs_inode_fork.h"
21 #include "xfs_alloc.h"
22 #include "xfs_bmap.h"
23 #include "xfs_quota.h"
24 #include "xfs_qm.h"
25 #include "xfs_dquot.h"
26 #include "xfs_dquot_item.h"
27 #include "xfs_reflink.h"
28 #include "xfs_bmap_btree.h"
29 #include "xfs_trans_space.h"
30 #include "scrub/xfs_scrub.h"
31 #include "scrub/scrub.h"
32 #include "scrub/common.h"
33 #include "scrub/quota.h"
34 #include "scrub/trace.h"
35 #include "scrub/repair.h"
36 
37 /*
38  * Quota Repair
39  * ============
40  *
41  * Quota repairs are fairly simplistic; we fix everything that the dquot
42  * verifiers complain about, cap any counters or limits that make no sense,
43  * and schedule a quotacheck if we had to fix anything.  We also repair any
44  * data fork extent records that don't apply to metadata files.
45  */
46 
47 struct xrep_quota_info {
	/* Scrub context for this quota repair. */
48 	struct xfs_scrub	*sc;
	/* Set when a dquot's counters had to be clamped; makes the caller
	 * schedule a full quotacheck to recompute the real usage. */
49 	bool			need_quotacheck;
50 };
51 
52 /*
53  * Allocate a new block into a sparse hole in the quota file backing this
54  * dquot, initialize the block, and commit the whole mess.
55  */
56 STATIC int
57 xrep_quota_item_fill_bmap_hole(
58 	struct xfs_scrub	*sc,
59 	struct xfs_dquot	*dq,
60 	struct xfs_bmbt_irec	*irec)
61 {
62 	struct xfs_buf		*bp;
63 	struct xfs_mount	*mp = sc->mp;
64 	int			nmaps = 1;
65 	int			error;
66 
	/* Join the quota file to sc->tp so the allocation is logged with it. */
67 	xfs_trans_ijoin(sc->tp, sc->ip, 0);
68 
69 	/* Map a block into the file. */
70 	error = xfs_trans_reserve_more(sc->tp, XFS_QM_DQALLOC_SPACE_RES(mp),
71 			0);
72 	if (error)
73 		return error;
74 
	/* dq->q_fileoffset was computed by the caller, xrep_quota_item_bmap. */
75 	error = xfs_bmapi_write(sc->tp, sc->ip, dq->q_fileoffset,
76 			XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0,
77 			irec, &nmaps);
78 	if (error)
79 		return error;
80 
	/* Cache the daddr of the new cluster in the incore dquot. */
81 	dq->q_blkno = XFS_FSB_TO_DADDR(mp, irec->br_startblock);
82 
83 	trace_xrep_dquot_item_fill_bmap_hole(sc->mp, dq->q_type, dq->q_id);
84 
85 	/* Initialize the new block. */
86 	error = xfs_trans_get_buf(sc->tp, mp->m_ddev_targp, dq->q_blkno,
87 			mp->m_quotainfo->qi_dqchunklen, 0, &bp);
88 	if (error)
89 		return error;
	/* Attach the dquot buffer verifier before we log new contents. */
90 	bp->b_ops = &xfs_dquot_buf_ops;
91 
92 	xfs_qm_init_dquot_blk(sc->tp, dq->q_id, dq->q_type, bp);
93 	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
94 
95 	/*
96 	 * Finish the mapping transactions and roll one more time to
97 	 * disconnect sc->ip from sc->tp.
98 	 */
99 	error = xrep_defer_finish(sc);
100 	if (error)
101 		return error;
102 	return xfs_trans_roll(&sc->tp);
103 }
104 
105 /* Make sure there's a written block backing this dquot */
106 STATIC int
107 xrep_quota_item_bmap(
108 	struct xfs_scrub	*sc,
109 	struct xfs_dquot	*dq,
110 	bool			*dirty)
111 {
112 	struct xfs_bmbt_irec	irec;
113 	struct xfs_mount	*mp = sc->mp;
114 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	/* File offset of the dquot cluster that should hold this dquot id. */
115 	xfs_fileoff_t		offset = dq->q_id / qi->qi_dqperchunk;
116 	int			nmaps = 1;
117 	int			error;
118 
119 	/* The computed file offset should always be valid. */
120 	if (!xfs_verify_fileoff(mp, offset)) {
121 		ASSERT(xfs_verify_fileoff(mp, offset));
122 		return -EFSCORRUPTED;
123 	}
124 	dq->q_fileoffset = offset;
125 
126 	error = xfs_bmapi_read(sc->ip, offset, 1, &irec, &nmaps, 0);
127 	if (error)
128 		return error;
129 
130 	if (nmaps < 1 || !xfs_bmap_is_real_extent(&irec)) {
131 		/* Hole/delalloc extent; allocate a real block. */
132 		error = xrep_quota_item_fill_bmap_hole(sc, dq, &irec);
133 		if (error)
134 			return error;
135 	} else if (irec.br_state != XFS_EXT_NORM) {
136 		/* Unwritten extent, which we already took care of? */
137 		ASSERT(irec.br_state == XFS_EXT_NORM);
138 		return -EFSCORRUPTED;
139 	} else if (dq->q_blkno != XFS_FSB_TO_DADDR(mp, irec.br_startblock)) {
140 		/*
141 		 * If the cached daddr is incorrect, repair probably punched a
142 		 * hole out of the quota file and filled it back in with a new
143 		 * block.  Update the block mapping in the dquot.
144 		 */
145 		dq->q_blkno = XFS_FSB_TO_DADDR(mp, irec.br_startblock);
146 	}
147 
	/*
	 * NOTE(review): the dquot is flagged dirty even when the mapping was
	 * already correct, forcing the caller to relog and roll — presumably
	 * to make sure the incore dquot gets flushed through the (possibly
	 * new) backing block.  Confirm this is intentional rather than an
	 * oversight.
	 */
148 	*dirty = true;
149 	return 0;
150 }
151 
152 /* Reset quota timers if incorrectly set. */
153 static inline void
154 xrep_quota_item_timer(
155 	struct xfs_scrub		*sc,
156 	const struct xfs_dquot_res	*res,
157 	bool				*dirty)
158 {
159 	if ((res->softlimit && res->count > res->softlimit) ||
160 	    (res->hardlimit && res->count > res->hardlimit)) {
161 		if (!res->timer)
162 			*dirty = true;
163 	} else {
164 		if (res->timer)
165 			*dirty = true;
166 	}
167 }
168 
169 /* Scrub the fields in an individual quota item. */
170 STATIC int
171 xrep_quota_item(
172 	struct xrep_quota_info	*rqi,
173 	struct xfs_dquot	*dq)
174 {
175 	struct xfs_scrub	*sc = rqi->sc;
176 	struct xfs_mount	*mp = sc->mp;
177 	xfs_ino_t		fs_icount;
178 	bool			dirty = false;
179 	int			error = 0;
180 
181 	/* Last chance to abort before we start committing fixes. */
182 	if (xchk_should_terminate(sc, &error))
183 		return error;
184 
185 	/*
186 	 * We might need to fix holes in the bmap record for the storage
187 	 * backing this dquot, so we need to lock the dquot and the quota file.
188 	 * dqiterate gave us a locked dquot, so drop the dquot lock to get the
189 	 * ILOCK_EXCL.
190 	 */
191 	xfs_dqunlock(dq);
192 	xchk_ilock(sc, XFS_ILOCK_EXCL);
193 	xfs_dqlock(dq);
194 
195 	error = xrep_quota_item_bmap(sc, dq, &dirty);
196 	xchk_iunlock(sc, XFS_ILOCK_EXCL);
197 	if (error)
198 		return error;
199 
200 	/* Check the limits. */
	/* Soft limits can never exceed their hard limits; clamp them. */
201 	if (dq->q_blk.softlimit > dq->q_blk.hardlimit) {
202 		dq->q_blk.softlimit = dq->q_blk.hardlimit;
203 		dirty = true;
204 	}
205 
206 	if (dq->q_ino.softlimit > dq->q_ino.hardlimit) {
207 		dq->q_ino.softlimit = dq->q_ino.hardlimit;
208 		dirty = true;
209 	}
210 
211 	if (dq->q_rtb.softlimit > dq->q_rtb.hardlimit) {
212 		dq->q_rtb.softlimit = dq->q_rtb.hardlimit;
213 		dirty = true;
214 	}
215 
216 	/*
217 	 * Check that usage doesn't exceed physical limits.  However, on
218 	 * a reflink filesystem we're allowed to exceed physical space
219 	 * if there are no quota limits.  We don't know what the real number
220 	 * is, but we can make quotacheck find out for us.
221 	 */
222 	if (!xfs_has_reflink(mp) && dq->q_blk.count > mp->m_sb.sb_dblocks) {
	/* Keep reserved consistent with the clamped count. */
223 		dq->q_blk.reserved -= dq->q_blk.count;
224 		dq->q_blk.reserved += mp->m_sb.sb_dblocks;
225 		dq->q_blk.count = mp->m_sb.sb_dblocks;
226 		rqi->need_quotacheck = true;
227 		dirty = true;
228 	}
	/* Clamp inode usage to the summed incore allocated-inode counter. */
229 	fs_icount = percpu_counter_sum(&mp->m_icount);
230 	if (dq->q_ino.count > fs_icount) {
231 		dq->q_ino.reserved -= dq->q_ino.count;
232 		dq->q_ino.reserved += fs_icount;
233 		dq->q_ino.count = fs_icount;
234 		rqi->need_quotacheck = true;
235 		dirty = true;
236 	}
237 	if (dq->q_rtb.count > mp->m_sb.sb_rblocks) {
238 		dq->q_rtb.reserved -= dq->q_rtb.count;
239 		dq->q_rtb.reserved += mp->m_sb.sb_rblocks;
240 		dq->q_rtb.count = mp->m_sb.sb_rblocks;
241 		rqi->need_quotacheck = true;
242 		dirty = true;
243 	}
244 
	/* Dirty the dquot if any grace timer disagrees with its usage. */
245 	xrep_quota_item_timer(sc, &dq->q_blk, &dirty);
246 	xrep_quota_item_timer(sc, &dq->q_ino, &dirty);
247 	xrep_quota_item_timer(sc, &dq->q_rtb, &dirty);
248 
249 	if (!dirty)
250 		return 0;
251 
252 	trace_xrep_dquot_item(sc->mp, dq->q_type, dq->q_id);
253 
	/* Commit the fixes: join and log the dquot, then roll the transaction. */
254 	dq->q_flags |= XFS_DQFLAG_DIRTY;
255 	xfs_trans_dqjoin(sc->tp, dq);
	/* id 0 is skipped here — presumably the default-limits record, whose
	 * limits/timers aren't adjusted like regular dquots; confirm. */
256 	if (dq->q_id) {
257 		xfs_qm_adjust_dqlimits(dq);
258 		xfs_qm_adjust_dqtimers(dq);
259 	}
260 	xfs_trans_log_dquot(sc->tp, dq);
261 	error = xfs_trans_roll(&sc->tp);
	/* Re-take the dquot lock; dqiterate handed us a locked dquot and the
	 * caller expects to get one back. */
262 	xfs_dqlock(dq);
263 	return error;
264 }
265 
266 /* Fix a quota timer so that we can pass the verifier. */
267 STATIC void
268 xrep_quota_fix_timer(
269 	struct xfs_mount	*mp,
270 	const struct xfs_disk_dquot *ddq,
271 	__be64			softlimit,
272 	__be64			countnow,
273 	__be32			*timer,
274 	time64_t		timelimit)
275 {
276 	uint64_t		soft = be64_to_cpu(softlimit);
277 	uint64_t		count = be64_to_cpu(countnow);
278 	time64_t		new_timer;
279 	uint32_t		t;
280 
281 	if (!soft || count <= soft || *timer != 0)
282 		return;
283 
284 	new_timer = xfs_dquot_set_timeout(mp,
285 				ktime_get_real_seconds() + timelimit);
286 	if (ddq->d_type & XFS_DQTYPE_BIGTIME)
287 		t = xfs_dq_unix_to_bigtime(new_timer);
288 	else
289 		t = new_timer;
290 
291 	*timer = cpu_to_be32(t);
292 }
293 
294 /* Fix anything the verifiers complain about. */
/*
 * @daddr is the disk address of one dquot cluster; @id is the dquot id that
 * should occupy the first slot of that cluster (the caller computes it from
 * the cluster's file offset).  Returns 0 if the block is usable or has been
 * rewritten; nonzero errno otherwise.
 */
295 STATIC int
296 xrep_quota_block(
297 	struct xfs_scrub	*sc,
298 	xfs_daddr_t		daddr,
299 	xfs_dqtype_t		dqtype,
300 	xfs_dqid_t		id)
301 {
302 	struct xfs_dqblk	*dqblk;
303 	struct xfs_disk_dquot	*ddq;
304 	struct xfs_quotainfo	*qi = sc->mp->m_quotainfo;
305 	struct xfs_def_quota	*defq = xfs_get_defquota(qi, dqtype);
306 	struct xfs_buf		*bp = NULL;
307 	enum xfs_blft		buftype = 0;
308 	int			i;
309 	int			error;
310 
	/* First try the read with the dquot verifier attached. */
311 	error = xfs_trans_read_buf(sc->mp, sc->tp, sc->mp->m_ddev_targp, daddr,
312 			qi->qi_dqchunklen, 0, &bp, &xfs_dquot_buf_ops);
313 	switch (error) {
314 	case -EFSBADCRC:
315 	case -EFSCORRUPTED:
316 		/* Failed verifier, retry read with no ops. */
317 		error = xfs_trans_read_buf(sc->mp, sc->tp,
318 				sc->mp->m_ddev_targp, daddr, qi->qi_dqchunklen,
319 				0, &bp, NULL);
320 		if (error)
321 			return error;
322 		break;
323 	case 0:
324 		dqblk = bp->b_addr;
325 		ddq = &dqblk[0].dd_diskdq;
326 
327 		/*
328 		 * If there's nothing that would impede a dqiterate, we're
329 		 * done.
330 		 */
		/*
		 * NOTE(review): the early-return also fires when slot 0's
		 * recorded type does NOT match @dqtype, which reads oddly —
		 * verify against dqiterate's actual requirements.
		 */
331 		if ((ddq->d_type & XFS_DQTYPE_REC_MASK) != dqtype ||
332 		    id == be32_to_cpu(ddq->d_id)) {
333 			xfs_trans_brelse(sc->tp, bp);
334 			return 0;
335 		}
336 		break;
337 	default:
338 		return error;
339 	}
340 
341 	/* Something's wrong with the block, fix the whole thing. */
342 	dqblk = bp->b_addr;
343 	bp->b_ops = &xfs_dquot_buf_ops;
344 	for (i = 0; i < qi->qi_dqperchunk; i++, dqblk++) {
345 		ddq = &dqblk->dd_diskdq;
346 
347 		trace_xrep_disk_dquot(sc->mp, dqtype, id + i);
348 
		/* Reinitialize the header fields of every dquot in the block. */
349 		ddq->d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
350 		ddq->d_version = XFS_DQUOT_VERSION;
351 		ddq->d_type = dqtype;
352 		ddq->d_id = cpu_to_be32(id + i);
353 
		/* id 0 never gets the bigtime flag — presumably because its
		 * timer fields hold default grace periods, not expiries;
		 * confirm. */
354 		if (xfs_has_bigtime(sc->mp) && ddq->d_id)
355 			ddq->d_type |= XFS_DQTYPE_BIGTIME;
356 
		/* Restart any missing grace timers for over-limit resources. */
357 		xrep_quota_fix_timer(sc->mp, ddq, ddq->d_blk_softlimit,
358 				ddq->d_bcount, &ddq->d_btimer,
359 				defq->blk.time);
360 
361 		xrep_quota_fix_timer(sc->mp, ddq, ddq->d_ino_softlimit,
362 				ddq->d_icount, &ddq->d_itimer,
363 				defq->ino.time);
364 
365 		xrep_quota_fix_timer(sc->mp, ddq, ddq->d_rtb_softlimit,
366 				ddq->d_rtbcount, &ddq->d_rtbtimer,
367 				defq->rtb.time);
368 
369 		/* We only support v5 filesystems so always set these. */
370 		uuid_copy(&dqblk->dd_uuid, &sc->mp->m_sb.sb_meta_uuid);
371 		xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk),
372 				 XFS_DQUOT_CRC_OFF);
373 		dqblk->dd_lsn = 0;
374 	}
	/* Pick the log-recovery buffer type matching the quota type. */
375 	switch (dqtype) {
376 	case XFS_DQTYPE_USER:
377 		buftype = XFS_BLFT_UDQUOT_BUF;
378 		break;
379 	case XFS_DQTYPE_GROUP:
380 		buftype = XFS_BLFT_GDQUOT_BUF;
381 		break;
382 	case XFS_DQTYPE_PROJ:
383 		buftype = XFS_BLFT_PDQUOT_BUF;
384 		break;
385 	}
386 	xfs_trans_buf_set_type(sc->tp, bp, buftype);
387 	xfs_trans_log_buf(sc->tp, bp, 0, BBTOB(bp->b_length) - 1);
388 	return xrep_roll_trans(sc);
389 }
390 
391 /*
392  * Repair a quota file's data fork.  The function returns with the inode
393  * joined.
394  */
395 STATIC int
396 xrep_quota_data_fork(
397 	struct xfs_scrub	*sc,
398 	xfs_dqtype_t		dqtype)
399 {
400 	struct xfs_bmbt_irec	irec = { 0 };
401 	struct xfs_iext_cursor	icur;
402 	struct xfs_quotainfo	*qi = sc->mp->m_quotainfo;
403 	struct xfs_ifork	*ifp;
404 	xfs_fileoff_t		max_dqid_off;
405 	xfs_fileoff_t		off;
406 	xfs_fsblock_t		fsbno;
407 	bool			truncate = false;
408 	bool			joined = false;
409 	int			error = 0;
410 
	/* First run the generic metadata-inode fork repairs. */
411 	error = xrep_metadata_inode_forks(sc);
412 	if (error)
413 		goto out;
414 
415 	/* Check for data fork problems that apply only to quota files. */
	/* File offset of the cluster holding the largest legal dquot id;
	 * mappings past this cannot back any dquot. */
416 	max_dqid_off = XFS_DQ_ID_MAX / qi->qi_dqperchunk;
417 	ifp = xfs_ifork_ptr(sc->ip, XFS_DATA_FORK);
418 	for_each_xfs_iext(ifp, &icur, &irec) {
		/* Quota files must not carry delalloc reservations. */
419 		if (isnullstartblock(irec.br_startblock)) {
420 			error = -EFSCORRUPTED;
421 			goto out;
422 		}
423 
424 		if (irec.br_startoff > max_dqid_off ||
425 		    irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
426 			truncate = true;
427 			break;
428 		}
429 
430 		/* Convert unwritten extents to real ones. */
431 		if (irec.br_state == XFS_EXT_UNWRITTEN) {
432 			struct xfs_bmbt_irec	nrec;
433 			int			nmap = 1;
434 
			/* Join the quota file lazily, only when we actually
			 * need to modify it. */
435 			if (!joined) {
436 				xfs_trans_ijoin(sc->tp, sc->ip, 0);
437 				joined = true;
438 			}
439 
440 			error = xfs_bmapi_write(sc->tp, sc->ip,
441 					irec.br_startoff, irec.br_blockcount,
442 					XFS_BMAPI_CONVERT, 0, &nrec, &nmap);
443 			if (error)
444 				goto out;
445 			ASSERT(nrec.br_startoff == irec.br_startoff);
446 			ASSERT(nrec.br_blockcount == irec.br_blockcount);
447 
448 			error = xfs_defer_finish(&sc->tp);
449 			if (error)
450 				goto out;
451 		}
452 	}
453 
	/* Keep the promise in the function comment: return with the inode
	 * joined to the transaction even if nothing needed converting. */
454 	if (!joined) {
455 		xfs_trans_ijoin(sc->tp, sc->ip, 0);
456 		joined = true;
457 	}
458 
459 	if (truncate) {
460 		/* Erase everything after the block containing the max dquot */
461 		error = xfs_bunmapi_range(&sc->tp, sc->ip, 0,
462 				max_dqid_off * sc->mp->m_sb.sb_blocksize,
463 				XFS_MAX_FILEOFF);
464 		if (error)
465 			goto out;
466 
467 		/* Remove all CoW reservations. */
468 		error = xfs_reflink_cancel_cow_blocks(sc->ip, &sc->tp, 0,
469 				XFS_MAX_FILEOFF, true);
470 		if (error)
471 			goto out;
472 		sc->ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
473 
474 		/*
475 		 * Always re-log the inode so that our permanent transaction
476 		 * can keep on rolling it forward in the log.
477 		 */
478 		xfs_trans_log_inode(sc->tp, sc->ip, XFS_ILOG_CORE);
479 	}
480 
481 	/* Now go fix anything that fails the verifiers. */
	/* Walk every dquot cluster inside every mapping; off tracks the file
	 * offset so we can compute the first dquot id of each cluster. */
482 	for_each_xfs_iext(ifp, &icur, &irec) {
483 		for (fsbno = irec.br_startblock, off = irec.br_startoff;
484 		     fsbno < irec.br_startblock + irec.br_blockcount;
485 		     fsbno += XFS_DQUOT_CLUSTER_SIZE_FSB,
486 				off += XFS_DQUOT_CLUSTER_SIZE_FSB) {
487 			error = xrep_quota_block(sc,
488 					XFS_FSB_TO_DADDR(sc->mp, fsbno),
489 					dqtype, off * qi->qi_dqperchunk);
490 			if (error)
491 				goto out;
492 		}
493 	}
494 
495 out:
496 	return error;
497 }
498 
499 /*
500  * Go fix anything in the quota items that we could have been mad about.  Now
501  * that we've checked the quota inode data fork we have to drop ILOCK_EXCL to
502  * use the regular dquot functions.
503  */
504 STATIC int
505 xrep_quota_problems(
506 	struct xfs_scrub	*sc,
507 	xfs_dqtype_t		dqtype)
508 {
509 	struct xchk_dqiter	cursor = { };
510 	struct xrep_quota_info	rqi = { .sc = sc };
511 	struct xfs_dquot	*dq;
512 	int			error;
513 
514 	xchk_dqiter_init(&cursor, sc, dqtype);
515 	while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
516 		error = xrep_quota_item(&rqi, dq);
517 		xfs_qm_dqput(dq);
518 		if (error)
519 			break;
520 	}
521 	if (error)
522 		return error;
523 
524 	/* Make a quotacheck happen. */
525 	if (rqi.need_quotacheck)
526 		xrep_force_quotacheck(sc, dqtype);
527 	return 0;
528 }
529 
530 /* Repair all of a quota type's items. */
531 int
532 xrep_quota(
533 	struct xfs_scrub	*sc)
534 {
535 	xfs_dqtype_t		dqtype;
536 	int			error;
537 
	/* Map this scrub invocation to the dquot type being repaired. */
538 	dqtype = xchk_quota_to_dqtype(sc);
539 
540 	/*
541 	 * Re-take the ILOCK so that we can fix any problems that we found
542 	 * with the data fork mappings, or with the dquot bufs themselves.
543 	 */
544 	if (!(sc->ilock_flags & XFS_ILOCK_EXCL))
545 		xchk_ilock(sc, XFS_ILOCK_EXCL);
	/* Phase 1: repair the quota file's data fork and dquot buffers. */
546 	error = xrep_quota_data_fork(sc, dqtype);
547 	if (error)
548 		return error;
549 
550 	/*
551 	 * Finish deferred items and roll the transaction to unjoin the quota
552 	 * inode from transaction so that we can unlock the quota inode; we
553 	 * play only with dquots from now on.
554 	 */
555 	error = xrep_defer_finish(sc);
556 	if (error)
557 		return error;
558 	error = xfs_trans_roll(&sc->tp);
559 	if (error)
560 		return error;
561 	xchk_iunlock(sc, sc->ilock_flags);
562 
563 	/* Fix anything the dquot verifiers don't complain about. */
	/* Phase 2: repair the incore dquot records themselves. */
564 	error = xrep_quota_problems(sc, dqtype);
565 	if (error)
566 		return error;
567 
568 	return xrep_trans_commit(sc);
569 }
570