// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_refcount_btree.h"
#include "xfs_refcount.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"

static struct kmem_cache	*xfs_refcountbt_cur_cache;

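/* Duplicate the refcount btree cursor, sharing the AGF buffer and perag. */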
static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.pag);
}

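/*
 * Point the AGF at a new refcount btree root block and adjust the on-disk
 * and in-core tree levels by @inc.
 */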
STATIC void
xfs_refcountbt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_perag	*pag = agbp->b_pag;

	ASSERT(ptr->s != 0);

	agf->agf_refcount_root = ptr->s;
	be32_add_cpu(&agf->agf_refcount_level, inc);
	pag->pagf_refcount_level += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp,
			XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}

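/*
 * Allocate a new refcount btree block out of the AG's metadata reservation
 * and bump the AGF count of blocks used by the refcount btree.
 */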
STATIC int
xfs_refcountbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_alloc_arg	args;		/* block allocation args */
	int			error;		/* error return value */

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.pag = cur->bc_ag.pag;
	args.oinfo = XFS_RMAP_OINFO_REFC;
	args.minlen = args.maxlen = args.prod = 1;
	args.resv = XFS_AG_RESV_METADATA;

	error = xfs_alloc_vextent_near_bno(&args,
			XFS_AGB_TO_FSB(args.mp, args.pag->pag_agno,
					xfs_refc_block(args.mp)));
	if (error)
		goto out_error;
	trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
			args.agbno, 1);
	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.agno == cur->bc_ag.pag->pag_agno);
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(args.agbno);
	be32_add_cpu(&agf->agf_refcount_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);

	*stat = 1;
	return 0;

out_error:
	return error;
}

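/*
 * Free a refcount btree block back to the AG metadata reservation, deferring
 * the actual extent free to the deferred ops machinery.
 */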
STATIC int
xfs_refcountbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));

	trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
			XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
	be32_add_cpu(&agf->agf_refcount_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
	return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
			&XFS_RMAP_OINFO_REFC, XFS_AG_RESV_METADATA, false);
}

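/* Minimum records per refcount btree block, for leaf and node blocks. */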
STATIC int
xfs_refcountbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mnr[level != 0];
}

STATIC int
xfs_refcountbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mxr[level != 0];
}

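/* Initialize a low key from the start block of a refcount record. */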
STATIC void
xfs_refcountbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->refc.rc_startblock = rec->refc.rc_startblock;
}

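/*
 * Initialize a high key from the last block covered by a refcount record,
 * i.e. rc_startblock + rc_blockcount - 1.
 */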
STATIC void
xfs_refcountbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	__u32				x;

	x = be32_to_cpu(rec->refc.rc_startblock);
	x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
	key->refc.rc_startblock = cpu_to_be32(x);
}

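/* Encode the cursor's in-core refcount record into on-disk format. */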
STATIC void
xfs_refcountbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	const struct xfs_refcount_irec *irec = &cur->bc_rec.rc;
	uint32_t		start;

	start = xfs_refcount_encode_startblock(irec->rc_startblock,
			irec->rc_domain);
	rec->refc.rc_startblock = cpu_to_be32(start);
	rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
	rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
}

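/* Load the root block pointer of the refcount btree from the AGF. */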
STATIC void
xfs_refcountbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_refcount_root;
}

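/*
 * Compute the difference between the given key and the cursor's in-core
 * record, comparing the domain-encoded start blocks.
 */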
STATIC int64_t
xfs_refcountbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	const struct xfs_refcount_key	*kp = &key->refc;
	const struct xfs_refcount_irec	*irec = &cur->bc_rec.rc;
	uint32_t			start;

	start = xfs_refcount_encode_startblock(irec->rc_startblock,
			irec->rc_domain);
	return (int64_t)be32_to_cpu(kp->rc_startblock) - start;
}

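/* Compute the start block difference between two refcount keys. */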
STATIC int64_t
xfs_refcountbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->refc.rc_startblock);

	return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
			be32_to_cpu(k2->refc.rc_startblock);
}

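/*
 * Verify an on-disk refcount btree block: magic, v5 block header, tree
 * level, and the generic short-pointer btree block checks.
 */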
STATIC xfs_failaddr_t
xfs_refcountbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (!xfs_has_reflink(mp))
		return __this_address;
	fa = xfs_btree_sblock_v5hdr_verify(bp);
	if (fa)
		return fa;

	level = be16_to_cpu(block->bb_level);
	if (pag && xfs_perag_initialised_agf(pag)) {
		unsigned int	maxlevel = pag->pagf_refcount_level;

#ifdef CONFIG_XFS_ONLINE_REPAIR
		/*
		 * Online repair could be rewriting the refcount btree, so
		 * we'll validate against the larger of either tree while this
		 * is going on.
		 */
		maxlevel = max_t(unsigned int, maxlevel,
				pag->pagf_repair_refcount_level);
#endif
		if (level >= maxlevel)
			return __this_address;
	} else if (level >= mp->m_refc_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}

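/* Check the CRC and structure of a refcount btree block after it is read. */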
STATIC void
xfs_refcountbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_refcountbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

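/* Revalidate a refcount btree block and compute its CRC before writeout. */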
STATIC void
xfs_refcountbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_refcountbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
	.name			= "xfs_refcountbt",
	.magic			= { 0, cpu_to_be32(XFS_REFC_CRC_MAGIC) },
	.verify_read		= xfs_refcountbt_read_verify,
	.verify_write		= xfs_refcountbt_write_verify,
	.verify_struct		= xfs_refcountbt_verify,
};

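/* Return true if key1's start block sorts strictly before key2's. */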
STATIC int
xfs_refcountbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->refc.rc_startblock) <
	       be32_to_cpu(k2->refc.rc_startblock);
}

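/* Return true if record r1 ends at or before record r2 begins (no overlap). */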
STATIC int
xfs_refcountbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return  be32_to_cpu(r1->refc.rc_startblock) +
		be32_to_cpu(r1->refc.rc_blockcount) <=
		be32_to_cpu(r2->refc.rc_startblock);
}

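/* Decide whether the start blocks of two refcount keys are contiguous. */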
STATIC enum xbtree_key_contig
xfs_refcountbt_keys_contiguous(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->refc.rc_startblock);

	return xbtree_key_contig(be32_to_cpu(key1->refc.rc_startblock),
				 be32_to_cpu(key2->refc.rc_startblock));
}

static const struct xfs_btree_ops xfs_refcountbt_ops = {
	.rec_len		= sizeof(struct xfs_refcount_rec),
	.key_len		= sizeof(struct xfs_refcount_key),

	.dup_cursor		= xfs_refcountbt_dup_cursor,
	.set_root		= xfs_refcountbt_set_root,
	.alloc_block		= xfs_refcountbt_alloc_block,
	.free_block		= xfs_refcountbt_free_block,
	.get_minrecs		= xfs_refcountbt_get_minrecs,
	.get_maxrecs		= xfs_refcountbt_get_maxrecs,
	.init_key_from_rec	= xfs_refcountbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_refcountbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_refcountbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_refcountbt_init_ptr_from_cur,
	.key_diff		= xfs_refcountbt_key_diff,
	.buf_ops		= &xfs_refcountbt_buf_ops,
	.diff_two_keys		= xfs_refcountbt_diff_two_keys,
	.keys_inorder		= xfs_refcountbt_keys_inorder,
	.recs_inorder		= xfs_refcountbt_recs_inorder,
	.keys_contiguous	= xfs_refcountbt_keys_contiguous,
};

/*
 * Initialize a new refcount btree cursor.
 */
static struct xfs_btree_cur *
xfs_refcountbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);

	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
			mp->m_refc_maxlevels, xfs_refcountbt_cur_cache);
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);

	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_ag.pag = xfs_perag_hold(pag);
	cur->bc_ag.refc.nr_ops = 0;
	cur->bc_ag.refc.shape_changes = 0;
	cur->bc_ops = &xfs_refcountbt_ops;
	return cur;
}

/* Create a btree cursor. */
struct xfs_btree_cur *
xfs_refcountbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_refcountbt_init_common(mp, tp, pag);
	cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
	cur->bc_ag.agbp = agbp;
	return cur;
}

/* Create a btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_refcountbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_refcountbt_init_common(mp, NULL, pag);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Swap in the new btree root.  Once we pass this point the newly rebuilt btree
 * is in place and we have to kill off all the old btree blocks.
 */
void
xfs_refcountbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_refcount_root = cpu_to_be32(afake->af_root);
	agf->agf_refcount_level = cpu_to_be32(afake->af_levels);
	agf->agf_refcount_blocks = cpu_to_be32(afake->af_blocks);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_REFCOUNT_BLOCKS |
				    XFS_AGF_REFCOUNT_ROOT |
				    XFS_AGF_REFCOUNT_LEVEL);
	xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_refcountbt_ops);
}

/* Calculate number of records in a refcount btree block. */
static inline unsigned int
xfs_refcountbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(struct xfs_refcount_rec);
	return blocklen / (sizeof(struct xfs_refcount_key) +
			   sizeof(xfs_refcount_ptr_t));
}

/*
 * Calculate the number of records in a refcount btree block, after deducting
 * the space taken by the block header.
 */
int
xfs_refcountbt_maxrecs(
	int			blocklen,
	bool			leaf)
{
	blocklen -= XFS_REFCOUNT_BLOCK_LEN;
	return xfs_refcountbt_block_maxrecs(blocklen, leaf);
}

/* Compute the max possible height of the maximally sized refcount btree. */
unsigned int
xfs_refcountbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN;

	minrecs[0] = xfs_refcountbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_refcountbt_block_maxrecs(blocklen, false) / 2;

	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_CRC_AG_BLOCKS);
}

/* Compute the maximum height of a refcount btree. */
void
xfs_refcountbt_compute_maxlevels(
	struct xfs_mount		*mp)
{
	if (!xfs_has_reflink(mp)) {
		mp->m_refc_maxlevels = 0;
		return;
	}

	mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(
			mp->m_refc_mnr, mp->m_sb.sb_agblocks);
	ASSERT(mp->m_refc_maxlevels <= xfs_refcountbt_maxlevels_ondisk());
}

/* Calculate the refcount btree size for some records. */
xfs_extlen_t
xfs_refcountbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_refc_mnr, len);
}

/*
 * Calculate the maximum refcount btree size.
 */
xfs_extlen_t
xfs_refcountbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_refc_mxr[0] == 0)
		return 0;

	return xfs_refcountbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_refcountbt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_has_reflink(mp))
		return 0;

	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
	if (error)
		return error;

	agf = agbp->b_addr;
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_refcount_blocks);
	xfs_trans_brelse(tp, agbp);

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion.  We therefore can pretend the space isn't there.
	 */
	if (xfs_ag_contains_log(mp, pag->pag_agno))
		agblocks -= mp->m_sb.sb_logblocks;

	*ask += xfs_refcountbt_max_size(mp, agblocks);
	*used += tree_len;

	return error;
}

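/* Set up the slab cache for refcount btree cursors. */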
int __init
xfs_refcountbt_init_cur_cache(void)
{
	xfs_refcountbt_cur_cache = kmem_cache_create("xfs_refcbt_cur",
			xfs_btree_cur_sizeof(xfs_refcountbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_refcountbt_cur_cache)
		return -ENOMEM;
	return 0;
}

void
xfs_refcountbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_refcountbt_cur_cache);
	xfs_refcountbt_cur_cache = NULL;
}