xref: /linux/fs/xfs/libxfs/xfs_refcount_btree.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_rmap.h"

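/* Create a duplicate cursor attached to the same transaction, AGF buffer, and AG. */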
static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_private.a.dfops);
}

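/* Point the AGF at a new refcount btree root block and adjust the recorded tree height. */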
STATIC void
xfs_refcountbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_refcount_root = ptr->s;
	be32_add_cpu(&agf->agf_refcount_level, inc);
	pag->pagf_refcount_level += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp,
			XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}

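/*
 * Allocate a block for the refcount btree, charging it to the per-AG
 * metadata reservation and recording the usage in the AGF.
 */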
STATIC int
xfs_refcountbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_alloc_arg	args;		/* block allocation args */
	int			error;		/* error return value */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
			xfs_refc_block(args.mp));
	args.firstblock = args.fsbno;
	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_REFC);
	args.minlen = args.maxlen = args.prod = 1;
	args.resv = XFS_AG_RESV_METADATA;

	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_error;
	trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
			args.agbno, 1);
	if (args.fsbno == NULLFSBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}
	ASSERT(args.agno == cur->bc_private.a.agno);
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(args.agbno);
	be32_add_cpu(&agf->agf_refcount_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out_error:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

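/*
 * Free a refcount btree block back to the per-AG metadata reservation and
 * update the block count in the AGF.
 */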
STATIC int
xfs_refcountbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
	struct xfs_owner_info	oinfo;

	trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
			XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
	be32_add_cpu(&agf->agf_refcount_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
	return xfs_free_extent(cur->bc_tp, fsbno, 1, &oinfo,
			XFS_AG_RESV_METADATA);
}

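/* Minimum number of records in a refcount btree block, by level. */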
STATIC int
xfs_refcountbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mnr[level != 0];
}

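/* Maximum number of records in a refcount btree block, by level. */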
STATIC int
xfs_refcountbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mxr[level != 0];
}

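/* Build a low key (starting block) from a refcount record. */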
STATIC void
xfs_refcountbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->refc.rc_startblock = rec->refc.rc_startblock;
}

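/* Build a high key from the last block covered by a refcount record. */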
STATIC void
xfs_refcountbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->refc.rc_startblock);
	x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
	key->refc.rc_startblock = cpu_to_be32(x);
}

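/* Fill out an on-disk refcount record from the cursor's in-core record. */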
STATIC void
xfs_refcountbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->refc.rc_startblock = cpu_to_be32(cur->bc_rec.rc.rc_startblock);
	rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
	rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
}

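/* Point the caller at the refcount btree root block recorded in the AGF. */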
STATIC void
xfs_refcountbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_refcount_root != 0);

	ptr->s = agf->agf_refcount_root;
}

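/* Return the difference between the key's startblock and the cursor record's. */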
STATIC __int64_t
xfs_refcountbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	struct xfs_refcount_irec	*rec = &cur->bc_rec.rc;
	struct xfs_refcount_key		*kp = &key->refc;

	return (__int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock;
}

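/* Return the difference between two keys' starting blocks. */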
STATIC __int64_t
xfs_refcountbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (__int64_t)be32_to_cpu(k1->refc.rc_startblock) -
			  be32_to_cpu(k2->refc.rc_startblock);
}

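/*
 * Verify a refcount btree block: check the magic, the reflink feature bit,
 * the v5 block header, and that the level fits within the tree height
 * recorded in the AGF (or the global maximum if the AGF is not read in).
 */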
STATIC bool
xfs_refcountbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	unsigned int		level;

	if (block->bb_magic != cpu_to_be32(XFS_REFC_CRC_MAGIC))
		return false;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return false;
	if (!xfs_btree_sblock_v5hdr_verify(bp))
		return false;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_refcount_level)
			return false;
	} else if (level >= mp->m_refc_maxlevels)
		return false;

	return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}

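/* Read verifier: check the CRC first, then the block contents. */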
STATIC void
xfs_refcountbt_read_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_buf_ioerror(bp, -EFSBADCRC);
	else if (!xfs_refcountbt_verify(bp))
		xfs_buf_ioerror(bp, -EFSCORRUPTED);

	if (bp->b_error) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp);
	}
}

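/* Write verifier: check the block contents, then stamp a fresh CRC. */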
STATIC void
xfs_refcountbt_write_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_refcountbt_verify(bp)) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
	.name			= "xfs_refcountbt",
	.verify_read		= xfs_refcountbt_read_verify,
	.verify_write		= xfs_refcountbt_write_verify,
};

#if defined(DEBUG) || defined(XFS_WARN)
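/* Check that two keys are stored in strictly ascending order. */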
STATIC int
xfs_refcountbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->refc.rc_startblock) <
	       be32_to_cpu(k2->refc.rc_startblock);
}

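/* Check that two records are in ascending order and do not overlap. */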
STATIC int
xfs_refcountbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return  be32_to_cpu(r1->refc.rc_startblock) +
		be32_to_cpu(r1->refc.rc_blockcount) <=
		be32_to_cpu(r2->refc.rc_startblock);
}
#endif

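/* Refcount btree operations vector, plugged into the generic btree code. */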
static const struct xfs_btree_ops xfs_refcountbt_ops = {
	.rec_len		= sizeof(struct xfs_refcount_rec),
	.key_len		= sizeof(struct xfs_refcount_key),

	.dup_cursor		= xfs_refcountbt_dup_cursor,
	.set_root		= xfs_refcountbt_set_root,
	.alloc_block		= xfs_refcountbt_alloc_block,
	.free_block		= xfs_refcountbt_free_block,
	.get_minrecs		= xfs_refcountbt_get_minrecs,
	.get_maxrecs		= xfs_refcountbt_get_maxrecs,
	.init_key_from_rec	= xfs_refcountbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_refcountbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_refcountbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_refcountbt_init_ptr_from_cur,
	.key_diff		= xfs_refcountbt_key_diff,
	.buf_ops		= &xfs_refcountbt_buf_ops,
	.diff_two_keys		= xfs_refcountbt_diff_two_keys,
#if defined(DEBUG) || defined(XFS_WARN)
	.keys_inorder		= xfs_refcountbt_keys_inorder,
	.recs_inorder		= xfs_refcountbt_recs_inorder,
#endif
};

/*
 * Allocate a new refcount btree cursor.
 */
struct xfs_btree_cur *
xfs_refcountbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	struct xfs_defer_ops	*dfops)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(agno != NULLAGNUMBER);
	ASSERT(agno < mp->m_sb.sb_agcount);
	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = XFS_BTNUM_REFC;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_refcountbt_ops;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);

	cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;
	cur->bc_private.a.dfops = dfops;
	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.a.priv.refc.nr_ops = 0;
	cur->bc_private.a.priv.refc.shape_changes = 0;

	return cur;
}
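
/*
 * Illustrative usage sketch (not part of this file): callers normally read
 * the AGF buffer first and then build the cursor, e.g.:
 *
 *	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
 *	if (error)
 *		return error;
 *	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, dfops);
 *	...	(lookups and updates via the generic xfs_btree code)
 *	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 */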

/*
 * Calculate the number of records in a refcount btree block.
 */
int
xfs_refcountbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	bool			leaf)
{
	blocklen -= XFS_REFCOUNT_BLOCK_LEN;

	if (leaf)
		return blocklen / sizeof(struct xfs_refcount_rec);
	return blocklen / (sizeof(struct xfs_refcount_key) +
			   sizeof(xfs_refcount_ptr_t));
}

/* Compute the maximum height of a refcount btree. */
void
xfs_refcountbt_compute_maxlevels(
	struct xfs_mount		*mp)
{
	mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(mp,
			mp->m_refc_mnr, mp->m_sb.sb_agblocks);
}

/* Calculate the refcount btree size for some records. */
xfs_extlen_t
xfs_refcountbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp, mp->m_refc_mnr, len);
}

/*
 * Calculate the maximum refcount btree size.
 */
xfs_extlen_t
xfs_refcountbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_refc_mxr[0] == 0)
		return 0;

	return xfs_refcountbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_refcountbt_calc_reserves(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return 0;

	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
	if (error)
		return error;

	agf = XFS_BUF_TO_AGF(agbp);
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_refcount_blocks);
	xfs_buf_relse(agbp);

	*ask += xfs_refcountbt_max_size(mp, agblocks);
	*used += tree_len;

	return error;
}
456