// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_health.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_ag.h"

static struct kmem_cache	*xfs_allocbt_cur_cache;

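/* Create a duplicate of an existing bnobt cursor in the same AG. */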
STATIC struct xfs_btree_cur *
xfs_bnobt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_bnobt_init_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ag.agbp,
			cur->bc_ag.pag);
}

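/* Create a duplicate of an existing cntbt cursor in the same AG. */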
STATIC struct xfs_btree_cur *
xfs_cntbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_cntbt_init_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ag.agbp,
			cur->bc_ag.pag);
}

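/*
 * Set the root block of the btree in the AGF and adjust the recorded tree
 * height, both on disk and in the in-core perag.
 */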
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;

	ASSERT(ptr->s != 0);

	if (xfs_btree_is_bno(cur->bc_ops)) {
		agf->agf_bno_root = ptr->s;
		be32_add_cpu(&agf->agf_bno_level, inc);
		cur->bc_ag.pag->pagf_bno_level += inc;
	} else {
		agf->agf_cnt_root = ptr->s;
		be32_add_cpu(&agf->agf_cnt_level, inc);
		cur->bc_ag.pag->pagf_cnt_level += inc;
	}

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

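/*
 * Allocate a block for the btree from the AG free list.  Returns success with
 * *stat == 0 if the free list is empty.
 */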
STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_ag.pag, cur->bc_tp,
			cur->bc_ag.agbp, &bno, 1);
	if (error)
		return error;

	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	atomic64_inc(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.pag, bno, 1, false);

	new->s = cpu_to_be32(bno);

	*stat = 1;
	return 0;
}

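/*
 * Free a btree block back to the AG free list and mark the extent busy so
 * that it is not reused before this transaction commits.
 */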
STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
	error = xfs_alloc_put_freelist(cur->bc_ag.pag, cur->bc_tp, agbp, NULL,
			bno, 1);
	if (error)
		return error;

	atomic64_dec(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_insert(cur->bc_tp, agbp->b_pag, bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	return 0;
}

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur		*cur,
	const struct xfs_btree_block	*block,
	const union xfs_btree_rec	*rec,
	int				ptr,
	int				reason)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	ASSERT(!xfs_btree_is_bno(cur->bc_ops));

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	pag = cur->bc_ag.agbp->b_pag;
	pag->pagf_longest = be32_to_cpu(len);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
}

STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}

STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

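/* Construct a lookup key from an on-disk free space record. */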
STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

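/*
 * The bnobt high key is the last block covered by the record, i.e.
 * startblock + blockcount - 1.
 */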
STATIC void
xfs_bnobt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	__u32				x;

	x = be32_to_cpu(rec->alloc.ar_startblock);
	x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
	key->alloc.ar_startblock = cpu_to_be32(x);
	key->alloc.ar_blockcount = 0;
}

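/* For the cntbt, the record length by itself acts as the high key. */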
STATIC void
xfs_cntbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
	key->alloc.ar_startblock = 0;
}

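/* Fill an on-disk record from the in-core record cached in the cursor. */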
STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}

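/* Read the root pointer for this btree from the AGF. */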
STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

	if (xfs_btree_is_bno(cur->bc_ops))
		ptr->s = agf->agf_bno_root;
	else
		ptr->s = agf->agf_cnt_root;
}

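/*
 * Compare a bnobt key against the in-core record in the cursor; the bnobt is
 * ordered by startblock only.
 */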
STATIC int64_t
xfs_bnobt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_alloc_rec_incore	*rec = &cur->bc_rec.a;
	const struct xfs_alloc_rec	*kp = &key->alloc;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

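/*
 * Compare a cntbt key against the in-core record in the cursor; the cntbt is
 * ordered by blockcount, with startblock breaking ties.
 */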
STATIC int64_t
xfs_cntbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_alloc_rec_incore	*rec = &cur->bc_rec.a;
	const struct xfs_alloc_rec	*kp = &key->alloc;
	int64_t				diff;

	diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

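/* Compare two bnobt keys by startblock. */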
STATIC int64_t
xfs_bnobt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->alloc.ar_startblock);

	return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
			be32_to_cpu(k2->alloc.ar_startblock);
}

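/* Compare two cntbt keys by blockcount, then by startblock. */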
STATIC int64_t
xfs_cntbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	int64_t				diff;

	ASSERT(!mask || (mask->alloc.ar_blockcount &&
			 mask->alloc.ar_startblock));

	diff =  be32_to_cpu(k1->alloc.ar_blockcount) -
		be32_to_cpu(k2->alloc.ar_blockcount);
	if (diff)
		return diff;

	return  be32_to_cpu(k1->alloc.ar_startblock) -
		be32_to_cpu(k2->alloc.ar_startblock);
}

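/*
 * Structural verifier shared by both free space btrees.  Check the block
 * magic, the v5 header if the filesystem has CRCs enabled, and the tree level
 * against the limits recorded in the perag (or against the mount-wide maximum
 * if no initialised perag is available).
 */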
static xfs_failaddr_t
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_has_crc(mp)) {
		fa = xfs_btree_agblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/*
	 * The perag may not be attached during grow operations or fully
	 * initialized from the AGF during log recovery. Therefore we can only
	 * check against maximum tree depth from those contexts.
	 *
	 * Otherwise check against the per-tree limit. Peek at one of the
	 * verifier magic values to determine the type of tree we're verifying
	 * against.
	 */
	level = be16_to_cpu(block->bb_level);
	if (pag && xfs_perag_initialised_agf(pag)) {
		unsigned int	maxlevel, repair_maxlevel = 0;

		/*
		 * Online repair could be rewriting the free space btrees, so
		 * we'll validate against the larger of either tree while this
		 * is going on.
		 */
		if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC)) {
			maxlevel = pag->pagf_cnt_level;
#ifdef CONFIG_XFS_ONLINE_REPAIR
			repair_maxlevel = pag->pagf_repair_cnt_level;
#endif
		} else {
			maxlevel = pag->pagf_bno_level;
#ifdef CONFIG_XFS_ONLINE_REPAIR
			repair_maxlevel = pag->pagf_repair_bno_level;
#endif
		}

		if (level >= max(maxlevel, repair_maxlevel))
			return __this_address;
	} else if (level >= mp->m_alloc_maxlevels)
		return __this_address;

	return xfs_btree_agblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}

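/* Read verifier: check the CRC first, then the block structure. */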
static void
xfs_allocbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_agblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_allocbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

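/* Write verifier: check the block structure, then stamp the CRC. */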
static void
xfs_allocbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_allocbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_agblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bnobt_buf_ops = {
	.name = "xfs_bnobt",
	.magic = { cpu_to_be32(XFS_ABTB_MAGIC),
		   cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

const struct xfs_buf_ops xfs_cntbt_buf_ops = {
	.name = "xfs_cntbt",
	.magic = { cpu_to_be32(XFS_ABTC_MAGIC),
		   cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

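/*
 * Debug-time ordering checks used by the generic btree code to assert that
 * keys and records within a block sort correctly for each tree.
 */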
STATIC int
xfs_bnobt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_startblock) <
	       be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int
xfs_bnobt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_startblock) +
		be32_to_cpu(r1->alloc.ar_blockcount) <=
		be32_to_cpu(r2->alloc.ar_startblock);
}

STATIC int
xfs_cntbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_blockcount) <
		be32_to_cpu(k2->alloc.ar_blockcount) ||
		(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
		 be32_to_cpu(k1->alloc.ar_startblock) <
		 be32_to_cpu(k2->alloc.ar_startblock));
}

STATIC int
xfs_cntbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_blockcount) <
		be32_to_cpu(r2->alloc.ar_blockcount) ||
		(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
		 be32_to_cpu(r1->alloc.ar_startblock) <
		 be32_to_cpu(r2->alloc.ar_startblock));
}

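/*
 * Report whether two bnobt keys cover a contiguous span of the keyspace, for
 * the generic btree range query helpers.
 */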
STATIC enum xbtree_key_contig
xfs_allocbt_keys_contiguous(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->alloc.ar_startblock);

	return xbtree_key_contig(be32_to_cpu(key1->alloc.ar_startblock),
				 be32_to_cpu(key2->alloc.ar_startblock));
}

const struct xfs_btree_ops xfs_bnobt_ops = {
	.name			= "bno",
	.type			= XFS_BTREE_TYPE_AG,

	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),
	.ptr_len		= XFS_BTREE_SHORT_PTR_LEN,

	.lru_refs		= XFS_ALLOC_BTREE_REF,
	.statoff		= XFS_STATS_CALC_INDEX(xs_abtb_2),
	.sick_mask		= XFS_SICK_AG_BNOBT,

	.dup_cursor		= xfs_bnobt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bnobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_bnobt_key_diff,
	.buf_ops		= &xfs_bnobt_buf_ops,
	.diff_two_keys		= xfs_bnobt_diff_two_keys,
	.keys_inorder		= xfs_bnobt_keys_inorder,
	.recs_inorder		= xfs_bnobt_recs_inorder,
	.keys_contiguous	= xfs_allocbt_keys_contiguous,
};

const struct xfs_btree_ops xfs_cntbt_ops = {
	.name			= "cnt",
	.type			= XFS_BTREE_TYPE_AG,
	.geom_flags		= XFS_BTGEO_LASTREC_UPDATE,

	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),
	.ptr_len		= XFS_BTREE_SHORT_PTR_LEN,

	.lru_refs		= XFS_ALLOC_BTREE_REF,
	.statoff		= XFS_STATS_CALC_INDEX(xs_abtc_2),
	.sick_mask		= XFS_SICK_AG_CNTBT,

	.dup_cursor		= xfs_cntbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_cntbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_cntbt_key_diff,
	.buf_ops		= &xfs_cntbt_buf_ops,
	.diff_two_keys		= xfs_cntbt_diff_two_keys,
	.keys_inorder		= xfs_cntbt_keys_inorder,
	.recs_inorder		= xfs_cntbt_recs_inorder,
	.keys_contiguous	= NULL, /* not needed right now */
};

/*
 * Allocate a new bnobt cursor.
 *
 * For staging cursors tp and agbp are NULL.
 */
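/*
 * Illustrative call sequence only (not taken from any particular caller),
 * assuming the caller already holds a reference to the perag:
 *
 *	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
 *	if (error)
 *		return error;
 *	cur = xfs_bnobt_init_cursor(mp, tp, agbp, pag);
 *	error = xfs_alloc_lookup_ge(cur, bno, len, &stat);
 *	...
 *	xfs_btree_del_cursor(cur, error);
 */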
struct xfs_btree_cur *
xfs_bnobt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_btree_alloc_cursor(mp, tp, &xfs_bnobt_ops,
			mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
	cur->bc_ag.pag = xfs_perag_hold(pag);
	cur->bc_ag.agbp = agbp;
	if (agbp) {
		struct xfs_agf		*agf = agbp->b_addr;

		cur->bc_nlevels = be32_to_cpu(agf->agf_bno_level);
	}
	return cur;
}

/*
 * Allocate a new cntbt cursor.
 *
 * For staging cursors tp and agbp are NULL.
 */
struct xfs_btree_cur *
xfs_cntbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_btree_alloc_cursor(mp, tp, &xfs_cntbt_ops,
			mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
	cur->bc_ag.pag = xfs_perag_hold(pag);
	cur->bc_ag.agbp = agbp;
	if (agbp) {
		struct xfs_agf		*agf = agbp->b_addr;

		cur->bc_nlevels = be32_to_cpu(agf->agf_cnt_level);
	}
	return cur;
}

/*
 * Install a new free space btree root.  Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_allocbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	if (xfs_btree_is_bno(cur->bc_ops)) {
		agf->agf_bno_root = cpu_to_be32(afake->af_root);
		agf->agf_bno_level = cpu_to_be32(afake->af_levels);
	} else {
		agf->agf_cnt_root = cpu_to_be32(afake->af_root);
		agf->agf_cnt_level = cpu_to_be32(afake->af_levels);
	}
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);

	xfs_btree_commit_afakeroot(cur, tp, agbp);
}

/* Calculate number of records in an alloc btree block. */
static inline unsigned int
xfs_allocbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}

/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);
	return xfs_allocbt_block_maxrecs(blocklen, leaf);
}

/* Free space btrees are at their largest when every other block is free. */
#define XFS_MAX_FREESP_RECORDS	((XFS_MAX_AG_BLOCKS + 1) / 2)

/* Compute the max possible height for free space btrees. */
unsigned int
xfs_allocbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
		       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

	minrecs[0] = xfs_allocbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_allocbt_block_maxrecs(blocklen, false) / 2;

	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_FREESP_RECORDS);
}

/* Calculate the freespace btree size for some records. */
xfs_extlen_t
xfs_allocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}

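/* Set up the slab cache used for free space btree cursors. */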
int __init
xfs_allocbt_init_cur_cache(void)
{
	xfs_allocbt_cur_cache = kmem_cache_create("xfs_bnobt_cur",
			xfs_btree_cur_sizeof(xfs_allocbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_allocbt_cur_cache)
		return -ENOMEM;
	return 0;
}

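/* Tear down the free space btree cursor cache. */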
void
xfs_allocbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_allocbt_cur_cache);
	xfs_allocbt_cur_cache = NULL;
}