// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2018-2024 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_metafile.h"
#include "xfs_rmap.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_rtgroup.h"
#include "xfs_bmap.h"
#include "xfs_health.h"
#include "xfs_buf_mem.h"
#include "xfs_btree_mem.h"

static struct kmem_cache	*xfs_rtrmapbt_cur_cache;

/*
 * Realtime Reverse Map btree.
 *
 * This is a btree used to track the owner(s) of a given extent in the realtime
 * device. See the comments in xfs_rmap_btree.c for more information.
 *
 * This tree is basically the same as the regular rmap btree except that it
 * is rooted in an inode and does not live in free space.
 */
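
/*
 * Each record maps a range of realtime blocks (startblock, blockcount) to an
 * owner, a file offset, and a little state (attr fork, bmbt block, unwritten).
 * Because this is an overlapping btree, every node pointer carries both a low
 * and a high key so that lookups can find all owners of a given block range.
 */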

static struct xfs_btree_cur *
xfs_rtrmapbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_rtrmapbt_init_cursor(cur->bc_tp, to_rtg(cur->bc_group));
}

STATIC int
xfs_rtrmapbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp = xfs_btree_ifork_ptr(cur);

		return xfs_rtrmapbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
				level == 0) / 2;
	}

	return cur->bc_mp->m_rtrmap_mnr[level != 0];
}

STATIC int
xfs_rtrmapbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp = xfs_btree_ifork_ptr(cur);

		return xfs_rtrmapbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
				level == 0);
	}

	return cur->bc_mp->m_rtrmap_mxr[level != 0];
}

/* Calculate number of records in the ondisk realtime rmap btree inode root. */
unsigned int
xfs_rtrmapbt_droot_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	blocklen -= sizeof(struct xfs_rtrmap_root);

	if (leaf)
		return blocklen / sizeof(struct xfs_rmap_rec);
	return blocklen / (2 * sizeof(struct xfs_rmap_key) +
			sizeof(xfs_rtrmap_ptr_t));
}

/*
 * Get the maximum records we could store in the on-disk format.
 *
 * For non-root nodes this is equivalent to xfs_rtrmapbt_get_maxrecs, but
 * for the root node this checks the available space in the dinode fork
 * so that we can resize the in-memory buffer to match it. After a
 * resize to the maximum size this function returns the same value
 * as xfs_rtrmapbt_get_maxrecs for the root node, too.
 */
STATIC int
xfs_rtrmapbt_get_dmaxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level != cur->bc_nlevels - 1)
		return cur->bc_mp->m_rtrmap_mxr[level != 0];
	return xfs_rtrmapbt_droot_maxrecs(cur->bc_ino.forksize, level == 0);
}

/*
 * Convert the ondisk record's offset field into the ondisk key's offset field.
 * Fork and bmbt are significant parts of the rmap record key, but written
 * status is merely a record attribute.
 */
static inline __be64 ondisk_rec_offset_to_key(const union xfs_btree_rec *rec)
{
	return rec->rmap.rm_offset & ~cpu_to_be64(XFS_RMAP_OFF_UNWRITTEN);
}

STATIC void
xfs_rtrmapbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = ondisk_rec_offset_to_key(rec);
}

STATIC void
xfs_rtrmapbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	uint64_t			off;
	int				adj;

	adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;

	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	be32_add_cpu(&key->rmap.rm_startblock, adj);
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = ondisk_rec_offset_to_key(rec);
	if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
	    XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
		return;
	off = be64_to_cpu(key->rmap.rm_offset);
	off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
	key->rmap.rm_offset = cpu_to_be64(off);
}

STATIC void
xfs_rtrmapbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
	rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
	rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
	rec->rmap.rm_offset = cpu_to_be64(
			xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
}

STATIC void
xfs_rtrmapbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	ptr->l = 0;
}

/*
 * Mask the appropriate parts of the ondisk key field for a key comparison.
 * Fork and bmbt are significant parts of the rmap record key, but written
 * status is merely a record attribute.
 */
static inline uint64_t offset_keymask(uint64_t offset)
{
	return offset & ~XFS_RMAP_OFF_UNWRITTEN;
}
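
/*
 * Note: the attr fork, bmbt block, and unwritten flags all live in the upper
 * bits of the 64-bit rm_offset field, so masking off only
 * XFS_RMAP_OFF_UNWRITTEN keeps the fork and bmbt state significant for key
 * ordering while ignoring the written/unwritten state of the mapping.
 */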

STATIC int64_t
xfs_rtrmapbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_rmap_irec		*rec = &cur->bc_rec.r;
	const struct xfs_rmap_key	*kp = &key->rmap;
	__u64				x, y;
	int64_t				d;

	d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
	if (d)
		return d;

	x = be64_to_cpu(kp->rm_owner);
	y = rec->rm_owner;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = offset_keymask(be64_to_cpu(kp->rm_offset));
	y = offset_keymask(xfs_rmap_irec_offset_pack(rec));
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}

STATIC int64_t
xfs_rtrmapbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	const struct xfs_rmap_key	*kp1 = &k1->rmap;
	const struct xfs_rmap_key	*kp2 = &k2->rmap;
	int64_t				d;
	__u64				x, y;

	/* Doesn't make sense to mask off the physical space part */
	ASSERT(!mask || mask->rmap.rm_startblock);

	d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
	    be32_to_cpu(kp2->rm_startblock);
	if (d)
		return d;

	if (!mask || mask->rmap.rm_owner) {
		x = be64_to_cpu(kp1->rm_owner);
		y = be64_to_cpu(kp2->rm_owner);
		if (x > y)
			return 1;
		else if (y > x)
			return -1;
	}

	if (!mask || mask->rmap.rm_offset) {
		/* Doesn't make sense to allow offset but not owner */
		ASSERT(!mask || mask->rmap.rm_owner);

		x = offset_keymask(be64_to_cpu(kp1->rm_offset));
		y = offset_keymask(be64_to_cpu(kp2->rm_offset));
		if (x > y)
			return 1;
		else if (y > x)
			return -1;
	}

	return 0;
}

static xfs_failaddr_t
xfs_rtrmapbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	int			level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (!xfs_has_rmapbt(mp))
		return __this_address;
	fa = xfs_btree_fsblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
	if (fa)
		return fa;
	level = be16_to_cpu(block->bb_level);
	if (level > mp->m_rtrmap_maxlevels)
		return __this_address;

	return xfs_btree_fsblock_verify(bp, mp->m_rtrmap_mxr[level != 0]);
}

static void
xfs_rtrmapbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_fsblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_rtrmapbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_rtrmapbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_rtrmapbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_fsblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_rtrmapbt_buf_ops = {
	.name			= "xfs_rtrmapbt",
	.magic			= { 0, cpu_to_be32(XFS_RTRMAP_CRC_MAGIC) },
	.verify_read		= xfs_rtrmapbt_read_verify,
	.verify_write		= xfs_rtrmapbt_write_verify,
	.verify_struct		= xfs_rtrmapbt_verify,
};

STATIC int
xfs_rtrmapbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	uint32_t			x;
	uint32_t			y;
	uint64_t			a;
	uint64_t			b;

	x = be32_to_cpu(k1->rmap.rm_startblock);
	y = be32_to_cpu(k2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(k1->rmap.rm_owner);
	b = be64_to_cpu(k2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = offset_keymask(be64_to_cpu(k1->rmap.rm_offset));
	b = offset_keymask(be64_to_cpu(k2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}

STATIC int
xfs_rtrmapbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	uint32_t			x;
	uint32_t			y;
	uint64_t			a;
	uint64_t			b;

	x = be32_to_cpu(r1->rmap.rm_startblock);
	y = be32_to_cpu(r2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(r1->rmap.rm_owner);
	b = be64_to_cpu(r2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = offset_keymask(be64_to_cpu(r1->rmap.rm_offset));
	b = offset_keymask(be64_to_cpu(r2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}

STATIC enum xbtree_key_contig
xfs_rtrmapbt_keys_contiguous(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->rmap.rm_startblock);

	/*
	 * We only support checking contiguity of the physical space component.
	 * If any callers ever need more specificity than that, they'll have to
	 * implement it here.
	 */
	ASSERT(!mask || (!mask->rmap.rm_owner && !mask->rmap.rm_offset));

	return xbtree_key_contig(be32_to_cpu(key1->rmap.rm_startblock),
				 be32_to_cpu(key2->rmap.rm_startblock));
}

static inline void
xfs_rtrmapbt_move_ptrs(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*broot,
	short			old_size,
	size_t			new_size,
	unsigned int		numrecs)
{
	void			*dptr;
	void			*sptr;

	sptr = xfs_rtrmap_broot_ptr_addr(mp, broot, 1, old_size);
	dptr = xfs_rtrmap_broot_ptr_addr(mp, broot, 1, new_size);
	memmove(dptr, sptr, numrecs * sizeof(xfs_rtrmap_ptr_t));
}
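
/*
 * In the incore root, the keys start right after the btree block header, but
 * the node pointer array is placed at the offset implied by the current
 * buffer size (i.e. after room for as many key pairs as that size can hold).
 * Resizing the root buffer therefore requires memmoving the pointer array to
 * the offset implied by the new size, which is what xfs_rtrmapbt_move_ptrs
 * does on behalf of xfs_rtrmapbt_broot_realloc below.
 */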

static struct xfs_btree_block *
xfs_rtrmapbt_broot_realloc(
	struct xfs_btree_cur	*cur,
	unsigned int		new_numrecs)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_ifork	*ifp = xfs_btree_ifork_ptr(cur);
	struct xfs_btree_block	*broot;
	unsigned int		new_size;
	unsigned int		old_size = ifp->if_broot_bytes;
	const unsigned int	level = cur->bc_nlevels - 1;

	new_size = xfs_rtrmap_broot_space_calc(mp, level, new_numrecs);

	/* Handle the nop case quietly. */
	if (new_size == old_size)
		return ifp->if_broot;

	if (new_size > old_size) {
		unsigned int	old_numrecs;

		/*
		 * If there wasn't any memory allocated before, just allocate
		 * it now and get out.
		 */
		if (old_size == 0)
			return xfs_broot_realloc(ifp, new_size);

		/*
		 * If there is already an existing if_broot, then we need to
		 * realloc it and possibly move the node block pointers because
		 * those are not butted up against the btree block header.
		 */
		old_numrecs = xfs_rtrmapbt_maxrecs(mp, old_size, level == 0);
		broot = xfs_broot_realloc(ifp, new_size);
		if (level > 0)
			xfs_rtrmapbt_move_ptrs(mp, broot, old_size, new_size,
					old_numrecs);
		goto out_broot;
	}

	/*
	 * We're reducing numrecs. If we're going all the way to zero, just
	 * free the block.
	 */
	ASSERT(ifp->if_broot != NULL && old_size > 0);
	if (new_size == 0)
		return xfs_broot_realloc(ifp, 0);

	/*
	 * Shrink the btree root by possibly moving the rtrmapbt pointers,
	 * since they are not butted up against the btree block header. Then
	 * reallocate broot.
	 */
	if (level > 0)
		xfs_rtrmapbt_move_ptrs(mp, ifp->if_broot, old_size, new_size,
				new_numrecs);
	broot = xfs_broot_realloc(ifp, new_size);

out_broot:
	ASSERT(xfs_rtrmap_droot_space(broot) <=
	       xfs_inode_fork_size(cur->bc_ino.ip, cur->bc_ino.whichfork));
	return broot;
}

const struct xfs_btree_ops xfs_rtrmapbt_ops = {
	.name			= "rtrmap",
	.type			= XFS_BTREE_TYPE_INODE,
	.geom_flags		= XFS_BTGEO_OVERLAPPING |
				  XFS_BTGEO_IROOT_RECORDS,

	.rec_len		= sizeof(struct xfs_rmap_rec),
	/* Overlapping btree; 2 keys per pointer. */
	.key_len		= 2 * sizeof(struct xfs_rmap_key),
	.ptr_len		= XFS_BTREE_LONG_PTR_LEN,

	.lru_refs		= XFS_RMAP_BTREE_REF,
	.statoff		= XFS_STATS_CALC_INDEX(xs_rtrmap_2),
	.sick_mask		= XFS_SICK_RG_RMAPBT,

	.dup_cursor		= xfs_rtrmapbt_dup_cursor,
	.alloc_block		= xfs_btree_alloc_metafile_block,
	.free_block		= xfs_btree_free_metafile_block,
	.get_minrecs		= xfs_rtrmapbt_get_minrecs,
	.get_maxrecs		= xfs_rtrmapbt_get_maxrecs,
	.get_dmaxrecs		= xfs_rtrmapbt_get_dmaxrecs,
	.init_key_from_rec	= xfs_rtrmapbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_rtrmapbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_rtrmapbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_rtrmapbt_init_ptr_from_cur,
	.key_diff		= xfs_rtrmapbt_key_diff,
	.buf_ops		= &xfs_rtrmapbt_buf_ops,
	.diff_two_keys		= xfs_rtrmapbt_diff_two_keys,
	.keys_inorder		= xfs_rtrmapbt_keys_inorder,
	.recs_inorder		= xfs_rtrmapbt_recs_inorder,
	.keys_contiguous	= xfs_rtrmapbt_keys_contiguous,
	.broot_realloc		= xfs_rtrmapbt_broot_realloc,
};

/* Allocate a new rt rmap btree cursor. */
struct xfs_btree_cur *
xfs_rtrmapbt_init_cursor(
	struct xfs_trans	*tp,
	struct xfs_rtgroup	*rtg)
{
	struct xfs_inode	*ip = rtg_rmap(rtg);
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_btree_cur	*cur;

	xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);

	cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rtrmapbt_ops,
			mp->m_rtrmap_maxlevels, xfs_rtrmapbt_cur_cache);

	cur->bc_ino.ip = ip;
	cur->bc_group = xfs_group_hold(rtg_group(rtg));
	cur->bc_ino.whichfork = XFS_DATA_FORK;
	cur->bc_nlevels = be16_to_cpu(ip->i_df.if_broot->bb_level) + 1;
	cur->bc_ino.forksize = xfs_inode_fork_size(ip, XFS_DATA_FORK);

	return cur;
}
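
/*
 * Usage sketch (illustrative, not a specific call site): a caller that wants
 * to walk the rt rmap records of a group takes the rmap inode ILOCK, builds a
 * cursor, runs its query, and tears the cursor down again.  Here my_walk_fn
 * and my_priv stand in for the caller's callback and private data:
 *
 *	ip = rtg_rmap(rtg);
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);
 *	cur = xfs_rtrmapbt_init_cursor(tp, rtg);
 *	error = xfs_rmap_query_all(cur, my_walk_fn, my_priv);
 *	xfs_btree_del_cursor(cur, error);
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */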

#ifdef CONFIG_XFS_BTREE_IN_MEM
/*
 * Validate an in-memory realtime rmap btree block. Callers are allowed to
 * generate an in-memory btree even if the ondisk feature is not enabled.
 */
static xfs_failaddr_t
xfs_rtrmapbt_mem_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;
	unsigned int		maxrecs;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	fa = xfs_btree_fsblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
	if (fa)
		return fa;

	level = be16_to_cpu(block->bb_level);
	if (xfs_has_rmapbt(mp)) {
		if (level >= mp->m_rtrmap_maxlevels)
			return __this_address;
	} else {
		if (level >= xfs_rtrmapbt_maxlevels_ondisk())
			return __this_address;
	}

	maxrecs = xfs_rtrmapbt_maxrecs(mp, XFBNO_BLOCKSIZE, level == 0);
	return xfs_btree_memblock_verify(bp, maxrecs);
}

static void
xfs_rtrmapbt_mem_rw_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa = xfs_rtrmapbt_mem_verify(bp);

	if (fa)
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
}

/* skip crc checks on in-memory btrees to save time */
static const struct xfs_buf_ops xfs_rtrmapbt_mem_buf_ops = {
	.name			= "xfs_rtrmapbt_mem",
	.magic			= { 0, cpu_to_be32(XFS_RTRMAP_CRC_MAGIC) },
	.verify_read		= xfs_rtrmapbt_mem_rw_verify,
	.verify_write		= xfs_rtrmapbt_mem_rw_verify,
	.verify_struct		= xfs_rtrmapbt_mem_verify,
};

const struct xfs_btree_ops xfs_rtrmapbt_mem_ops = {
	.type			= XFS_BTREE_TYPE_MEM,
	.geom_flags		= XFS_BTGEO_OVERLAPPING,

	.rec_len		= sizeof(struct xfs_rmap_rec),
	/* Overlapping btree; 2 keys per pointer. */
	.key_len		= 2 * sizeof(struct xfs_rmap_key),
	.ptr_len		= XFS_BTREE_LONG_PTR_LEN,

	.lru_refs		= XFS_RMAP_BTREE_REF,
	.statoff		= XFS_STATS_CALC_INDEX(xs_rtrmap_mem_2),

	.dup_cursor		= xfbtree_dup_cursor,
	.set_root		= xfbtree_set_root,
	.alloc_block		= xfbtree_alloc_block,
	.free_block		= xfbtree_free_block,
	.get_minrecs		= xfbtree_get_minrecs,
	.get_maxrecs		= xfbtree_get_maxrecs,
	.init_key_from_rec	= xfs_rtrmapbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_rtrmapbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_rtrmapbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfbtree_init_ptr_from_cur,
	.key_diff		= xfs_rtrmapbt_key_diff,
	.buf_ops		= &xfs_rtrmapbt_mem_buf_ops,
	.diff_two_keys		= xfs_rtrmapbt_diff_two_keys,
	.keys_inorder		= xfs_rtrmapbt_keys_inorder,
	.recs_inorder		= xfs_rtrmapbt_recs_inorder,
	.keys_contiguous	= xfs_rtrmapbt_keys_contiguous,
};

/* Create a cursor for an in-memory btree. */
struct xfs_btree_cur *
xfs_rtrmapbt_mem_cursor(
	struct xfs_rtgroup	*rtg,
	struct xfs_trans	*tp,
	struct xfbtree		*xfbt)
{
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_btree_cur	*cur;

	cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rtrmapbt_mem_ops,
			mp->m_rtrmap_maxlevels, xfs_rtrmapbt_cur_cache);
	cur->bc_mem.xfbtree = xfbt;
	cur->bc_nlevels = xfbt->nlevels;
	cur->bc_group = xfs_group_hold(rtg_group(rtg));
	return cur;
}

/* Create an in-memory realtime rmap btree. */
int
xfs_rtrmapbt_mem_init(
	struct xfs_mount	*mp,
	struct xfbtree		*xfbt,
	struct xfs_buftarg	*btp,
	xfs_rgnumber_t		rgno)
{
	xfbt->owner = rgno;
	return xfbtree_init(mp, xfbt, btp, &xfs_rtrmapbt_mem_ops);
}
#endif /* CONFIG_XFS_BTREE_IN_MEM */

/*
 * Install a new rt reverse mapping btree root. Caller is responsible for
 * invalidating and freeing the old btree blocks.
 */
void
xfs_rtrmapbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp)
{
	struct xbtree_ifakeroot	*ifake = cur->bc_ino.ifake;
	struct xfs_ifork	*ifp;
	int			flags = XFS_ILOG_CORE | XFS_ILOG_DBROOT;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
	ASSERT(ifake->if_fork->if_format == XFS_DINODE_FMT_META_BTREE);

	/*
	 * Free any resources hanging off the real fork, then shallow-copy the
	 * staging fork's contents into the real fork to transfer everything
	 * we just built.
	 */
	ifp = xfs_ifork_ptr(cur->bc_ino.ip, XFS_DATA_FORK);
	xfs_idestroy_fork(ifp);
	memcpy(ifp, ifake->if_fork, sizeof(struct xfs_ifork));

	cur->bc_ino.ip->i_projid = cur->bc_group->xg_gno;
	xfs_trans_log_inode(tp, cur->bc_ino.ip, flags);
	xfs_btree_commit_ifakeroot(cur, tp, XFS_DATA_FORK);
}

/* Calculate number of records in a rt reverse mapping btree block. */
static inline unsigned int
xfs_rtrmapbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(struct xfs_rmap_rec);
	return blocklen /
		(2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rtrmap_ptr_t));
}

/*
 * Calculate number of records in an rt reverse mapping btree block.
 */
unsigned int
xfs_rtrmapbt_maxrecs(
	struct xfs_mount	*mp,
	unsigned int		blocklen,
	bool			leaf)
{
	blocklen -= XFS_RTRMAP_BLOCK_LEN;
	return xfs_rtrmapbt_block_maxrecs(blocklen, leaf);
}
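
/*
 * Illustrative sizing (assuming the usual ondisk sizes: 24-byte rmap records,
 * 20-byte rmap keys, 8-byte long pointers, and a 72-byte long-format CRC
 * block header): a 4096-byte block holds (4096 - 72) / 24 = 167 records in a
 * leaf, or (4096 - 72) / (2 * 20 + 8) = 83 key/pointer sets in a node.
 */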

/* Compute the max possible height for realtime reverse mapping btrees. */
unsigned int
xfs_rtrmapbt_maxlevels_ondisk(void)
{
	unsigned long long	max_dblocks;
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_LBLOCK_CRC_LEN;

	minrecs[0] = xfs_rtrmapbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_rtrmapbt_block_maxrecs(blocklen, false) / 2;

	/*
	 * Compute the asymptotic maxlevels for an rtrmapbt on any rtreflink fs.
	 *
	 * On a reflink filesystem, each block in an rtgroup can have up to
	 * 2^32 (per the refcount record format) owners, which means that
	 * theoretically we could face up to 2^64 rmap records. However, we're
	 * likely to run out of blocks in the data device long before that
	 * happens, which means that we must compute the max height based on
	 * what the btree will look like if it consumes almost all the blocks
	 * in the data device due to maximal sharing factor.
	 */
	max_dblocks = -1U;	/* max ag count */
	max_dblocks *= XFS_MAX_CRC_AG_BLOCKS;
	return xfs_btree_space_to_height(minrecs, max_dblocks);
}
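
/*
 * Rough magnitude check (illustrative): with the maximum AG count (2^32 - 1)
 * and the maximum AG size at the minimum CRC block size (about 2^30 blocks),
 * max_dblocks above works out to roughly 2^62 blocks, so the worst-case tree
 * height is bounded by that figure rather than by the 2^64 record count.
 */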

int __init
xfs_rtrmapbt_init_cur_cache(void)
{
	xfs_rtrmapbt_cur_cache = kmem_cache_create("xfs_rtrmapbt_cur",
			xfs_btree_cur_sizeof(xfs_rtrmapbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_rtrmapbt_cur_cache)
		return -ENOMEM;
	return 0;
}

void
xfs_rtrmapbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_rtrmapbt_cur_cache);
	xfs_rtrmapbt_cur_cache = NULL;
}

/* Compute the maximum height of an rt reverse mapping btree. */
void
xfs_rtrmapbt_compute_maxlevels(
	struct xfs_mount	*mp)
{
	unsigned int		d_maxlevels, r_maxlevels;

	if (!xfs_has_rtrmapbt(mp)) {
		mp->m_rtrmap_maxlevels = 0;
		return;
	}

	/*
	 * The realtime rmapbt lives on the data device, which means that its
	 * maximum height is constrained by the size of the data device and
	 * the height required to store one rmap record for each block in an
	 * rt group.
	 *
	 * On a reflink filesystem, each rt block can have up to 2^32 (per the
	 * refcount record format) owners, which means that theoretically we
	 * could face up to 2^64 rmap records. This makes the computation of
	 * maxlevels based on record count meaningless, so we only consider the
	 * size of the data device.
	 */
	d_maxlevels = xfs_btree_space_to_height(mp->m_rtrmap_mnr,
				mp->m_sb.sb_dblocks);
	if (xfs_has_rtreflink(mp)) {
		mp->m_rtrmap_maxlevels = d_maxlevels + 1;
		return;
	}

	r_maxlevels = xfs_btree_compute_maxlevels(mp->m_rtrmap_mnr,
				mp->m_groups[XG_TYPE_RTG].blocks);

	/* Add one level to handle the inode root level. */
	mp->m_rtrmap_maxlevels = min(d_maxlevels, r_maxlevels) + 1;
}

/* Calculate the rtrmap btree size for some records. */
unsigned long long
xfs_rtrmapbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_rtrmap_mnr, len);
}

/*
 * Calculate the maximum rmap btree size.
 */
static unsigned long long
xfs_rtrmapbt_max_size(
	struct xfs_mount	*mp,
	xfs_rtblock_t		rtblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_rtrmap_mxr[0] == 0)
		return 0;

	return xfs_rtrmapbt_calc_size(mp, rtblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
xfs_filblks_t
xfs_rtrmapbt_calc_reserves(
	struct xfs_mount	*mp)
{
	uint32_t		blocks = mp->m_groups[XG_TYPE_RTG].blocks;

	if (!xfs_has_rtrmapbt(mp))
		return 0;

	/*
	 * Reserve 1% of the rtgroup, or enough blocks to hold a btree with
	 * one rmap record per rtgroup block, whichever is larger.
	 */
	return max_t(xfs_filblks_t, blocks / 100,
			xfs_rtrmapbt_max_size(mp, blocks));
}

/* Convert on-disk form of btree root to in-memory form. */
STATIC void
xfs_rtrmapbt_from_disk(
	struct xfs_inode	*ip,
	struct xfs_rtrmap_root	*dblock,
	unsigned int		dblocklen,
	struct xfs_btree_block	*rblock)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_rmap_key	*fkp;
	__be64			*fpp;
	struct xfs_rmap_key	*tkp;
	__be64			*tpp;
	struct xfs_rmap_rec	*frp;
	struct xfs_rmap_rec	*trp;
	unsigned int		rblocklen = xfs_rtrmap_broot_space(mp, dblock);
	unsigned int		numrecs;
	unsigned int		maxrecs;

	xfs_btree_init_block(mp, rblock, &xfs_rtrmapbt_ops, 0, 0, ip->i_ino);

	rblock->bb_level = dblock->bb_level;
	rblock->bb_numrecs = dblock->bb_numrecs;
	numrecs = be16_to_cpu(dblock->bb_numrecs);

	if (be16_to_cpu(rblock->bb_level) > 0) {
		maxrecs = xfs_rtrmapbt_droot_maxrecs(dblocklen, false);
		fkp = xfs_rtrmap_droot_key_addr(dblock, 1);
		tkp = xfs_rtrmap_key_addr(rblock, 1);
		fpp = xfs_rtrmap_droot_ptr_addr(dblock, 1, maxrecs);
		tpp = xfs_rtrmap_broot_ptr_addr(mp, rblock, 1, rblocklen);
		memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs);
		memcpy(tpp, fpp, sizeof(*fpp) * numrecs);
	} else {
		frp = xfs_rtrmap_droot_rec_addr(dblock, 1);
		trp = xfs_rtrmap_rec_addr(rblock, 1);
		memcpy(trp, frp, sizeof(*frp) * numrecs);
	}
}

/* Load a realtime reverse mapping btree root in from disk. */
int
xfs_iformat_rtrmap(
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_rtrmap_root	*dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
	struct xfs_btree_block	*broot;
	unsigned int		numrecs;
	unsigned int		level;
	int			dsize;

	/*
	 * growfs must create the rtrmap inodes before adding a realtime volume
	 * to the filesystem, so we cannot use the rtrmapbt predicate here.
	 */
	if (!xfs_has_rmapbt(ip->i_mount)) {
		xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
		return -EFSCORRUPTED;
	}

	dsize = XFS_DFORK_SIZE(dip, mp, XFS_DATA_FORK);
	numrecs = be16_to_cpu(dfp->bb_numrecs);
	level = be16_to_cpu(dfp->bb_level);

	if (level > mp->m_rtrmap_maxlevels ||
	    xfs_rtrmap_droot_space_calc(level, numrecs) > dsize) {
		xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
		return -EFSCORRUPTED;
	}

	broot = xfs_broot_alloc(xfs_ifork_ptr(ip, XFS_DATA_FORK),
			xfs_rtrmap_broot_space_calc(mp, level, numrecs));
	if (broot)
		xfs_rtrmapbt_from_disk(ip, dfp, dsize, broot);
	return 0;
}

/* Convert in-memory form of btree root to on-disk form. */
void
xfs_rtrmapbt_to_disk(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*rblock,
	unsigned int		rblocklen,
	struct xfs_rtrmap_root	*dblock,
	unsigned int		dblocklen)
{
	struct xfs_rmap_key	*fkp;
	__be64			*fpp;
	struct xfs_rmap_key	*tkp;
	__be64			*tpp;
	struct xfs_rmap_rec	*frp;
	struct xfs_rmap_rec	*trp;
	unsigned int		numrecs;
	unsigned int		maxrecs;

	ASSERT(rblock->bb_magic == cpu_to_be32(XFS_RTRMAP_CRC_MAGIC));
	ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid));
	ASSERT(rblock->bb_u.l.bb_blkno == cpu_to_be64(XFS_BUF_DADDR_NULL));
	ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));

	dblock->bb_level = rblock->bb_level;
	dblock->bb_numrecs = rblock->bb_numrecs;
	numrecs = be16_to_cpu(rblock->bb_numrecs);

	if (be16_to_cpu(rblock->bb_level) > 0) {
		maxrecs = xfs_rtrmapbt_droot_maxrecs(dblocklen, false);
		fkp = xfs_rtrmap_key_addr(rblock, 1);
		tkp = xfs_rtrmap_droot_key_addr(dblock, 1);
		fpp = xfs_rtrmap_broot_ptr_addr(mp, rblock, 1, rblocklen);
		tpp = xfs_rtrmap_droot_ptr_addr(dblock, 1, maxrecs);
		memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs);
		memcpy(tpp, fpp, sizeof(*fpp) * numrecs);
	} else {
		frp = xfs_rtrmap_rec_addr(rblock, 1);
		trp = xfs_rtrmap_droot_rec_addr(dblock, 1);
		memcpy(trp, frp, sizeof(*frp) * numrecs);
	}
}

/* Flush a realtime reverse mapping btree root out to disk. */
void
xfs_iflush_rtrmap(
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	struct xfs_rtrmap_root	*dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);

	ASSERT(ifp->if_broot != NULL);
	ASSERT(ifp->if_broot_bytes > 0);
	ASSERT(xfs_rtrmap_droot_space(ifp->if_broot) <=
			xfs_inode_fork_size(ip, XFS_DATA_FORK));
	xfs_rtrmapbt_to_disk(ip->i_mount, ifp->if_broot, ifp->if_broot_bytes,
			dfp, XFS_DFORK_SIZE(dip, ip->i_mount, XFS_DATA_FORK));
}

/*
 * Create a realtime rmap btree inode.
 */
int
xfs_rtrmapbt_create(
	struct xfs_rtgroup	*rtg,
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	bool			init)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*broot;

	ifp->if_format = XFS_DINODE_FMT_META_BTREE;
	ASSERT(ifp->if_broot_bytes == 0);
	ASSERT(ifp->if_bytes == 0);

	/* Initialize the empty incore btree root. */
	broot = xfs_broot_realloc(ifp, xfs_rtrmap_broot_space_calc(mp, 0, 0));
	if (broot)
		xfs_btree_init_block(mp, broot, &xfs_rtrmapbt_ops, 0, 0,
				ip->i_ino);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE | XFS_ILOG_DBROOT);

	return 0;
}

/*
 * Initialize an rmap for a realtime superblock using the potentially updated
 * rt geometry in the provided @mp.
 */
int
xfs_rtrmapbt_init_rtsb(
	struct xfs_mount	*mp,
	struct xfs_rtgroup	*rtg,
	struct xfs_trans	*tp)
{
	struct xfs_rmap_irec	rmap = {
		.rm_blockcount	= mp->m_sb.sb_rextsize,
		.rm_owner	= XFS_RMAP_OWN_FS,
	};
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(xfs_has_rtsb(mp));
	ASSERT(rtg_rgno(rtg) == 0);

	cur = xfs_rtrmapbt_init_cursor(tp, rtg);
	error = xfs_rmap_map_raw(cur, &rmap);
	xfs_btree_del_cursor(cur, error);
	return error;
}