/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BTREE_H__
#define	__XFS_BTREE_H__

struct xfs_buf;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
struct xfs_ifork;
struct xfs_perag;

/*
 * Generic key, ptr and record wrapper structures.
 *
 * These are disk format structures, and are converted where necessary
 * by the btree specific code that needs to interpret them.
 */
union xfs_btree_ptr {
	__be32			s;	/* short form ptr */
	__be64			l;	/* long form ptr */
};
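
/*
 * Illustrative sketch only: short-form pointers hold AG block numbers and
 * long-form pointers hold filesystem block numbers; the per-btree code
 * converts them from the on-disk big-endian format along these lines.
 * Hypothetical helpers, not part of the XFS API.
 */
static inline xfs_agblock_t
xfs_btree_example_ptr_to_agbno(const union xfs_btree_ptr *ptr)
{
	return be32_to_cpu(ptr->s);
}

static inline xfs_fsblock_t
xfs_btree_example_ptr_to_fsbno(const union xfs_btree_ptr *ptr)
{
	return be64_to_cpu(ptr->l);
}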

/*
 * The in-core btree key.  Overlapping btrees actually store two keys
 * per pointer, so we reserve enough memory to hold both.  The __*bigkey
 * items should never be accessed directly.
 */
union xfs_btree_key {
	struct xfs_bmbt_key		bmbt;
	xfs_bmdr_key_t			bmbr;	/* bmbt root block */
	xfs_alloc_key_t			alloc;
	struct xfs_inobt_key		inobt;
	struct xfs_rmap_key		rmap;
	struct xfs_rmap_key		__rmap_bigkey[2];
	struct xfs_refcount_key		refc;
};

union xfs_btree_rec {
	struct xfs_bmbt_rec		bmbt;
	xfs_bmdr_rec_t			bmbr;	/* bmbt root block */
	struct xfs_alloc_rec		alloc;
	struct xfs_inobt_rec		inobt;
	struct xfs_rmap_rec		rmap;
	struct xfs_refcount_rec		refc;
};

/*
 * This nonsense is to make -wlint happy.
 */
#define	XFS_LOOKUP_EQ	((xfs_lookup_t)XFS_LOOKUP_EQi)
#define	XFS_LOOKUP_LE	((xfs_lookup_t)XFS_LOOKUP_LEi)
#define	XFS_LOOKUP_GE	((xfs_lookup_t)XFS_LOOKUP_GEi)

struct xfs_btree_ops;
uint32_t xfs_btree_magic(struct xfs_mount *mp, const struct xfs_btree_ops *ops);

/*
 * For logging record fields.
 */
#define	XFS_BB_MAGIC		(1u << 0)
#define	XFS_BB_LEVEL		(1u << 1)
#define	XFS_BB_NUMRECS		(1u << 2)
#define	XFS_BB_LEFTSIB		(1u << 3)
#define	XFS_BB_RIGHTSIB		(1u << 4)
#define	XFS_BB_BLKNO		(1u << 5)
#define	XFS_BB_LSN		(1u << 6)
#define	XFS_BB_UUID		(1u << 7)
#define	XFS_BB_OWNER		(1u << 8)
#define	XFS_BB_NUM_BITS		5
#define	XFS_BB_ALL_BITS		((1u << XFS_BB_NUM_BITS) - 1)
#define	XFS_BB_NUM_BITS_CRC	9
#define	XFS_BB_ALL_BITS_CRC	((1u << XFS_BB_NUM_BITS_CRC) - 1)

/*
 * Generic stats interface
 */
#define XFS_BTREE_STATS_INC(cur, stat)	\
	XFS_STATS_INC_OFF((cur)->bc_mp, \
		(cur)->bc_ops->statoff + __XBTS_ ## stat)
#define XFS_BTREE_STATS_ADD(cur, stat, val)	\
	XFS_STATS_ADD_OFF((cur)->bc_mp, \
		(cur)->bc_ops->statoff + __XBTS_ ## stat, val)
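
/*
 * Usage example (illustrative only): a lookup bumps the per-btree lookup
 * counter, and shifting records between blocks adds to the moves counter:
 *
 *	XFS_BTREE_STATS_INC(cur, lookup);
 *	XFS_BTREE_STATS_ADD(cur, moves, nrecs);
 *
 * The stat name resolves to a __XBTS_* index added to ->statoff, so each
 * btree type reports into its own slice of the stats array.
 */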

enum xbtree_key_contig {
	XBTREE_KEY_GAP = 0,
	XBTREE_KEY_CONTIGUOUS,
	XBTREE_KEY_OVERLAP,
};

/*
 * Decide if these two numeric btree key fields are contiguous, overlapping,
 * or if there's a gap between them.  @x should be the field from the high
 * key and @y should be the field from the low key.
 */
static inline enum xbtree_key_contig xbtree_key_contig(uint64_t x, uint64_t y)
{
	x++;
	if (x < y)
		return XBTREE_KEY_GAP;
	if (x == y)
		return XBTREE_KEY_CONTIGUOUS;
	return XBTREE_KEY_OVERLAP;
}
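
/*
 * Illustrative example only: with 10 as the field from the first record's
 * high key, a second record whose low key field is 11 is contiguous with
 * it, one starting at 15 leaves a gap, and one starting at 8 overlaps.
 * The helper below is hypothetical and exists purely to demonstrate the
 * three outcomes.
 */
static inline bool
xbtree_key_contig_example(void)
{
	return xbtree_key_contig(10, 11) == XBTREE_KEY_CONTIGUOUS &&
	       xbtree_key_contig(10, 15) == XBTREE_KEY_GAP &&
	       xbtree_key_contig(10, 8) == XBTREE_KEY_OVERLAP;
}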

#define XFS_BTREE_LONG_PTR_LEN		(sizeof(__be64))
#define XFS_BTREE_SHORT_PTR_LEN		(sizeof(__be32))

enum xfs_btree_type {
	XFS_BTREE_TYPE_AG,
	XFS_BTREE_TYPE_INODE,
	XFS_BTREE_TYPE_MEM,
};

struct xfs_btree_ops {
	const char		*name;

	/* Type of btree - AG-rooted, inode-rooted, or in-memory */
	enum xfs_btree_type	type;

	/* XFS_BTGEO_* flags that determine the geometry of the btree */
	unsigned int		geom_flags;

	/* size of the key, pointer, and record structures */
	size_t			key_len;
	size_t			ptr_len;
	size_t			rec_len;

	/* LRU refcount to set on each btree buffer created */
	unsigned int		lru_refs;

	/* offset of btree stats array */
	unsigned int		statoff;

	/* sick mask for health reporting (only for XFS_BTREE_TYPE_AG) */
	unsigned int		sick_mask;

	/* cursor operations */
	struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
	void	(*update_cursor)(struct xfs_btree_cur *src,
				 struct xfs_btree_cur *dst);

	/* update btree root pointer */
	void	(*set_root)(struct xfs_btree_cur *cur,
			    const union xfs_btree_ptr *nptr, int level_change);

	/* block allocation / freeing */
	int	(*alloc_block)(struct xfs_btree_cur *cur,
			       const union xfs_btree_ptr *start_bno,
			       union xfs_btree_ptr *new_bno,
			       int *stat);
	int	(*free_block)(struct xfs_btree_cur *cur, struct xfs_buf *bp);

	/* records in block/level */
	int	(*get_minrecs)(struct xfs_btree_cur *cur, int level);
	int	(*get_maxrecs)(struct xfs_btree_cur *cur, int level);

	/* records on disk.  Matters for the root in the inode case. */
	int	(*get_dmaxrecs)(struct xfs_btree_cur *cur, int level);

	/* init values of btree structures */
	void	(*init_key_from_rec)(union xfs_btree_key *key,
				     const union xfs_btree_rec *rec);
	void	(*init_rec_from_cur)(struct xfs_btree_cur *cur,
				     union xfs_btree_rec *rec);
	void	(*init_ptr_from_cur)(struct xfs_btree_cur *cur,
				     union xfs_btree_ptr *ptr);
	void	(*init_high_key_from_rec)(union xfs_btree_key *key,
					  const union xfs_btree_rec *rec);

	/* difference between key value and cursor value */
	int64_t (*key_diff)(struct xfs_btree_cur *cur,
			    const union xfs_btree_key *key);

	/*
	 * Compute the difference between key1 and key2 -- positive if
	 * key1 > key2, negative if key1 < key2, and zero if equal.  If the
	 * @mask parameter is non-NULL, each key field to be used in the
	 * comparison must contain a nonzero value.
	 */
	int64_t (*diff_two_keys)(struct xfs_btree_cur *cur,
				 const union xfs_btree_key *key1,
				 const union xfs_btree_key *key2,
				 const union xfs_btree_key *mask);

	const struct xfs_buf_ops	*buf_ops;

	/* check that k1 is lower than k2 */
	int	(*keys_inorder)(struct xfs_btree_cur *cur,
				const union xfs_btree_key *k1,
				const union xfs_btree_key *k2);

	/* check that r1 is lower than r2 */
	int	(*recs_inorder)(struct xfs_btree_cur *cur,
				const union xfs_btree_rec *r1,
				const union xfs_btree_rec *r2);

	/*
	 * Are these two btree keys immediately adjacent?
	 *
	 * Given two btree keys @key1 and @key2, decide if it is impossible for
	 * there to be a third btree key K satisfying the relationship
	 * @key1 < K < @key2.  To determine if two btree records are
	 * immediately adjacent, @key1 should be the high key of the first
	 * record and @key2 should be the low key of the second record.
	 * If the @mask parameter is non-NULL, each key field to be used in the
	 * comparison must contain a nonzero value.
	 */
	enum xbtree_key_contig (*keys_contiguous)(struct xfs_btree_cur *cur,
			       const union xfs_btree_key *key1,
			       const union xfs_btree_key *key2,
			       const union xfs_btree_key *mask);
};
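
/*
 * Illustrative sketch only, not part of the XFS API: a ->diff_two_keys
 * style comparison for a btree keyed on a single startblock field, loosely
 * modelled on the by-block allocation btree.  A real implementation also
 * takes and honours the @mask argument and lives in the per-btree source
 * file; the helper name below is hypothetical.
 */
static inline int64_t
xfs_btree_example_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	/* positive if key1 sorts after key2, negative if before, 0 if equal */
	return (int64_t)be32_to_cpu(key1->alloc.ar_startblock) -
			be32_to_cpu(key2->alloc.ar_startblock);
}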

/* btree geometry flags */
#define XFS_BTGEO_OVERLAPPING		(1U << 0) /* overlapping intervals */


union xfs_btree_irec {
	struct xfs_alloc_rec_incore	a;
	struct xfs_bmbt_irec		b;
	struct xfs_inobt_rec_incore	i;
	struct xfs_rmap_irec		r;
	struct xfs_refcount_irec	rc;
};

struct xfs_btree_level {
	/* buffer pointer */
	struct xfs_buf		*bp;

	/* key/record number */
	uint16_t		ptr;

	/* readahead info */
#define XFS_BTCUR_LEFTRA	(1 << 0) /* left sibling has been read-ahead */
#define XFS_BTCUR_RIGHTRA	(1 << 1) /* right sibling has been read-ahead */
	uint16_t		ra;
};

/*
 * Btree cursor structure.
 * This collects all information needed by the btree code in one place.
 */
struct xfs_btree_cur
{
	struct xfs_trans	*bc_tp;	/* transaction we're in, if any */
	struct xfs_mount	*bc_mp;	/* file system mount struct */
	const struct xfs_btree_ops *bc_ops;
	struct kmem_cache	*bc_cache; /* cursor cache */
	unsigned int		bc_flags; /* btree features - below */
	union xfs_btree_irec	bc_rec;	/* current insert/search record value */
	uint8_t			bc_nlevels; /* number of levels in the tree */
	uint8_t			bc_maxlevels; /* maximum levels for this btree type */

	/* per-type information */
	union {
		struct {
			struct xfs_inode	*ip;
			short			forksize;
			char			whichfork;
			struct xbtree_ifakeroot	*ifake;	/* for staging cursor */
		} bc_ino;
		struct {
			struct xfs_perag	*pag;
			struct xfs_buf		*agbp;
			struct xbtree_afakeroot	*afake;	/* for staging cursor */
		} bc_ag;
		struct {
			struct xfbtree		*xfbtree;
			struct xfs_perag	*pag;
		} bc_mem;
	};

	/* per-format private data */
	union {
		struct {
			int		allocated;
		} bc_bmap;	/* bmapbt */
		struct {
			unsigned int	nr_ops;		/* # record updates */
			unsigned int	shape_changes;	/* # of extent splits */
		} bc_refc;	/* refcountbt */
	};

	/* Must be at the end of the struct! */
	struct xfs_btree_level	bc_levels[];
};

/*
 * Compute the size of a btree cursor that can handle a btree of a given
 * height.  The bc_levels array handles node and leaf blocks, so its size
 * is exactly nlevels.
 */
static inline size_t
xfs_btree_cur_sizeof(unsigned int nlevels)
{
	return struct_size_t(struct xfs_btree_cur, bc_levels, nlevels);
}
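
/*
 * Illustrative sketch only: per-btree cursor caches are created with
 * xfs_btree_cur_sizeof() so that bc_levels[] has room for the tallest
 * possible tree of that type.  The cache name and helper below are
 * hypothetical.
 */
static inline struct kmem_cache *
xfs_btree_example_create_cur_cache(unsigned int maxlevels)
{
	return kmem_cache_create("xfs_example_btree_cur",
			xfs_btree_cur_sizeof(maxlevels), 0, 0, NULL);
}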

/* cursor state flags */
/*
 * The root of this btree is a fakeroot structure so that we can stage a btree
 * rebuild without leaving it accessible via primary metadata.  The ops struct
 * is dynamically allocated and must be freed when the cursor is deleted.
 */
#define XFS_BTREE_STAGING		(1U << 0)

/* We are converting a delalloc reservation (only for bmbt btrees) */
#define	XFS_BTREE_BMBT_WASDEL		(1U << 1)

/* For extent swap, ignore owner check in verifier (only for bmbt btrees) */
#define	XFS_BTREE_BMBT_INVALID_OWNER	(1U << 2)

/* Cursor is active (only for allocbt btrees) */
#define	XFS_BTREE_ALLOCBT_ACTIVE	(1U << 3)

#define	XFS_BTREE_NOERROR	0
#define	XFS_BTREE_ERROR		1

/*
 * Convert from buffer to btree block header.
 */
#define	XFS_BUF_TO_BLOCK(bp)	((struct xfs_btree_block *)((bp)->b_addr))

xfs_failaddr_t __xfs_btree_check_block(struct xfs_btree_cur *cur,
		struct xfs_btree_block *block, int level, struct xfs_buf *bp);
int __xfs_btree_check_ptr(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *ptr, int index, int level);

/*
 * Check that block header is ok.
 */
int
xfs_btree_check_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* generic btree block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp);	/* buffer containing block, if any */

/*
 * Delete the btree cursor.
 */
void
xfs_btree_del_cursor(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			error);	/* del because of error */

/*
 * Duplicate the btree cursor.
 * Allocate a new one, copy the record, re-get the buffers.
 */
int					/* error */
xfs_btree_dup_cursor(
	struct xfs_btree_cur		*cur,	/* input cursor */
	struct xfs_btree_cur		**ncur);/* output cursor */

/*
 * Compute first and last byte offsets for the fields given.
 * Interprets the offsets table, which contains struct field offsets.
 */
void
xfs_btree_offsets(
	uint32_t		fields,	/* bitmask of fields */
	const short		*offsets,/* table of field offsets */
	int			nbits,	/* number of bits to inspect */
	int			*first,	/* output: first byte offset */
	int			*last);	/* output: last byte offset */

/*
 * Initialise a new btree block header
 */
void xfs_btree_init_buf(struct xfs_mount *mp, struct xfs_buf *bp,
		const struct xfs_btree_ops *ops, __u16 level, __u16 numrecs,
		__u64 owner);
void xfs_btree_init_block(struct xfs_mount *mp,
		struct xfs_btree_block *buf, const struct xfs_btree_ops *ops,
		__u16 level, __u16 numrecs, __u64 owner);

/*
 * Common btree core entry points.
 */
int xfs_btree_increment(struct xfs_btree_cur *, int, int *);
int xfs_btree_decrement(struct xfs_btree_cur *, int, int *);
int xfs_btree_lookup(struct xfs_btree_cur *, xfs_lookup_t, int *);
int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *);
int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *);
int xfs_btree_insert(struct xfs_btree_cur *, int *);
int xfs_btree_delete(struct xfs_btree_cur *, int *);
int xfs_btree_get_rec(struct xfs_btree_cur *, union xfs_btree_rec **, int *);
int xfs_btree_change_owner(struct xfs_btree_cur *cur, uint64_t new_owner,
			   struct list_head *buffer_list);

/*
 * btree block CRC helpers
 */
void xfs_btree_fsblock_calc_crc(struct xfs_buf *);
bool xfs_btree_fsblock_verify_crc(struct xfs_buf *);
void xfs_btree_agblock_calc_crc(struct xfs_buf *);
bool xfs_btree_agblock_verify_crc(struct xfs_buf *);

/*
 * Internal btree helpers also used by xfs_bmap.c.
 */
void xfs_btree_log_block(struct xfs_btree_cur *, struct xfs_buf *, uint32_t);
void xfs_btree_log_recs(struct xfs_btree_cur *, struct xfs_buf *, int, int);
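
/*
 * Illustrative sketch only: after changing the record count in a btree
 * block, log just the header fields that were touched using the XFS_BB_*
 * bitmask defined above.  Hypothetical helper, not part of XFS.
 */
static inline void
xfs_btree_example_log_numrecs(struct xfs_btree_cur *cur, struct xfs_buf *bp)
{
	xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
}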

/*
 * Helpers.
 */
static inline int xfs_btree_get_numrecs(const struct xfs_btree_block *block)
{
	return be16_to_cpu(block->bb_numrecs);
}

static inline void xfs_btree_set_numrecs(struct xfs_btree_block *block,
		uint16_t numrecs)
{
	block->bb_numrecs = cpu_to_be16(numrecs);
}

static inline int xfs_btree_get_level(const struct xfs_btree_block *block)
{
	return be16_to_cpu(block->bb_level);
}
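
/*
 * Illustrative sketch only: bump the record count of a btree block after
 * records have been added to it, using the endian-safe helpers above.
 * Hypothetical helper, not part of the XFS API.
 */
static inline void
xfs_btree_example_add_numrecs(struct xfs_btree_block *block, int count)
{
	xfs_btree_set_numrecs(block, xfs_btree_get_numrecs(block) + count);
}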


/*
 * Min and max functions for extlen, agblock, fileoff, and filblks types.
 */
#define	XFS_EXTLEN_MIN(a,b)	min_t(xfs_extlen_t, (a), (b))
#define	XFS_EXTLEN_MAX(a,b)	max_t(xfs_extlen_t, (a), (b))
#define	XFS_AGBLOCK_MIN(a,b)	min_t(xfs_agblock_t, (a), (b))
#define	XFS_AGBLOCK_MAX(a,b)	max_t(xfs_agblock_t, (a), (b))
#define	XFS_FILEOFF_MIN(a,b)	min_t(xfs_fileoff_t, (a), (b))
#define	XFS_FILEOFF_MAX(a,b)	max_t(xfs_fileoff_t, (a), (b))
#define	XFS_FILBLKS_MIN(a,b)	min_t(xfs_filblks_t, (a), (b))
#define	XFS_FILBLKS_MAX(a,b)	max_t(xfs_filblks_t, (a), (b))

xfs_failaddr_t xfs_btree_agblock_v5hdr_verify(struct xfs_buf *bp);
xfs_failaddr_t xfs_btree_agblock_verify(struct xfs_buf *bp,
		unsigned int max_recs);
xfs_failaddr_t xfs_btree_fsblock_v5hdr_verify(struct xfs_buf *bp,
		uint64_t owner);
xfs_failaddr_t xfs_btree_fsblock_verify(struct xfs_buf *bp,
		unsigned int max_recs);
xfs_failaddr_t xfs_btree_memblock_verify(struct xfs_buf *bp,
		unsigned int max_recs);

unsigned int xfs_btree_compute_maxlevels(const unsigned int *limits,
		unsigned long long records);
unsigned long long xfs_btree_calc_size(const unsigned int *limits,
		unsigned long long records);
unsigned int xfs_btree_space_to_height(const unsigned int *limits,
		unsigned long long blocks);
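
/*
 * Illustrative sketch only: worst-case height of a hypothetical btree given
 * its minimum records per leaf and per node block.  The limits[] convention
 * ([0] = leaf minrecs, [1] = node minrecs) is assumed to match the existing
 * callers of xfs_btree_compute_maxlevels(); the helper name is made up.
 */
static inline unsigned int
xfs_btree_example_maxlevels(unsigned int leaf_minrecs,
		unsigned int node_minrecs, unsigned long long records)
{
	unsigned int		limits[2] = { leaf_minrecs, node_minrecs };

	return xfs_btree_compute_maxlevels(limits, records);
}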

/*
 * Return codes for the query range iterator function are 0 to continue
 * iterating, and non-zero to stop iterating.  Any non-zero value will be
 * passed up to the _query_range caller.  The special value -ECANCELED can be
 * used to stop iteration, because _query_range never generates that error
 * code on its own.
 */
typedef int (*xfs_btree_query_range_fn)(struct xfs_btree_cur *cur,
		const union xfs_btree_rec *rec, void *priv);

int xfs_btree_query_range(struct xfs_btree_cur *cur,
		const union xfs_btree_irec *low_rec,
		const union xfs_btree_irec *high_rec,
		xfs_btree_query_range_fn fn, void *priv);
int xfs_btree_query_all(struct xfs_btree_cur *cur, xfs_btree_query_range_fn fn,
		void *priv);
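
/*
 * Illustrative sketch only: a query_range callback that counts records and
 * stops iterating early by returning -ECANCELED once a caller-supplied
 * limit (passed via @priv) has been reached.  Hypothetical example, not
 * part of the XFS API.
 */
struct xfs_btree_example_count {
	unsigned long long	seen;
	unsigned long long	limit;
};

static inline int
xfs_btree_example_count_fn(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*rec,
	void				*priv)
{
	struct xfs_btree_example_count	*ec = priv;

	if (++ec->seen >= ec->limit)
		return -ECANCELED;
	return 0;
}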

typedef int (*xfs_btree_visit_blocks_fn)(struct xfs_btree_cur *cur, int level,
		void *data);
/* Visit record blocks. */
#define XFS_BTREE_VISIT_RECORDS		(1 << 0)
/* Visit leaf blocks. */
#define XFS_BTREE_VISIT_LEAVES		(1 << 1)
/* Visit all blocks. */
#define XFS_BTREE_VISIT_ALL		(XFS_BTREE_VISIT_RECORDS | \
					 XFS_BTREE_VISIT_LEAVES)
int xfs_btree_visit_blocks(struct xfs_btree_cur *cur,
		xfs_btree_visit_blocks_fn fn, unsigned int flags, void *data);

int xfs_btree_count_blocks(struct xfs_btree_cur *cur, xfs_extlen_t *blocks);

union xfs_btree_rec *xfs_btree_rec_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);
union xfs_btree_key *xfs_btree_key_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);
union xfs_btree_key *xfs_btree_high_key_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);
union xfs_btree_ptr *xfs_btree_ptr_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);
int xfs_btree_lookup_get_block(struct xfs_btree_cur *cur, int level,
		const union xfs_btree_ptr *pp, struct xfs_btree_block **blkp);
struct xfs_btree_block *xfs_btree_get_block(struct xfs_btree_cur *cur,
		int level, struct xfs_buf **bpp);
bool xfs_btree_ptr_is_null(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *ptr);
int64_t xfs_btree_diff_two_ptrs(struct xfs_btree_cur *cur,
				const union xfs_btree_ptr *a,
				const union xfs_btree_ptr *b);
void xfs_btree_get_sibling(struct xfs_btree_cur *cur,
			   struct xfs_btree_block *block,
			   union xfs_btree_ptr *ptr, int lr);
void xfs_btree_get_keys(struct xfs_btree_cur *cur,
		struct xfs_btree_block *block, union xfs_btree_key *key);
union xfs_btree_key *xfs_btree_high_key_from_key(struct xfs_btree_cur *cur,
		union xfs_btree_key *key);
typedef bool (*xfs_btree_key_gap_fn)(struct xfs_btree_cur *cur,
		const union xfs_btree_key *key1,
		const union xfs_btree_key *key2);

int xfs_btree_has_records(struct xfs_btree_cur *cur,
		const union xfs_btree_irec *low,
		const union xfs_btree_irec *high,
		const union xfs_btree_key *mask,
		enum xbtree_recpacking *outcome);

bool xfs_btree_has_more_records(struct xfs_btree_cur *cur);
struct xfs_ifork *xfs_btree_ifork_ptr(struct xfs_btree_cur *cur);

/* Key comparison helpers */
static inline bool
xfs_btree_keycmp_lt(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) < 0;
}

static inline bool
xfs_btree_keycmp_gt(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) > 0;
}

static inline bool
xfs_btree_keycmp_eq(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) == 0;
}

static inline bool
xfs_btree_keycmp_le(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return !xfs_btree_keycmp_gt(cur, key1, key2);
}

static inline bool
xfs_btree_keycmp_ge(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return !xfs_btree_keycmp_lt(cur, key1, key2);
}

static inline bool
xfs_btree_keycmp_ne(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return !xfs_btree_keycmp_eq(cur, key1, key2);
}
582 /* Masked key comparison helpers */
583 static inline bool
xfs_btree_masked_keycmp_lt(struct xfs_btree_cur * cur,const union xfs_btree_key * key1,const union xfs_btree_key * key2,const union xfs_btree_key * mask)584 xfs_btree_masked_keycmp_lt(
585 	struct xfs_btree_cur		*cur,
586 	const union xfs_btree_key	*key1,
587 	const union xfs_btree_key	*key2,
588 	const union xfs_btree_key	*mask)
589 {
590 	return cur->bc_ops->diff_two_keys(cur, key1, key2, mask) < 0;
591 }
592 
593 static inline bool
xfs_btree_masked_keycmp_gt(struct xfs_btree_cur * cur,const union xfs_btree_key * key1,const union xfs_btree_key * key2,const union xfs_btree_key * mask)594 xfs_btree_masked_keycmp_gt(
595 	struct xfs_btree_cur		*cur,
596 	const union xfs_btree_key	*key1,
597 	const union xfs_btree_key	*key2,
598 	const union xfs_btree_key	*mask)
599 {
600 	return cur->bc_ops->diff_two_keys(cur, key1, key2, mask) > 0;
601 }
602 
603 static inline bool
xfs_btree_masked_keycmp_ge(struct xfs_btree_cur * cur,const union xfs_btree_key * key1,const union xfs_btree_key * key2,const union xfs_btree_key * mask)604 xfs_btree_masked_keycmp_ge(
605 	struct xfs_btree_cur		*cur,
606 	const union xfs_btree_key	*key1,
607 	const union xfs_btree_key	*key2,
608 	const union xfs_btree_key	*mask)
609 {
610 	return !xfs_btree_masked_keycmp_lt(cur, key1, key2, mask);
611 }
612 
613 /* Does this cursor point to the last block in the given level? */
614 static inline bool
xfs_btree_islastblock(struct xfs_btree_cur * cur,int level)615 xfs_btree_islastblock(
616 	struct xfs_btree_cur	*cur,
617 	int			level)
618 {
619 	struct xfs_btree_block	*block;
620 	struct xfs_buf		*bp;
621 
622 	block = xfs_btree_get_block(cur, level, &bp);
623 
624 	if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
625 		return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
626 	return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
627 }
628 
629 void xfs_btree_set_ptr_null(struct xfs_btree_cur *cur,
630 		union xfs_btree_ptr *ptr);
631 int xfs_btree_get_buf_block(struct xfs_btree_cur *cur,
632 		const union xfs_btree_ptr *ptr, struct xfs_btree_block **block,
633 		struct xfs_buf **bpp);
634 int xfs_btree_read_buf_block(struct xfs_btree_cur *cur,
635 		const union xfs_btree_ptr *ptr, int flags,
636 		struct xfs_btree_block **block, struct xfs_buf **bpp);
637 void xfs_btree_set_sibling(struct xfs_btree_cur *cur,
638 		struct xfs_btree_block *block, const union xfs_btree_ptr *ptr,
639 		int lr);
640 void xfs_btree_init_block_cur(struct xfs_btree_cur *cur,
641 		struct xfs_buf *bp, int level, int numrecs);
642 void xfs_btree_copy_ptrs(struct xfs_btree_cur *cur,
643 		union xfs_btree_ptr *dst_ptr,
644 		const union xfs_btree_ptr *src_ptr, int numptrs);
645 void xfs_btree_copy_keys(struct xfs_btree_cur *cur,
646 		union xfs_btree_key *dst_key,
647 		const union xfs_btree_key *src_key, int numkeys);
648 void xfs_btree_init_ptr_from_cur(struct xfs_btree_cur *cur,
649 		union xfs_btree_ptr *ptr);
650 
651 static inline struct xfs_btree_cur *
xfs_btree_alloc_cursor(struct xfs_mount * mp,struct xfs_trans * tp,const struct xfs_btree_ops * ops,uint8_t maxlevels,struct kmem_cache * cache)652 xfs_btree_alloc_cursor(
653 	struct xfs_mount	*mp,
654 	struct xfs_trans	*tp,
655 	const struct xfs_btree_ops *ops,
656 	uint8_t			maxlevels,
657 	struct kmem_cache	*cache)
658 {
659 	struct xfs_btree_cur	*cur;
660 
661 	ASSERT(ops->ptr_len == XFS_BTREE_LONG_PTR_LEN ||
662 	       ops->ptr_len == XFS_BTREE_SHORT_PTR_LEN);
663 
664 	/* BMBT allocations can come through from non-transactional context. */
665 	cur = kmem_cache_zalloc(cache,
666 			GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
667 	cur->bc_ops = ops;
668 	cur->bc_tp = tp;
669 	cur->bc_mp = mp;
670 	cur->bc_maxlevels = maxlevels;
671 	cur->bc_cache = cache;
672 
673 	return cur;
674 }
675 
676 int __init xfs_btree_init_cur_caches(void);
677 void xfs_btree_destroy_cur_caches(void);
678 
679 int xfs_btree_goto_left_edge(struct xfs_btree_cur *cur);
680 
681 /* Does this level of the cursor point to the inode root (and not a block)? */
682 static inline bool
xfs_btree_at_iroot(const struct xfs_btree_cur * cur,int level)683 xfs_btree_at_iroot(
684 	const struct xfs_btree_cur	*cur,
685 	int				level)
686 {
687 	return cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
688 	       level == cur->bc_nlevels - 1;
689 }
690 
691 #endif	/* __XFS_BTREE_H__ */
692