/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BTREE_H__
#define __XFS_BTREE_H__

/* Forward declarations: pointers to these are used below; no layout needed. */
struct xfs_buf;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
struct xfs_ifork;
struct xfs_perag;

/*
 * Generic key, ptr and record wrapper structures.
 *
 * These are disk format structures, and are converted where necessary
 * by the btree specific code that needs to interpret them.
 */
union xfs_btree_ptr {
	__be32			s;	/* short form ptr */
	__be64			l;	/* long form ptr */
};

/*
 * The in-core btree key.  Overlapping btrees actually store two keys
 * per pointer, so we reserve enough memory to hold both.  The __*bigkey
 * items should never be accessed directly.
 */
union xfs_btree_key {
	struct xfs_bmbt_key		bmbt;
	xfs_bmdr_key_t			bmbr;	/* bmbt root block */
	xfs_alloc_key_t			alloc;
	struct xfs_inobt_key		inobt;
	struct xfs_rmap_key		rmap;
	struct xfs_rmap_key		__rmap_bigkey[2];
	struct xfs_refcount_key		refc;
};

/* The in-core btree record, one arm per btree type. */
union xfs_btree_rec {
	struct xfs_bmbt_rec		bmbt;
	xfs_bmdr_rec_t			bmbr;	/* bmbt root block */
	struct xfs_alloc_rec		alloc;
	struct xfs_inobt_rec		inobt;
	struct xfs_rmap_rec		rmap;
	struct xfs_refcount_rec		refc;
};

/*
 * This nonsense is to make -wlint happy.
 */
#define	XFS_LOOKUP_EQ	((xfs_lookup_t)XFS_LOOKUP_EQi)
#define	XFS_LOOKUP_LE	((xfs_lookup_t)XFS_LOOKUP_LEi)
#define	XFS_LOOKUP_GE	((xfs_lookup_t)XFS_LOOKUP_GEi)

struct xfs_btree_ops;
uint32_t xfs_btree_magic(struct xfs_mount *mp, const struct xfs_btree_ops *ops);

/*
 * For logging record fields.  Bit positions match the field order in the
 * btree block header.
 */
#define	XFS_BB_MAGIC		(1u << 0)
#define	XFS_BB_LEVEL		(1u << 1)
#define	XFS_BB_NUMRECS		(1u << 2)
#define	XFS_BB_LEFTSIB		(1u << 3)
#define	XFS_BB_RIGHTSIB		(1u << 4)
#define	XFS_BB_BLKNO		(1u << 5)
#define	XFS_BB_LSN		(1u << 6)
#define	XFS_BB_UUID		(1u << 7)
#define	XFS_BB_OWNER		(1u << 8)
/* Non-CRC btree block headers carry only the first 5 fields ... */
#define	XFS_BB_NUM_BITS	5
#define	XFS_BB_ALL_BITS	((1u << XFS_BB_NUM_BITS) - 1)
/* ... while v5 (CRC-enabled) block headers carry all 9. */
#define	XFS_BB_NUM_BITS_CRC 9
#define	XFS_BB_ALL_BITS_CRC ((1u << XFS_BB_NUM_BITS_CRC) - 1)

/*
 * Generic stats interface
 */
#define XFS_BTREE_STATS_INC(cur, stat)	\
	XFS_STATS_INC_OFF((cur)->bc_mp, \
		(cur)->bc_ops->statoff + __XBTS_ ## stat)
#define XFS_BTREE_STATS_ADD(cur, stat, val)	\
	XFS_STATS_ADD_OFF((cur)->bc_mp, \
		(cur)->bc_ops->statoff + __XBTS_ ## stat, val)

enum xbtree_key_contig {
	XBTREE_KEY_GAP = 0,
	XBTREE_KEY_CONTIGUOUS,
	XBTREE_KEY_OVERLAP,
};

/*
 * Decide if these two numeric btree key fields are contiguous, overlapping,
 * or if there's a gap between them.  @x should be the field from the high
 * key and @y should be the field from the low key.
 */
static inline enum xbtree_key_contig xbtree_key_contig(uint64_t x, uint64_t y)
{
	/* Advance to the first value beyond the high key. */
	x++;
	if (x < y)
		return XBTREE_KEY_GAP;
	if (x == y)
		return XBTREE_KEY_CONTIGUOUS;
	return XBTREE_KEY_OVERLAP;
}

/* On-disk sizes of the two btree pointer formats. */
#define XFS_BTREE_LONG_PTR_LEN		(sizeof(__be64))
#define XFS_BTREE_SHORT_PTR_LEN		(sizeof(__be32))

/* Where is the root of this btree anchored? */
enum xfs_btree_type {
	XFS_BTREE_TYPE_AG,
	XFS_BTREE_TYPE_INODE,
	XFS_BTREE_TYPE_MEM,
};

struct xfs_btree_ops {
	const char		*name;

	/* Type of btree - AG-rooted or inode-rooted */
	enum xfs_btree_type	type;

	/* XFS_BTGEO_* flags that determine the geometry of the btree */
	unsigned int		geom_flags;

	/* size of the key, pointer, and record structures */
	size_t			key_len;
	size_t			ptr_len;
	size_t			rec_len;

	/* LRU refcount to set on each btree buffer created */
	unsigned int		lru_refs;

	/* offset of btree stats array */
	unsigned int		statoff;

	/* sick mask for health reporting (not for bmap btrees) */
	unsigned int		sick_mask;

	/* cursor operations */
	struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
	void	(*update_cursor)(struct xfs_btree_cur *src,
				 struct xfs_btree_cur *dst);

	/* update btree root pointer */
	void	(*set_root)(struct xfs_btree_cur *cur,
			    const union xfs_btree_ptr *nptr, int level_change);

	/* block allocation / freeing */
	int	(*alloc_block)(struct xfs_btree_cur *cur,
			       const union xfs_btree_ptr *start_bno,
			       union xfs_btree_ptr *new_bno,
			       int *stat);
	int	(*free_block)(struct xfs_btree_cur *cur, struct xfs_buf *bp);

	/* records in block/level */
	int	(*get_minrecs)(struct xfs_btree_cur *cur, int level);
	int	(*get_maxrecs)(struct xfs_btree_cur *cur, int level);

	/* records on disk.  Matters only for the root in the inode case. */
	int	(*get_dmaxrecs)(struct xfs_btree_cur *cur, int level);

	/* init values of btree structures */
	void	(*init_key_from_rec)(union xfs_btree_key *key,
				     const union xfs_btree_rec *rec);
	void	(*init_rec_from_cur)(struct xfs_btree_cur *cur,
				     union xfs_btree_rec *rec);
	void	(*init_ptr_from_cur)(struct xfs_btree_cur *cur,
				     union xfs_btree_ptr *ptr);
	void	(*init_high_key_from_rec)(union xfs_btree_key *key,
					  const union xfs_btree_rec *rec);

	/*
	 * Compare key value and cursor value -- positive if key > cur,
	 * negative if key < cur, and zero if equal.
	 */
	int	(*cmp_key_with_cur)(struct xfs_btree_cur *cur,
				    const union xfs_btree_key *key);

	/*
	 * Compare key1 and key2 -- positive if key1 > key2, negative if
	 * key1 < key2, and zero if equal.  If the @mask parameter is non NULL,
	 * each key field to be used in the comparison must contain a nonzero
	 * value.
	 */
	int	(*cmp_two_keys)(struct xfs_btree_cur *cur,
				const union xfs_btree_key *key1,
				const union xfs_btree_key *key2,
				const union xfs_btree_key *mask);

	const struct xfs_buf_ops	*buf_ops;

	/* check that k1 is lower than k2 */
	int	(*keys_inorder)(struct xfs_btree_cur *cur,
				const union xfs_btree_key *k1,
				const union xfs_btree_key *k2);

	/* check that r1 is lower than r2 */
	int	(*recs_inorder)(struct xfs_btree_cur *cur,
				const union xfs_btree_rec *r1,
				const union xfs_btree_rec *r2);

	/*
	 * Are these two btree keys immediately adjacent?
	 *
	 * Given two btree keys @key1 and @key2, decide if it is impossible for
	 * there to be a third btree key K satisfying the relationship
	 * @key1 < K < @key2.  To determine if two btree records are
	 * immediately adjacent, @key1 should be the high key of the first
	 * record and @key2 should be the low key of the second record.
	 * If the @mask parameter is non NULL, each key field to be used in the
	 * comparison must contain a nonzero value.
	 */
	enum xbtree_key_contig (*keys_contiguous)(struct xfs_btree_cur *cur,
			       const union xfs_btree_key *key1,
			       const union xfs_btree_key *key2,
			       const union xfs_btree_key *mask);

	/*
	 * Reallocate the space for if_broot to fit the number of records.
	 * Move the records and pointers in if_broot to fit the new size.  When
	 * shrinking this will eliminate holes between the records and pointers
	 * created by the caller.  When growing this will create holes to be
	 * filled in by the caller.
	 *
	 * The caller must not request to add more records than would fit in
	 * the on-disk inode root.  If the if_broot is currently NULL, then if
	 * we are adding records, one will be allocated.  The caller must also
	 * not request that the number of records go below zero, although it
	 * can go to zero.
	 */
	struct xfs_btree_block *(*broot_realloc)(struct xfs_btree_cur *cur,
				unsigned int new_numrecs);
};

/* btree geometry flags */
#define XFS_BTGEO_OVERLAPPING	(1U << 0) /* overlapping intervals */
#define XFS_BTGEO_IROOT_RECORDS	(1U << 1) /* iroot can store records */

/* In-core (cpu-endian) record, one arm per btree type. */
union xfs_btree_irec {
	struct xfs_alloc_rec_incore	a;
	struct xfs_bmbt_irec		b;
	struct xfs_inobt_rec_incore	i;
	struct xfs_rmap_irec		r;
	struct xfs_refcount_irec	rc;
};

/* Per-level cursor state: which block we hold and where we point into it. */
struct xfs_btree_level {
	/* buffer pointer */
	struct xfs_buf		*bp;

	/* key/record number */
	uint16_t		ptr;

	/* readahead info */
#define XFS_BTCUR_LEFTRA	(1 << 0) /* left sibling has been read-ahead */
#define XFS_BTCUR_RIGHTRA	(1 << 1) /* right sibling has been read-ahead */
	uint16_t		ra;
};

/*
 * Btree cursor structure.
 * This collects all information needed by the btree code in one place.
 */
struct xfs_btree_cur
{
	struct xfs_trans	*bc_tp;	/* transaction we're in, if any */
	struct xfs_mount	*bc_mp;	/* file system mount struct */
	const struct xfs_btree_ops *bc_ops;
	struct kmem_cache	*bc_cache; /* cursor cache */
	unsigned int		bc_flags; /* btree features - below */
	union xfs_btree_irec	bc_rec;	/* current insert/search record value */
	uint8_t			bc_nlevels; /* number of levels in the tree */
	uint8_t			bc_maxlevels; /* maximum levels for this btree type */
	struct xfs_group	*bc_group;

	/* per-type information */
	union {
		struct {
			struct xfs_inode	*ip;
			short			forksize;
			char			whichfork;
			struct xbtree_ifakeroot	*ifake;	/* for staging cursor */
		} bc_ino;
		struct {
			struct xfs_buf		*agbp;
			struct xbtree_afakeroot	*afake;	/* for staging cursor */
		} bc_ag;
		struct {
			struct xfbtree		*xfbtree;
		} bc_mem;
	};

	/* per-format private data */
	union {
		struct {
			int		allocated;
		} bc_bmap;	/* bmapbt */
		struct {
			unsigned int	nr_ops;		/* # record updates */
			unsigned int	shape_changes;	/* # of extent splits */
		} bc_refc;	/* refcountbt/rtrefcountbt */
	};

	/* Must be at the end of the struct! */
	struct xfs_btree_level	bc_levels[];
};

/*
 * Compute the size of a btree cursor that can handle a btree of a given
 * height.  The bc_levels array handles node and leaf blocks, so its size
 * is exactly nlevels.
 */
static inline size_t
xfs_btree_cur_sizeof(unsigned int nlevels)
{
	return struct_size_t(struct xfs_btree_cur, bc_levels, nlevels);
}

/* cursor state flags */
/*
 * The root of this btree is a fakeroot structure so that we can stage a btree
 * rebuild without leaving it accessible via primary metadata.  The ops struct
 * is dynamically allocated and must be freed when the cursor is deleted.
 */
#define XFS_BTREE_STAGING		(1U << 0)

/* We are converting a delalloc reservation (only for bmbt btrees) */
#define	XFS_BTREE_BMBT_WASDEL		(1U << 1)

/* For extent swap, ignore owner check in verifier (only for bmbt btrees) */
#define	XFS_BTREE_BMBT_INVALID_OWNER	(1U << 2)

/* Cursor is active (only for allocbt btrees) */
#define	XFS_BTREE_ALLOCBT_ACTIVE	(1U << 3)

#define	XFS_BTREE_NOERROR	0
#define	XFS_BTREE_ERROR		1

/*
 * Convert from buffer to btree block header.
 */
#define	XFS_BUF_TO_BLOCK(bp)	((struct xfs_btree_block *)((bp)->b_addr))

xfs_failaddr_t __xfs_btree_check_block(struct xfs_btree_cur *cur,
		struct xfs_btree_block *block, int level, struct xfs_buf *bp);
int __xfs_btree_check_ptr(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *ptr, int index, int level);

/*
 * Check that block header is ok.
 */
int
xfs_btree_check_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* generic btree block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp);	/* buffer containing block, if any */

/*
 * Delete the btree cursor.
 */
void
xfs_btree_del_cursor(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			error);	/* del because of error */

/*
 * Duplicate the btree cursor.
 * Allocate a new one, copy the record, re-get the buffers.
 */
int					/* error */
xfs_btree_dup_cursor(
	struct xfs_btree_cur		*cur,	/* input cursor */
	struct xfs_btree_cur		**ncur);/* output cursor */

/*
 * Compute first and last byte offsets for the fields given.
 * Interprets the offsets table, which contains struct field offsets.
 */
void
xfs_btree_offsets(
	uint32_t		fields,	/* bitmask of fields */
	const short		*offsets,/* table of field offsets */
	int			nbits,	/* number of bits to inspect */
	int			*first,	/* output: first byte offset */
	int			*last);	/* output: last byte offset */

/*
 * Initialise a new btree block header
 */
void xfs_btree_init_buf(struct xfs_mount *mp, struct xfs_buf *bp,
		const struct xfs_btree_ops *ops, __u16 level, __u16 numrecs,
		__u64 owner);
void xfs_btree_init_block(struct xfs_mount *mp,
		struct xfs_btree_block *buf, const struct xfs_btree_ops *ops,
		__u16 level, __u16 numrecs, __u64 owner);

/*
 * Common btree core entry points.
 */
int xfs_btree_increment(struct xfs_btree_cur *, int, int *);
int xfs_btree_decrement(struct xfs_btree_cur *, int, int *);
int xfs_btree_lookup(struct xfs_btree_cur *, xfs_lookup_t, int *);
int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *);
int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *);
int xfs_btree_insert(struct xfs_btree_cur *, int *);
int xfs_btree_delete(struct xfs_btree_cur *, int *);
int xfs_btree_get_rec(struct xfs_btree_cur *, union xfs_btree_rec **, int *);
int xfs_btree_change_owner(struct xfs_btree_cur *cur, uint64_t new_owner,
			   struct list_head *buffer_list);

/*
 * btree block CRC helpers
 */
void xfs_btree_fsblock_calc_crc(struct xfs_buf *);
bool xfs_btree_fsblock_verify_crc(struct xfs_buf *);
void xfs_btree_agblock_calc_crc(struct xfs_buf *);
bool xfs_btree_agblock_verify_crc(struct xfs_buf *);

/*
 * Internal btree helpers also used by xfs_bmap.c.
 */
void xfs_btree_log_block(struct xfs_btree_cur *, struct xfs_buf *, uint32_t);
void xfs_btree_log_recs(struct xfs_btree_cur *, struct xfs_buf *, int, int);

/*
 * Helpers.
 */
static inline int xfs_btree_get_numrecs(const struct xfs_btree_block *block)
{
	return be16_to_cpu(block->bb_numrecs);
}

static inline void xfs_btree_set_numrecs(struct xfs_btree_block *block,
		uint16_t numrecs)
{
	block->bb_numrecs = cpu_to_be16(numrecs);
}

static inline int xfs_btree_get_level(const struct xfs_btree_block *block)
{
	return be16_to_cpu(block->bb_level);
}


/*
 * Min and max functions for extlen, agblock, fileoff, and filblks types.
 */
#define	XFS_EXTLEN_MIN(a,b)	min_t(xfs_extlen_t, (a), (b))
#define	XFS_EXTLEN_MAX(a,b)	max_t(xfs_extlen_t, (a), (b))
#define	XFS_AGBLOCK_MIN(a,b)	min_t(xfs_agblock_t, (a), (b))
#define	XFS_AGBLOCK_MAX(a,b)	max_t(xfs_agblock_t, (a), (b))
#define	XFS_FILEOFF_MIN(a,b)	min_t(xfs_fileoff_t, (a), (b))
#define	XFS_FILEOFF_MAX(a,b)	max_t(xfs_fileoff_t, (a), (b))
#define	XFS_FILBLKS_MIN(a,b)	min_t(xfs_filblks_t, (a), (b))
#define	XFS_FILBLKS_MAX(a,b)	max_t(xfs_filblks_t, (a), (b))

/* Block verifiers, shared by the per-btree-type buf_ops. */
xfs_failaddr_t xfs_btree_agblock_v5hdr_verify(struct xfs_buf *bp);
xfs_failaddr_t xfs_btree_agblock_verify(struct xfs_buf *bp,
		unsigned int max_recs);
xfs_failaddr_t xfs_btree_fsblock_v5hdr_verify(struct xfs_buf *bp,
		uint64_t owner);
xfs_failaddr_t xfs_btree_fsblock_verify(struct xfs_buf *bp,
		unsigned int max_recs);
xfs_failaddr_t xfs_btree_memblock_verify(struct xfs_buf *bp,
		unsigned int max_recs);

/* Btree geometry computations from per-level record-count limits. */
unsigned int xfs_btree_compute_maxlevels(const unsigned int *limits,
		unsigned long long records);
unsigned long long xfs_btree_calc_size(const unsigned int *limits,
		unsigned long long records);
unsigned int xfs_btree_space_to_height(const unsigned int *limits,
		unsigned long long blocks);

/*
 * Return codes for the query range iterator function are 0 to continue
 * iterating, and non-zero to stop iterating.  Any non-zero value will be
 * passed up to the _query_range caller.  The special value -ECANCELED can be
 * used to stop iteration, because _query_range never generates that error
 * code on its own.
 */
typedef int (*xfs_btree_query_range_fn)(struct xfs_btree_cur *cur,
		const union xfs_btree_rec *rec, void *priv);

int xfs_btree_query_range(struct xfs_btree_cur *cur,
		const union xfs_btree_irec *low_rec,
		const union xfs_btree_irec *high_rec,
		xfs_btree_query_range_fn fn, void *priv);
int xfs_btree_query_all(struct xfs_btree_cur *cur, xfs_btree_query_range_fn fn,
		void *priv);

typedef int (*xfs_btree_visit_blocks_fn)(struct xfs_btree_cur *cur, int level,
		void *data);
/* Visit record blocks. */
#define XFS_BTREE_VISIT_RECORDS		(1 << 0)
/* Visit leaf blocks. */
#define XFS_BTREE_VISIT_LEAVES		(1 << 1)
/* Visit all blocks. */
#define XFS_BTREE_VISIT_ALL		(XFS_BTREE_VISIT_RECORDS | \
					 XFS_BTREE_VISIT_LEAVES)
int xfs_btree_visit_blocks(struct xfs_btree_cur *cur,
		xfs_btree_visit_blocks_fn fn, unsigned int flags, void *data);

int xfs_btree_count_blocks(struct xfs_btree_cur *cur, xfs_filblks_t *blocks);

/* Accessors for the nth record/key/pointer within a btree block. */
union xfs_btree_rec *xfs_btree_rec_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);
union xfs_btree_key *xfs_btree_key_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);
union xfs_btree_key *xfs_btree_high_key_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);
union xfs_btree_ptr *xfs_btree_ptr_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);
int xfs_btree_lookup_get_block(struct xfs_btree_cur *cur, int level,
		const union xfs_btree_ptr *pp, struct xfs_btree_block **blkp);
struct xfs_btree_block *xfs_btree_get_block(struct xfs_btree_cur *cur,
		int level, struct xfs_buf **bpp);
bool xfs_btree_ptr_is_null(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *ptr);
int xfs_btree_cmp_two_ptrs(struct xfs_btree_cur *cur,
			   const union xfs_btree_ptr *a,
			   const union xfs_btree_ptr *b);
void xfs_btree_get_sibling(struct xfs_btree_cur *cur,
			   struct xfs_btree_block *block,
			   union xfs_btree_ptr *ptr, int lr);
void xfs_btree_get_keys(struct xfs_btree_cur *cur,
		struct xfs_btree_block *block, union xfs_btree_key *key);
union xfs_btree_key *xfs_btree_high_key_from_key(struct xfs_btree_cur *cur,
		union xfs_btree_key *key);
typedef bool (*xfs_btree_key_gap_fn)(struct xfs_btree_cur *cur,
		const union xfs_btree_key *key1,
		const union xfs_btree_key *key2);

int xfs_btree_has_records(struct xfs_btree_cur *cur,
		const union xfs_btree_irec *low,
		const union xfs_btree_irec *high,
		const union xfs_btree_key *mask,
		enum xbtree_recpacking *outcome);

bool xfs_btree_has_more_records(struct xfs_btree_cur *cur);
struct xfs_ifork *xfs_btree_ifork_ptr(struct xfs_btree_cur *cur);

/* Key comparison helpers */
static inline bool
xfs_btree_keycmp_lt(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return cur->bc_ops->cmp_two_keys(cur, key1, key2, NULL) < 0;
}

static inline bool
xfs_btree_keycmp_gt(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return cur->bc_ops->cmp_two_keys(cur, key1, key2, NULL) > 0;
}

static inline bool
xfs_btree_keycmp_eq(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return cur->bc_ops->cmp_two_keys(cur, key1, key2, NULL) == 0;
}

static inline bool
xfs_btree_keycmp_le(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return !xfs_btree_keycmp_gt(cur, key1, key2);
}

static inline bool
xfs_btree_keycmp_ge(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return !xfs_btree_keycmp_lt(cur, key1, key2);
}

static inline bool
xfs_btree_keycmp_ne(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return !xfs_btree_keycmp_eq(cur, key1, key2);
}

/* Masked key comparison helpers */
static inline bool
xfs_btree_masked_keycmp_lt(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	return cur->bc_ops->cmp_two_keys(cur, key1, key2, mask) < 0;
}

static inline bool
xfs_btree_masked_keycmp_gt(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	return cur->bc_ops->cmp_two_keys(cur, key1, key2, mask) > 0;
}

static inline bool
xfs_btree_masked_keycmp_ge(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	return !xfs_btree_masked_keycmp_lt(cur, key1, key2, mask);
}

/* Does this cursor point to the last block in the given level? */
static inline bool
xfs_btree_islastblock(
	struct xfs_btree_cur	*cur,
	int			level)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;

	block = xfs_btree_get_block(cur, level, &bp);

	/* A null right sibling pointer means no block follows this one. */
	if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
		return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
	return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
}

void xfs_btree_set_ptr_null(struct xfs_btree_cur *cur,
		union xfs_btree_ptr *ptr);
int xfs_btree_get_buf_block(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *ptr, struct xfs_btree_block **block,
		struct xfs_buf **bpp);
int xfs_btree_read_buf_block(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *ptr, int flags,
		struct xfs_btree_block **block, struct xfs_buf **bpp);
void xfs_btree_set_sibling(struct xfs_btree_cur *cur,
		struct xfs_btree_block *block, const union xfs_btree_ptr *ptr,
		int lr);
void xfs_btree_init_block_cur(struct xfs_btree_cur *cur,
		struct xfs_buf *bp, int level, int numrecs);
void xfs_btree_copy_ptrs(struct xfs_btree_cur *cur,
		union xfs_btree_ptr *dst_ptr,
		const union xfs_btree_ptr *src_ptr, int numptrs);
void xfs_btree_copy_keys(struct xfs_btree_cur *cur,
		union xfs_btree_key *dst_key,
		const union xfs_btree_key *src_key, int numkeys);
void xfs_btree_init_ptr_from_cur(struct xfs_btree_cur *cur,
		union xfs_btree_ptr *ptr);

/* Allocate and minimally initialize a btree cursor of the given height. */
static inline struct xfs_btree_cur *
xfs_btree_alloc_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	const struct xfs_btree_ops *ops,
	uint8_t			maxlevels,
	struct kmem_cache	*cache)
{
	struct xfs_btree_cur	*cur;

	ASSERT(ops->ptr_len == XFS_BTREE_LONG_PTR_LEN ||
	       ops->ptr_len == XFS_BTREE_SHORT_PTR_LEN);

	/* BMBT allocations can come through from non-transactional context. */
	cur = kmem_cache_zalloc(cache,
			GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
	cur->bc_ops = ops;
	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_maxlevels = maxlevels;
	cur->bc_cache = cache;

	return cur;
}

int __init xfs_btree_init_cur_caches(void);
void xfs_btree_destroy_cur_caches(void);

int xfs_btree_goto_left_edge(struct xfs_btree_cur *cur);

/* Does this level of the cursor point to the inode root (and not a block)? */
static inline bool
xfs_btree_at_iroot(
	const struct xfs_btree_cur	*cur,
	int				level)
{
	return cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
	       level == cur->bc_nlevels - 1;
}

int xfs_btree_alloc_metafile_block(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *start, union xfs_btree_ptr *newp,
		int *stat);
int xfs_btree_free_metafile_block(struct xfs_btree_cur *cur,
		struct xfs_buf *bp);

#endif	/* __XFS_BTREE_H__ */