/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BKEY_H
#define _BCACHEFS_BKEY_H

#include <linux/bug.h>
#include "bcachefs_format.h"
#include "bkey_types.h"
#include "btree_types.h"
#include "util.h"
#include "vstructs.h"

enum bch_validate_flags {
	BCH_VALIDATE_write	= (1U << 0),
	BCH_VALIDATE_commit	= (1U << 1),
	BCH_VALIDATE_journal	= (1U << 2),
};

#if 0

/*
 * compiled unpack functions are disabled, pending a new interface for
 * dynamically allocating executable memory:
 */

#ifdef CONFIG_X86_64
#define HAVE_BCACHEFS_COMPILED_UNPACK	1
#endif
#endif

void bch2_bkey_packed_to_binary_text(struct printbuf *,
				     const struct bkey_format *,
				     const struct bkey_packed *);

enum bkey_lr_packed {
	BKEY_PACKED_BOTH,
	BKEY_PACKED_RIGHT,
	BKEY_PACKED_LEFT,
	BKEY_PACKED_NONE,
};

#define bkey_lr_packed(_l, _r)						\
	((_l)->format + ((_r)->format << 1))

static inline void bkey_p_copy(struct bkey_packed *dst, const struct bkey_packed *src)
{
	memcpy_u64s_small(dst, src, src->u64s);
}

static inline void bkey_copy(struct bkey_i *dst, const struct bkey_i *src)
{
	memcpy_u64s_small(dst, src, src->k.u64s);
}

struct btree;

__pure
unsigned bch2_bkey_greatest_differing_bit(const struct btree *,
					  const struct bkey_packed *,
					  const struct bkey_packed *);
__pure
unsigned bch2_bkey_ffs(const struct btree *, const struct bkey_packed *);

__pure
int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *,
					  const struct bkey_packed *,
					  const struct btree *);

__pure
int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *,
					       const struct bkey_packed *,
					       const struct bpos *);

__pure
int bch2_bkey_cmp_packed(const struct btree *,
			 const struct bkey_packed *,
			 const struct bkey_packed *);

__pure
int __bch2_bkey_cmp_left_packed(const struct btree *,
				const struct bkey_packed *,
				const struct bpos *);

static inline __pure
int bkey_cmp_left_packed(const struct btree *b,
			 const struct bkey_packed *l, const struct bpos *r)
{
	return __bch2_bkey_cmp_left_packed(b, l, r);
}

/*
 * The compiler generates better code when we pass bpos by ref, but it's often
 * enough terribly convenient to pass it by val... as much as I hate c++, const
 * ref would be nice here:
 */
__pure __flatten
static inline int bkey_cmp_left_packed_byval(const struct btree *b,
					     const struct bkey_packed *l,
					     struct bpos r)
{
	return bkey_cmp_left_packed(b, l, &r);
}

static __always_inline bool bpos_eq(struct bpos l, struct bpos r)
{
	return  !((l.inode	^ r.inode) |
		  (l.offset	^ r.offset) |
		  (l.snapshot	^ r.snapshot));
}

static __always_inline bool bpos_lt(struct bpos l, struct bpos r)
{
	return  l.inode != r.inode	 ? l.inode < r.inode :
		l.offset != r.offset	 ? l.offset < r.offset :
		l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false;
}

static __always_inline bool bpos_le(struct bpos l, struct bpos r)
{
	return  l.inode != r.inode	 ? l.inode < r.inode :
		l.offset != r.offset	 ? l.offset < r.offset :
		l.snapshot != r.snapshot ? l.snapshot < r.snapshot : true;
}

static __always_inline bool bpos_gt(struct bpos l, struct bpos r)
{
	return bpos_lt(r, l);
}

static __always_inline bool bpos_ge(struct bpos l, struct bpos r)
{
	return bpos_le(r, l);
}

static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
{
	return  cmp_int(l.inode,    r.inode) ?:
		cmp_int(l.offset,   r.offset) ?:
		cmp_int(l.snapshot, r.snapshot);
}

static inline struct bpos bpos_min(struct bpos l, struct bpos r)
{
	return bpos_lt(l, r) ? l : r;
}

static inline struct bpos bpos_max(struct bpos l, struct bpos r)
{
	return bpos_gt(l, r) ? l : r;
}

static __always_inline bool bkey_eq(struct bpos l, struct bpos r)
{
	return  !((l.inode	^ r.inode) |
		  (l.offset	^ r.offset));
}

static __always_inline bool bkey_lt(struct bpos l, struct bpos r)
{
	return l.inode != r.inode
		? l.inode < r.inode
		: l.offset < r.offset;
}

static __always_inline bool bkey_le(struct bpos l, struct bpos r)
{
	return l.inode != r.inode
		? l.inode < r.inode
		: l.offset <= r.offset;
}

static __always_inline bool bkey_gt(struct bpos l, struct bpos r)
{
	return bkey_lt(r, l);
}

static __always_inline bool bkey_ge(struct bpos l, struct bpos r)
{
	return bkey_le(r, l);
}

static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
{
	return  cmp_int(l.inode,    r.inode) ?:
		cmp_int(l.offset,   r.offset);
}

static inline struct bpos bkey_min(struct bpos l, struct bpos r)
{
	return bkey_lt(l, r) ? l : r;
}

static inline struct bpos bkey_max(struct bpos l, struct bpos r)
{
	return bkey_gt(l, r) ? l : r;
}

static inline bool bkey_and_val_eq(struct bkey_s_c l, struct bkey_s_c r)
{
	return bpos_eq(l.k->p, r.k->p) &&
		bkey_bytes(l.k) == bkey_bytes(r.k) &&
		!memcmp(l.v, r.v, bkey_val_bytes(l.k));
}

void bch2_bpos_swab(struct bpos *);
void bch2_bkey_swab_key(const struct bkey_format *, struct bkey_packed *);

static __always_inline int bversion_cmp(struct bversion l, struct bversion r)
{
	return  cmp_int(l.hi, r.hi) ?:
		cmp_int(l.lo, r.lo);
}

#define ZERO_VERSION	((struct bversion) { .hi = 0, .lo = 0 })
#define MAX_VERSION	((struct bversion) { .hi = ~0, .lo = ~0ULL })

static __always_inline int bversion_zero(struct bversion v)
{
	return !bversion_cmp(v, ZERO_VERSION);
}

#ifdef CONFIG_BCACHEFS_DEBUG
/* statement expressions confusing unlikely()? */
#define bkey_packed(_k)							\
	({ EBUG_ON((_k)->format > KEY_FORMAT_CURRENT);			\
	   (_k)->format != KEY_FORMAT_CURRENT; })
#else
#define bkey_packed(_k)		((_k)->format != KEY_FORMAT_CURRENT)
#endif

/*
 * It's safe to treat an unpacked bkey as a packed one, but not the reverse
 */
static inline struct bkey_packed *bkey_to_packed(struct bkey_i *k)
{
	return (struct bkey_packed *) k;
}

static inline const struct bkey_packed *bkey_to_packed_c(const struct bkey_i *k)
{
	return (const struct bkey_packed *) k;
}

static inline struct bkey_i *packed_to_bkey(struct bkey_packed *k)
{
	return bkey_packed(k) ? NULL : (struct bkey_i *) k;
}

static inline const struct bkey *packed_to_bkey_c(const struct bkey_packed *k)
{
	return bkey_packed(k) ? NULL : (const struct bkey *) k;
}
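
/*
 * Illustrative sketch (hypothetical helper, not part of the interface above):
 * because an unpacked key is a valid packed key but not vice versa,
 * packed_to_bkey_c() can return NULL, and callers take the format-aware path
 * when it does:
 */
static inline bool bkey_example_pos_eq(const struct btree *b,
				       const struct bkey_packed *k,
				       struct bpos pos)
{
	const struct bkey *u = packed_to_bkey_c(k);

	/* fast path: key is already in the current (unpacked) format */
	if (u)
		return bpos_eq(u->p, pos);

	/* slow path: format-aware comparison against the packed key */
	return !bkey_cmp_left_packed_byval(b, k, pos);
}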

static inline unsigned bkey_format_key_bits(const struct bkey_format *format)
{
	return format->bits_per_field[BKEY_FIELD_INODE] +
		format->bits_per_field[BKEY_FIELD_OFFSET] +
		format->bits_per_field[BKEY_FIELD_SNAPSHOT];
}

static inline struct bpos bpos_successor(struct bpos p)
{
	if (!++p.snapshot &&
	    !++p.offset &&
	    !++p.inode)
		BUG();

	return p;
}

static inline struct bpos bpos_predecessor(struct bpos p)
{
	if (!p.snapshot-- &&
	    !p.offset-- &&
	    !p.inode--)
		BUG();

	return p;
}

static inline struct bpos bpos_nosnap_successor(struct bpos p)
{
	p.snapshot = 0;

	if (!++p.offset &&
	    !++p.inode)
		BUG();

	return p;
}

static inline struct bpos bpos_nosnap_predecessor(struct bpos p)
{
	p.snapshot = 0;

	if (!p.offset-- &&
	    !p.inode--)
		BUG();

	return p;
}

static inline u64 bkey_start_offset(const struct bkey *k)
{
	return k->p.offset - k->size;
}

static inline struct bpos bkey_start_pos(const struct bkey *k)
{
	return (struct bpos) {
		.inode		= k->p.inode,
		.offset		= bkey_start_offset(k),
		.snapshot	= k->p.snapshot,
	};
}

/* Packed helpers */

static inline unsigned bkeyp_key_u64s(const struct bkey_format *format,
				      const struct bkey_packed *k)
{
	return bkey_packed(k) ? format->key_u64s : BKEY_U64s;
}

static inline bool bkeyp_u64s_valid(const struct bkey_format *f,
				    const struct bkey_packed *k)
{
	return ((unsigned) k->u64s - bkeyp_key_u64s(f, k) <= U8_MAX - BKEY_U64s);
}

static inline unsigned bkeyp_key_bytes(const struct bkey_format *format,
				       const struct bkey_packed *k)
{
	return bkeyp_key_u64s(format, k) * sizeof(u64);
}

static inline unsigned bkeyp_val_u64s(const struct bkey_format *format,
				      const struct bkey_packed *k)
{
	return k->u64s - bkeyp_key_u64s(format, k);
}

static inline size_t bkeyp_val_bytes(const struct bkey_format *format,
				     const struct bkey_packed *k)
{
	return bkeyp_val_u64s(format, k) * sizeof(u64);
}

static inline void set_bkeyp_val_u64s(const struct bkey_format *format,
				      struct bkey_packed *k, unsigned val_u64s)
{
	k->u64s = bkeyp_key_u64s(format, k) + val_u64s;
}

#define bkeyp_val(_format, _k)						\
	((struct bch_val *) ((u64 *) (_k)->_data + bkeyp_key_u64s(_format, _k)))

extern const struct bkey_format bch2_bkey_format_current;

bool bch2_bkey_transform(const struct bkey_format *,
			 struct bkey_packed *,
			 const struct bkey_format *,
			 const struct bkey_packed *);

struct bkey __bch2_bkey_unpack_key(const struct bkey_format *,
				   const struct bkey_packed *);

#ifndef HAVE_BCACHEFS_COMPILED_UNPACK
struct bpos __bkey_unpack_pos(const struct bkey_format *,
			      const struct bkey_packed *);
#endif

bool bch2_bkey_pack_key(struct bkey_packed *, const struct bkey *,
			const struct bkey_format *);

enum bkey_pack_pos_ret {
	BKEY_PACK_POS_EXACT,
	BKEY_PACK_POS_SMALLER,
	BKEY_PACK_POS_FAIL,
};

enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *, struct bpos,
						const struct btree *);

static inline bool bkey_pack_pos(struct bkey_packed *out, struct bpos in,
				 const struct btree *b)
{
	return bch2_bkey_pack_pos_lossy(out, in, b) == BKEY_PACK_POS_EXACT;
}
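
/*
 * Illustrative sketch (hypothetical helper, not part of the interface above):
 * the usual calling convention for a search position, packing it into the
 * node's format when possible so the comparison stays on the packed fast
 * path, and falling back to comparing against the bpos itself otherwise:
 */
static inline int bkey_example_cmp_pos(const struct btree *b,
				       const struct bkey_packed *k,
				       struct bpos pos)
{
	struct bkey_packed search;

	return bkey_pack_pos(&search, pos, b)
		? bch2_bkey_cmp_packed(b, k, &search)
		: bkey_cmp_left_packed_byval(b, k, pos);
}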

void bch2_bkey_unpack(const struct btree *, struct bkey_i *,
		      const struct bkey_packed *);
bool bch2_bkey_pack(struct bkey_packed *, const struct bkey_i *,
		    const struct bkey_format *);

typedef void (*compiled_unpack_fn)(struct bkey *, const struct bkey_packed *);

static inline void
__bkey_unpack_key_format_checked(const struct btree *b,
				 struct bkey *dst,
				 const struct bkey_packed *src)
{
	if (IS_ENABLED(HAVE_BCACHEFS_COMPILED_UNPACK)) {
		compiled_unpack_fn unpack_fn = b->aux_data;
		unpack_fn(dst, src);

		if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
		    bch2_expensive_debug_checks) {
			struct bkey dst2 = __bch2_bkey_unpack_key(&b->format, src);

			BUG_ON(memcmp(dst, &dst2, sizeof(*dst)));
		}
	} else {
		*dst = __bch2_bkey_unpack_key(&b->format, src);
	}
}

static inline struct bkey
bkey_unpack_key_format_checked(const struct btree *b,
			       const struct bkey_packed *src)
{
	struct bkey dst;

	__bkey_unpack_key_format_checked(b, &dst, src);
	return dst;
}

static inline void __bkey_unpack_key(const struct btree *b,
				     struct bkey *dst,
				     const struct bkey_packed *src)
{
	if (likely(bkey_packed(src)))
		__bkey_unpack_key_format_checked(b, dst, src);
	else
		*dst = *packed_to_bkey_c(src);
}

/**
 * bkey_unpack_key -- unpack just the key, not the value
 */
static inline struct bkey bkey_unpack_key(const struct btree *b,
					  const struct bkey_packed *src)
{
	return likely(bkey_packed(src))
		? bkey_unpack_key_format_checked(b, src)
		: *packed_to_bkey_c(src);
}

static inline struct bpos
bkey_unpack_pos_format_checked(const struct btree *b,
			       const struct bkey_packed *src)
{
#ifdef HAVE_BCACHEFS_COMPILED_UNPACK
	return bkey_unpack_key_format_checked(b, src).p;
#else
	return __bkey_unpack_pos(&b->format, src);
#endif
}

static inline struct bpos bkey_unpack_pos(const struct btree *b,
					  const struct bkey_packed *src)
{
	return likely(bkey_packed(src))
		? bkey_unpack_pos_format_checked(b, src)
		: packed_to_bkey_c(src)->p;
}

/* Disassembled bkeys */

static inline struct bkey_s_c bkey_disassemble(const struct btree *b,
					       const struct bkey_packed *k,
					       struct bkey *u)
{
	__bkey_unpack_key(b, u, k);

	return (struct bkey_s_c) { u, bkeyp_val(&b->format, k), };
}

/* non const version: */
static inline struct bkey_s __bkey_disassemble(const struct btree *b,
					       struct bkey_packed *k,
					       struct bkey *u)
{
	__bkey_unpack_key(b, u, k);

	return (struct bkey_s) { .k = u, .v = bkeyp_val(&b->format, k), };
}

static inline u64 bkey_field_max(const struct bkey_format *f,
				 enum bch_bkey_fields nr)
{
	return f->bits_per_field[nr] < 64
		? (le64_to_cpu(f->field_offset[nr]) +
		   ~(~0ULL << f->bits_per_field[nr]))
		: U64_MAX;
}
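
/*
 * Illustrative sketch (hypothetical helper, not part of the interface above):
 * bkey_field_max() gives the largest value a field can hold in a packed
 * format, so a field value is representable only if it lies between the
 * format's field_offset and that maximum:
 */
static inline bool bkey_example_offset_packable(const struct bkey_format *f,
						struct bpos pos)
{
	return pos.offset >= le64_to_cpu(f->field_offset[BKEY_FIELD_OFFSET]) &&
	       pos.offset <= bkey_field_max(f, BKEY_FIELD_OFFSET);
}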

#ifdef HAVE_BCACHEFS_COMPILED_UNPACK

int bch2_compile_bkey_format(const struct bkey_format *, void *);

#else

static inline int bch2_compile_bkey_format(const struct bkey_format *format,
					   void *out) { return 0; }

#endif

static inline void bkey_reassemble(struct bkey_i *dst,
				   struct bkey_s_c src)
{
	dst->k = *src.k;
	memcpy_u64s_small(&dst->v, src.v, bkey_val_u64s(src.k));
}

/* byte order helpers */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__

static inline unsigned high_word_offset(const struct bkey_format *f)
{
	return f->key_u64s - 1;
}

#define high_bit_offset		0
#define nth_word(p, n)		((p) - (n))

#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__

static inline unsigned high_word_offset(const struct bkey_format *f)
{
	return 0;
}

#define high_bit_offset		KEY_PACKED_BITS_START
#define nth_word(p, n)		((p) + (n))

#else
#error edit for your odd byteorder.
#endif

#define high_word(f, k)		((u64 *) (k)->_data + high_word_offset(f))
#define next_word(p)		nth_word(p, 1)
#define prev_word(p)		nth_word(p, -1)

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_bkey_pack_test(void);
#else
static inline void bch2_bkey_pack_test(void) {}
#endif

#define bkey_fields()							\
	x(BKEY_FIELD_INODE,		p.inode)			\
	x(BKEY_FIELD_OFFSET,		p.offset)			\
	x(BKEY_FIELD_SNAPSHOT,		p.snapshot)			\
	x(BKEY_FIELD_SIZE,		size)				\
	x(BKEY_FIELD_VERSION_HI,	version.hi)			\
	x(BKEY_FIELD_VERSION_LO,	version.lo)

struct bkey_format_state {
	u64 field_min[BKEY_NR_FIELDS];
	u64 field_max[BKEY_NR_FIELDS];
};

void bch2_bkey_format_init(struct bkey_format_state *);

static inline void __bkey_format_add(struct bkey_format_state *s, unsigned field, u64 v)
{
	s->field_min[field] = min(s->field_min[field], v);
	s->field_max[field] = max(s->field_max[field], v);
}

/*
 * Changes @format so that @k can be successfully packed with @format
 */
static inline void bch2_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k)
{
#define x(id, field) __bkey_format_add(s, id, k->field);
	bkey_fields()
#undef x
}

void bch2_bkey_format_add_pos(struct bkey_format_state *, struct bpos);
struct bkey_format bch2_bkey_format_done(struct bkey_format_state *);

static inline bool bch2_bkey_format_field_overflows(struct bkey_format *f, unsigned i)
{
	unsigned f_bits = f->bits_per_field[i];
	unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
	u64 unpacked_mask = ~((~0ULL << 1) << (unpacked_bits - 1));
	u64 field_offset = le64_to_cpu(f->field_offset[i]);

	if (f_bits > unpacked_bits)
		return true;

	if ((f_bits == unpacked_bits) && field_offset)
		return true;

	u64 f_mask = f_bits
		? ~((~0ULL << (f_bits - 1)) << 1)
		: 0;

	if (((field_offset + f_mask) & unpacked_mask) < field_offset)
		return true;
	return false;
}

int bch2_bkey_format_invalid(struct bch_fs *, struct bkey_format *,
			     enum bch_validate_flags, struct printbuf *);
void bch2_bkey_format_to_text(struct printbuf *, const struct bkey_format *);

#endif /* _BCACHEFS_BKEY_H */