/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BKEY_H
#define _BCACHEFS_BKEY_H

#include <linux/bug.h>
#include "bcachefs_format.h"
#include "bkey_types.h"
#include "btree_types.h"
#include "util.h"
#include "vstructs.h"

/*
 * Context in which a bkey/bkey_format is being validated; passed to the
 * *_invalid() checkers (see bch2_bkey_format_invalid() below) so they can
 * vary strictness by caller.
 */
enum bkey_invalid_flags {
	BKEY_INVALID_WRITE	= (1U << 0),
	BKEY_INVALID_COMMIT	= (1U << 1),
	BKEY_INVALID_JOURNAL	= (1U << 2),
};

#if 0

/*
 * compiled unpack functions are disabled, pending a new interface for
 * dynamically allocating executable memory:
 */

#ifdef CONFIG_X86_64
#define HAVE_BCACHEFS_COMPILED_UNPACK	1
#endif
#endif

/* Dump a packed key's raw bits to @out, annotated per the given format. */
void bch2_bkey_packed_to_binary_text(struct printbuf *,
				     const struct bkey_format *,
				     const struct bkey_packed *);

/*
 * Encodes which of a (left, right) pair of keys is in the packed (local
 * btree) format; computed by bkey_lr_packed() below.
 */
enum bkey_lr_packed {
	BKEY_PACKED_BOTH,
	BKEY_PACKED_RIGHT,
	BKEY_PACKED_LEFT,
	BKEY_PACKED_NONE,
};

/*
 * NOTE(review): folds the two ->format fields directly into an
 * enum bkey_lr_packed value; this only works if ->format is 0 (packed)
 * or 1 (KEY_FORMAT_CURRENT) — confirm against bcachefs_format.h.
 */
#define bkey_lr_packed(_l, _r)						\
	((_l)->format + ((_r)->format << 1))

/* Copy a packed key and its value (src->u64s u64s total). */
static inline void bkey_p_copy(struct bkey_packed *dst, const struct bkey_packed *src)
{
	memcpy_u64s_small(dst, src, src->u64s);
}

/* Copy an unpacked key and its value. */
static inline void bkey_copy(struct bkey_i *dst, const struct bkey_i *src)
{
	memcpy_u64s_small(dst, src, src->k.u64s);
}

struct btree;

__pure
unsigned bch2_bkey_greatest_differing_bit(const struct btree *,
					  const struct bkey_packed *,
					  const struct bkey_packed *);
__pure
unsigned bch2_bkey_ffs(const struct btree *, const struct bkey_packed *);

__pure
int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *,
					  const struct bkey_packed *,
					  const struct btree *);

__pure
int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *,
					       const struct bkey_packed *,
					       const struct bpos *);

__pure
int bch2_bkey_cmp_packed(const struct btree *,
			 const struct bkey_packed *,
			 const struct bkey_packed *);

__pure
int __bch2_bkey_cmp_left_packed(const struct btree *,
				const struct bkey_packed *,
				const struct bpos *);

/* Compare a (possibly packed) key against an unpacked position. */
static inline __pure
int bkey_cmp_left_packed(const struct btree *b,
			 const struct bkey_packed *l, const struct bpos *r)
{
	return __bch2_bkey_cmp_left_packed(b, l, r);
}

/*
 * The compiler generates better code when we pass bpos by ref, but it's often
 * enough terribly convenient to pass it by val... as much as I hate c++, const
 * ref would be nice here:
 */
__pure __flatten
static inline int bkey_cmp_left_packed_byval(const struct btree *b,
					     const struct bkey_packed *l,
					     struct bpos r)
{
	return bkey_cmp_left_packed(b, l, &r);
}

/* Branchless equality: OR together the XORs of all three fields. */
static __always_inline bool bpos_eq(struct bpos l, struct bpos r)
{
	return !((l.inode	^ r.inode) |
		 (l.offset	^ r.offset) |
		 (l.snapshot	^ r.snapshot));
}

/* Lexicographic ordering on (inode, offset, snapshot): */

static __always_inline bool bpos_lt(struct bpos l, struct bpos r)
{
	return  l.inode != r.inode		? l.inode < r.inode :
		l.offset != r.offset		? l.offset < r.offset :
		l.snapshot != r.snapshot	? l.snapshot < r.snapshot : false;
}

static __always_inline bool bpos_le(struct bpos l, struct bpos r)
{
	return  l.inode != r.inode		? l.inode < r.inode :
		l.offset != r.offset		? l.offset < r.offset :
		l.snapshot != r.snapshot	? l.snapshot < r.snapshot : true;
}

static __always_inline bool bpos_gt(struct bpos l, struct bpos r)
{
	return bpos_lt(r, l);
}

static __always_inline bool bpos_ge(struct bpos l, struct bpos r)
{
	return bpos_le(r, l);
}

/* Three-way compare: < 0, 0, or > 0. */
static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
{
	return  cmp_int(l.inode,	r.inode) ?:
		cmp_int(l.offset,	r.offset) ?:
		cmp_int(l.snapshot,	r.snapshot);
}

static inline struct bpos bpos_min(struct bpos l, struct bpos r)
{
	return bpos_lt(l, r) ? l : r;
}

static inline struct bpos bpos_max(struct bpos l, struct bpos r)
{
	return bpos_gt(l, r) ? l : r;
}

/*
 * bkey_* position comparisons: as above, but on (inode, offset) only —
 * the snapshot field is ignored.
 */

static __always_inline bool bkey_eq(struct bpos l, struct bpos r)
{
	return !((l.inode	^ r.inode) |
		 (l.offset	^ r.offset));
}

static __always_inline bool bkey_lt(struct bpos l, struct bpos r)
{
	return l.inode != r.inode
		? l.inode < r.inode
		: l.offset < r.offset;
}

static __always_inline bool bkey_le(struct bpos l, struct bpos r)
{
	return l.inode != r.inode
		? l.inode < r.inode
		: l.offset <= r.offset;
}

static __always_inline bool bkey_gt(struct bpos l, struct bpos r)
{
	return bkey_lt(r, l);
}

static __always_inline bool bkey_ge(struct bpos l, struct bpos r)
{
	return bkey_le(r, l);
}

static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
{
	return  cmp_int(l.inode,	r.inode) ?:
		cmp_int(l.offset,	r.offset);
}

static inline struct bpos bkey_min(struct bpos l, struct bpos r)
{
	return bkey_lt(l, r) ? l : r;
}

static inline struct bpos bkey_max(struct bpos l, struct bpos r)
{
	return bkey_gt(l, r) ? l : r;
}

/* Byte order conversion (defined in bkey.c): */
void bch2_bpos_swab(struct bpos *);
void bch2_bkey_swab_key(const struct bkey_format *, struct bkey_packed *);

/* Compare 96-bit versions: high word first, then low. */
static __always_inline int bversion_cmp(struct bversion l, struct bversion r)
{
	return  cmp_int(l.hi, r.hi) ?:
		cmp_int(l.lo, r.lo);
}

#define ZERO_VERSION	((struct bversion) { .hi = 0, .lo = 0 })
#define MAX_VERSION	((struct bversion) { .hi = ~0, .lo = ~0ULL })

static __always_inline int bversion_zero(struct bversion v)
{
	return !bversion_cmp(v, ZERO_VERSION);
}

#ifdef CONFIG_BCACHEFS_DEBUG
/* statement expressions confusing unlikely()? */
#define bkey_packed(_k)						\
	({ EBUG_ON((_k)->format > KEY_FORMAT_CURRENT);		\
	   (_k)->format != KEY_FORMAT_CURRENT; })
#else
#define bkey_packed(_k)		((_k)->format != KEY_FORMAT_CURRENT)
#endif

/*
 * It's safe to treat an unpacked bkey as a packed one, but not the reverse
 */
static inline struct bkey_packed *bkey_to_packed(struct bkey_i *k)
{
	return (struct bkey_packed *) k;
}

static inline const struct bkey_packed *bkey_to_packed_c(const struct bkey_i *k)
{
	return (const struct bkey_packed *) k;
}

/* Returns NULL if @k is actually packed and thus can't be cast back. */
static inline struct bkey_i *packed_to_bkey(struct bkey_packed *k)
{
	return bkey_packed(k) ? NULL : (struct bkey_i *) k;
}

static inline const struct bkey *packed_to_bkey_c(const struct bkey_packed *k)
{
	return bkey_packed(k) ? NULL : (const struct bkey *) k;
}

/* Total bits the format spends on the position (inode, offset, snapshot). */
static inline unsigned bkey_format_key_bits(const struct bkey_format *format)
{
	return format->bits_per_field[BKEY_FIELD_INODE] +
		format->bits_per_field[BKEY_FIELD_OFFSET] +
		format->bits_per_field[BKEY_FIELD_SNAPSHOT];
}

/*
 * Next position in (inode, offset, snapshot) order: increment with carry
 * from snapshot up through inode; BUG() on wrapping past the maximum
 * position (all three fields ~0).
 */
static inline struct bpos bpos_successor(struct bpos p)
{
	if (!++p.snapshot &&
	    !++p.offset &&
	    !++p.inode)
		BUG();

	return p;
}

/*
 * Inverse of bpos_successor(): post-decrement borrows from the next field
 * up when the old value was 0; BUG() on wrapping below POS_MIN (all zero).
 */
static inline struct bpos bpos_predecessor(struct bpos p)
{
	if (!p.snapshot-- &&
	    !p.offset-- &&
	    !p.inode--)
		BUG();

	return p;
}

/* Successor ignoring snapshots; the snapshot field is reset to 0. */
static inline struct bpos bpos_nosnap_successor(struct bpos p)
{
	p.snapshot = 0;

	if (!++p.offset &&
	    !++p.inode)
		BUG();

	return p;
}

/* Predecessor ignoring snapshots; the snapshot field is reset to 0. */
static inline struct bpos bpos_nosnap_predecessor(struct bpos p)
{
	p.snapshot = 0;

	if (!p.offset-- &&
	    !p.inode--)
		BUG();

	return p;
}

/*
 * Keys are positioned at the end of the range they refer to, so the start
 * offset is p.offset - size.
 */
static inline u64 bkey_start_offset(const struct bkey *k)
{
	return k->p.offset - k->size;
}

static inline struct bpos bkey_start_pos(const struct bkey *k)
{
	return (struct bpos) {
		.inode		= k->p.inode,
		.offset		= bkey_start_offset(k),
		.snapshot	= k->p.snapshot,
	};
}

/* Packed helpers */

/* u64s used by the key part: format-dependent if packed, BKEY_U64s if not. */
static inline unsigned bkeyp_key_u64s(const struct bkey_format *format,
				      const struct bkey_packed *k)
{
	return bkey_packed(k) ? format->key_u64s : BKEY_U64s;
}

static inline unsigned bkeyp_key_bytes(const struct bkey_format *format,
				       const struct bkey_packed *k)
{
	return bkeyp_key_u64s(format, k) * sizeof(u64);
}

/* u64s used by the value: total minus the key part. */
static inline unsigned bkeyp_val_u64s(const struct bkey_format *format,
				      const struct bkey_packed *k)
{
	return k->u64s - bkeyp_key_u64s(format, k);
}

static inline size_t bkeyp_val_bytes(const struct bkey_format *format,
				     const struct bkey_packed *k)
{
	return bkeyp_val_u64s(format, k) * sizeof(u64);
}

/* Set total u64s from the value size; the key part's size is implied. */
static inline void set_bkeyp_val_u64s(const struct bkey_format *format,
				      struct bkey_packed *k, unsigned val_u64s)
{
	k->u64s = bkeyp_key_u64s(format, k) + val_u64s;
}

/* Pointer to the value, which starts right after the (packed) key. */
#define bkeyp_val(_format, _k)						\
	((struct bch_val *) ((u64 *) (_k)->_data + bkeyp_key_u64s(_format, _k)))

extern const struct bkey_format bch2_bkey_format_current;

/* Repack a key from one bkey_format into another. */
bool bch2_bkey_transform(const struct bkey_format *,
			 struct bkey_packed *,
			 const struct bkey_format *,
			 const struct bkey_packed *);

struct bkey __bch2_bkey_unpack_key(const struct bkey_format *,
				   const struct bkey_packed *);

#ifndef HAVE_BCACHEFS_COMPILED_UNPACK
struct bpos __bkey_unpack_pos(const struct bkey_format *,
			      const struct bkey_packed *);
#endif

bool bch2_bkey_pack_key(struct bkey_packed *, const struct bkey *,
			const struct bkey_format *);

/* Result of lossy position packing (see bch2_bkey_pack_pos_lossy()): */
enum bkey_pack_pos_ret {
	BKEY_PACK_POS_EXACT,
	BKEY_PACK_POS_SMALLER,
	BKEY_PACK_POS_FAIL,
};

enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *, struct bpos,
						const struct btree *);

/* True only if @in was packable into @out exactly (losslessly). */
static inline bool bkey_pack_pos(struct bkey_packed *out, struct bpos in,
				 const struct btree *b)
{
	return bch2_bkey_pack_pos_lossy(out, in, b) == BKEY_PACK_POS_EXACT;
}

void bch2_bkey_unpack(const struct btree *, struct bkey_i *,
		      const struct bkey_packed *);
bool bch2_bkey_pack(struct bkey_packed *, const struct bkey_i *,
		    const struct bkey_format *);

/* Signature of the dynamically compiled per-btree unpack functions: */
typedef void (*compiled_unpack_fn)(struct bkey *, const struct bkey_packed *);

/*
 * Unpack @src into @dst; @src must really be in @b's packed format.
 * Uses the compiled unpacker stored in b->aux_data when available; with
 * expensive debug checks enabled, the result is cross-checked against the
 * generic C unpacker.
 */
static inline void
__bkey_unpack_key_format_checked(const struct btree *b,
				 struct bkey *dst,
				 const struct bkey_packed *src)
{
	if (IS_ENABLED(HAVE_BCACHEFS_COMPILED_UNPACK)) {
		compiled_unpack_fn unpack_fn = b->aux_data;
		unpack_fn(dst, src);

		if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
		    bch2_expensive_debug_checks) {
			struct bkey dst2 = __bch2_bkey_unpack_key(&b->format, src);

			BUG_ON(memcmp(dst, &dst2, sizeof(*dst)));
		}
	} else {
		*dst = __bch2_bkey_unpack_key(&b->format, src);
	}
}

static inline struct bkey
bkey_unpack_key_format_checked(const struct btree *b,
			       const struct bkey_packed *src)
{
	struct bkey dst;

	__bkey_unpack_key_format_checked(b, &dst, src);
	return dst;
}

/* As above, but also handles @src already being in unpacked format. */
static inline void __bkey_unpack_key(const struct btree *b,
				     struct bkey *dst,
				     const struct bkey_packed *src)
{
	if (likely(bkey_packed(src)))
		__bkey_unpack_key_format_checked(b, dst, src);
	else
		*dst = *packed_to_bkey_c(src);
}

/**
 * bkey_unpack_key -- unpack just the key, not the value
 * @b:		btree node @src came from (provides the format)
 * @src:	key to unpack, in either packed or current format
 */
static inline struct bkey bkey_unpack_key(const struct btree *b,
					  const struct bkey_packed *src)
{
	return likely(bkey_packed(src))
		? bkey_unpack_key_format_checked(b, src)
		: *packed_to_bkey_c(src);
}

static inline struct bpos
bkey_unpack_pos_format_checked(const struct btree *b,
			       const struct bkey_packed *src)
{
#ifdef HAVE_BCACHEFS_COMPILED_UNPACK
	/* no compiled position-only unpacker; unpack the whole key: */
	return bkey_unpack_key_format_checked(b, src).p;
#else
	return __bkey_unpack_pos(&b->format, src);
#endif
}

static inline struct bpos bkey_unpack_pos(const struct btree *b,
					  const struct bkey_packed *src)
{
	return likely(bkey_packed(src))
		? bkey_unpack_pos_format_checked(b, src)
		: packed_to_bkey_c(src)->p;
}

/* Disassembled bkeys */

/*
 * "Disassemble": build a bkey_s_c whose key field points at the unpacked
 * copy in caller-provided @u and whose value field points at @k's value
 * in place.
 */
static inline struct bkey_s_c bkey_disassemble(const struct btree *b,
					       const struct bkey_packed *k,
					       struct bkey *u)
{
	__bkey_unpack_key(b, u, k);

	return (struct bkey_s_c) { u, bkeyp_val(&b->format, k), };
}

/* non const version: */
static inline struct bkey_s __bkey_disassemble(const struct btree *b,
					       struct bkey_packed *k,
					       struct bkey *u)
{
	__bkey_unpack_key(b, u, k);

	return (struct bkey_s) { .k = u, .v = bkeyp_val(&b->format, k), };
}

/*
 * Largest value representable in field @nr: the field offset plus all-ones
 * in bits_per_field bits (saturating at U64_MAX for 64-bit fields, where
 * the shift below would be undefined).
 */
static inline u64 bkey_field_max(const struct bkey_format *f,
				 enum bch_bkey_fields nr)
{
	return f->bits_per_field[nr] < 64
		? (le64_to_cpu(f->field_offset[nr]) +
		   ~(~0ULL << f->bits_per_field[nr]))
		: U64_MAX;
}

#ifdef HAVE_BCACHEFS_COMPILED_UNPACK

int bch2_compile_bkey_format(const struct bkey_format *, void *);

#else

/* Compiled unpack unsupported: no-op stub. */
static inline int bch2_compile_bkey_format(const struct bkey_format *format,
					   void *out) { return 0; }

#endif

/* Copy key and value out of a disassembled key into contiguous @dst. */
static inline void bkey_reassemble(struct bkey_i *dst,
				   struct bkey_s_c src)
{
	dst->k = *src.k;
	memcpy_u64s_small(&dst->v, src.v, bkey_val_u64s(src.k));
}

/* byte order helpers */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__

/* On little endian the most significant word is the last u64: */
static inline unsigned high_word_offset(const struct bkey_format *f)
{
	return f->key_u64s - 1;
}

#define high_bit_offset		0
#define nth_word(p, n)		((p) - (n))

#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__

/* On big endian the most significant word comes first: */
static inline unsigned high_word_offset(const struct bkey_format *f)
{
	return 0;
}

#define high_bit_offset		KEY_PACKED_BITS_START
#define nth_word(p, n)		((p) + (n))

#else
#error edit for your odd byteorder.
#endif

#define high_word(f, k)		((u64 *) (k)->_data + high_word_offset(f))
#define next_word(p)		nth_word(p, 1)
#define prev_word(p)		nth_word(p, -1)

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_bkey_pack_test(void);
#else
static inline void bch2_bkey_pack_test(void) {}
#endif

/*
 * X-macro list pairing each packed-field id with the struct bkey member
 * it is packed from:
 */
#define bkey_fields()							\
	x(BKEY_FIELD_INODE,		p.inode)			\
	x(BKEY_FIELD_OFFSET,		p.offset)			\
	x(BKEY_FIELD_SNAPSHOT,		p.snapshot)			\
	x(BKEY_FIELD_SIZE,		size)				\
	x(BKEY_FIELD_VERSION_HI,	version.hi)			\
	x(BKEY_FIELD_VERSION_LO,	version.lo)

/* Accumulates per-field min/max while deriving a new bkey_format. */
struct bkey_format_state {
	u64 field_min[BKEY_NR_FIELDS];
	u64 field_max[BKEY_NR_FIELDS];
};

void bch2_bkey_format_init(struct bkey_format_state *);

/* Widen @s's range for @field to include value @v. */
static inline void __bkey_format_add(struct bkey_format_state *s, unsigned field, u64 v)
{
	s->field_min[field] = min(s->field_min[field], v);
	s->field_max[field] = max(s->field_max[field], v);
}

/*
 * Extends the format state @s so that @k will be packable by the format
 * eventually produced by bch2_bkey_format_done():
 */
static inline void bch2_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k)
{
#define x(id, field) __bkey_format_add(s, id, k->field);
	bkey_fields()
#undef x
}

void bch2_bkey_format_add_pos(struct bkey_format_state *, struct bpos);
struct bkey_format bch2_bkey_format_done(struct bkey_format_state *);
int bch2_bkey_format_invalid(struct bch_fs *, struct bkey_format *,
			     enum bkey_invalid_flags, struct printbuf *);
void bch2_bkey_format_to_text(struct printbuf *, const struct bkey_format *);

#endif /* _BCACHEFS_BKEY_H */