/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_EXTENTS_H
#define _BCACHEFS_EXTENTS_H

#include "bcachefs.h"
#include "bkey.h"
#include "extents_types.h"

struct bch_fs;
struct btree_trans;
enum bkey_invalid_flags;

/* extent entries: */

/* One past the last extent entry in @_e's value: */
#define extent_entry_last(_e)						\
	((typeof(&(_e).v->start[0])) bkey_val_end(_e))

/* Downcast entry -> ptr; const-preserving (EBUG-checks the entry type): */
#define entry_to_ptr(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_ptr(_entry));		\
									\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const struct bch_extent_ptr *) (_entry),		\
		(struct bch_extent_ptr *) (_entry));			\
})

/* downcast, preserves const */
#define to_entry(_entry)						\
({									\
	BUILD_BUG_ON(!type_is(_entry, union bch_extent_crc *) &&	\
		     !type_is(_entry, struct bch_extent_ptr *) &&	\
		     !type_is(_entry, struct bch_extent_stripe_ptr *));	\
									\
	__builtin_choose_expr(						\
		(type_is_exact(_entry, const union bch_extent_crc *) ||	\
		 type_is_exact(_entry, const struct bch_extent_ptr *) ||\
		 type_is_exact(_entry, const struct bch_extent_stripe_ptr *)),\
		(const union bch_extent_entry *) (_entry),		\
		(union bch_extent_entry *) (_entry));			\
})

/* Advance to the next entry; entries are variable-size (see extent_entry_bytes()): */
#define extent_entry_next(_entry)					\
	((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))

/*
 * Entry type of @e, or BCH_EXTENT_ENTRY_MAX if the type bitfield is
 * zero (i.e. not a valid entry).
 */
static inline unsigned
__extent_entry_type(const union bch_extent_entry *e)
{
	return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;
}

/*
 * Entry type of @e; unlike __extent_entry_type() the caller must know
 * @e is a valid entry (only EBUG-checked).
 */
static inline enum bch_extent_entry_type
extent_entry_type(const union bch_extent_entry *e)
{
	int ret = __ffs(e->type);

	EBUG_ON(ret < 0 || ret >= BCH_EXTENT_ENTRY_MAX);

	return ret;
}

/* Size in bytes of @entry, derived from its type via BCH_EXTENT_ENTRY_TYPES(): */
static inline size_t extent_entry_bytes(const union bch_extent_entry *entry)
{
	switch (extent_entry_type(entry)) {
#define x(f, n)						\
	case BCH_EXTENT_ENTRY_##f:			\
		return sizeof(struct bch_extent_##f);
	BCH_EXTENT_ENTRY_TYPES()
#undef x
	default:
		BUG();
	}
}

static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
{
	return extent_entry_bytes(entry) / sizeof(u64);
}

/*
 * Insert @new at position @dst inside @k's value, shifting the tail of
 * the value up and growing k->k.u64s accordingly.  Caller guarantees
 * there's room in the key.
 */
static inline void __extent_entry_insert(struct bkey_i *k,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));

	memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
			      dst, (u64 *) end - (u64 *) dst);
	k->k.u64s += extent_entry_u64s(new);
	memcpy_u64s_small(dst, new, extent_entry_u64s(new));
}

/* Remove @entry from @k's value, shifting the tail down and shrinking u64s: */
static inline void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
{
	union bch_extent_entry *next = extent_entry_next(entry);

	/* stripes have ptrs, but their layout doesn't work with this code */
	BUG_ON(k.k->type == KEY_TYPE_stripe);

	memmove_u64s_down(entry, next,
			  (u64 *) bkey_val_end(k) - (u64 *) next);
	k.k->u64s -= (u64 *) next - (u64 *) entry;
}

static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
{
	return extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
}

static inline bool extent_entry_is_stripe_ptr(const union bch_extent_entry *e)
{
	return extent_entry_type(e) == BCH_EXTENT_ENTRY_stripe_ptr;
}

/* True for any of the three packed crc entry formats (crc32/crc64/crc128): */
static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
{
	switch (extent_entry_type(e)) {
	case BCH_EXTENT_ENTRY_crc32:
	case BCH_EXTENT_ENTRY_crc64:
	case BCH_EXTENT_ENTRY_crc128:
		return true;
	default:
		return false;
	}
}

/* Any of the packed on-disk crc entry formats, discriminated by @type: */
union bch_extent_crc {
	u8				type;
	struct bch_extent_crc32		crc32;
	struct bch_extent_crc64		crc64;
	struct bch_extent_crc128	crc128;
};

#define __entry_to_crc(_entry)						\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const union bch_extent_crc *) (_entry),		\
		(union bch_extent_crc *) (_entry))

/* Downcast entry -> crc; const-preserving (EBUG-checks the entry type): */
#define entry_to_crc(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_crc(_entry));		\
									\
	__entry_to_crc(_entry);						\
})

/*
 * Unpack any of the three packed crc formats into the common in-memory
 * representation.  A NULL @crc means "no checksum/compression": sizes
 * default to the key's size.  Note the on-disk sizes are stored biased
 * by one (_compressed_size + 1).
 */
static inline struct bch_extent_crc_unpacked
bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
{
#define common_fields(_crc)						\
	.csum_type		= _crc.csum_type,			\
	.compression_type	= _crc.compression_type,		\
	.compressed_size	= _crc._compressed_size + 1,		\
	.uncompressed_size	= _crc._uncompressed_size + 1,		\
	.offset			= _crc.offset,				\
	.live_size		= k->size

	if (!crc)
		return (struct bch_extent_crc_unpacked) {
			.compressed_size	= k->size,
			.uncompressed_size	= k->size,
			.live_size		= k->size,
		};

	switch (extent_entry_type(to_entry(crc))) {
	case BCH_EXTENT_ENTRY_crc32: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc32),
		};

		*((__le32 *) &ret.csum.lo) = (__le32 __force) crc->crc32.csum;
		return ret;
	}
	case BCH_EXTENT_ENTRY_crc64: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc64),
			.nonce			= crc->crc64.nonce,
			.csum.lo		= (__force __le64) crc->crc64.csum_lo,
		};

		*((__le16 *) &ret.csum.hi) = (__le16 __force) crc->crc64.csum_hi;

		return ret;
	}
	case BCH_EXTENT_ENTRY_crc128: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc128),
			.nonce			= crc->crc128.nonce,
			.csum			= crc->crc128.csum,
		};

		return ret;
	}
	default:
		BUG();
	}
#undef common_fields
}

/* "incompressible" means we tried and failed to compress - not compressed: */
static inline bool crc_is_compressed(struct bch_extent_crc_unpacked crc)
{
	return (crc.compression_type != BCH_COMPRESSION_TYPE_none &&
		crc.compression_type != BCH_COMPRESSION_TYPE_incompressible);
}

static inline bool crc_is_encoded(struct bch_extent_crc_unpacked crc)
{
	return crc.csum_type != BCH_CSUM_none || crc_is_compressed(crc);
}

/* bkey_ptrs: generically over any key type that has ptrs */

struct bkey_ptrs_c {
	const union bch_extent_entry	*start;
	const union bch_extent_entry	*end;
};

struct bkey_ptrs {
	union bch_extent_entry	*start;
	union bch_extent_entry	*end;
};

/*
 * Entry range for any key type that carries pointers; returns an empty
 * (NULL, NULL) range for key types without pointers.
 */
static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr: {
		struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);

		return (struct bkey_ptrs_c) {
			to_entry(&e.v->start[0]),
			to_entry(extent_entry_last(e))
		};
	}
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);

		return (struct bkey_ptrs_c) {
			e.v->start,
			extent_entry_last(e)
		};
	}
	case KEY_TYPE_stripe: {
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

		return (struct bkey_ptrs_c) {
			to_entry(&s.v->ptrs[0]),
			to_entry(&s.v->ptrs[s.v->nr_blocks]),
		};
	}
	case KEY_TYPE_reflink_v: {
		struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);

		return (struct bkey_ptrs_c) {
			r.v->start,
			bkey_val_end(r),
		};
	}
	case KEY_TYPE_btree_ptr_v2: {
		struct bkey_s_c_btree_ptr_v2 e = bkey_s_c_to_btree_ptr_v2(k);

		return (struct bkey_ptrs_c) {
			to_entry(&e.v->start[0]),
			to_entry(extent_entry_last(e))
		};
	}
	default:
		return (struct bkey_ptrs_c) { NULL, NULL };
	}
}

/* Mutable variant of bch2_bkey_ptrs_c(): */
static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
{
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k.s_c);

	return (struct bkey_ptrs) {
		(void *) p.start,
		(void *) p.end
	};
}

#define __bkey_extent_entry_for_each_from(_start, _end, _entry)		\
	for ((_entry) = (_start);					\
	     (_entry) < (_end);						\
	     (_entry) = extent_entry_next(_entry))

/* Next ptr entry at or after @_ptr, or NULL if none before @_end: */
#define __bkey_ptr_next(_ptr, _end)					\
({									\
	typeof(_end) _entry;						\
									\
	__bkey_extent_entry_for_each_from(to_entry(_ptr), _end, _entry)	\
		if (extent_entry_is_ptr(_entry))			\
			break;						\
									\
	_entry < (_end) ? entry_to_ptr(_entry) : NULL;			\
})

#define bkey_extent_entry_for_each_from(_p, _entry, _start)		\
	__bkey_extent_entry_for_each_from(_start, (_p).end, _entry)

#define bkey_extent_entry_for_each(_p, _entry)				\
	bkey_extent_entry_for_each_from(_p, _entry, _p.start)

#define __bkey_for_each_ptr(_start, _end, _ptr)				\
	for ((_ptr) = (_start);						\
	     ((_ptr) = __bkey_ptr_next(_ptr, _end));			\
	     (_ptr)++)

#define bkey_ptr_next(_p, _ptr)						\
	__bkey_ptr_next(_ptr, (_p).end)

#define bkey_for_each_ptr(_p, _ptr)					\
	__bkey_for_each_ptr(&(_p).start->ptr, (_p).end, _ptr)

/*
 * Advance @_entry to the next ptr entry, accumulating the crc/stripe
 * state seen along the way into @_ptr (an extent_ptr_decoded); evaluates
 * false when the end of the value is reached.
 */
#define __bkey_ptr_next_decode(_k, _end, _ptr, _entry)			\
({									\
	__label__ out;							\
									\
	(_ptr).idx	= 0;						\
	(_ptr).has_ec	= false;					\
									\
	__bkey_extent_entry_for_each_from(_entry, _end, _entry)		\
		switch (extent_entry_type(_entry)) {			\
		case BCH_EXTENT_ENTRY_ptr:				\
			(_ptr).ptr = _entry->ptr;			\
			goto out;					\
		case BCH_EXTENT_ENTRY_crc32:				\
		case BCH_EXTENT_ENTRY_crc64:				\
		case BCH_EXTENT_ENTRY_crc128:				\
			(_ptr).crc = bch2_extent_crc_unpack(_k,		\
					entry_to_crc(_entry));		\
			break;						\
		case BCH_EXTENT_ENTRY_stripe_ptr:			\
			(_ptr).ec = _entry->stripe_ptr;			\
			(_ptr).has_ec	= true;				\
			break;						\
		default:						\
			/* nothing */					\
			break;						\
		}							\
out:									\
	_entry < (_end);						\
})

#define __bkey_for_each_ptr_decode(_k, _start, _end, _ptr, _entry)	\
	for ((_ptr).crc = bch2_extent_crc_unpack(_k, NULL),		\
	     (_entry) = _start;						\
	     __bkey_ptr_next_decode(_k, _end, _ptr, _entry);		\
	     (_entry) = extent_entry_next(_entry))

#define bkey_for_each_ptr_decode(_k, _p, _ptr, _entry)			\
	__bkey_for_each_ptr_decode(_k, (_p).start, (_p).end,		\
				   _ptr, _entry)

/* Advance @_iter to the next crc entry, unpacking it into @_crc: */
#define bkey_crc_next(_k, _start, _end, _crc, _iter)			\
({									\
	__bkey_extent_entry_for_each_from(_iter, _end, _iter)		\
		if (extent_entry_is_crc(_iter)) {			\
			(_crc) = bch2_extent_crc_unpack(_k,		\
						entry_to_crc(_iter));	\
			break;						\
		}							\
									\
	(_iter) < (_end);						\
})

#define __bkey_for_each_crc(_k, _start, _end, _crc, _iter)		\
	for ((_crc) = bch2_extent_crc_unpack(_k, NULL),			\
	     (_iter) = (_start);					\
	     bkey_crc_next(_k, _start, _end, _crc, _iter);		\
	     (_iter) = extent_entry_next(_iter))

#define bkey_for_each_crc(_k, _p, _crc, _iter)				\
	__bkey_for_each_crc(_k, (_p).start, (_p).end, _crc, _iter)

/* Iterate over pointers in KEY_TYPE_extent: */

#define extent_for_each_entry_from(_e, _entry, _start)			\
	__bkey_extent_entry_for_each_from(_start,			\
				extent_entry_last(_e), _entry)

#define extent_for_each_entry(_e, _entry)				\
	extent_for_each_entry_from(_e, _entry, (_e).v->start)

#define extent_ptr_next(_e, _ptr)					\
	__bkey_ptr_next(_ptr, extent_entry_last(_e))

#define extent_for_each_ptr(_e, _ptr)					\
	__bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)

#define extent_for_each_ptr_decode(_e, _ptr, _entry)			\
	__bkey_for_each_ptr_decode((_e).k, (_e).v->start,		\
				   extent_entry_last(_e), _ptr, _entry)

/* utility code common to all keys with pointers: */

void bch2_mark_io_failure(struct bch_io_failures *,
			  struct extent_ptr_decoded *);
int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
			       struct bch_io_failures *,
			       struct extent_ptr_decoded *);

/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_invalid(struct bch_fs *, struct bkey_s_c,
			   enum bkey_invalid_flags, struct printbuf *);
void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
			    struct bkey_s_c);

int bch2_btree_ptr_v2_invalid(struct bch_fs *, struct bkey_s_c,
			      enum bkey_invalid_flags, struct printbuf *);
void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
			      int, struct bkey_s);

#define bch2_bkey_ops_btree_ptr ((struct bkey_ops) {		\
	.key_invalid	= bch2_btree_ptr_invalid,		\
	.val_to_text	= bch2_btree_ptr_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.trans_trigger	= bch2_trans_mark_extent,		\
	.atomic_trigger	= bch2_mark_extent,			\
})

#define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) {		\
	.key_invalid	= bch2_btree_ptr_v2_invalid,		\
	.val_to_text	= bch2_btree_ptr_v2_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.compat		= bch2_btree_ptr_v2_compat,		\
	.trans_trigger	= bch2_trans_mark_extent,		\
	.atomic_trigger	= bch2_mark_extent,			\
	.min_val_size	= 40,					\
})

/* KEY_TYPE_extent: */

bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

#define bch2_bkey_ops_extent ((struct bkey_ops) {		\
	.key_invalid	= bch2_bkey_ptrs_invalid,		\
	.val_to_text	= bch2_bkey_ptrs_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.key_normalize	= bch2_extent_normalize,		\
	.key_merge	= bch2_extent_merge,			\
	.trans_trigger	= bch2_trans_mark_extent,		\
	.atomic_trigger	= bch2_mark_extent,			\
})

/* KEY_TYPE_reservation: */

int bch2_reservation_invalid(struct bch_fs *, struct bkey_s_c,
			     enum bkey_invalid_flags, struct printbuf *);
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

#define bch2_bkey_ops_reservation ((struct bkey_ops) {		\
	.key_invalid	= bch2_reservation_invalid,		\
	.val_to_text	= bch2_reservation_to_text,		\
	.key_merge	= bch2_reservation_merge,		\
	.trans_trigger	= bch2_trans_mark_reservation,		\
	.atomic_trigger	= bch2_mark_reservation,		\
	.min_val_size	= 8,					\
})

/* Extent checksum entries: */

bool bch2_can_narrow_extent_crcs(struct bkey_s_c,
				 struct bch_extent_crc_unpacked);
bool bch2_bkey_narrow_crcs(struct bkey_i *, struct bch_extent_crc_unpacked);
void bch2_extent_crc_append(struct bkey_i *,
			    struct bch_extent_crc_unpacked);

/* Generic code for keys with pointers: */

/* True for both btree node pointer key types (v1 and v2): */
static inline bool bkey_is_btree_ptr(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		return true;
	default:
		return false;
	}
}

/* Key types whose data lives behind pointers, as opposed to inline in the key: */
static inline bool bkey_extent_is_direct_data(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_inline_data(const struct bkey *k)
{
	return  k->type == KEY_TYPE_inline_data ||
		k->type == KEY_TYPE_indirect_inline_data;
}

/* Offset of the inline data payload within the value, per key type: */
static inline unsigned bkey_inline_data_offset(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_inline_data:
		return sizeof(struct bch_inline_data);
	case KEY_TYPE_indirect_inline_data:
		return sizeof(struct bch_indirect_inline_data);
	default:
		BUG();
	}
}

static inline unsigned bkey_inline_data_bytes(const struct bkey *k)
{
	return bkey_val_bytes(k) - bkey_inline_data_offset(k);
}

/* Pointer to the inline data payload of @_k: */
#define bkey_inline_data_p(_k)	(((void *) (_k).v) + bkey_inline_data_offset((_k).k))

static inline bool bkey_extent_is_data(const struct bkey *k)
{
	return  bkey_extent_is_direct_data(k) ||
		bkey_extent_is_inline_data(k) ||
		k->type == KEY_TYPE_reflink_p;
}

/*
 * Should extent be counted under inode->i_sectors?
 */
static inline bool bkey_extent_is_allocation(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reservation:
	case KEY_TYPE_reflink_p:
	case KEY_TYPE_reflink_v:
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
	case KEY_TYPE_error:
		return true;
	default:
		return false;
	}
}

/* True if any pointer in @k is flagged unwritten: */
static inline bool bkey_extent_is_unwritten(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->unwritten)
			return true;
	return false;
}

static inline bool bkey_extent_is_reservation(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation ||
		bkey_extent_is_unwritten(k);
}

/* List of all devices @k has pointers to: */
static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(p, ptr)
		ret.devs[ret.nr++] = ptr->dev;

	return ret;
}

/* Devices with non-cached (dirty) pointers only: */
static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(p, ptr)
		if (!ptr->cached)
			ret.devs[ret.nr++] = ptr->dev;

	return ret;
}

/* Devices with cached pointers only: */
static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(p, ptr)
		if (ptr->cached)
			ret.devs[ret.nr++] = ptr->dev;

	return ret;
}

/*
 * Data type to account for the bucket @ptr points into; for stripes,
 * the last nr_redundant blocks are parity, the rest are user data.
 */
static inline unsigned bch2_bkey_ptr_data_type(struct bkey_s_c k, const struct bch_extent_ptr *ptr)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		return BCH_DATA_btree;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return BCH_DATA_user;
	case KEY_TYPE_stripe: {
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

		BUG_ON(ptr < s.v->ptrs ||
		       ptr >= s.v->ptrs + s.v->nr_blocks);

		return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
			? BCH_DATA_parity
			: BCH_DATA_user;
	}
	default:
		BUG();
	}
}

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
bool bch2_bkey_is_incompressible(struct bkey_s_c);
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);

unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
unsigned bch2_extent_ptr_desired_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);

void bch2_bkey_drop_device(struct bkey_s, unsigned);
void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);

const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c, unsigned);

static inline struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s k, unsigned dev)
{
	return (void *) bch2_bkey_has_device_c(k.s_c, dev);
}

bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);

void bch2_bkey_extent_entry_drop(struct bkey_i *, union bch_extent_entry *);

/*
 * Append @ptr to @k's value; only valid for key types whose value is a
 * flat list of entries (asserts that @k doesn't already have a pointer
 * to the same device).
 */
static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr ptr)
{
	struct bch_extent_ptr *dest;

	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s(k), ptr.dev));

	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);

		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
		dest = (struct bch_extent_ptr *)((void *) &k->v + bkey_val_bytes(&k->k));
		*dest = ptr;
		k->k.u64s++;
		break;
	default:
		BUG();
	}
}

void bch2_extent_ptr_decoded_append(struct bkey_i *,
				    struct extent_ptr_decoded *);
union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s,
						   struct bch_extent_ptr *);
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
					   struct bch_extent_ptr *);

/*
 * Drop every pointer for which @_cond is true; re-fetches the ptr range
 * after each drop since dropping shifts the value.
 */
#define bch2_bkey_drop_ptrs(_k, _ptr, _cond)				\
	do {								\
		struct bkey_ptrs _ptrs = bch2_bkey_ptrs(_k);		\
									\
		_ptr = &_ptrs.start->ptr;				\
									\
		while ((_ptr = bkey_ptr_next(_ptrs, _ptr))) {		\
			if (_cond) {					\
				_ptr = (void *) bch2_bkey_drop_ptr(_k, _ptr);	\
				_ptrs = bch2_bkey_ptrs(_k);		\
				continue;				\
			}						\
									\
			(_ptr)++;					\
		}							\
	} while (0)

bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
			   struct bch_extent_ptr, u64);
bool bch2_extents_match(struct bkey_s_c, struct bkey_s_c);
struct bch_extent_ptr *
bch2_extent_has_ptr(struct bkey_s_c, struct extent_ptr_decoded, struct bkey_s);

void bch2_extent_ptr_set_cached(struct bkey_s, struct bch_extent_ptr *);

bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
			    struct bkey_s_c);
int bch2_bkey_ptrs_invalid(struct bch_fs *, struct bkey_s_c,
			   enum bkey_invalid_flags, struct printbuf *);

void bch2_ptr_swab(struct bkey_s);

const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c);
unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *, struct bkey_s_c,
				       unsigned, unsigned);
bool bch2_bkey_needs_rebalance(struct bch_fs *, struct bkey_s_c);

int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bkey_i *,
				  unsigned, unsigned);

/* Generic extent code: */

enum bch_extent_overlap {
	BCH_EXTENT_OVERLAP_ALL		= 0,
	BCH_EXTENT_OVERLAP_BACK		= 1,
	BCH_EXTENT_OVERLAP_FRONT	= 2,
	BCH_EXTENT_OVERLAP_MIDDLE	= 3,
};

/* Returns how k overlaps with m */
static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
							  const struct bkey *m)
{
	int cmp1 = bkey_lt(k->p, m->p);
	int cmp2 = bkey_gt(bkey_start_pos(k), bkey_start_pos(m));

	/* the two comparisons pack directly into the enum encoding above */
	return (cmp1 << 1) + cmp2;
}

int bch2_cut_front_s(struct bpos, struct bkey_s);
int bch2_cut_back_s(struct bpos, struct bkey_s);

/* Trim @k so it starts at @where (start-of-extent moves forward): */
static inline void bch2_cut_front(struct bpos where, struct bkey_i *k)
{
	bch2_cut_front_s(where, bkey_i_to_s(k));
}

/* Trim @k so it ends at @where: */
static inline void bch2_cut_back(struct bpos where, struct bkey_i *k)
{
	bch2_cut_back_s(where, bkey_i_to_s(k));
}

/**
 * bch2_key_resize - adjust size of @k
 *
 * bkey_start_offset(k) will be preserved, modifies where the extent ends
 */
static inline void bch2_key_resize(struct bkey *k, unsigned new_size)
{
	k->p.offset -= k->size;
	k->p.offset += new_size;
	k->size = new_size;
}

#endif /* _BCACHEFS_EXTENTS_H */