/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_UTIL_H
#define _BCACHEFS_UTIL_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/closure.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include "mean_and_variance.h"

#include "darray.h"

struct closure;

#ifdef CONFIG_BCACHEFS_DEBUG
#define EBUG_ON(cond)		BUG_ON(cond)
#else
#define EBUG_ON(cond)
#endif

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define CPU_BIG_ENDIAN		0
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define CPU_BIG_ENDIAN		1
#endif

/* type hackery */

#define type_is_exact(_val, _type)					\
	__builtin_types_compatible_p(typeof(_val), _type)

#define type_is(_val, _type)						\
	(__builtin_types_compatible_p(typeof(_val), _type) ||		\
	 __builtin_types_compatible_p(typeof(_val), const _type))

/* Userspace doesn't align allocations as nicely as the kernel allocators: */
static inline size_t buf_pages(void *p, size_t len)
{
	return DIV_ROUND_UP(len +
			    ((unsigned long) p & (PAGE_SIZE - 1)),
			    PAGE_SIZE);
}

static inline void vpfree(void *p, size_t size)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		free_pages((unsigned long) p, get_order(size));
}

static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
{
	return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
					 get_order(size)) ?:
		__vmalloc(size, gfp_mask);
}

static inline void kvpfree(void *p, size_t size)
{
	if (size < PAGE_SIZE)
		kfree(p);
	else
		vpfree(p, size);
}

static inline void *kvpmalloc(size_t size, gfp_t gfp_mask)
{
	return size < PAGE_SIZE
		? kmalloc(size, gfp_mask)
		: vpmalloc(size, gfp_mask);
}
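
/*
 * Sketch of intended use (caller and sizes are hypothetical): the size
 * must be remembered and passed back to kvpfree() so it can pick the
 * matching free path:
 *
 *	void *buf = kvpmalloc(bytes, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kvpfree(buf, bytes);
 */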

int mempool_init_kvpmalloc_pool(mempool_t *, int, size_t);

#define HEAP(type)							\
struct {								\
	size_t size, used;						\
	type *data;							\
}

#define DECLARE_HEAP(type, name) HEAP(type) name

#define init_heap(heap, _size, gfp)					\
({									\
	(heap)->used = 0;						\
	(heap)->size = (_size);						\
	(heap)->data = kvpmalloc((heap)->size * sizeof((heap)->data[0]),\
				 (gfp));				\
})

#define free_heap(heap)							\
do {									\
	kvpfree((heap)->data, (heap)->size * sizeof((heap)->data[0]));	\
	(heap)->data = NULL;						\
} while (0)

#define heap_set_backpointer(h, i, _fn)					\
do {									\
	void (*fn)(typeof(h), size_t) = _fn;				\
	if (fn)								\
		fn(h, i);						\
} while (0)

#define heap_swap(h, i, j, set_backpointer)				\
do {									\
	swap((h)->data[i], (h)->data[j]);				\
	heap_set_backpointer(h, i, set_backpointer);			\
	heap_set_backpointer(h, j, set_backpointer);			\
} while (0)

#define heap_peek(h)							\
({									\
	EBUG_ON(!(h)->used);						\
	(h)->data[0];							\
})

#define heap_full(h)	((h)->used == (h)->size)

#define heap_sift_down(h, i, cmp, set_backpointer)			\
do {									\
	size_t _c, _j = i;						\
									\
	for (; _j * 2 + 1 < (h)->used; _j = _c) {			\
		_c = _j * 2 + 1;					\
		if (_c + 1 < (h)->used &&				\
		    cmp(h, (h)->data[_c], (h)->data[_c + 1]) >= 0)	\
			_c++;						\
									\
		if (cmp(h, (h)->data[_c], (h)->data[_j]) >= 0)		\
			break;						\
		heap_swap(h, _c, _j, set_backpointer);			\
	}								\
} while (0)

#define heap_sift_up(h, i, cmp, set_backpointer)			\
do {									\
	while (i) {							\
		size_t p = (i - 1) / 2;					\
		if (cmp(h, (h)->data[i], (h)->data[p]) >= 0)		\
			break;						\
		heap_swap(h, i, p, set_backpointer);			\
		i = p;							\
	}								\
} while (0)

#define __heap_add(h, d, cmp, set_backpointer)				\
({									\
	size_t _i = (h)->used++;					\
	(h)->data[_i] = d;						\
	heap_set_backpointer(h, _i, set_backpointer);			\
									\
	heap_sift_up(h, _i, cmp, set_backpointer);			\
	_i;								\
})

#define heap_add(h, d, cmp, set_backpointer)				\
({									\
	bool _r = !heap_full(h);					\
	if (_r)								\
		__heap_add(h, d, cmp, set_backpointer);			\
	_r;								\
})

#define heap_add_or_replace(h, new, cmp, set_backpointer)		\
do {									\
	if (!heap_add(h, new, cmp, set_backpointer) &&			\
	    cmp(h, new, heap_peek(h)) >= 0) {				\
		(h)->data[0] = new;					\
		heap_set_backpointer(h, 0, set_backpointer);		\
		heap_sift_down(h, 0, cmp, set_backpointer);		\
	}								\
} while (0)

#define heap_del(h, i, cmp, set_backpointer)				\
do {									\
	size_t _i = (i);						\
									\
	BUG_ON(_i >= (h)->used);					\
	(h)->used--;							\
	if ((_i) < (h)->used) {						\
		heap_swap(h, _i, (h)->used, set_backpointer);		\
		heap_sift_up(h, _i, cmp, set_backpointer);		\
		heap_sift_down(h, _i, cmp, set_backpointer);		\
	}								\
} while (0)

#define heap_pop(h, d, cmp, set_backpointer)				\
({									\
	bool _r = (h)->used;						\
	if (_r) {							\
		(d) = (h)->data[0];					\
		heap_del(h, 0, cmp, set_backpointer);			\
	}								\
	_r;								\
})

#define heap_resort(heap, cmp, set_backpointer)				\
do {									\
	ssize_t _i;							\
	for (_i = (ssize_t) (heap)->used / 2 - 1; _i >= 0; --_i)	\
		heap_sift_down(heap, _i, cmp, set_backpointer);		\
} while (0)
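
/*
 * Illustrative heap usage (names here are hypothetical, not part of
 * this API). cmp is invoked as cmp(heap, l, r) and should return
 * <0/0/>0 like memcmp(); set_backpointer may be NULL, since
 * heap_set_backpointer() only calls it when non-NULL:
 *
 *	#define cmp_u64(_h, _l, _r)	(((_l) > (_r)) - ((_l) < (_r)))
 *
 *	HEAP(u64) heap;
 *	u64 v;
 *
 *	if (!init_heap(&heap, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	heap_add(&heap, 42, cmp_u64, NULL);
 *	while (heap_pop(&heap, v, cmp_u64, NULL))
 *		pr_info("%llu\n", v);
 *	free_heap(&heap);
 */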

#define ANYSINT_MAX(t)							\
	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)

#include "printbuf.h"

#define prt_vprintf(_out, ...)		bch2_prt_vprintf(_out, __VA_ARGS__)
#define prt_printf(_out, ...)		bch2_prt_printf(_out, __VA_ARGS__)
#define printbuf_str(_buf)		bch2_printbuf_str(_buf)
#define printbuf_exit(_buf)		bch2_printbuf_exit(_buf)

#define printbuf_tabstops_reset(_buf)	bch2_printbuf_tabstops_reset(_buf)
#define printbuf_tabstop_pop(_buf)	bch2_printbuf_tabstop_pop(_buf)
#define printbuf_tabstop_push(_buf, _n)	bch2_printbuf_tabstop_push(_buf, _n)

#define printbuf_indent_add(_out, _n)	bch2_printbuf_indent_add(_out, _n)
#define printbuf_indent_sub(_out, _n)	bch2_printbuf_indent_sub(_out, _n)

#define prt_newline(_out)		bch2_prt_newline(_out)
#define prt_tab(_out)			bch2_prt_tab(_out)
#define prt_tab_rjust(_out)		bch2_prt_tab_rjust(_out)

#define prt_bytes_indented(...)		bch2_prt_bytes_indented(__VA_ARGS__)
#define prt_u64(_out, _v)		prt_printf(_out, "%llu", (u64) (_v))
#define prt_human_readable_u64(...)	bch2_prt_human_readable_u64(__VA_ARGS__)
#define prt_human_readable_s64(...)	bch2_prt_human_readable_s64(__VA_ARGS__)
#define prt_units_u64(...)		bch2_prt_units_u64(__VA_ARGS__)
#define prt_units_s64(...)		bch2_prt_units_s64(__VA_ARGS__)
#define prt_string_option(...)		bch2_prt_string_option(__VA_ARGS__)
#define prt_bitflags(...)		bch2_prt_bitflags(__VA_ARGS__)

void bch2_pr_time_units(struct printbuf *, u64);
void bch2_prt_datetime(struct printbuf *, time64_t);

#ifdef __KERNEL__
static inline void uuid_unparse_lower(u8 *uuid, char *out)
{
	sprintf(out, "%pUb", uuid);
}
#else
#include <uuid/uuid.h>
#endif

static inline void pr_uuid(struct printbuf *out, u8 *uuid)
{
	char uuid_str[40];

	uuid_unparse_lower(uuid, uuid_str);
	prt_printf(out, "%s", uuid_str);
}

int bch2_strtoint_h(const char *, int *);
int bch2_strtouint_h(const char *, unsigned int *);
int bch2_strtoll_h(const char *, long long *);
int bch2_strtoull_h(const char *, unsigned long long *);
int bch2_strtou64_h(const char *, u64 *);

static inline int bch2_strtol_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch2_strtoint_h(cp, (int *) res);
#else
	return bch2_strtoll_h(cp, (long long *) res);
#endif
}

static inline int bch2_strtoul_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch2_strtouint_h(cp, (unsigned int *) res);
#else
	return bch2_strtoull_h(cp, (unsigned long long *) res);
#endif
}
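
/*
 * strtoi_h() below dispatches on the type of *res at compile time via
 * type_is(); the _h parsers are the human-readable variants, which
 * accept size suffixes. Hypothetical caller:
 *
 *	u64 v;
 *
 *	if (!strtoi_h("16M", &v))
 *		... use v ...
 */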

#define strtoi_h(cp, res)						\
	( type_is(*res, int)		? bch2_strtoint_h(cp, (void *) res)\
	: type_is(*res, long)		? bch2_strtol_h(cp, (void *) res)\
	: type_is(*res, long long)	? bch2_strtoll_h(cp, (void *) res)\
	: type_is(*res, unsigned)	? bch2_strtouint_h(cp, (void *) res)\
	: type_is(*res, unsigned long)	? bch2_strtoul_h(cp, (void *) res)\
	: type_is(*res, unsigned long long) ? bch2_strtoull_h(cp, (void *) res)\
	: -EINVAL)

#define strtoul_safe(cp, var)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = _v;						\
	_r;								\
})

#define strtoul_safe_clamp(cp, var, min, max)				\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = clamp_t(typeof(var), _v, min, max);		\
	_r;								\
})

#define strtoul_safe_restrict(cp, var, min, max)			\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r && _v >= min && _v <= max)				\
		var = _v;						\
	else								\
		_r = -EINVAL;						\
	_r;								\
})

#define snprint(out, var)						\
	prt_printf(out,							\
		   type_is(var, int)		? "%i\n"		\
		 : type_is(var, unsigned)	? "%u\n"		\
		 : type_is(var, long)		? "%li\n"		\
		 : type_is(var, unsigned long)	? "%lu\n"		\
		 : type_is(var, s64)		? "%lli\n"		\
		 : type_is(var, u64)		? "%llu\n"		\
		 : type_is(var, char *)		? "%s\n"		\
		 : "%i\n", var)

bool bch2_is_zero(const void *, size_t);

u64 bch2_read_flag_list(char *, const char * const[]);

void bch2_prt_u64_binary(struct printbuf *, u64, unsigned);

void bch2_print_string_as_lines(const char *prefix, const char *lines);

typedef DARRAY(unsigned long) bch_stacktrace;
int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *);
void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *);
int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *);

#define NR_QUANTILES	15
#define QUANTILE_IDX(i)	inorder_to_eytzinger0(i, NR_QUANTILES)
#define QUANTILE_FIRST	eytzinger0_first(NR_QUANTILES)
#define QUANTILE_LAST	eytzinger0_last(NR_QUANTILES)

struct bch2_quantiles {
	struct bch2_quantile_entry {
		u64	m;
		u64	step;
	}		entries[NR_QUANTILES];
};

struct bch2_time_stat_buffer {
	unsigned	nr;
	struct bch2_time_stat_buffer_entry {
		u64	start;
		u64	end;
	}		entries[32];
};

struct bch2_time_stats {
	spinlock_t	lock;
	/* all fields are in nanoseconds */
	u64		max_duration;
	u64		min_duration;
	u64		max_freq;
	u64		min_freq;
	u64		last_event;
	struct bch2_quantiles quantiles;

	struct mean_and_variance	  duration_stats;
	struct mean_and_variance_weighted duration_stats_weighted;
	struct mean_and_variance	  freq_stats;
	struct mean_and_variance_weighted freq_stats_weighted;
	struct bch2_time_stat_buffer __percpu *buffer;
};

#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
void __bch2_time_stats_update(struct bch2_time_stats *stats, u64, u64);
#else
static inline void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end) {}
#endif

static inline void bch2_time_stats_update(struct bch2_time_stats *stats, u64 start)
{
	__bch2_time_stats_update(stats, start, local_clock());
}

void bch2_time_stats_to_text(struct printbuf *, struct bch2_time_stats *);

void bch2_time_stats_exit(struct bch2_time_stats *);
void bch2_time_stats_init(struct bch2_time_stats *);

#define ewma_add(ewma, val, weight)					\
({									\
	typeof(ewma) _ewma = (ewma);					\
	typeof(weight) _weight = (weight);				\
									\
	(((_ewma << _weight) - _ewma) + (val)) >> _weight;		\
})
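
/*
 * Typical latency-accounting pattern (a sketch; stats must have been
 * set up with bch2_time_stats_init(), and do_work() is hypothetical):
 *
 *	u64 start = local_clock();
 *
 *	do_work();
 *	bch2_time_stats_update(&stats, start);
 *
 * ewma_add() above keeps an integer exponentially weighted moving
 * average, new = old + (val - old) / 2^weight, e.g.:
 *
 *	ewma = ewma_add(ewma, sample, 3);	// 1/8 weight per sample
 */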

struct bch_ratelimit {
	/* Next time we want to do some work, in nanoseconds */
	u64			next;

	/*
	 * Rate at which we want to do work, in units per second
	 * The units here correspond to the units passed to
	 * bch2_ratelimit_increment()
	 */
	unsigned		rate;
};

static inline void bch2_ratelimit_reset(struct bch_ratelimit *d)
{
	d->next = local_clock();
}

u64 bch2_ratelimit_delay(struct bch_ratelimit *);
void bch2_ratelimit_increment(struct bch_ratelimit *, u64);

struct bch_pd_controller {
	struct bch_ratelimit	rate;
	unsigned long		last_update;

	s64			last_actual;
	s64			smoothed_derivative;

	unsigned		p_term_inverse;
	unsigned		d_smooth;
	unsigned		d_term;

	/* for exporting to sysfs (no effect on behavior) */
	s64			last_derivative;
	s64			last_proportional;
	s64			last_change;
	s64			last_target;

	/*
	 * If true, the rate will not increase if bch2_ratelimit_delay()
	 * is not being called often enough.
	 */
	bool			backpressure;
};

void bch2_pd_controller_update(struct bch_pd_controller *, s64, s64, int);
void bch2_pd_controller_init(struct bch_pd_controller *);
void bch2_pd_controller_debug_to_text(struct printbuf *, struct bch_pd_controller *);

#define sysfs_pd_controller_attribute(name)				\
	rw_attribute(name##_rate);					\
	rw_attribute(name##_rate_bytes);				\
	rw_attribute(name##_rate_d_term);				\
	rw_attribute(name##_rate_p_term_inverse);			\
	read_attribute(name##_rate_debug)

#define sysfs_pd_controller_files(name)					\
	&sysfs_##name##_rate,						\
	&sysfs_##name##_rate_bytes,					\
	&sysfs_##name##_rate_d_term,					\
	&sysfs_##name##_rate_p_term_inverse,				\
	&sysfs_##name##_rate_debug

#define sysfs_pd_controller_show(name, var)				\
do {									\
	sysfs_hprint(name##_rate,	(var)->rate.rate);		\
	sysfs_print(name##_rate_bytes,	(var)->rate.rate);		\
	sysfs_print(name##_rate_d_term,	(var)->d_term);			\
	sysfs_print(name##_rate_p_term_inverse,	(var)->p_term_inverse);	\
									\
	if (attr == &sysfs_##name##_rate_debug)				\
		bch2_pd_controller_debug_to_text(out, var);		\
} while (0)

#define sysfs_pd_controller_store(name, var)				\
do {									\
	sysfs_strtoul_clamp(name##_rate,				\
			    (var)->rate.rate, 1, UINT_MAX);		\
	sysfs_strtoul_clamp(name##_rate_bytes,				\
			    (var)->rate.rate, 1, UINT_MAX);		\
	sysfs_strtoul(name##_rate_d_term,	(var)->d_term);		\
	sysfs_strtoul_clamp(name##_rate_p_term_inverse,			\
			    (var)->p_term_inverse, 1, INT_MAX);		\
} while (0)
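
/*
 * Sketch of the ratelimit pattern in a hypothetical worker loop:
 * bch2_ratelimit_increment() accounts work done, and
 * bch2_ratelimit_delay() returns how long to sleep (in jiffies) to
 * stay at the configured rate:
 *
 *	while (work_to_do()) {
 *		u64 delay = bch2_ratelimit_delay(&pd.rate);
 *
 *		if (delay)
 *			schedule_timeout_interruptible(delay);
 *
 *		do_some_work();
 *		bch2_ratelimit_increment(&pd.rate, units_done);
 *	}
 */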

#define container_of_or_null(ptr, type, member)				\
({									\
	typeof(ptr) _ptr = ptr;						\
	_ptr ? container_of(_ptr, type, member) : NULL;			\
})

/* Does linear interpolation between powers of two */
static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
{
	unsigned fract = x & ~(~0 << fract_bits);

	x >>= fract_bits;
	x   = 1 << x;
	x  += (x * fract) >> fract_bits;

	return x;
}

void bch2_bio_map(struct bio *bio, void *base, size_t);
int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);

static inline sector_t bdev_sectors(struct block_device *bdev)
{
	return bdev->bd_inode->i_size >> 9;
}

#define closure_bio_submit(bio, cl)					\
do {									\
	closure_get(cl);						\
	submit_bio(bio);						\
} while (0)

#define kthread_wait(cond)						\
({									\
	int _ret = 0;							\
									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (kthread_should_stop()) {				\
			_ret = -1;					\
			break;						\
		}							\
									\
		if (cond)						\
			break;						\
									\
		schedule();						\
	}								\
	set_current_state(TASK_RUNNING);				\
	_ret;								\
})

#define kthread_wait_freezable(cond)					\
({									\
	int _ret = 0;							\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (kthread_should_stop()) {				\
			_ret = -1;					\
			break;						\
		}							\
									\
		if (cond)						\
			break;						\
									\
		schedule();						\
		try_to_freeze();					\
	}								\
	set_current_state(TASK_RUNNING);				\
	_ret;								\
})

size_t bch2_rand_range(size_t);

void memcpy_to_bio(struct bio *, struct bvec_iter, const void *);
void memcpy_from_bio(void *, struct bio *, struct bvec_iter);

static inline void memcpy_u64s_small(void *dst, const void *src,
				     unsigned u64s)
{
	u64 *d = dst;
	const u64 *s = src;

	while (u64s--)
		*d++ = *s++;
}

static inline void __memcpy_u64s(void *dst, const void *src,
				 unsigned u64s)
{
#ifdef CONFIG_X86_64
	long d0, d1, d2;

	asm volatile("rep ; movsq"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (u64s), "1" (dst), "2" (src)
		     : "memory");
#else
	u64 *d = dst;
	const u64 *s = src;

	while (u64s--)
		*d++ = *s++;
#endif
}

static inline void memcpy_u64s(void *dst, const void *src,
			       unsigned u64s)
{
	EBUG_ON(!(dst >= src + u64s * sizeof(u64) ||
		  dst + u64s * sizeof(u64) <= src));

	__memcpy_u64s(dst, src, u64s);
}

static inline void __memmove_u64s_down(void *dst, const void *src,
				       unsigned u64s)
{
	__memcpy_u64s(dst, src, u64s);
}

static inline void memmove_u64s_down(void *dst, const void *src,
				     unsigned u64s)
{
	EBUG_ON(dst > src);

	__memmove_u64s_down(dst, src, u64s);
}

static inline void __memmove_u64s_down_small(void *dst, const void *src,
					     unsigned u64s)
{
	memcpy_u64s_small(dst, src, u64s);
}

static inline void memmove_u64s_down_small(void *dst, const void *src,
					   unsigned u64s)
{
	EBUG_ON(dst > src);

	__memmove_u64s_down_small(dst, src, u64s);
}

static inline void __memmove_u64s_up_small(void *_dst, const void *_src,
					   unsigned u64s)
{
	u64 *dst = (u64 *) _dst + u64s;
	u64 *src = (u64 *) _src + u64s;

	while (u64s--)
		*--dst = *--src;
}

static inline void memmove_u64s_up_small(void *dst, const void *src,
					 unsigned u64s)
{
	EBUG_ON(dst < src);

	__memmove_u64s_up_small(dst, src, u64s);
}
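
/*
 * The *_u64s() helpers copy whole u64s; counts are in u64s, not bytes.
 * memcpy_u64s() requires that the ranges not overlap (checked by
 * EBUG_ON), while the up/down memmove variants each handle one
 * direction of overlap. Illustrative, with a hypothetical buffer:
 *
 *	memmove_u64s_down(buf, buf + 1, nr);	// dst <= src, copies forwards
 *	memmove_u64s_up(buf + 1, buf, nr);	// dst >= src, copies backwards
 */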

static inline void __memmove_u64s_up(void *_dst, const void *_src,
				     unsigned u64s)
{
	u64 *dst = (u64 *) _dst + u64s - 1;
	u64 *src = (u64 *) _src + u64s - 1;

#ifdef CONFIG_X86_64
	long d0, d1, d2;

	asm volatile("std ;\n"
		     "rep ; movsq\n"
		     "cld ;\n"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (u64s), "1" (dst), "2" (src)
		     : "memory");
#else
	while (u64s--)
		*dst-- = *src--;
#endif
}

static inline void memmove_u64s_up(void *dst, const void *src,
				   unsigned u64s)
{
	EBUG_ON(dst < src);

	__memmove_u64s_up(dst, src, u64s);
}

static inline void memmove_u64s(void *dst, const void *src,
				unsigned u64s)
{
	if (dst < src)
		__memmove_u64s_down(dst, src, u64s);
	else
		__memmove_u64s_up(dst, src, u64s);
}

/* Set the last few bytes up to a u64 boundary given an offset into a buffer. */
static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
{
	unsigned rem = round_up(bytes, sizeof(u64)) - bytes;

	memset(s + bytes, c, rem);
}

void sort_cmp_size(void *base, size_t num, size_t size,
		   int (*cmp_func)(const void *, const void *, size_t),
		   void (*swap_func)(void *, void *, size_t));

/* just the memmove, doesn't update @_nr */
#define __array_insert_item(_array, _nr, _pos)				\
	memmove(&(_array)[(_pos) + 1],					\
		&(_array)[(_pos)],					\
		sizeof((_array)[0]) * ((_nr) - (_pos)))

#define array_insert_item(_array, _nr, _pos, _new_item)			\
do {									\
	__array_insert_item(_array, _nr, _pos);				\
	(_nr)++;							\
	(_array)[(_pos)] = (_new_item);					\
} while (0)

#define array_remove_items(_array, _nr, _pos, _nr_to_remove)		\
do {									\
	(_nr) -= (_nr_to_remove);					\
	memmove(&(_array)[(_pos)],					\
		&(_array)[(_pos) + (_nr_to_remove)],			\
		sizeof((_array)[0]) * ((_nr) - (_pos)));		\
} while (0)

#define array_remove_item(_array, _nr, _pos)				\
	array_remove_items(_array, _nr, _pos, 1)

static inline void __move_gap(void *array, size_t element_size,
			      size_t nr, size_t size,
			      size_t old_gap, size_t new_gap)
{
	size_t gap_end = old_gap + size - nr;

	if (new_gap < old_gap) {
		size_t move = old_gap - new_gap;

		memmove(array + element_size * (gap_end - move),
			array + element_size * (old_gap - move),
			element_size * move);
	} else if (new_gap > old_gap) {
		size_t move = new_gap - old_gap;

		memmove(array + element_size * old_gap,
			array + element_size * gap_end,
			element_size * move);
	}
}

/* Move the gap in a gap buffer: */
#define move_gap(_array, _nr, _size, _old_gap, _new_gap)		\
	__move_gap(_array, sizeof(_array[0]), _nr, _size, _old_gap, _new_gap)

#define bubble_sort(_base, _nr, _cmp)					\
do {									\
	ssize_t _i, _last;						\
	bool _swapped = true;						\
									\
	for (_last = (ssize_t) (_nr) - 1; _last > 0 && _swapped; --_last) {\
		_swapped = false;					\
		for (_i = 0; _i < _last; _i++)				\
			if (_cmp((_base)[_i], (_base)[_i + 1]) > 0) {	\
				swap((_base)[_i], (_base)[_i + 1]);	\
				_swapped = true;			\
			}						\
	}								\
} while (0)

static inline u64 percpu_u64_get(u64 __percpu *src)
{
	u64 ret = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		ret += *per_cpu_ptr(src, cpu);
	return ret;
}

static inline void percpu_u64_set(u64 __percpu *dst, u64 src)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(dst, cpu) = 0;
	this_cpu_write(*dst, src);
}
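
/*
 * percpu_u64_get() sums one u64 per possible cpu into an approximate
 * total; percpu_u64_set() zeroes every cpu's slot and writes the new
 * value to the local one. Hypothetical counter:
 *
 *	u64 __percpu *counter = alloc_percpu(u64);
 *
 *	this_cpu_add(*counter, 1);
 *	total = percpu_u64_get(counter);
 */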

static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
{
	unsigned i;

	for (i = 0; i < nr; i++)
		acc[i] += src[i];
}

static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src,
				   unsigned nr)
{
	int cpu;

	for_each_possible_cpu(cpu)
		acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
}

static inline void percpu_memset(void __percpu *p, int c, size_t bytes)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memset(per_cpu_ptr(p, cpu), c, bytes);
}

u64 *bch2_acc_percpu_u64s(u64 __percpu *, unsigned);

#define cmp_int(l, r)		((l > r) - (l < r))

static inline int u8_cmp(u8 l, u8 r)
{
	return cmp_int(l, r);
}

static inline int cmp_le32(__le32 l, __le32 r)
{
	return cmp_int(le32_to_cpu(l), le32_to_cpu(r));
}

#include <linux/uuid.h>

#endif /* _BCACHEFS_UTIL_H */