xref: /linux/fs/bcachefs/util.h (revision 5e2cb28dd7e182dfa641550dfa225913509ad45d)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_UTIL_H
3 #define _BCACHEFS_UTIL_H
4 
5 #include <linux/bio.h>
6 #include <linux/blkdev.h>
7 #include <linux/closure.h>
8 #include <linux/errno.h>
9 #include <linux/freezer.h>
10 #include <linux/kernel.h>
11 #include <linux/sched/clock.h>
12 #include <linux/llist.h>
13 #include <linux/log2.h>
14 #include <linux/percpu.h>
15 #include <linux/preempt.h>
16 #include <linux/ratelimit.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
19 #include <linux/workqueue.h>
20 
21 #include "mean_and_variance.h"
22 
23 #include "darray.h"
24 
25 struct closure;
26 
27 #ifdef CONFIG_BCACHEFS_DEBUG
28 #define EBUG_ON(cond)		BUG_ON(cond)
29 #else
30 #define EBUG_ON(cond)
31 #endif
32 
33 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
34 #define CPU_BIG_ENDIAN		0
35 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
36 #define CPU_BIG_ENDIAN		1
37 #endif
38 
39 /* type hackery */
40 
41 #define type_is_exact(_val, _type)					\
42 	__builtin_types_compatible_p(typeof(_val), _type)
43 
44 #define type_is(_val, _type)						\
45 	(__builtin_types_compatible_p(typeof(_val), _type) ||		\
46 	 __builtin_types_compatible_p(typeof(_val), const _type))
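
/*
 * Editor's note: type_is() matches both _type and its const-qualified
 * variant, which matters for pointer types; e.g. if p is a const char *,
 * type_is(p, char *) is true while type_is_exact(p, char *) is false.
 */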
47 
48 /* Userspace doesn't align allocations as nicely as the kernel allocators: */
49 static inline size_t buf_pages(void *p, size_t len)
50 {
51 	return DIV_ROUND_UP(len +
52 			    ((unsigned long) p & (PAGE_SIZE - 1)),
53 			    PAGE_SIZE);
54 }
55 
56 static inline void vpfree(void *p, size_t size)
57 {
58 	if (is_vmalloc_addr(p))
59 		vfree(p);
60 	else
61 		free_pages((unsigned long) p, get_order(size));
62 }
63 
64 static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
65 {
66 	return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
67 					 get_order(size)) ?:
68 		__vmalloc(size, gfp_mask);
69 }
70 
71 static inline void kvpfree(void *p, size_t size)
72 {
73 	if (size < PAGE_SIZE)
74 		kfree(p);
75 	else
76 		vpfree(p, size);
77 }
78 
79 static inline void *kvpmalloc(size_t size, gfp_t gfp_mask)
80 {
81 	return size < PAGE_SIZE
82 		? kmalloc(size, gfp_mask)
83 		: vpmalloc(size, gfp_mask);
84 }
85 
86 int mempool_init_kvpmalloc_pool(mempool_t *, int, size_t);
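
/*
 * Editor's sketch of the intended pairing (not bcachefs code): kvpmalloc()
 * uses kmalloc() for sub-page sizes and the page allocator with a vmalloc
 * fallback otherwise, so kvpfree() must be given the same size to route the
 * free correctly:
 *
 *	void *buf = kvpmalloc(bytes, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kvpfree(buf, bytes);
 */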
87 
88 #define HEAP(type)							\
89 struct {								\
90 	size_t size, used;						\
91 	type *data;							\
92 }
93 
94 #define DECLARE_HEAP(type, name) HEAP(type) name
95 
96 #define init_heap(heap, _size, gfp)					\
97 ({									\
98 	(heap)->used = 0;						\
99 	(heap)->size = (_size);						\
100 	(heap)->data = kvpmalloc((heap)->size * sizeof((heap)->data[0]),\
101 				 (gfp));				\
102 })
103 
104 #define free_heap(heap)							\
105 do {									\
106 	kvpfree((heap)->data, (heap)->size * sizeof((heap)->data[0]));	\
107 	(heap)->data = NULL;						\
108 } while (0)
109 
110 #define heap_set_backpointer(h, i, _fn)					\
111 do {									\
112 	void (*fn)(typeof(h), size_t) = _fn;				\
113 	if (fn)								\
114 		fn(h, i);						\
115 } while (0)
116 
117 #define heap_swap(h, i, j, set_backpointer)				\
118 do {									\
119 	swap((h)->data[i], (h)->data[j]);				\
120 	heap_set_backpointer(h, i, set_backpointer);			\
121 	heap_set_backpointer(h, j, set_backpointer);			\
122 } while (0)
123 
124 #define heap_peek(h)							\
125 ({									\
126 	EBUG_ON(!(h)->used);						\
127 	(h)->data[0];							\
128 })
129 
130 #define heap_full(h)	((h)->used == (h)->size)
131 
132 #define heap_sift_down(h, i, cmp, set_backpointer)			\
133 do {									\
134 	size_t _c, _j = i;						\
135 									\
136 	for (; _j * 2 + 1 < (h)->used; _j = _c) {			\
137 		_c = _j * 2 + 1;					\
138 		if (_c + 1 < (h)->used &&				\
139 		    cmp(h, (h)->data[_c], (h)->data[_c + 1]) >= 0)	\
140 			_c++;						\
141 									\
142 		if (cmp(h, (h)->data[_c], (h)->data[_j]) >= 0)		\
143 			break;						\
144 		heap_swap(h, _c, _j, set_backpointer);			\
145 	}								\
146 } while (0)
147 
148 #define heap_sift_up(h, i, cmp, set_backpointer)			\
149 do {									\
150 	while (i) {							\
151 		size_t p = (i - 1) / 2;					\
152 		if (cmp(h, (h)->data[i], (h)->data[p]) >= 0)		\
153 			break;						\
154 		heap_swap(h, i, p, set_backpointer);			\
155 		i = p;							\
156 	}								\
157 } while (0)
158 
159 #define __heap_add(h, d, cmp, set_backpointer)				\
160 ({									\
161 	size_t _i = (h)->used++;					\
162 	(h)->data[_i] = d;						\
163 	heap_set_backpointer(h, _i, set_backpointer);			\
164 									\
165 	heap_sift_up(h, _i, cmp, set_backpointer);			\
166 	_i;								\
167 })
168 
169 #define heap_add(h, d, cmp, set_backpointer)				\
170 ({									\
171 	bool _r = !heap_full(h);					\
172 	if (_r)								\
173 		__heap_add(h, d, cmp, set_backpointer);			\
174 	_r;								\
175 })
176 
177 #define heap_add_or_replace(h, new, cmp, set_backpointer)		\
178 do {									\
179 	if (!heap_add(h, new, cmp, set_backpointer) &&			\
180 	    cmp(h, new, heap_peek(h)) >= 0) {				\
181 		(h)->data[0] = new;					\
182 		heap_set_backpointer(h, 0, set_backpointer);		\
183 		heap_sift_down(h, 0, cmp, set_backpointer);		\
184 	}								\
185 } while (0)
186 
187 #define heap_del(h, i, cmp, set_backpointer)				\
188 do {									\
189 	size_t _i = (i);						\
190 									\
191 	BUG_ON(_i >= (h)->used);					\
192 	(h)->used--;							\
193 	if ((_i) < (h)->used) {						\
194 		heap_swap(h, _i, (h)->used, set_backpointer);		\
195 		heap_sift_up(h, _i, cmp, set_backpointer);		\
196 		heap_sift_down(h, _i, cmp, set_backpointer);		\
197 	}								\
198 } while (0)
199 
200 #define heap_pop(h, d, cmp, set_backpointer)				\
201 ({									\
202 	bool _r = (h)->used;						\
203 	if (_r) {							\
204 		(d) = (h)->data[0];					\
205 		heap_del(h, 0, cmp, set_backpointer);			\
206 	}								\
207 	_r;								\
208 })
209 
210 #define heap_resort(heap, cmp, set_backpointer)				\
211 do {									\
212 	ssize_t _i;							\
213 	for (_i = (ssize_t) (heap)->used / 2 - 1; _i >= 0; --_i)	\
214 		heap_sift_down(heap, _i, cmp, set_backpointer);		\
215 } while (0)
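
/*
 * Usage sketch for the heap macros (editor's addition; all example_* names
 * are hypothetical). cmp() takes the heap pointer plus two elements and
 * returns < 0 when its first argument sorts first, so cmp_int() (defined
 * below) yields a min-heap; set_backpointer may be NULL:
 *
 *	static int example_cmp(void *h, u64 l, u64 r)
 *	{
 *		return cmp_int(l, r);
 *	}
 *
 *	DECLARE_HEAP(u64, example);
 *	u64 v;
 *
 *	if (init_heap(&example, 128, GFP_KERNEL)) {
 *		heap_add(&example, 42, example_cmp, NULL);
 *		while (heap_pop(&example, v, example_cmp, NULL))
 *			pr_info("%llu\n", v);
 *		free_heap(&example);
 *	}
 */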
216 
217 #define ANYSINT_MAX(t)							\
218 	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
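
/*
 * e.g. for a 16-bit signed type: ((1 << 14) - 1) * 2 + 1 = 32767, i.e.
 * S16_MAX, computed without ever shifting into the sign bit.
 */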
219 
220 #include "printbuf.h"
221 
222 #define prt_vprintf(_out, ...)		bch2_prt_vprintf(_out, __VA_ARGS__)
223 #define prt_printf(_out, ...)		bch2_prt_printf(_out, __VA_ARGS__)
224 #define printbuf_str(_buf)		bch2_printbuf_str(_buf)
225 #define printbuf_exit(_buf)		bch2_printbuf_exit(_buf)
226 
227 #define printbuf_tabstops_reset(_buf)	bch2_printbuf_tabstops_reset(_buf)
228 #define printbuf_tabstop_pop(_buf)	bch2_printbuf_tabstop_pop(_buf)
229 #define printbuf_tabstop_push(_buf, _n)	bch2_printbuf_tabstop_push(_buf, _n)
230 
231 #define printbuf_indent_add(_out, _n)	bch2_printbuf_indent_add(_out, _n)
232 #define printbuf_indent_sub(_out, _n)	bch2_printbuf_indent_sub(_out, _n)
233 
234 #define prt_newline(_out)		bch2_prt_newline(_out)
235 #define prt_tab(_out)			bch2_prt_tab(_out)
236 #define prt_tab_rjust(_out)		bch2_prt_tab_rjust(_out)
237 
238 #define prt_bytes_indented(...)		bch2_prt_bytes_indented(__VA_ARGS__)
239 #define prt_u64(_out, _v)		prt_printf(_out, "%llu", (u64) (_v))
240 #define prt_human_readable_u64(...)	bch2_prt_human_readable_u64(__VA_ARGS__)
241 #define prt_human_readable_s64(...)	bch2_prt_human_readable_s64(__VA_ARGS__)
242 #define prt_units_u64(...)		bch2_prt_units_u64(__VA_ARGS__)
243 #define prt_units_s64(...)		bch2_prt_units_s64(__VA_ARGS__)
244 #define prt_string_option(...)		bch2_prt_string_option(__VA_ARGS__)
245 #define prt_bitflags(...)		bch2_prt_bitflags(__VA_ARGS__)
246 
247 void bch2_pr_time_units(struct printbuf *, u64);
248 
249 #ifdef __KERNEL__
250 static inline void pr_time(struct printbuf *out, u64 time)
251 {
252 	prt_printf(out, "%llu", time);
253 }
254 #else
255 #include <time.h>
256 static inline void pr_time(struct printbuf *out, u64 _time)
257 {
258 	char time_str[64];
259 	time_t time = _time;
260 	struct tm *tm = localtime(&time);
261 	size_t err = strftime(time_str, sizeof(time_str), "%c", tm);
262 	if (!err)
263 		prt_printf(out, "(formatting error)");
264 	else
265 		prt_printf(out, "%s", time_str);
266 }
267 #endif
268 
269 #ifdef __KERNEL__
270 static inline void uuid_unparse_lower(u8 *uuid, char *out)
271 {
272 	sprintf(out, "%pUb", uuid);
273 }
274 #else
275 #include <uuid/uuid.h>
276 #endif
277 
278 static inline void pr_uuid(struct printbuf *out, u8 *uuid)
279 {
280 	char uuid_str[40];
281 
282 	uuid_unparse_lower(uuid, uuid_str);
283 	prt_printf(out, "%s", uuid_str);
284 }
285 
286 int bch2_strtoint_h(const char *, int *);
287 int bch2_strtouint_h(const char *, unsigned int *);
288 int bch2_strtoll_h(const char *, long long *);
289 int bch2_strtoull_h(const char *, unsigned long long *);
290 int bch2_strtou64_h(const char *, u64 *);
291 
292 static inline int bch2_strtol_h(const char *cp, long *res)
293 {
294 #if BITS_PER_LONG == 32
295 	return bch2_strtoint_h(cp, (int *) res);
296 #else
297 	return bch2_strtoll_h(cp, (long long *) res);
298 #endif
299 }
300 
301 static inline int bch2_strtoul_h(const char *cp, long *res)
302 {
303 #if BITS_PER_LONG == 32
304 	return bch2_strtouint_h(cp, (unsigned int *) res);
305 #else
306 	return bch2_strtoull_h(cp, (unsigned long long *) res);
307 #endif
308 }
309 
310 #define strtoi_h(cp, res)						\
311 	( type_is(*res, int)		? bch2_strtoint_h(cp, (void *) res)\
312 	: type_is(*res, long)		? bch2_strtol_h(cp, (void *) res)\
313 	: type_is(*res, long long)	? bch2_strtoll_h(cp, (void *) res)\
314 	: type_is(*res, unsigned)	? bch2_strtouint_h(cp, (void *) res)\
315 	: type_is(*res, unsigned long)	? bch2_strtoul_h(cp, (void *) res)\
316 	: type_is(*res, unsigned long long) ? bch2_strtoull_h(cp, (void *) res)\
317 	: -EINVAL)
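
/*
 * Editor's sketch (hypothetical call): the _h helpers accept human-readable
 * size suffixes, and strtoi_h() picks the right parser from the type of
 * *res:
 *
 *	u64 v;
 *	int ret = strtoi_h("1M", &v);
 */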
318 
319 #define strtoul_safe(cp, var)						\
320 ({									\
321 	unsigned long _v;						\
322 	int _r = kstrtoul(cp, 10, &_v);					\
323 	if (!_r)							\
324 		var = _v;						\
325 	_r;								\
326 })
327 
328 #define strtoul_safe_clamp(cp, var, min, max)				\
329 ({									\
330 	unsigned long _v;						\
331 	int _r = kstrtoul(cp, 10, &_v);					\
332 	if (!_r)							\
333 		var = clamp_t(typeof(var), _v, min, max);		\
334 	_r;								\
335 })
336 
337 #define strtoul_safe_restrict(cp, var, min, max)			\
338 ({									\
339 	unsigned long _v;						\
340 	int _r = kstrtoul(cp, 10, &_v);					\
341 	if (!_r && _v >= min && _v <= max)				\
342 		var = _v;						\
343 	else								\
344 		_r = -EINVAL;						\
345 	_r;								\
346 })
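
/*
 * Editor's note: these helpers only assign to @var on a successful parse
 * (and, for _restrict, only when the value lies in [min, max]), so a sysfs
 * store can pass the live variable directly (hypothetical field name):
 *
 *	ret = strtoul_safe_clamp(buf, c->copy_gc_percent, 1, 100);
 *	if (ret)
 *		return ret;
 */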
347 
348 #define snprint(out, var)						\
349 	prt_printf(out,							\
350 		   type_is(var, int)		? "%i\n"		\
351 		 : type_is(var, unsigned)	? "%u\n"		\
352 		 : type_is(var, long)		? "%li\n"		\
353 		 : type_is(var, unsigned long)	? "%lu\n"		\
354 		 : type_is(var, s64)		? "%lli\n"		\
355 		 : type_is(var, u64)		? "%llu\n"		\
356 		 : type_is(var, char *)		? "%s\n"		\
357 		 : "%i\n", var)
358 
359 bool bch2_is_zero(const void *, size_t);
360 
361 u64 bch2_read_flag_list(char *, const char * const[]);
362 
363 void bch2_prt_u64_binary(struct printbuf *, u64, unsigned);
364 
365 void bch2_print_string_as_lines(const char *prefix, const char *lines);
366 
367 typedef DARRAY(unsigned long) bch_stacktrace;
368 int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *);
369 void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *);
370 int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *);
371 
372 #define NR_QUANTILES	15
373 #define QUANTILE_IDX(i)	inorder_to_eytzinger0(i, NR_QUANTILES)
374 #define QUANTILE_FIRST	eytzinger0_first(NR_QUANTILES)
375 #define QUANTILE_LAST	eytzinger0_last(NR_QUANTILES)
376 
377 struct bch2_quantiles {
378 	struct bch2_quantile_entry {
379 		u64	m;
380 		u64	step;
381 	}		entries[NR_QUANTILES];
382 };
383 
384 struct bch2_time_stat_buffer {
385 	unsigned	nr;
386 	struct bch2_time_stat_buffer_entry {
387 		u64	start;
388 		u64	end;
389 	}		entries[32];
390 };
391 
392 struct bch2_time_stats {
393 	spinlock_t	lock;
394 	/* all fields are in nanoseconds */
395 	u64		max_duration;
396 	u64		min_duration;
397 	u64		max_freq;
398 	u64		min_freq;
399 	u64		last_event;
400 	struct bch2_quantiles quantiles;
401 
402 	struct mean_and_variance	  duration_stats;
403 	struct mean_and_variance_weighted duration_stats_weighted;
404 	struct mean_and_variance	  freq_stats;
405 	struct mean_and_variance_weighted freq_stats_weighted;
406 	struct bch2_time_stat_buffer __percpu *buffer;
407 };
408 
409 #ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
410 void __bch2_time_stats_update(struct bch2_time_stats *stats, u64, u64);
411 #else
412 static inline void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end) {}
413 #endif
414 
415 static inline void bch2_time_stats_update(struct bch2_time_stats *stats, u64 start)
416 {
417 	__bch2_time_stats_update(stats, start, local_clock());
418 }
419 
420 void bch2_time_stats_to_text(struct printbuf *, struct bch2_time_stats *);
421 
422 void bch2_time_stats_exit(struct bch2_time_stats *);
423 void bch2_time_stats_init(struct bch2_time_stats *);
424 
425 #define ewma_add(ewma, val, weight)					\
426 ({									\
427 	typeof(ewma) _ewma = (ewma);					\
428 	typeof(weight) _weight = (weight);				\
429 									\
430 	(((_ewma << _weight) - _ewma) + (val)) >> _weight;		\
431 })
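
/*
 * Algebraically ewma + (val - ewma) / 2^weight, i.e. an exponentially
 * weighted moving average where each new sample carries weight 1/2^weight:
 *
 *	avg = ewma_add(avg, sample, 3);		(each sample weighted 1/8)
 */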
432 
433 struct bch_ratelimit {
434 	/* Next time we want to do some work, in nanoseconds */
435 	u64			next;
436 
437 	/*
438 	 * Rate at which we want to do work, in units per second
439 	 * The units here correspond to the units passed to
440 	 * bch2_ratelimit_increment()
441 	 */
442 	unsigned		rate;
443 };
444 
445 static inline void bch2_ratelimit_reset(struct bch_ratelimit *d)
446 {
447 	d->next = local_clock();
448 }
449 
450 u64 bch2_ratelimit_delay(struct bch_ratelimit *);
451 void bch2_ratelimit_increment(struct bch_ratelimit *, u64);
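
/*
 * Editor's sketch of the intended pattern (hypothetical caller): account
 * the work done, then sleep for however long bch2_ratelimit_delay() asks
 * (it returns a delay in jiffies, per util.c):
 *
 *	u64 delay;
 *
 *	bch2_ratelimit_increment(&c->rate, sectors);
 *	while ((delay = bch2_ratelimit_delay(&c->rate)))
 *		schedule_timeout_interruptible(delay);
 */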
452 
453 struct bch_pd_controller {
454 	struct bch_ratelimit	rate;
455 	unsigned long		last_update;
456 
457 	s64			last_actual;
458 	s64			smoothed_derivative;
459 
460 	unsigned		p_term_inverse;
461 	unsigned		d_smooth;
462 	unsigned		d_term;
463 
464 	/* for exporting to sysfs (no effect on behavior) */
465 	s64			last_derivative;
466 	s64			last_proportional;
467 	s64			last_change;
468 	s64			last_target;
469 
470 	/*
471 	 * If true, the rate will not increase if bch2_ratelimit_delay()
472 	 * is not being called often enough.
473 	 */
474 	bool			backpressure;
475 };
476 
477 void bch2_pd_controller_update(struct bch_pd_controller *, s64, s64, int);
478 void bch2_pd_controller_init(struct bch_pd_controller *);
479 void bch2_pd_controller_debug_to_text(struct printbuf *, struct bch_pd_controller *);
480 
481 #define sysfs_pd_controller_attribute(name)				\
482 	rw_attribute(name##_rate);					\
483 	rw_attribute(name##_rate_bytes);				\
484 	rw_attribute(name##_rate_d_term);				\
485 	rw_attribute(name##_rate_p_term_inverse);			\
486 	read_attribute(name##_rate_debug)
487 
488 #define sysfs_pd_controller_files(name)					\
489 	&sysfs_##name##_rate,						\
490 	&sysfs_##name##_rate_bytes,					\
491 	&sysfs_##name##_rate_d_term,					\
492 	&sysfs_##name##_rate_p_term_inverse,				\
493 	&sysfs_##name##_rate_debug
494 
495 #define sysfs_pd_controller_show(name, var)				\
496 do {									\
497 	sysfs_hprint(name##_rate,		(var)->rate.rate);	\
498 	sysfs_print(name##_rate_bytes,		(var)->rate.rate);	\
499 	sysfs_print(name##_rate_d_term,		(var)->d_term);		\
500 	sysfs_print(name##_rate_p_term_inverse,	(var)->p_term_inverse);	\
501 									\
502 	if (attr == &sysfs_##name##_rate_debug)				\
503 		bch2_pd_controller_debug_to_text(out, var);		\
504 } while (0)
505 
506 #define sysfs_pd_controller_store(name, var)				\
507 do {									\
508 	sysfs_strtoul_clamp(name##_rate,				\
509 			    (var)->rate.rate, 1, UINT_MAX);		\
510 	sysfs_strtoul_clamp(name##_rate_bytes,				\
511 			    (var)->rate.rate, 1, UINT_MAX);		\
512 	sysfs_strtoul(name##_rate_d_term,	(var)->d_term);		\
513 	sysfs_strtoul_clamp(name##_rate_p_term_inverse,			\
514 			    (var)->p_term_inverse, 1, INT_MAX);		\
515 } while (0)
516 
517 #define container_of_or_null(ptr, type, member)				\
518 ({									\
519 	typeof(ptr) _ptr = ptr;						\
520 	_ptr ? container_of(_ptr, type, member) : NULL;			\
521 })
522 
523 /* Does linear interpolation between powers of two */
524 static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
525 {
526 	unsigned fract = x & ~(~0U << fract_bits);
527 
528 	x >>= fract_bits;
529 	x   = 1 << x;
530 	x  += (x * fract) >> fract_bits;
531 
532 	return x;
533 }
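
/*
 * e.g. with fract_bits = 3, x = 12 encodes 1.5 (integer part 1, fraction
 * 4/8): the result is (1 << 1) + ((2 * 4) >> 3) = 3, halfway between
 * 2^1 = 2 and 2^2 = 4.
 */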
534 
535 void bch2_bio_map(struct bio *bio, void *base, size_t);
536 int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);
537 
538 static inline sector_t bdev_sectors(struct block_device *bdev)
539 {
540 	return bdev->bd_inode->i_size >> 9;
541 }
542 
543 #define closure_bio_submit(bio, cl)					\
544 do {									\
545 	closure_get(cl);						\
546 	submit_bio(bio);						\
547 } while (0)
548 
549 #define kthread_wait(cond)						\
550 ({									\
551 	int _ret = 0;							\
552 									\
553 	while (1) {							\
554 		set_current_state(TASK_INTERRUPTIBLE);			\
555 		if (kthread_should_stop()) {				\
556 			_ret = -1;					\
557 			break;						\
558 		}							\
559 									\
560 		if (cond)						\
561 			break;						\
562 									\
563 		schedule();						\
564 	}								\
565 	set_current_state(TASK_RUNNING);				\
566 	_ret;								\
567 })
568 
569 #define kthread_wait_freezable(cond)					\
570 ({									\
571 	int _ret = 0;							\
572 	while (1) {							\
573 		set_current_state(TASK_INTERRUPTIBLE);			\
574 		if (kthread_should_stop()) {				\
575 			_ret = -1;					\
576 			break;						\
577 		}							\
578 									\
579 		if (cond)						\
580 			break;						\
581 									\
582 		schedule();						\
583 		try_to_freeze();					\
584 	}								\
585 	set_current_state(TASK_RUNNING);				\
586 	_ret;								\
587 })
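
/*
 * Editor's sketch (work_available() and do_work() are hypothetical): both
 * macros return 0 once @cond is true and -1 if the thread was asked to
 * stop:
 *
 *	while (!kthread_wait_freezable(work_available(c)))
 *		do_work(c);
 */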
588 
589 size_t bch2_rand_range(size_t);
590 
591 void memcpy_to_bio(struct bio *, struct bvec_iter, const void *);
592 void memcpy_from_bio(void *, struct bio *, struct bvec_iter);
593 
594 static inline void memcpy_u64s_small(void *dst, const void *src,
595 				     unsigned u64s)
596 {
597 	u64 *d = dst;
598 	const u64 *s = src;
599 
600 	while (u64s--)
601 		*d++ = *s++;
602 }
603 
604 static inline void __memcpy_u64s(void *dst, const void *src,
605 				 unsigned u64s)
606 {
607 #ifdef CONFIG_X86_64
608 	long d0, d1, d2;
609 
610 	asm volatile("rep ; movsq"
611 		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
612 		     : "0" (u64s), "1" (dst), "2" (src)
613 		     : "memory");
614 #else
615 	u64 *d = dst;
616 	const u64 *s = src;
617 
618 	while (u64s--)
619 		*d++ = *s++;
620 #endif
621 }
622 
623 static inline void memcpy_u64s(void *dst, const void *src,
624 			       unsigned u64s)
625 {
626 	EBUG_ON(!(dst >= src + u64s * sizeof(u64) ||
627 		 dst + u64s * sizeof(u64) <= src));
628 
629 	__memcpy_u64s(dst, src, u64s);
630 }
631 
632 static inline void __memmove_u64s_down(void *dst, const void *src,
633 				       unsigned u64s)
634 {
635 	__memcpy_u64s(dst, src, u64s);
636 }
637 
638 static inline void memmove_u64s_down(void *dst, const void *src,
639 				     unsigned u64s)
640 {
641 	EBUG_ON(dst > src);
642 
643 	__memmove_u64s_down(dst, src, u64s);
644 }
645 
646 static inline void __memmove_u64s_down_small(void *dst, const void *src,
647 				       unsigned u64s)
648 {
649 	memcpy_u64s_small(dst, src, u64s);
650 }
651 
652 static inline void memmove_u64s_down_small(void *dst, const void *src,
653 				     unsigned u64s)
654 {
655 	EBUG_ON(dst > src);
656 
657 	__memmove_u64s_down_small(dst, src, u64s);
658 }
659 
660 static inline void __memmove_u64s_up_small(void *_dst, const void *_src,
661 					   unsigned u64s)
662 {
663 	u64 *dst = (u64 *) _dst + u64s;
664 	u64 *src = (u64 *) _src + u64s;
665 
666 	while (u64s--)
667 		*--dst = *--src;
668 }
669 
670 static inline void memmove_u64s_up_small(void *dst, const void *src,
671 					 unsigned u64s)
672 {
673 	EBUG_ON(dst < src);
674 
675 	__memmove_u64s_up_small(dst, src, u64s);
676 }
677 
678 static inline void __memmove_u64s_up(void *_dst, const void *_src,
679 				     unsigned u64s)
680 {
681 	u64 *dst = (u64 *) _dst + u64s - 1;
682 	u64 *src = (u64 *) _src + u64s - 1;
683 
684 #ifdef CONFIG_X86_64
685 	long d0, d1, d2;
686 
687 	asm volatile("std ;\n"
688 		     "rep ; movsq\n"
689 		     "cld ;\n"
690 		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
691 		     : "0" (u64s), "1" (dst), "2" (src)
692 		     : "memory");
693 #else
694 	while (u64s--)
695 		*dst-- = *src--;
696 #endif
697 }
698 
699 static inline void memmove_u64s_up(void *dst, const void *src,
700 				   unsigned u64s)
701 {
702 	EBUG_ON(dst < src);
703 
704 	__memmove_u64s_up(dst, src, u64s);
705 }
706 
707 static inline void memmove_u64s(void *dst, const void *src,
708 				unsigned u64s)
709 {
710 	if (dst < src)
711 		__memmove_u64s_down(dst, src, u64s);
712 	else
713 		__memmove_u64s_up(dst, src, u64s);
714 }
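
/*
 * Editor's note: the _down variants copy low-to-high and require dst <= src;
 * the _up variants copy high-to-low and require dst >= src. memmove_u64s()
 * picks the safe direction itself, so overlapping ranges behave like
 * memmove(), just in whole u64s.
 */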
715 
716 /* Set the last few bytes up to a u64 boundary given an offset into a buffer. */
717 static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
718 {
719 	unsigned rem = round_up(bytes, sizeof(u64)) - bytes;
720 
721 	memset(s + bytes, c, rem);
722 }
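
/*
 * e.g. memset_u64s_tail(buf, 0, 20) zeroes bytes 20..23: round_up(20, 8)
 * is 24, so rem = 4 bytes of padding out to the next u64 boundary.
 */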
723 
724 void sort_cmp_size(void *base, size_t num, size_t size,
725 	  int (*cmp_func)(const void *, const void *, size_t),
726 	  void (*swap_func)(void *, void *, size_t));
727 
728 /* just the memmove, doesn't update @_nr */
729 #define __array_insert_item(_array, _nr, _pos)				\
730 	memmove(&(_array)[(_pos) + 1],					\
731 		&(_array)[(_pos)],					\
732 		sizeof((_array)[0]) * ((_nr) - (_pos)))
733 
734 #define array_insert_item(_array, _nr, _pos, _new_item)			\
735 do {									\
736 	__array_insert_item(_array, _nr, _pos);				\
737 	(_nr)++;							\
738 	(_array)[(_pos)] = (_new_item);					\
739 } while (0)
740 
741 #define array_remove_items(_array, _nr, _pos, _nr_to_remove)		\
742 do {									\
743 	(_nr) -= (_nr_to_remove);					\
744 	memmove(&(_array)[(_pos)],					\
745 		&(_array)[(_pos) + (_nr_to_remove)],			\
746 		sizeof((_array)[0]) * ((_nr) - (_pos)));		\
747 } while (0)
748 
749 #define array_remove_item(_array, _nr, _pos)				\
750 	array_remove_items(_array, _nr, _pos, 1)
751 
752 static inline void __move_gap(void *array, size_t element_size,
753 			      size_t nr, size_t size,
754 			      size_t old_gap, size_t new_gap)
755 {
756 	size_t gap_end = old_gap + size - nr;
757 
758 	if (new_gap < old_gap) {
759 		size_t move = old_gap - new_gap;
760 
761 		memmove(array + element_size * (gap_end - move),
762 			array + element_size * (old_gap - move),
763 				element_size * move);
764 	} else if (new_gap > old_gap) {
765 		size_t move = new_gap - old_gap;
766 
767 		memmove(array + element_size * old_gap,
768 			array + element_size * gap_end,
769 				element_size * move);
770 	}
771 }
772 
773 /* Move the gap in a gap buffer: */
774 #define move_gap(_array, _nr, _size, _old_gap, _new_gap)	\
775 	__move_gap(_array, sizeof(_array[0]), _nr, _size, _old_gap, _new_gap)
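
/*
 * Editor's sketch: a gap buffer keeps its _size - _nr unused slots as a
 * contiguous "gap" so inserts at the gap position are O(1); move_gap()
 * relocates that gap before inserting elsewhere (hypothetical darray-style
 * buffer):
 *
 *	move_gap(b.data, b.nr, b.size, old_gap_pos, insert_pos);
 */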
776 
777 #define bubble_sort(_base, _nr, _cmp)					\
778 do {									\
779 	ssize_t _i, _last;						\
780 	bool _swapped = true;						\
781 									\
782 	for (_last = (ssize_t) (_nr) - 1; _last > 0 && _swapped; --_last) {\
783 		_swapped = false;					\
784 		for (_i = 0; _i < _last; _i++)				\
785 			if (_cmp((_base)[_i], (_base)[_i + 1]) > 0) {	\
786 				swap((_base)[_i], (_base)[_i + 1]);	\
787 				_swapped = true;			\
788 			}						\
789 	}								\
790 } while (0)
791 
792 static inline u64 percpu_u64_get(u64 __percpu *src)
793 {
794 	u64 ret = 0;
795 	int cpu;
796 
797 	for_each_possible_cpu(cpu)
798 		ret += *per_cpu_ptr(src, cpu);
799 	return ret;
800 }
801 
802 static inline void percpu_u64_set(u64 __percpu *dst, u64 src)
803 {
804 	int cpu;
805 
806 	for_each_possible_cpu(cpu)
807 		*per_cpu_ptr(dst, cpu) = 0;
808 	this_cpu_write(*dst, src);
809 }
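
/*
 * Editor's note: percpu_u64_get() sums counters that other CPUs may still be
 * updating, so the result is a snapshot, not an exact total; percpu_u64_set()
 * zeroes every CPU's slot and writes @src to the local CPU's, so callers must
 * exclude concurrent writers.
 */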
810 
811 static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
812 {
813 	unsigned i;
814 
815 	for (i = 0; i < nr; i++)
816 		acc[i] += src[i];
817 }
818 
819 static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src,
820 				   unsigned nr)
821 {
822 	int cpu;
823 
824 	for_each_possible_cpu(cpu)
825 		acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
826 }
827 
828 static inline void percpu_memset(void __percpu *p, int c, size_t bytes)
829 {
830 	int cpu;
831 
832 	for_each_possible_cpu(cpu)
833 		memset(per_cpu_ptr(p, cpu), c, bytes);
834 }
835 
836 u64 *bch2_acc_percpu_u64s(u64 __percpu *, unsigned);
837 
838 #define cmp_int(l, r)		((l > r) - (l < r))
839 
840 static inline int u8_cmp(u8 l, u8 r)
841 {
842 	return cmp_int(l, r);
843 }
844 
845 static inline int cmp_le32(__le32 l, __le32 r)
846 {
847 	return cmp_int(le32_to_cpu(l), le32_to_cpu(r));
848 }
849 
850 #include <linux/uuid.h>
851 
852 #endif /* _BCACHEFS_UTIL_H */
853