/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_EXTENTS_H
#define _BCACHEFS_EXTENTS_H

#include "bcachefs.h"
#include "bkey.h"
#include "extents_types.h"

struct bch_fs;
struct btree_trans;
enum bch_validate_flags;

/* extent entries: */

#define extent_entry_last(_e)						\
	((typeof(&(_e).v->start[0])) bkey_val_end(_e))

#define entry_to_ptr(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_ptr(_entry));		\
									\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const struct bch_extent_ptr *) (_entry),		\
		(struct bch_extent_ptr *) (_entry));			\
})

/* downcast, preserves const */
#define to_entry(_entry)						\
({									\
	BUILD_BUG_ON(!type_is(_entry, union bch_extent_crc *) &&	\
		     !type_is(_entry, struct bch_extent_ptr *) &&	\
		     !type_is(_entry, struct bch_extent_stripe_ptr *));	\
									\
	__builtin_choose_expr(						\
		(type_is_exact(_entry, const union bch_extent_crc *) ||	\
		 type_is_exact(_entry, const struct bch_extent_ptr *) ||\
		 type_is_exact(_entry, const struct bch_extent_stripe_ptr *)),\
		(const union bch_extent_entry *) (_entry),		\
		(union bch_extent_entry *) (_entry));			\
})
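/*
 * Usage sketch (hypothetical pointers): to_entry() casts a specific entry
 * type up to the generic union; entry_to_ptr() casts back down. Both
 * preserve const qualification:
 *
 *	const struct bch_extent_ptr *ptr = ...;
 *	const union bch_extent_entry *entry = to_entry(ptr);
 *
 *	ptr = entry_to_ptr(entry);	// still const-qualified
 */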

#define extent_entry_next(_entry)					\
	((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))

#define extent_entry_next_safe(_entry, _end)				\
	(likely(__extent_entry_type(_entry) < BCH_EXTENT_ENTRY_MAX)	\
	 ? extent_entry_next(_entry)					\
	 : _end)

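/*
 * An entry's type field is a bitmask with a single bit set,
 * 1 << BCH_EXTENT_ENTRY_<type> (see bch2_bkey_append_ptr() below), so
 * __ffs() recovers the enum value: e.g. type == 1 << BCH_EXTENT_ENTRY_crc32
 * decodes to BCH_EXTENT_ENTRY_crc32. A zeroed type field decodes to
 * BCH_EXTENT_ENTRY_MAX, which extent_entry_next_safe() treats as the end of
 * iteration.
 */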
static inline unsigned
__extent_entry_type(const union bch_extent_entry *e)
{
	return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;
}

static inline enum bch_extent_entry_type
extent_entry_type(const union bch_extent_entry *e)
{
	int ret = __ffs(e->type);

	EBUG_ON(ret < 0 || ret >= BCH_EXTENT_ENTRY_MAX);

	return ret;
}

static inline size_t extent_entry_bytes(const union bch_extent_entry *entry)
{
	switch (extent_entry_type(entry)) {
#define x(f, n)						\
	case BCH_EXTENT_ENTRY_##f:			\
		return sizeof(struct bch_extent_##f);
	BCH_EXTENT_ENTRY_TYPES()
#undef x
	default:
		BUG();
	}
}

static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
{
	return extent_entry_bytes(entry) / sizeof(u64);
}

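/*
 * Inserts @new at @dst, shifting everything from @dst to the end of the
 * value up to make room. Worked example (hypothetical layout): with a value
 * [A][B][C] and dst pointing at B, inserting N memmoves [B][C] up by
 * extent_entry_u64s(N) u64s to open a gap, grows k->k.u64s to match, then
 * copies N into the gap, giving [A][N][B][C].
 */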
static inline void __extent_entry_insert(struct bkey_i *k,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));

	memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
			      dst, (u64 *) end - (u64 *) dst);
	k->k.u64s += extent_entry_u64s(new);
	memcpy_u64s_small(dst, new, extent_entry_u64s(new));
}

static inline void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
{
	union bch_extent_entry *next = extent_entry_next(entry);

	/* stripes have ptrs, but their layout doesn't work with this code */
	BUG_ON(k.k->type == KEY_TYPE_stripe);

	memmove_u64s_down(entry, next,
			  (u64 *) bkey_val_end(k) - (u64 *) next);
	k.k->u64s -= (u64 *) next - (u64 *) entry;
}

static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
{
	return __extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
}

static inline bool extent_entry_is_stripe_ptr(const union bch_extent_entry *e)
{
	return __extent_entry_type(e) == BCH_EXTENT_ENTRY_stripe_ptr;
}

static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
{
	switch (__extent_entry_type(e)) {
	case BCH_EXTENT_ENTRY_crc32:
	case BCH_EXTENT_ENTRY_crc64:
	case BCH_EXTENT_ENTRY_crc128:
		return true;
	default:
		return false;
	}
}

union bch_extent_crc {
	u8				type;
	struct bch_extent_crc32		crc32;
	struct bch_extent_crc64		crc64;
	struct bch_extent_crc128	crc128;
};

#define __entry_to_crc(_entry)						\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const union bch_extent_crc *) (_entry),		\
		(union bch_extent_crc *) (_entry))

#define entry_to_crc(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_crc(_entry));		\
									\
	__entry_to_crc(_entry);						\
})

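/*
 * Unpacks any of the three on-disk crc entry formats into the common
 * in-memory form. Note the size bias: the packed _compressed_size and
 * _uncompressed_size fields store (size - 1), hence the "+ 1" below. A NULL
 * crc means no crc entry precedes the pointer, which unpacks as an
 * unchecksummed, uncompressed extent whose sizes all equal k->size.
 */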
static inline struct bch_extent_crc_unpacked
bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
{
#define common_fields(_crc)						\
		.csum_type		= _crc.csum_type,		\
		.compression_type	= _crc.compression_type,	\
		.compressed_size	= _crc._compressed_size + 1,	\
		.uncompressed_size	= _crc._uncompressed_size + 1,	\
		.offset			= _crc.offset,			\
		.live_size		= k->size

	if (!crc)
		return (struct bch_extent_crc_unpacked) {
			.compressed_size	= k->size,
			.uncompressed_size	= k->size,
			.live_size		= k->size,
		};

	switch (extent_entry_type(to_entry(crc))) {
	case BCH_EXTENT_ENTRY_crc32: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc32),
		};

		*((__le32 *) &ret.csum.lo) = (__le32 __force) crc->crc32.csum;
		return ret;
	}
	case BCH_EXTENT_ENTRY_crc64: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc64),
			.nonce			= crc->crc64.nonce,
			.csum.lo		= (__force __le64) crc->crc64.csum_lo,
		};

		*((__le16 *) &ret.csum.hi) = (__le16 __force) crc->crc64.csum_hi;

		return ret;
	}
	case BCH_EXTENT_ENTRY_crc128: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc128),
			.nonce			= crc->crc128.nonce,
			.csum			= crc->crc128.csum,
		};

		return ret;
	}
	default:
		BUG();
	}
#undef common_fields
}

static inline bool crc_is_compressed(struct bch_extent_crc_unpacked crc)
{
	return (crc.compression_type != BCH_COMPRESSION_TYPE_none &&
		crc.compression_type != BCH_COMPRESSION_TYPE_incompressible);
}

static inline bool crc_is_encoded(struct bch_extent_crc_unpacked crc)
{
	return crc.csum_type != BCH_CSUM_none || crc_is_compressed(crc);
}

void bch2_extent_crc_unpacked_to_text(struct printbuf *, struct bch_extent_crc_unpacked *);

/* bkey_ptrs: generically over any key type that has ptrs */

struct bkey_ptrs_c {
	const union bch_extent_entry	*start;
	const union bch_extent_entry	*end;
};

struct bkey_ptrs {
	union bch_extent_entry	*start;
	union bch_extent_entry	*end;
};

static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr: {
		struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);

		return (struct bkey_ptrs_c) {
			to_entry(&e.v->start[0]),
			to_entry(extent_entry_last(e))
		};
	}
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);

		return (struct bkey_ptrs_c) {
			e.v->start,
			extent_entry_last(e)
		};
	}
	case KEY_TYPE_stripe: {
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

		return (struct bkey_ptrs_c) {
			to_entry(&s.v->ptrs[0]),
			to_entry(&s.v->ptrs[s.v->nr_blocks]),
		};
	}
	case KEY_TYPE_reflink_v: {
		struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);

		return (struct bkey_ptrs_c) {
			r.v->start,
			bkey_val_end(r),
		};
	}
	case KEY_TYPE_btree_ptr_v2: {
		struct bkey_s_c_btree_ptr_v2 e = bkey_s_c_to_btree_ptr_v2(k);

		return (struct bkey_ptrs_c) {
			to_entry(&e.v->start[0]),
			to_entry(extent_entry_last(e))
		};
	}
	default:
		return (struct bkey_ptrs_c) { NULL, NULL };
	}
}

static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
{
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k.s_c);

	return (struct bkey_ptrs) {
		(void *) p.start,
		(void *) p.end
	};
}
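/*
 * Usage sketch (hypothetical caller; k is any struct bkey_s_c): pair
 * bch2_bkey_ptrs_c() with the iterators below to walk a key's pointers
 * regardless of key type, as e.g. bch2_bkey_dirty_devs() does further down:
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *	unsigned nr_dirty = 0;
 *
 *	bkey_for_each_ptr(ptrs, ptr)
 *		if (!ptr->cached)
 *			nr_dirty++;
 */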

#define __bkey_extent_entry_for_each_from(_start, _end, _entry)		\
	for ((_entry) = (_start);					\
	     (_entry) < (_end);						\
	     (_entry) = extent_entry_next_safe(_entry, _end))

#define __bkey_ptr_next(_ptr, _end)					\
({									\
	typeof(_end) _entry;						\
									\
	__bkey_extent_entry_for_each_from(to_entry(_ptr), _end, _entry)	\
		if (extent_entry_is_ptr(_entry))			\
			break;						\
									\
	_entry < (_end) ? entry_to_ptr(_entry) : NULL;			\
})

#define bkey_extent_entry_for_each_from(_p, _entry, _start)		\
	__bkey_extent_entry_for_each_from(_start, (_p).end, _entry)

#define bkey_extent_entry_for_each(_p, _entry)				\
	bkey_extent_entry_for_each_from(_p, _entry, _p.start)

#define __bkey_for_each_ptr(_start, _end, _ptr)				\
	for (typeof(_start) (_ptr) = (_start);				\
	     ((_ptr) = __bkey_ptr_next(_ptr, _end));			\
	     (_ptr)++)

#define bkey_ptr_next(_p, _ptr)						\
	__bkey_ptr_next(_ptr, (_p).end)

#define bkey_for_each_ptr(_p, _ptr)					\
	__bkey_for_each_ptr(&(_p).start->ptr, (_p).end, _ptr)

#define __bkey_ptr_next_decode(_k, _end, _ptr, _entry)			\
({									\
	__label__ out;							\
									\
	(_ptr).idx	= 0;						\
	(_ptr).has_ec	= false;					\
									\
	__bkey_extent_entry_for_each_from(_entry, _end, _entry)		\
		switch (__extent_entry_type(_entry)) {			\
		case BCH_EXTENT_ENTRY_ptr:				\
			(_ptr).ptr		= _entry->ptr;		\
			goto out;					\
		case BCH_EXTENT_ENTRY_crc32:				\
		case BCH_EXTENT_ENTRY_crc64:				\
		case BCH_EXTENT_ENTRY_crc128:				\
			(_ptr).crc = bch2_extent_crc_unpack(_k,		\
					entry_to_crc(_entry));		\
			break;						\
		case BCH_EXTENT_ENTRY_stripe_ptr:			\
			(_ptr).ec = _entry->stripe_ptr;			\
			(_ptr).has_ec	= true;				\
			break;						\
		default:						\
			/* nothing */					\
			break;						\
		}							\
out:									\
	_entry < (_end);						\
})

#define __bkey_for_each_ptr_decode(_k, _start, _end, _ptr, _entry)	\
	for ((_ptr).crc = bch2_extent_crc_unpack(_k, NULL),		\
	     (_entry) = _start;						\
	     __bkey_ptr_next_decode(_k, _end, _ptr, _entry);		\
	     (_entry) = extent_entry_next_safe(_entry, _end))

#define bkey_for_each_ptr_decode(_k, _p, _ptr, _entry)			\
	__bkey_for_each_ptr_decode(_k, (_p).start, (_p).end,		\
				   _ptr, _entry)
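/*
 * Usage sketch (hypothetical caller): the decode variants hand back each
 * pointer together with the checksum/compression and erasure coding state
 * accumulated from the entries preceding it:
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *	const union bch_extent_entry *entry;
 *	struct extent_ptr_decoded p;
 *	unsigned sectors = 0;
 *
 *	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
 *		if (crc_is_compressed(p.crc))
 *			sectors += p.crc.compressed_size;
 */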

#define bkey_crc_next(_k, _end, _crc, _iter)			\
({									\
	__bkey_extent_entry_for_each_from(_iter, _end, _iter)		\
		if (extent_entry_is_crc(_iter)) {			\
			(_crc) = bch2_extent_crc_unpack(_k,		\
						entry_to_crc(_iter));	\
			break;						\
		}							\
									\
	(_iter) < (_end);						\
})

#define __bkey_for_each_crc(_k, _start, _end, _crc, _iter)		\
	for ((_crc) = bch2_extent_crc_unpack(_k, NULL),			\
	     (_iter) = (_start);					\
	     bkey_crc_next(_k, _end, _crc, _iter);		\
	     (_iter) = extent_entry_next(_iter))

#define bkey_for_each_crc(_k, _p, _crc, _iter)				\
	__bkey_for_each_crc(_k, (_p).start, (_p).end, _crc, _iter)

/* Iterate over pointers in KEY_TYPE_extent: */

#define extent_for_each_entry_from(_e, _entry, _start)			\
	__bkey_extent_entry_for_each_from(_start,			\
				extent_entry_last(_e), _entry)

#define extent_for_each_entry(_e, _entry)				\
	extent_for_each_entry_from(_e, _entry, (_e).v->start)

#define extent_ptr_next(_e, _ptr)					\
	__bkey_ptr_next(_ptr, extent_entry_last(_e))

#define extent_for_each_ptr(_e, _ptr)					\
	__bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)

#define extent_for_each_ptr_decode(_e, _ptr, _entry)			\
	__bkey_for_each_ptr_decode((_e).k, (_e).v->start,		\
				   extent_entry_last(_e), _ptr, _entry)

/* utility code common to all keys with pointers: */

struct bch_dev_io_failures *bch2_dev_io_failures(struct bch_io_failures *,
						 unsigned);
void bch2_mark_io_failure(struct bch_io_failures *,
			  struct extent_ptr_decoded *);
int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
			       struct bch_io_failures *,
			       struct extent_ptr_decoded *);

/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_validate(struct bch_fs *, struct bkey_s_c,
			    enum bch_validate_flags);
void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
			    struct bkey_s_c);

int bch2_btree_ptr_v2_validate(struct bch_fs *, struct bkey_s_c,
			       enum bch_validate_flags);
void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
			      int, struct bkey_s);

#define bch2_bkey_ops_btree_ptr ((struct bkey_ops) {		\
	.key_validate	= bch2_btree_ptr_validate,		\
	.val_to_text	= bch2_btree_ptr_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.trigger	= bch2_trigger_extent,			\
})

#define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) {		\
	.key_validate	= bch2_btree_ptr_v2_validate,		\
	.val_to_text	= bch2_btree_ptr_v2_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.compat		= bch2_btree_ptr_v2_compat,		\
	.trigger	= bch2_trigger_extent,			\
	.min_val_size	= 40,					\
})

/* KEY_TYPE_extent: */

bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

#define bch2_bkey_ops_extent ((struct bkey_ops) {		\
	.key_validate	= bch2_bkey_ptrs_validate,		\
	.val_to_text	= bch2_bkey_ptrs_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.key_normalize	= bch2_extent_normalize,		\
	.key_merge	= bch2_extent_merge,			\
	.trigger	= bch2_trigger_extent,			\
})

/* KEY_TYPE_reservation: */

int bch2_reservation_validate(struct bch_fs *, struct bkey_s_c,
			      enum bch_validate_flags);
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

#define bch2_bkey_ops_reservation ((struct bkey_ops) {		\
	.key_validate	= bch2_reservation_validate,		\
	.val_to_text	= bch2_reservation_to_text,		\
	.key_merge	= bch2_reservation_merge,		\
	.trigger	= bch2_trigger_reservation,		\
	.min_val_size	= 8,					\
})

/* Extent checksum entries: */

bool bch2_can_narrow_extent_crcs(struct bkey_s_c,
				 struct bch_extent_crc_unpacked);
bool bch2_bkey_narrow_crcs(struct bkey_i *, struct bch_extent_crc_unpacked);
void bch2_extent_crc_append(struct bkey_i *,
			    struct bch_extent_crc_unpacked);

/* Generic code for keys with pointers: */

static inline bool bkey_is_btree_ptr(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_direct_data(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_inline_data(const struct bkey *k)
{
	return  k->type == KEY_TYPE_inline_data ||
		k->type == KEY_TYPE_indirect_inline_data;
}

static inline unsigned bkey_inline_data_offset(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_inline_data:
		return sizeof(struct bch_inline_data);
	case KEY_TYPE_indirect_inline_data:
		return sizeof(struct bch_indirect_inline_data);
	default:
		BUG();
	}
}

static inline unsigned bkey_inline_data_bytes(const struct bkey *k)
{
	return bkey_val_bytes(k) - bkey_inline_data_offset(k);
}

#define bkey_inline_data_p(_k)	(((void *) (_k).v) + bkey_inline_data_offset((_k).k))

static inline bool bkey_extent_is_data(const struct bkey *k)
{
	return  bkey_extent_is_direct_data(k) ||
		bkey_extent_is_inline_data(k) ||
		k->type == KEY_TYPE_reflink_p;
}

/*
 * Should extent be counted under inode->i_sectors?
 */
static inline bool bkey_extent_is_allocation(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reservation:
	case KEY_TYPE_reflink_p:
	case KEY_TYPE_reflink_v:
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
	case KEY_TYPE_error:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_unwritten(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->unwritten)
			return true;
	return false;
}

static inline bool bkey_extent_is_reservation(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation ||
		bkey_extent_is_unwritten(k);
}

static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(p, ptr)
		ret.data[ret.nr++] = ptr->dev;

	return ret;
}

static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(p, ptr)
		if (!ptr->cached)
			ret.data[ret.nr++] = ptr->dev;

	return ret;
}

static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(p, ptr)
		if (ptr->cached)
			ret.data[ret.nr++] = ptr->dev;

	return ret;
}

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
bool bch2_bkey_is_incompressible(struct bkey_s_c);
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);

unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
unsigned bch2_extent_ptr_desired_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);

const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c, unsigned);

static inline struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s k, unsigned dev)
{
	return (void *) bch2_bkey_has_device_c(k.s_c, dev);
}

bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);

void bch2_bkey_extent_entry_drop(struct bkey_i *, union bch_extent_entry *);

static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr ptr)
{
	struct bch_extent_ptr *dest;

	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s(k), ptr.dev));

	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);

		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
		dest = (struct bch_extent_ptr *)((void *) &k->v + bkey_val_bytes(&k->k));
		*dest = ptr;
		k->k.u64s++;
		break;
	default:
		BUG();
	}
}
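/*
 * Usage sketch (hypothetical values; a real caller takes dev, offset and gen
 * from the allocator):
 *
 *	struct bch_extent_ptr ptr = {
 *		.dev	= 0,
 *		.offset	= 2048,
 *		.gen	= 1,
 *	};
 *
 *	bch2_bkey_append_ptr(k, ptr);
 */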

void bch2_extent_ptr_decoded_append(struct bkey_i *,
				    struct extent_ptr_decoded *);
void bch2_bkey_drop_ptr_noerror(struct bkey_s, struct bch_extent_ptr *);
void bch2_bkey_drop_ptr(struct bkey_s, struct bch_extent_ptr *);

void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);
void bch2_bkey_drop_device(struct bkey_s, unsigned);

#define bch2_bkey_drop_ptrs_noerror(_k, _ptr, _cond)			\
do {									\
	__label__ _again;						\
	struct bkey_ptrs _ptrs;						\
_again:									\
	_ptrs = bch2_bkey_ptrs(_k);					\
									\
	bkey_for_each_ptr(_ptrs, _ptr)					\
		if (_cond) {						\
			bch2_bkey_drop_ptr_noerror(_k, _ptr);		\
			goto _again;					\
		}							\
} while (0)

#define bch2_bkey_drop_ptrs(_k, _ptr, _cond)				\
do {									\
	__label__ _again;						\
	struct bkey_ptrs _ptrs;						\
_again:									\
	_ptrs = bch2_bkey_ptrs(_k);					\
									\
	bkey_for_each_ptr(_ptrs, _ptr)					\
		if (_cond) {						\
			bch2_bkey_drop_ptr(_k, _ptr);			\
			goto _again;					\
		}							\
} while (0)
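/*
 * Dropping a pointer shifts the remaining entries down and shrinks the key,
 * invalidating the iterator; hence the goto, which restarts the scan from a
 * freshly computed bkey_ptrs after every drop. Usage sketch (hypothetical
 * condition):
 *
 *	bch2_bkey_drop_ptrs(k, ptr, ptr->cached);
 */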

bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
			   struct bch_extent_ptr, u64);
bool bch2_extents_match(struct bkey_s_c, struct bkey_s_c);
struct bch_extent_ptr *
bch2_extent_has_ptr(struct bkey_s_c, struct extent_ptr_decoded, struct bkey_s);

void bch2_extent_ptr_set_cached(struct bch_fs *, struct bch_io_opts *,
				struct bkey_s, struct bch_extent_ptr *);

bool bch2_extent_normalize_by_opts(struct bch_fs *, struct bch_io_opts *, struct bkey_s);
bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);

void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *, const struct bch_extent_ptr *);
void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
			    struct bkey_s_c);
int bch2_bkey_ptrs_validate(struct bch_fs *, struct bkey_s_c,
			    enum bch_validate_flags);
static inline bool bch2_extent_ptr_eq(struct bch_extent_ptr ptr1,
				      struct bch_extent_ptr ptr2)
{
	return (ptr1.cached	== ptr2.cached &&
		ptr1.unwritten	== ptr2.unwritten &&
		ptr1.offset	== ptr2.offset &&
		ptr1.dev	== ptr2.dev &&
		ptr1.gen	== ptr2.gen);
}

void bch2_ptr_swab(struct bkey_s);

const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c);
unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *, struct bkey_s_c,
				       unsigned, unsigned);
bool bch2_bkey_needs_rebalance(struct bch_fs *, struct bkey_s_c);
u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *, struct bkey_s_c);

int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bkey_i *,
				  struct bch_io_opts *);

/* Generic extent code: */

enum bch_extent_overlap {
	BCH_EXTENT_OVERLAP_ALL		= 0,
	BCH_EXTENT_OVERLAP_BACK		= 1,
	BCH_EXTENT_OVERLAP_FRONT	= 2,
	BCH_EXTENT_OVERLAP_MIDDLE	= 3,
};

/* Returns how k overlaps with m */
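/*
 * The two comparisons below form a two bit code: cmp1 is set when k ends
 * before m does, cmp2 when k starts after m does.
 *
 *	cmp1=0 cmp2=0 -> ALL:    k covers all of m
 *	cmp1=0 cmp2=1 -> BACK:   k overlaps the back of m
 *	cmp1=1 cmp2=0 -> FRONT:  k overlaps the front of m
 *	cmp1=1 cmp2=1 -> MIDDLE: k lies strictly inside m
 */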
static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
							  const struct bkey *m)
{
	int cmp1 = bkey_lt(k->p, m->p);
	int cmp2 = bkey_gt(bkey_start_pos(k), bkey_start_pos(m));

	return (cmp1 << 1) + cmp2;
}

int bch2_cut_front_s(struct bpos, struct bkey_s);
int bch2_cut_back_s(struct bpos, struct bkey_s);

static inline void bch2_cut_front(struct bpos where, struct bkey_i *k)
{
	bch2_cut_front_s(where, bkey_i_to_s(k));
}

static inline void bch2_cut_back(struct bpos where, struct bkey_i *k)
{
	bch2_cut_back_s(where, bkey_i_to_s(k));
}

/**
 * bch2_key_resize - adjust size of @k
 * @k:		key being resized
 * @new_size:	new size, in sectors
 *
 * bkey_start_offset(k) is preserved; this modifies where the extent ends.
 */
static inline void bch2_key_resize(struct bkey *k, unsigned new_size)
{
	k->p.offset -= k->size;
	k->p.offset += new_size;
	k->size = new_size;
}
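/*
 * Worked example (hypothetical values): an extent with p.offset == 128 and
 * size == 64 spans sectors [64, 128). After bch2_key_resize(k, 32) it has
 * p.offset == 96 and size == 32, spanning [64, 96): the start is unchanged,
 * only the end moved.
 */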

#endif /* _BCACHEFS_EXTENTS_H */