xref: /linux/fs/bcachefs/extents.h (revision 031fba65fc202abf1f193e321be7a2c274fd88ba)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_EXTENTS_H
3 #define _BCACHEFS_EXTENTS_H
4 
5 #include "bcachefs.h"
6 #include "bkey.h"
7 #include "extents_types.h"
8 
9 struct bch_fs;
10 struct btree_trans;
11 enum bkey_invalid_flags;
12 
13 /* extent entries: */
14 
/* Past-the-end entry of @_e's value, for use as an iteration bound */
#define extent_entry_last(_e)						\
	((typeof(&(_e).v->start[0])) bkey_val_end(_e))
17 
/*
 * Cast an extent entry to a bch_extent_ptr, preserving constness: a
 * const entry pointer yields a const ptr pointer.  Debug builds assert
 * the entry really is a ptr entry.
 */
#define entry_to_ptr(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_ptr(_entry));		\
									\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const struct bch_extent_ptr *) (_entry),		\
		(struct bch_extent_ptr *) (_entry));			\
})
27 
/*
 * Downcast a crc / ptr / stripe_ptr pointer to the generic
 * union bch_extent_entry, preserving const.  Compile-time error for any
 * other pointer type.
 */
#define to_entry(_entry)						\
({									\
	BUILD_BUG_ON(!type_is(_entry, union bch_extent_crc *) &&	\
		     !type_is(_entry, struct bch_extent_ptr *) &&	\
		     !type_is(_entry, struct bch_extent_stripe_ptr *));	\
									\
	__builtin_choose_expr(						\
		(type_is_exact(_entry, const union bch_extent_crc *) ||	\
		 type_is_exact(_entry, const struct bch_extent_ptr *) ||\
		 type_is_exact(_entry, const struct bch_extent_stripe_ptr *)),\
		(const union bch_extent_entry *) (_entry),		\
		(union bch_extent_entry *) (_entry));			\
})
42 
/* Advance to the next entry; entries are variable-sized, see extent_entry_bytes() */
#define extent_entry_next(_entry)					\
	((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))
45 
46 static inline unsigned
47 __extent_entry_type(const union bch_extent_entry *e)
48 {
49 	return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;
50 }
51 
/*
 * Like __extent_entry_type(), but assumes the entry is valid: a zeroed
 * e->type would make __ffs() undefined here.
 * NOTE(review): the EBUG_ON only fires on debug builds — confirm all
 * callers pass entries with a nonzero type.
 */
static inline enum bch_extent_entry_type
extent_entry_type(const union bch_extent_entry *e)
{
	int ret = __ffs(e->type);

	EBUG_ON(ret < 0 || ret >= BCH_EXTENT_ENTRY_MAX);

	return ret;
}
61 
/*
 * Size in bytes of @entry, derived from its type via the
 * BCH_EXTENT_ENTRY_TYPES() x-macro table; unknown types are a bug.
 */
static inline size_t extent_entry_bytes(const union bch_extent_entry *entry)
{
	switch (extent_entry_type(entry)) {
#define x(f, n)						\
	case BCH_EXTENT_ENTRY_##f:			\
		return sizeof(struct bch_extent_##f);
	BCH_EXTENT_ENTRY_TYPES()
#undef x
	default:
		BUG();
	}
}
74 
75 static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
76 {
77 	return extent_entry_bytes(entry) / sizeof(u64);
78 }
79 
/*
 * Insert @new at position @dst within @k's value: shift the tail
 * [dst, end) up by the new entry's size, grow the key, then copy @new
 * into the gap.  No bounds check here — caller must ensure the key has
 * room for the extra u64s.
 */
static inline void __extent_entry_insert(struct bkey_i *k,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));

	memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
			      dst, (u64 *) end - (u64 *) dst);
	k->k.u64s += extent_entry_u64s(new);
	memcpy_u64s_small(dst, new, extent_entry_u64s(new));
}
91 
92 static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
93 {
94 	return extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
95 }
96 
97 static inline bool extent_entry_is_stripe_ptr(const union bch_extent_entry *e)
98 {
99 	return extent_entry_type(e) == BCH_EXTENT_ENTRY_stripe_ptr;
100 }
101 
102 static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
103 {
104 	switch (extent_entry_type(e)) {
105 	case BCH_EXTENT_ENTRY_crc32:
106 	case BCH_EXTENT_ENTRY_crc64:
107 	case BCH_EXTENT_ENTRY_crc128:
108 		return true;
109 	default:
110 		return false;
111 	}
112 }
113 
/*
 * Accessor union over the three packed on-disk crc entry formats;
 * ->type overlays the leading byte so the variant can be identified
 * before picking a member.
 */
union bch_extent_crc {
	u8				type;
	struct bch_extent_crc32		crc32;
	struct bch_extent_crc64		crc64;
	struct bch_extent_crc128	crc128;
};
120 
/* Cast an extent entry to union bch_extent_crc, preserving const */
#define __entry_to_crc(_entry)						\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const union bch_extent_crc *) (_entry),		\
		(union bch_extent_crc *) (_entry))

/* As __entry_to_crc(), but debug builds assert it really is a crc entry */
#define entry_to_crc(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_crc(_entry));		\
									\
	__entry_to_crc(_entry);						\
})
133 
/*
 * Unpack whichever on-disk crc entry format @crc uses into the common
 * in-memory representation.  A NULL @crc means "no checksum entry":
 * sizes fall back to the key's size, csum/compression stay zero.
 */
static inline struct bch_extent_crc_unpacked
bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
{
	/*
	 * Fields common to all three packed formats; on-disk sizes are
	 * stored biased by -1, hence the "+ 1" on unpack.
	 */
#define common_fields(_crc)						\
		.csum_type		= _crc.csum_type,		\
		.compression_type	= _crc.compression_type,	\
		.compressed_size	= _crc._compressed_size + 1,	\
		.uncompressed_size	= _crc._uncompressed_size + 1,	\
		.offset			= _crc.offset,			\
		.live_size		= k->size

	if (!crc)
		return (struct bch_extent_crc_unpacked) {
			.compressed_size	= k->size,
			.uncompressed_size	= k->size,
			.live_size		= k->size,
		};

	switch (extent_entry_type(to_entry(crc))) {
	case BCH_EXTENT_ENTRY_crc32: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc32),
		};

		/* crc32 entries carry only 32 checksum bits */
		*((__le32 *) &ret.csum.lo) = (__le32 __force) crc->crc32.csum;
		return ret;
	}
	case BCH_EXTENT_ENTRY_crc64: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc64),
			.nonce			= crc->crc64.nonce,
			.csum.lo		= (__force __le64) crc->crc64.csum_lo,
		};

		/* crc64 entries split the checksum: 64 low bits + 16 high bits */
		*((__le16 *) &ret.csum.hi) = (__le16 __force) crc->crc64.csum_hi;

		return ret;
	}
	case BCH_EXTENT_ENTRY_crc128: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc128),
			.nonce			= crc->crc128.nonce,
			.csum			= crc->crc128.csum,
		};

		return ret;
	}
	default:
		BUG();
	}
#undef common_fields
}
186 
187 static inline bool crc_is_compressed(struct bch_extent_crc_unpacked crc)
188 {
189 	return (crc.compression_type != BCH_COMPRESSION_TYPE_none &&
190 		crc.compression_type != BCH_COMPRESSION_TYPE_incompressible);
191 }
192 
193 /* bkey_ptrs: generically over any key type that has ptrs */
194 
/* [start, end) range of extent entries within a key's value, const view */
struct bkey_ptrs_c {
	const union bch_extent_entry	*start;
	const union bch_extent_entry	*end;
};

/* As bkey_ptrs_c, but the entries may be modified */
struct bkey_ptrs {
	union bch_extent_entry	*start;
	union bch_extent_entry	*end;
};
204 
/*
 * Return the extent entry range for any key type that carries pointers;
 * key types without pointers yield an empty { NULL, NULL } range, so
 * iterating the result is always safe.
 */
static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr: {
		struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);

		return (struct bkey_ptrs_c) {
			to_entry(&e.v->start[0]),
			to_entry(extent_entry_last(e))
		};
	}
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);

		return (struct bkey_ptrs_c) {
			e.v->start,
			extent_entry_last(e)
		};
	}
	case KEY_TYPE_stripe: {
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

		/* stripes store a fixed array of nr_blocks bare ptrs */
		return (struct bkey_ptrs_c) {
			to_entry(&s.v->ptrs[0]),
			to_entry(&s.v->ptrs[s.v->nr_blocks]),
		};
	}
	case KEY_TYPE_reflink_v: {
		struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);

		return (struct bkey_ptrs_c) {
			r.v->start,
			bkey_val_end(r),
		};
	}
	case KEY_TYPE_btree_ptr_v2: {
		struct bkey_s_c_btree_ptr_v2 e = bkey_s_c_to_btree_ptr_v2(k);

		return (struct bkey_ptrs_c) {
			to_entry(&e.v->start[0]),
			to_entry(extent_entry_last(e))
		};
	}
	default:
		return (struct bkey_ptrs_c) { NULL, NULL };
	}
}
252 
253 static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
254 {
255 	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k.s_c);
256 
257 	return (struct bkey_ptrs) {
258 		(void *) p.start,
259 		(void *) p.end
260 	};
261 }
262 
/* Iterate extent entries in [_start, _end), stepping by each entry's size */
#define __bkey_extent_entry_for_each_from(_start, _end, _entry)		\
	for ((_entry) = (_start);					\
	     (_entry) < (_end);						\
	     (_entry) = extent_entry_next(_entry))

/*
 * Next ptr entry at or after @_ptr, or NULL if none before @_end.
 * Statement expression so it can be used as a loop condition.
 */
#define __bkey_ptr_next(_ptr, _end)					\
({									\
	typeof(_end) _entry;						\
									\
	__bkey_extent_entry_for_each_from(to_entry(_ptr), _end, _entry)	\
		if (extent_entry_is_ptr(_entry))			\
			break;						\
									\
	_entry < (_end) ? entry_to_ptr(_entry) : NULL;			\
})
278 
/* As __bkey_extent_entry_for_each_from(), bounded by a bkey_ptrs range */
#define bkey_extent_entry_for_each_from(_p, _entry, _start)		\
	__bkey_extent_entry_for_each_from(_start, (_p).end, _entry)

/* Iterate every extent entry of @_p */
#define bkey_extent_entry_for_each(_p, _entry)				\
	bkey_extent_entry_for_each_from(_p, _entry, _p.start)

/* Iterate only the ptr entries in [_start, _end) */
#define __bkey_for_each_ptr(_start, _end, _ptr)				\
	for ((_ptr) = (_start);						\
	     ((_ptr) = __bkey_ptr_next(_ptr, _end));			\
	     (_ptr)++)

/* Next ptr entry of @_p at or after @_ptr, or NULL */
#define bkey_ptr_next(_p, _ptr)						\
	__bkey_ptr_next(_ptr, (_p).end)

/* Iterate every ptr entry of @_p */
#define bkey_for_each_ptr(_p, _ptr)					\
	__bkey_for_each_ptr(&(_p).start->ptr, (_p).end, _ptr)
295 
/*
 * Advance @_entry to the next ptr entry, accumulating the crc and
 * stripe_ptr entries seen on the way into @_ptr (struct
 * extent_ptr_decoded).  Evaluates true while a ptr was found.
 */
#define __bkey_ptr_next_decode(_k, _end, _ptr, _entry)			\
({									\
	__label__ out;							\
									\
	(_ptr).idx	= 0;						\
	(_ptr).has_ec	= false;					\
									\
	__bkey_extent_entry_for_each_from(_entry, _end, _entry)		\
		switch (extent_entry_type(_entry)) {			\
		case BCH_EXTENT_ENTRY_ptr:				\
			(_ptr).ptr		= _entry->ptr;		\
			goto out;					\
		case BCH_EXTENT_ENTRY_crc32:				\
		case BCH_EXTENT_ENTRY_crc64:				\
		case BCH_EXTENT_ENTRY_crc128:				\
			(_ptr).crc = bch2_extent_crc_unpack(_k,		\
					entry_to_crc(_entry));		\
			break;						\
		case BCH_EXTENT_ENTRY_stripe_ptr:			\
			(_ptr).ec = _entry->stripe_ptr;			\
			(_ptr).has_ec	= true;				\
			break;						\
		default:						\
			/* nothing */					\
			break;						\
		}							\
out:									\
	_entry < (_end);						\
})

/* Iterate decoded pointers in [_start, _end); crc starts from "no crc" */
#define __bkey_for_each_ptr_decode(_k, _start, _end, _ptr, _entry)	\
	for ((_ptr).crc = bch2_extent_crc_unpack(_k, NULL),		\
	     (_entry) = _start;						\
	     __bkey_ptr_next_decode(_k, _end, _ptr, _entry);		\
	     (_entry) = extent_entry_next(_entry))

/* Iterate decoded pointers of a bkey_ptrs range */
#define bkey_for_each_ptr_decode(_k, _p, _ptr, _entry)			\
	__bkey_for_each_ptr_decode(_k, (_p).start, (_p).end,		\
				   _ptr, _entry)
335 
/*
 * Advance @_iter to the next crc entry, unpacking it into @_crc.
 * Evaluates true while one was found before @_end.
 */
#define bkey_crc_next(_k, _start, _end, _crc, _iter)			\
({									\
	__bkey_extent_entry_for_each_from(_iter, _end, _iter)		\
		if (extent_entry_is_crc(_iter)) {			\
			(_crc) = bch2_extent_crc_unpack(_k,		\
						entry_to_crc(_iter));	\
			break;						\
		}							\
									\
	(_iter) < (_end);						\
})

/* Iterate unpacked crc entries in [_start, _end) */
#define __bkey_for_each_crc(_k, _start, _end, _crc, _iter)		\
	for ((_crc) = bch2_extent_crc_unpack(_k, NULL),			\
	     (_iter) = (_start);					\
	     bkey_crc_next(_k, _start, _end, _crc, _iter);		\
	     (_iter) = extent_entry_next(_iter))

/* Iterate unpacked crc entries of a bkey_ptrs range */
#define bkey_for_each_crc(_k, _p, _crc, _iter)				\
	__bkey_for_each_crc(_k, (_p).start, (_p).end, _crc, _iter)
356 
357 /* Iterate over pointers in KEY_TYPE_extent: */
358 
/* Iterate entries of a KEY_TYPE_extent, starting from @_start */
#define extent_for_each_entry_from(_e, _entry, _start)			\
	__bkey_extent_entry_for_each_from(_start,			\
				extent_entry_last(_e), _entry)

/* Iterate every entry of a KEY_TYPE_extent */
#define extent_for_each_entry(_e, _entry)				\
	extent_for_each_entry_from(_e, _entry, (_e).v->start)

/* Next ptr entry of extent @_e at or after @_ptr, or NULL */
#define extent_ptr_next(_e, _ptr)					\
	__bkey_ptr_next(_ptr, extent_entry_last(_e))

/* Iterate every ptr entry of extent @_e */
#define extent_for_each_ptr(_e, _ptr)					\
	__bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)

/* Iterate decoded pointers of extent @_e */
#define extent_for_each_ptr_decode(_e, _ptr, _entry)			\
	__bkey_for_each_ptr_decode((_e).k, (_e).v->start,		\
				   extent_entry_last(_e), _ptr, _entry)
375 
376 /* utility code common to all keys with pointers: */
377 
378 void bch2_mark_io_failure(struct bch_io_failures *,
379 			  struct extent_ptr_decoded *);
380 int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
381 			       struct bch_io_failures *,
382 			       struct extent_ptr_decoded *);
383 
384 /* KEY_TYPE_btree_ptr: */
385 
386 int bch2_btree_ptr_invalid(const struct bch_fs *, struct bkey_s_c,
387 			   enum bkey_invalid_flags, struct printbuf *);
388 void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
389 			    struct bkey_s_c);
390 
391 int bch2_btree_ptr_v2_invalid(const struct bch_fs *, struct bkey_s_c,
392 			      enum bkey_invalid_flags, struct printbuf *);
393 void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
394 void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
395 			      int, struct bkey_s);
396 
/* Operations table for KEY_TYPE_btree_ptr */
#define bch2_bkey_ops_btree_ptr ((struct bkey_ops) {		\
	.key_invalid	= bch2_btree_ptr_invalid,		\
	.val_to_text	= bch2_btree_ptr_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.trans_trigger	= bch2_trans_mark_extent,		\
	.atomic_trigger	= bch2_mark_extent,			\
})

/* Operations table for KEY_TYPE_btree_ptr_v2; adds compat + min size */
#define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) {		\
	.key_invalid	= bch2_btree_ptr_v2_invalid,		\
	.val_to_text	= bch2_btree_ptr_v2_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.compat		= bch2_btree_ptr_v2_compat,		\
	.trans_trigger	= bch2_trans_mark_extent,		\
	.atomic_trigger	= bch2_mark_extent,			\
	.min_val_size	= 40,					\
})
414 
415 /* KEY_TYPE_extent: */
416 
417 bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
418 
/* Operations table for KEY_TYPE_extent */
#define bch2_bkey_ops_extent ((struct bkey_ops) {		\
	.key_invalid	= bch2_bkey_ptrs_invalid,		\
	.val_to_text	= bch2_bkey_ptrs_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.key_normalize	= bch2_extent_normalize,		\
	.key_merge	= bch2_extent_merge,			\
	.trans_trigger	= bch2_trans_mark_extent,		\
	.atomic_trigger	= bch2_mark_extent,			\
})
428 
429 /* KEY_TYPE_reservation: */
430 
431 int bch2_reservation_invalid(const struct bch_fs *, struct bkey_s_c,
432 			     enum bkey_invalid_flags, struct printbuf *);
433 void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
434 bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
435 
/* Operations table for KEY_TYPE_reservation */
#define bch2_bkey_ops_reservation ((struct bkey_ops) {		\
	.key_invalid	= bch2_reservation_invalid,		\
	.val_to_text	= bch2_reservation_to_text,		\
	.key_merge	= bch2_reservation_merge,		\
	.trans_trigger	= bch2_trans_mark_reservation,		\
	.atomic_trigger	= bch2_mark_reservation,		\
	.min_val_size	= 8,					\
})
444 
445 /* Extent checksum entries: */
446 
447 bool bch2_can_narrow_extent_crcs(struct bkey_s_c,
448 				 struct bch_extent_crc_unpacked);
449 bool bch2_bkey_narrow_crcs(struct bkey_i *, struct bch_extent_crc_unpacked);
450 void bch2_extent_crc_append(struct bkey_i *,
451 			    struct bch_extent_crc_unpacked);
452 
453 /* Generic code for keys with pointers: */
454 
455 static inline bool bkey_is_btree_ptr(const struct bkey *k)
456 {
457 	switch (k->type) {
458 	case KEY_TYPE_btree_ptr:
459 	case KEY_TYPE_btree_ptr_v2:
460 		return true;
461 	default:
462 		return false;
463 	}
464 }
465 
466 static inline bool bkey_extent_is_direct_data(const struct bkey *k)
467 {
468 	switch (k->type) {
469 	case KEY_TYPE_btree_ptr:
470 	case KEY_TYPE_btree_ptr_v2:
471 	case KEY_TYPE_extent:
472 	case KEY_TYPE_reflink_v:
473 		return true;
474 	default:
475 		return false;
476 	}
477 }
478 
479 static inline bool bkey_extent_is_inline_data(const struct bkey *k)
480 {
481 	return  k->type == KEY_TYPE_inline_data ||
482 		k->type == KEY_TYPE_indirect_inline_data;
483 }
484 
485 static inline unsigned bkey_inline_data_offset(const struct bkey *k)
486 {
487 	switch (k->type) {
488 	case KEY_TYPE_inline_data:
489 		return sizeof(struct bch_inline_data);
490 	case KEY_TYPE_indirect_inline_data:
491 		return sizeof(struct bch_indirect_inline_data);
492 	default:
493 		BUG();
494 	}
495 }
496 
/* Number of payload bytes in an inline-data key */
static inline unsigned bkey_inline_data_bytes(const struct bkey *k)
{
	unsigned off = bkey_inline_data_offset(k);

	return bkey_val_bytes(k) - off;
}
501 
/* Pointer to the inline payload of an inline-data key */
#define bkey_inline_data_p(_k)	(((void *) (_k).v) + bkey_inline_data_offset((_k).k))
503 
504 static inline bool bkey_extent_is_data(const struct bkey *k)
505 {
506 	return  bkey_extent_is_direct_data(k) ||
507 		bkey_extent_is_inline_data(k) ||
508 		k->type == KEY_TYPE_reflink_p;
509 }
510 
511 /*
512  * Should extent be counted under inode->i_sectors?
513  */
514 static inline bool bkey_extent_is_allocation(const struct bkey *k)
515 {
516 	switch (k->type) {
517 	case KEY_TYPE_extent:
518 	case KEY_TYPE_reservation:
519 	case KEY_TYPE_reflink_p:
520 	case KEY_TYPE_reflink_v:
521 	case KEY_TYPE_inline_data:
522 	case KEY_TYPE_indirect_inline_data:
523 	case KEY_TYPE_error:
524 		return true;
525 	default:
526 		return false;
527 	}
528 }
529 
/* True if any pointer in @k has its unwritten flag set */
static inline bool bkey_extent_is_unwritten(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->unwritten)
			return true;
	return false;
}
540 
541 static inline bool bkey_extent_is_reservation(struct bkey_s_c k)
542 {
543 	return k.k->type == KEY_TYPE_reservation ||
544 		bkey_extent_is_unwritten(k);
545 }
546 
547 static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
548 {
549 	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
550 	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
551 	const struct bch_extent_ptr *ptr;
552 
553 	bkey_for_each_ptr(p, ptr)
554 		ret.devs[ret.nr++] = ptr->dev;
555 
556 	return ret;
557 }
558 
559 static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
560 {
561 	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
562 	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
563 	const struct bch_extent_ptr *ptr;
564 
565 	bkey_for_each_ptr(p, ptr)
566 		if (!ptr->cached)
567 			ret.devs[ret.nr++] = ptr->dev;
568 
569 	return ret;
570 }
571 
572 static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
573 {
574 	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
575 	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
576 	const struct bch_extent_ptr *ptr;
577 
578 	bkey_for_each_ptr(p, ptr)
579 		if (ptr->cached)
580 			ret.devs[ret.nr++] = ptr->dev;
581 
582 	return ret;
583 }
584 
/*
 * Classify what kind of data @ptr — which must belong to @k — points
 * to.  For stripe keys, the final nr_redundant blocks hold parity; the
 * rest hold user data.
 */
static inline unsigned bch2_bkey_ptr_data_type(struct bkey_s_c k, const struct bch_extent_ptr *ptr)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		return BCH_DATA_btree;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return BCH_DATA_user;
	case KEY_TYPE_stripe: {
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

		/* @ptr must lie within this stripe's ptr array */
		BUG_ON(ptr < s.v->ptrs ||
		       ptr >= s.v->ptrs + s.v->nr_blocks);

		return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
			? BCH_DATA_parity
			: BCH_DATA_user;
	}
	default:
		BUG();
	}
}
608 
609 unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
610 unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
611 unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
612 bool bch2_bkey_is_incompressible(struct bkey_s_c);
613 unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
614 
615 unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
616 unsigned bch2_extent_ptr_desired_durability(struct bch_fs *, struct extent_ptr_decoded *);
617 unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
618 unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
619 
620 void bch2_bkey_drop_device(struct bkey_s, unsigned);
621 void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);
622 
623 const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c, unsigned);
624 
625 static inline struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s k, unsigned dev)
626 {
627 	return (void *) bch2_bkey_has_device_c(k.s_c, dev);
628 }
629 
630 bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);
631 
632 void bch2_bkey_extent_entry_drop(struct bkey_i *, union bch_extent_entry *);
633 
/*
 * Append a device pointer entry to the end of @k's value in place.
 * Only valid for key types whose trailing entries are bare ptrs;
 * anything else is a bug.
 */
static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr ptr)
{
	struct bch_extent_ptr *dest;

	/* Two pointers to the same device would be a bug */
	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s(k), ptr.dev));

	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);

		/* tag the entry as a ptr entry before writing it out */
		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
		dest = (struct bch_extent_ptr *)((void *) &k->v + bkey_val_bytes(&k->k));
		*dest = ptr;
		k->k.u64s++;
		break;
	default:
		BUG();
	}
}
655 
656 void bch2_extent_ptr_decoded_append(struct bkey_i *,
657 				    struct extent_ptr_decoded *);
658 union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s,
659 						   struct bch_extent_ptr *);
660 union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
661 					   struct bch_extent_ptr *);
662 
/*
 * Drop every pointer of @_k for which @_cond evaluates true.
 * bch2_bkey_drop_ptr() may shift the remaining entries, so the ptrs
 * range is re-fetched after each drop and iteration resumes from the
 * entry it returns.
 */
#define bch2_bkey_drop_ptrs(_k, _ptr, _cond)				\
do {									\
	struct bkey_ptrs _ptrs = bch2_bkey_ptrs(_k);			\
									\
	_ptr = &_ptrs.start->ptr;					\
									\
	while ((_ptr = bkey_ptr_next(_ptrs, _ptr))) {			\
		if (_cond) {						\
			_ptr = (void *) bch2_bkey_drop_ptr(_k, _ptr);	\
			_ptrs = bch2_bkey_ptrs(_k);			\
			continue;					\
		}							\
									\
		(_ptr)++;						\
	}								\
} while (0)
679 
680 bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
681 			   struct bch_extent_ptr, u64);
682 bool bch2_extents_match(struct bkey_s_c, struct bkey_s_c);
683 struct bch_extent_ptr *
684 bch2_extent_has_ptr(struct bkey_s_c, struct extent_ptr_decoded, struct bkey_s);
685 
686 void bch2_extent_ptr_set_cached(struct bkey_s, struct bch_extent_ptr *);
687 
688 bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
689 void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
690 			    struct bkey_s_c);
691 int bch2_bkey_ptrs_invalid(const struct bch_fs *, struct bkey_s_c,
692 			   enum bkey_invalid_flags, struct printbuf *);
693 
694 void bch2_ptr_swab(struct bkey_s);
695 
696 /* Generic extent code: */
697 
/*
 * How one extent overlaps another; values are chosen so
 * bch2_extent_overlap() can compute them from two position
 * comparisons packed into two bits.
 */
enum bch_extent_overlap {
	BCH_EXTENT_OVERLAP_ALL		= 0,
	BCH_EXTENT_OVERLAP_BACK		= 1,
	BCH_EXTENT_OVERLAP_FRONT	= 2,
	BCH_EXTENT_OVERLAP_MIDDLE	= 3,
};
704 
/*
 * Returns how k overlaps with m: the two 0/1 comparison results are
 * packed into two bits matching enum bch_extent_overlap.
 */
static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
							  const struct bkey *m)
{
	int cmp1 = bkey_lt(k->p, m->p);
	int cmp2 = bkey_gt(bkey_start_pos(k), bkey_start_pos(m));

	return (cmp1 << 1) + cmp2;
}
714 
715 int bch2_cut_front_s(struct bpos, struct bkey_s);
716 int bch2_cut_back_s(struct bpos, struct bkey_s);
717 
718 static inline void bch2_cut_front(struct bpos where, struct bkey_i *k)
719 {
720 	bch2_cut_front_s(where, bkey_i_to_s(k));
721 }
722 
723 static inline void bch2_cut_back(struct bpos where, struct bkey_i *k)
724 {
725 	bch2_cut_back_s(where, bkey_i_to_s(k));
726 }
727 
/**
 * bch2_key_resize - adjust size of @k
 *
 * bkey_start_offset(k) will be preserved, modifies where the extent ends
 * (the kernel-doc previously named this bch_key_resize; fixed to match
 * the actual function name)
 */
static inline void bch2_key_resize(struct bkey *k, unsigned new_size)
{
	k->p.offset -= k->size;
	k->p.offset += new_size;
	k->size = new_size;
}
739 
740 /*
741  * In extent_sort_fix_overlapping(), insert_fixup_extent(),
742  * extent_merge_inline() - we're modifying keys in place that are packed. To do
743  * that we have to unpack the key, modify the unpacked key - then this
744  * copies/repacks the unpacked to the original as necessary.
745  */
746 static inline void extent_save(struct btree *b, struct bkey_packed *dst,
747 			       struct bkey *src)
748 {
749 	struct bkey_format *f = &b->format;
750 	struct bkey_i *dst_unpacked;
751 
752 	if ((dst_unpacked = packed_to_bkey(dst)))
753 		dst_unpacked->k = *src;
754 	else
755 		BUG_ON(!bch2_bkey_pack_key(dst, src, f));
756 }
757 
758 #endif /* _BCACHEFS_EXTENTS_H */
759