/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_EXTENTS_H
#define _BCACHEFS_EXTENTS_H

#include "bcachefs.h"
#include "bkey.h"
#include "extents_types.h"

struct bch_fs;
struct btree_trans;

/* extent entries: */

#define extent_entry_last(_e)						\
	((typeof(&(_e).v->start[0])) bkey_val_end(_e))

#define entry_to_ptr(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_ptr(_entry));		\
									\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const struct bch_extent_ptr *) (_entry),		\
		(struct bch_extent_ptr *) (_entry));			\
})

/* downcast, preserves const */
#define to_entry(_entry)						\
({									\
	BUILD_BUG_ON(!type_is(_entry, union bch_extent_crc *) &&	\
		     !type_is(_entry, struct bch_extent_ptr *) &&	\
		     !type_is(_entry, struct bch_extent_stripe_ptr *));	\
									\
	__builtin_choose_expr(						\
		(type_is_exact(_entry, const union bch_extent_crc *) ||	\
		 type_is_exact(_entry, const struct bch_extent_ptr *) ||\
		 type_is_exact(_entry, const struct bch_extent_stripe_ptr *)),\
		(const union bch_extent_entry *) (_entry),		\
		(union bch_extent_entry *) (_entry));			\
})

#define extent_entry_next(_entry)					\
	((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))

#define extent_entry_next_safe(_entry, _end)				\
	(likely(__extent_entry_type(_entry) < BCH_EXTENT_ENTRY_MAX)	\
	 ? extent_entry_next(_entry)					\
	 : _end)

static inline unsigned
__extent_entry_type(const union bch_extent_entry *e)
{
	return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;
}

static inline enum bch_extent_entry_type
extent_entry_type(const union bch_extent_entry *e)
{
	int ret = __ffs(e->type);

	EBUG_ON(ret < 0 || ret >= BCH_EXTENT_ENTRY_MAX);

	return ret;
}
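
/*
 * Illustrative sketch (not part of the upstream header): entry->type is a
 * one-hot bitmask, so __ffs() recovers the enum index, and a zeroed type
 * field decodes to BCH_EXTENT_ENTRY_MAX, which the _safe iterators treat
 * as "stop here". Assuming a little-endian 64-bit build:
 *
 *	union bch_extent_entry e = { .type = 1UL << BCH_EXTENT_ENTRY_crc32 };
 *
 *	BUG_ON(extent_entry_type(&e) != BCH_EXTENT_ENTRY_crc32);
 */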

static inline size_t extent_entry_bytes(const union bch_extent_entry *entry)
{
	switch (extent_entry_type(entry)) {
#define x(f, n)						\
	case BCH_EXTENT_ENTRY_##f:			\
		return sizeof(struct bch_extent_##f);
	BCH_EXTENT_ENTRY_TYPES()
#undef x
	default:
		BUG();
	}
}

static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
{
	return extent_entry_bytes(entry) / sizeof(u64);
}

static inline void __extent_entry_insert(struct bkey_i *k,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));

	memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
			      dst, (u64 *) end - (u64 *) dst);
	k->k.u64s += extent_entry_u64s(new);
	memcpy_u64s_small(dst, new, extent_entry_u64s(new));
}

static inline void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
{
	union bch_extent_entry *next = extent_entry_next(entry);

	/* stripes have ptrs, but their layout doesn't work with this code */
	BUG_ON(k.k->type == KEY_TYPE_stripe);

	memmove_u64s_down(entry, next,
			  (u64 *) bkey_val_end(k) - (u64 *) next);
	k.k->u64s -= (u64 *) next - (u64 *) entry;
}

static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
{
	return __extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
}

static inline bool extent_entry_is_stripe_ptr(const union bch_extent_entry *e)
{
	return __extent_entry_type(e) == BCH_EXTENT_ENTRY_stripe_ptr;
}

static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
{
	switch (__extent_entry_type(e)) {
	case BCH_EXTENT_ENTRY_crc32:
	case BCH_EXTENT_ENTRY_crc64:
	case BCH_EXTENT_ENTRY_crc128:
		return true;
	default:
		return false;
	}
}

union bch_extent_crc {
	u8				type;
	struct bch_extent_crc32		crc32;
	struct bch_extent_crc64		crc64;
	struct bch_extent_crc128	crc128;
};

#define __entry_to_crc(_entry)						\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const union bch_extent_crc *) (_entry),		\
		(union bch_extent_crc *) (_entry))

#define entry_to_crc(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_crc(_entry));		\
									\
	__entry_to_crc(_entry);						\
})

static inline struct bch_extent_crc_unpacked
bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
{
#define common_fields(_crc)						\
		.csum_type		= _crc.csum_type,		\
		.compression_type	= _crc.compression_type,	\
		.compressed_size	= _crc._compressed_size + 1,	\
		.uncompressed_size	= _crc._uncompressed_size + 1,	\
		.offset			= _crc.offset,			\
		.live_size		= k->size

	if (!crc)
		return (struct bch_extent_crc_unpacked) {
			.compressed_size	= k->size,
			.uncompressed_size	= k->size,
			.live_size		= k->size,
		};

	switch (extent_entry_type(to_entry(crc))) {
	case BCH_EXTENT_ENTRY_crc32: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc32),
		};

		*((__le32 *) &ret.csum.lo) = (__le32 __force) crc->crc32.csum;
		return ret;
	}
	case BCH_EXTENT_ENTRY_crc64: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc64),
			.nonce			= crc->crc64.nonce,
			.csum.lo		= (__force __le64) crc->crc64.csum_lo,
		};

		*((__le16 *) &ret.csum.hi) = (__le16 __force) crc->crc64.csum_hi;

		return ret;
	}
	case BCH_EXTENT_ENTRY_crc128: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc128),
			.nonce			= crc->crc128.nonce,
			.csum			= crc->crc128.csum,
		};

		return ret;
	}
	default:
		BUG();
	}
#undef common_fields
}
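
/*
 * Usage sketch (illustrative, not from the original header): the packed
 * structs store sizes biased by one (hence _compressed_size + 1 above), so
 * the unpacked form carries real sector counts:
 *
 *	struct bch_extent_crc_unpacked u =
 *		bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
 *
 *	if (crc_is_compressed(u))
 *		pr_debug("%u sectors compressed to %u\n",
 *			 u.uncompressed_size, u.compressed_size);
 */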

static inline bool crc_is_compressed(struct bch_extent_crc_unpacked crc)
{
	return (crc.compression_type != BCH_COMPRESSION_TYPE_none &&
		crc.compression_type != BCH_COMPRESSION_TYPE_incompressible);
}

static inline bool crc_is_encoded(struct bch_extent_crc_unpacked crc)
{
	return crc.csum_type != BCH_CSUM_none || crc_is_compressed(crc);
}

void bch2_extent_crc_unpacked_to_text(struct printbuf *, struct bch_extent_crc_unpacked *);

/* bkey_ptrs: generically over any key type that has ptrs */

struct bkey_ptrs_c {
	const union bch_extent_entry	*start;
	const union bch_extent_entry	*end;
};

struct bkey_ptrs {
	union bch_extent_entry	*start;
	union bch_extent_entry	*end;
};

static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr: {
		struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);

		return (struct bkey_ptrs_c) {
			to_entry(&e.v->start[0]),
			to_entry(extent_entry_last(e))
		};
	}
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);

		return (struct bkey_ptrs_c) {
			e.v->start,
			extent_entry_last(e)
		};
	}
	case KEY_TYPE_stripe: {
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

		return (struct bkey_ptrs_c) {
			to_entry(&s.v->ptrs[0]),
			to_entry(&s.v->ptrs[s.v->nr_blocks]),
		};
	}
	case KEY_TYPE_reflink_v: {
		struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);

		return (struct bkey_ptrs_c) {
			r.v->start,
			bkey_val_end(r),
		};
	}
	case KEY_TYPE_btree_ptr_v2: {
		struct bkey_s_c_btree_ptr_v2 e = bkey_s_c_to_btree_ptr_v2(k);

		return (struct bkey_ptrs_c) {
			to_entry(&e.v->start[0]),
			to_entry(extent_entry_last(e))
		};
	}
	default:
		return (struct bkey_ptrs_c) { NULL, NULL };
	}
}

static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
{
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k.s_c);

	return (struct bkey_ptrs) {
		(void *) p.start,
		(void *) p.end
	};
}

#define __bkey_extent_entry_for_each_from(_start, _end, _entry)		\
	for ((_entry) = (_start);					\
	     (_entry) < (_end);						\
	     (_entry) = extent_entry_next_safe(_entry, _end))

#define __bkey_ptr_next(_ptr, _end)					\
({									\
	typeof(_end) _entry;						\
									\
	__bkey_extent_entry_for_each_from(to_entry(_ptr), _end, _entry)	\
		if (extent_entry_is_ptr(_entry))			\
			break;						\
									\
	_entry < (_end) ? entry_to_ptr(_entry) : NULL;			\
})

#define bkey_extent_entry_for_each_from(_p, _entry, _start)		\
	__bkey_extent_entry_for_each_from(_start, (_p).end, _entry)

#define bkey_extent_entry_for_each(_p, _entry)				\
	bkey_extent_entry_for_each_from(_p, _entry, _p.start)

#define __bkey_for_each_ptr(_start, _end, _ptr)				\
	for (typeof(_start) (_ptr) = (_start);				\
	     ((_ptr) = __bkey_ptr_next(_ptr, _end));			\
	     (_ptr)++)

#define bkey_ptr_next(_p, _ptr)						\
	__bkey_ptr_next(_ptr, (_p).end)

#define bkey_for_each_ptr(_p, _ptr)					\
	__bkey_for_each_ptr(&(_p).start->ptr, (_p).end, _ptr)
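
/*
 * Usage sketch (illustrative, not from the original header): any key type
 * with pointers can be walked generically; bkey_for_each_ptr() declares the
 * iteration variable itself. E.g. counting dirty (non-cached) pointers:
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *	unsigned nr_dirty = 0;
 *
 *	bkey_for_each_ptr(ptrs, ptr)
 *		nr_dirty += !ptr->cached;
 */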

#define __bkey_ptr_next_decode(_k, _end, _ptr, _entry)			\
({									\
	__label__ out;							\
									\
	(_ptr).idx	= 0;						\
	(_ptr).has_ec	= false;					\
									\
	__bkey_extent_entry_for_each_from(_entry, _end, _entry)		\
		switch (__extent_entry_type(_entry)) {			\
		case BCH_EXTENT_ENTRY_ptr:				\
			(_ptr).ptr		= _entry->ptr;		\
			goto out;					\
		case BCH_EXTENT_ENTRY_crc32:				\
		case BCH_EXTENT_ENTRY_crc64:				\
		case BCH_EXTENT_ENTRY_crc128:				\
			(_ptr).crc = bch2_extent_crc_unpack(_k,		\
					entry_to_crc(_entry));		\
			break;						\
		case BCH_EXTENT_ENTRY_stripe_ptr:			\
			(_ptr).ec = _entry->stripe_ptr;			\
			(_ptr).has_ec	= true;				\
			break;						\
		default:						\
			/* nothing */					\
			break;						\
		}							\
out:									\
	_entry < (_end);						\
})

#define __bkey_for_each_ptr_decode(_k, _start, _end, _ptr, _entry)	\
	for ((_ptr).crc = bch2_extent_crc_unpack(_k, NULL),		\
	     (_entry) = _start;						\
	     __bkey_ptr_next_decode(_k, _end, _ptr, _entry);		\
	     (_entry) = extent_entry_next_safe(_entry, _end))

#define bkey_for_each_ptr_decode(_k, _p, _ptr, _entry)			\
	__bkey_for_each_ptr_decode(_k, (_p).start, (_p).end,		\
				   _ptr, _entry)
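
/*
 * Usage sketch (illustrative, not from the original header): the decode
 * variant yields each pointer along with the checksum/compression state and
 * erasure-coding info in effect for it:
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *	const union bch_extent_entry *entry;
 *	struct extent_ptr_decoded p;
 *
 *	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
 *		if (p.crc.csum_type && !p.has_ec)
 *			pr_debug("checksummed ptr on dev %u\n", p.ptr.dev);
 */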

#define bkey_crc_next(_k, _end, _crc, _iter)				\
({									\
	__bkey_extent_entry_for_each_from(_iter, _end, _iter)		\
		if (extent_entry_is_crc(_iter)) {			\
			(_crc) = bch2_extent_crc_unpack(_k,		\
						entry_to_crc(_iter));	\
			break;						\
		}							\
									\
	(_iter) < (_end);						\
})

#define __bkey_for_each_crc(_k, _start, _end, _crc, _iter)		\
	for ((_crc) = bch2_extent_crc_unpack(_k, NULL),			\
	     (_iter) = (_start);					\
	     bkey_crc_next(_k, _end, _crc, _iter);			\
	     (_iter) = extent_entry_next(_iter))

#define bkey_for_each_crc(_k, _p, _crc, _iter)				\
	__bkey_for_each_crc(_k, (_p).start, (_p).end, _crc, _iter)
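
/*
 * Similar sketch (illustrative) for walking only the checksum entries:
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *	struct bch_extent_crc_unpacked crc;
 *	const union bch_extent_entry *iter;
 *
 *	bkey_for_each_crc(k.k, ptrs, crc, iter)
 *		if (crc_is_compressed(crc))
 *			break;
 */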

/* Iterate over pointers in KEY_TYPE_extent: */

#define extent_for_each_entry_from(_e, _entry, _start)			\
	__bkey_extent_entry_for_each_from(_start,			\
				extent_entry_last(_e), _entry)

#define extent_for_each_entry(_e, _entry)				\
	extent_for_each_entry_from(_e, _entry, (_e).v->start)

#define extent_ptr_next(_e, _ptr)					\
	__bkey_ptr_next(_ptr, extent_entry_last(_e))

#define extent_for_each_ptr(_e, _ptr)					\
	__bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)

#define extent_for_each_ptr_decode(_e, _ptr, _entry)			\
	__bkey_for_each_ptr_decode((_e).k, (_e).v->start,		\
				   extent_entry_last(_e), _ptr, _entry)

/* utility code common to all keys with pointers: */

struct bch_dev_io_failures *bch2_dev_io_failures(struct bch_io_failures *,
						 unsigned);
void bch2_mark_io_failure(struct bch_io_failures *,
			  struct extent_ptr_decoded *);
int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
			       struct bch_io_failures *,
			       struct extent_ptr_decoded *);

/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_validate(struct bch_fs *, struct bkey_s_c,
			    struct bkey_validate_context);
void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
			    struct bkey_s_c);

int bch2_btree_ptr_v2_validate(struct bch_fs *, struct bkey_s_c,
			       struct bkey_validate_context);
void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
			      int, struct bkey_s);

#define bch2_bkey_ops_btree_ptr ((struct bkey_ops) {		\
	.key_validate	= bch2_btree_ptr_validate,		\
	.val_to_text	= bch2_btree_ptr_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.trigger	= bch2_trigger_extent,			\
})

#define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) {		\
	.key_validate	= bch2_btree_ptr_v2_validate,		\
	.val_to_text	= bch2_btree_ptr_v2_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.compat		= bch2_btree_ptr_v2_compat,		\
	.trigger	= bch2_trigger_extent,			\
	.min_val_size	= 40,					\
})

/* KEY_TYPE_extent: */

bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

#define bch2_bkey_ops_extent ((struct bkey_ops) {		\
	.key_validate	= bch2_bkey_ptrs_validate,		\
	.val_to_text	= bch2_bkey_ptrs_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.key_normalize	= bch2_extent_normalize,		\
	.key_merge	= bch2_extent_merge,			\
	.trigger	= bch2_trigger_extent,			\
})

/* KEY_TYPE_reservation: */

int bch2_reservation_validate(struct bch_fs *, struct bkey_s_c,
			      struct bkey_validate_context);
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

#define bch2_bkey_ops_reservation ((struct bkey_ops) {		\
	.key_validate	= bch2_reservation_validate,		\
	.val_to_text	= bch2_reservation_to_text,		\
	.key_merge	= bch2_reservation_merge,		\
	.trigger	= bch2_trigger_reservation,		\
	.min_val_size	= 8,					\
})

/* Extent checksum entries: */

bool bch2_can_narrow_extent_crcs(struct bkey_s_c,
				 struct bch_extent_crc_unpacked);
bool bch2_bkey_narrow_crcs(struct bkey_i *, struct bch_extent_crc_unpacked);
void bch2_extent_crc_append(struct bkey_i *,
			    struct bch_extent_crc_unpacked);

/* Generic code for keys with pointers: */

static inline bool bkey_is_btree_ptr(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_direct_data(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_inline_data(const struct bkey *k)
{
	return  k->type == KEY_TYPE_inline_data ||
		k->type == KEY_TYPE_indirect_inline_data;
}

static inline unsigned bkey_inline_data_offset(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_inline_data:
		return sizeof(struct bch_inline_data);
	case KEY_TYPE_indirect_inline_data:
		return sizeof(struct bch_indirect_inline_data);
	default:
		BUG();
	}
}

static inline unsigned bkey_inline_data_bytes(const struct bkey *k)
{
	return bkey_val_bytes(k) - bkey_inline_data_offset(k);
}

#define bkey_inline_data_p(_k)	(((void *) (_k).v) + bkey_inline_data_offset((_k).k))

static inline bool bkey_extent_is_data(const struct bkey *k)
{
	return  bkey_extent_is_direct_data(k) ||
		bkey_extent_is_inline_data(k) ||
		k->type == KEY_TYPE_reflink_p;
}

/*
 * Should extent be counted under inode->i_sectors?
 */
static inline bool bkey_extent_is_allocation(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reservation:
	case KEY_TYPE_reflink_p:
	case KEY_TYPE_reflink_v:
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
	case KEY_TYPE_error:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_unwritten(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->unwritten)
			return true;
	return false;
}

static inline bool bkey_extent_is_reservation(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation ||
		bkey_extent_is_unwritten(k);
}

static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(p, ptr)
		ret.data[ret.nr++] = ptr->dev;

	return ret;
}

static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(p, ptr)
		if (!ptr->cached)
			ret.data[ret.nr++] = ptr->dev;

	return ret;
}

static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(p, ptr)
		if (ptr->cached)
			ret.data[ret.nr++] = ptr->dev;

	return ret;
}

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
bool bch2_bkey_is_incompressible(struct bkey_s_c);
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);

unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
unsigned bch2_extent_ptr_desired_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);

const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c, unsigned);

static inline struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s k, unsigned dev)
{
	return (void *) bch2_bkey_has_device_c(k.s_c, dev);
}

bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);

void bch2_bkey_extent_entry_drop(struct bkey_i *, union bch_extent_entry *);

static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr ptr)
{
	struct bch_extent_ptr *dest;

	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s(k), ptr.dev));

	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);

		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
		dest = (struct bch_extent_ptr *)((void *) &k->v + bkey_val_bytes(&k->k));
		*dest = ptr;
		k->k.u64s++;
		break;
	default:
		BUG();
	}
}
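
/*
 * Usage sketch (illustrative, not from the original header): appending a
 * pointer to a key being built; dev_idx, bucket_offset, gen and e are
 * hypothetical locals:
 *
 *	struct bch_extent_ptr ptr = {
 *		.dev	= dev_idx,
 *		.offset	= bucket_offset,
 *		.gen	= gen,
 *	};
 *
 *	bch2_bkey_append_ptr(&e->k_i, ptr);
 */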

void bch2_extent_ptr_decoded_append(struct bkey_i *,
				    struct extent_ptr_decoded *);
void bch2_bkey_drop_ptr_noerror(struct bkey_s, struct bch_extent_ptr *);
void bch2_bkey_drop_ptr(struct bkey_s, struct bch_extent_ptr *);

void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);
void bch2_bkey_drop_device(struct bkey_s, unsigned);

#define bch2_bkey_drop_ptrs_noerror(_k, _ptr, _cond)			\
do {									\
	__label__ _again;						\
	struct bkey_ptrs _ptrs;						\
_again:									\
	_ptrs = bch2_bkey_ptrs(_k);					\
									\
	bkey_for_each_ptr(_ptrs, _ptr)					\
		if (_cond) {						\
			bch2_bkey_drop_ptr_noerror(_k, _ptr);		\
			goto _again;					\
		}							\
} while (0)

#define bch2_bkey_drop_ptrs(_k, _ptr, _cond)				\
do {									\
	__label__ _again;						\
	struct bkey_ptrs _ptrs;						\
_again:									\
	_ptrs = bch2_bkey_ptrs(_k);					\
									\
	bkey_for_each_ptr(_ptrs, _ptr)					\
		if (_cond) {						\
			bch2_bkey_drop_ptr(_k, _ptr);			\
			goto _again;					\
		}							\
} while (0)
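
/*
 * Usage sketch (illustrative, not from the original header): dropping every
 * cached pointer; the macro restarts the scan after each drop because
 * removing an entry shifts the entries that follow it:
 *
 *	bch2_bkey_drop_ptrs(bkey_i_to_s(k), ptr, ptr->cached);
 */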

bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
			   struct bch_extent_ptr, u64);
bool bch2_extents_match(struct bkey_s_c, struct bkey_s_c);
struct bch_extent_ptr *
bch2_extent_has_ptr(struct bkey_s_c, struct extent_ptr_decoded, struct bkey_s);

void bch2_extent_ptr_set_cached(struct bch_fs *, struct bch_io_opts *,
				struct bkey_s, struct bch_extent_ptr *);

bool bch2_extent_normalize_by_opts(struct bch_fs *, struct bch_io_opts *, struct bkey_s);
bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);

void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *, const struct bch_extent_ptr *);
void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
			    struct bkey_s_c);
int bch2_bkey_ptrs_validate(struct bch_fs *, struct bkey_s_c,
			    struct bkey_validate_context);

static inline bool bch2_extent_ptr_eq(struct bch_extent_ptr ptr1,
				      struct bch_extent_ptr ptr2)
{
	return (ptr1.cached	== ptr2.cached &&
		ptr1.unwritten	== ptr2.unwritten &&
		ptr1.offset	== ptr2.offset &&
		ptr1.dev	== ptr2.dev &&
		ptr1.gen	== ptr2.gen);
}

void bch2_ptr_swab(struct bkey_s);

/* Generic extent code: */

enum bch_extent_overlap {
	BCH_EXTENT_OVERLAP_ALL		= 0,
	BCH_EXTENT_OVERLAP_BACK		= 1,
	BCH_EXTENT_OVERLAP_FRONT	= 2,
	BCH_EXTENT_OVERLAP_MIDDLE	= 3,
};

/* Returns how k overlaps with m */
static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
							  const struct bkey *m)
{
	int cmp1 = bkey_lt(k->p, m->p);
	int cmp2 = bkey_gt(bkey_start_pos(k), bkey_start_pos(m));

	return (cmp1 << 1) + cmp2;
}
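
/*
 * Worked example (illustrative): with k spanning [0, 8) and m spanning
 * [4, 12), cmp1 = bkey_lt(8, 12) = 1 and cmp2 = bkey_gt(0, 4) = 0, so the
 * result is (1 << 1) + 0 == BCH_EXTENT_OVERLAP_FRONT: k overlaps the front
 * of m.
 */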

int bch2_cut_front_s(struct bpos, struct bkey_s);
int bch2_cut_back_s(struct bpos, struct bkey_s);

static inline void bch2_cut_front(struct bpos where, struct bkey_i *k)
{
	bch2_cut_front_s(where, bkey_i_to_s(k));
}

static inline void bch2_cut_back(struct bpos where, struct bkey_i *k)
{
	bch2_cut_back_s(where, bkey_i_to_s(k));
}

/**
 * bch2_key_resize - adjust size of @k
 * @k:		key to resize
 * @new_size:	new size, in sectors
 *
 * bkey_start_offset(k) is preserved; only where the extent ends changes.
 */
static inline void bch2_key_resize(struct bkey *k, unsigned new_size)
{
	k->p.offset -= k->size;
	k->p.offset += new_size;
	k->size = new_size;
}
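
/*
 * Worked example (illustrative): extents are keyed by their end position,
 * so resizing moves p.offset while bkey_start_offset() stays fixed. A key
 * with p.offset == 100 and size == 20 covers [80, 100); after
 * bch2_key_resize(k, 8) it has p.offset == 88 and size == 8, still
 * starting at sector 80.
 */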

#endif /* _BCACHEFS_EXTENTS_H */