/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "bset.h"
#include "btree_types.h"
#include "trace.h"

void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);

static inline int __bkey_err(const struct bkey *k)
{
	return PTR_ERR_OR_ZERO(k);
}

#define bkey_err(_k)	__bkey_err((_k).k)

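/*
 * Illustrative sketch (not part of this header): lookup results encode errors
 * in the key pointer, so callers check them with bkey_err() rather than
 * comparing against NULL:
 *
 *	struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, &iter);
 *	int ret = bkey_err(k);
 *	if (ret)
 *		goto err;
 */
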
static inline void __btree_path_get(struct btree_trans *trans, struct btree_path *path, bool intent)
{
	unsigned idx = path - trans->paths;

	EBUG_ON(idx >= trans->nr_paths);
	EBUG_ON(!test_bit(idx, trans->paths_allocated));
	if (unlikely(path->ref == U8_MAX)) {
		bch2_dump_trans_paths_updates(trans);
		panic("path %u refcount overflow\n", idx);
	}

	path->ref++;
	path->intent_ref += intent;
	trace_btree_path_get_ll(trans, path);
}

static inline bool __btree_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
{
	EBUG_ON(path - trans->paths >= trans->nr_paths);
	EBUG_ON(!test_bit(path - trans->paths, trans->paths_allocated));
	EBUG_ON(!path->ref);
	EBUG_ON(!path->intent_ref && intent);

	trace_btree_path_put_ll(trans, path);
	path->intent_ref -= intent;
	return --path->ref == 0;
}

static inline void btree_path_set_dirty(struct btree_path *path,
					enum btree_path_uptodate u)
{
	path->uptodate = max_t(unsigned, path->uptodate, u);
}

static inline struct btree *btree_path_node(struct btree_path *path,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}

static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
					const struct btree *b, unsigned level)
{
	return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
}

static inline struct btree *btree_node_parent(struct btree_path *path,
					      struct btree *b)
{
	return btree_path_node(path, b->c.level + 1);
}

/* Iterate over paths within a transaction: */

void __bch2_btree_trans_sort_paths(struct btree_trans *);

static inline void btree_trans_sort_paths(struct btree_trans *trans)
{
	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    trans->paths_sorted)
		return;
	__bch2_btree_trans_sort_paths(trans);
}

static inline unsigned long *trans_paths_nr(struct btree_path *paths)
{
	return &container_of(paths, struct btree_trans_paths, paths[0])->nr_paths;
}

static inline unsigned long *trans_paths_allocated(struct btree_path *paths)
{
	unsigned long *v = trans_paths_nr(paths);
	return v - BITS_TO_LONGS(*v);
}

#define trans_for_each_path_idx_from(_paths_allocated, _nr, _idx, _start)\
	for (_idx = _start;						\
	     (_idx = find_next_bit(_paths_allocated, _nr, _idx)) < _nr;	\
	     _idx++)

static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned *idx)
{
	unsigned long *w = trans->paths_allocated + *idx / BITS_PER_LONG;
	/*
	 * Open-coded find_next_bit(), because:
	 *  - this is the fast path, so we can't afford the function call, and
	 *  - we know that nr_paths is a multiple of BITS_PER_LONG.
	 */
	while (*idx < trans->nr_paths) {
		unsigned long v = *w >> (*idx & (BITS_PER_LONG - 1));
		if (v) {
			*idx += __ffs(v);
			return trans->paths + *idx;
		}

		*idx += BITS_PER_LONG;
		*idx &= ~(BITS_PER_LONG - 1);
		w++;
	}

	return NULL;
}

/*
 * This version is intended to be safe for use on a btree_trans that is owned by
 * another thread, for bch2_btree_trans_to_text().
 */
#define trans_for_each_path_from(_trans, _path, _idx, _start)		\
	for (_idx = _start;						\
	     (_path = __trans_next_path((_trans), &_idx));		\
	     _idx++)

#define trans_for_each_path(_trans, _path, _idx)			\
	trans_for_each_path_from(_trans, _path, _idx, 1)

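/*
 * Illustrative sketch (not part of this header): a typical walk over every
 * allocated path in a transaction, e.g. for debug output:
 *
 *	struct btree_path *path;
 *	unsigned idx;
 *
 *	trans_for_each_path(trans, path, idx)
 *		bch2_btree_path_to_text(out, trans, idx);
 */
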
static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx + 1 : 0;

	EBUG_ON(idx > trans->nr_sorted);

	return idx < trans->nr_sorted
		? trans->paths + trans->sorted[idx]
		: NULL;
}

static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx : trans->nr_sorted;

	return idx
		? trans->paths + trans->sorted[idx - 1]
		: NULL;
}

#define trans_for_each_path_idx_inorder(_trans, _iter)			\
	for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };	\
	     (_iter.path_idx = (_trans)->sorted[_iter.sorted_idx],	\
	      _iter.sorted_idx < (_trans)->nr_sorted);			\
	     _iter.sorted_idx++)

struct trans_for_each_path_inorder_iter {
	btree_path_idx_t	sorted_idx;
	btree_path_idx_t	path_idx;
};

#define trans_for_each_path_inorder(_trans, _path, _iter)		\
	for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };	\
	     (_iter.path_idx = (_trans)->sorted[_iter.sorted_idx],	\
	      _path = (_trans)->paths + _iter.path_idx,			\
	      _iter.sorted_idx < (_trans)->nr_sorted);			\
	     _iter.sorted_idx++)

#define trans_for_each_path_inorder_reverse(_trans, _path, _i)		\
	for (_i = (_trans)->nr_sorted - 1;				\
	     ((_path) = (_trans)->paths + (_trans)->sorted[_i]), (_i) >= 0;\
	     --_i)

static inline bool __path_has_node(const struct btree_path *path,
				   const struct btree *b)
{
	return path->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(path, b, b->c.level);
}

static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned *idx)
{
	struct btree_path *path;

	while ((path = __trans_next_path(trans, idx)) &&
		!__path_has_node(path, b))
	       (*idx)++;

	return path;
}

#define trans_for_each_path_with_node(_trans, _b, _path, _iter)		\
	for (_iter = 1;							\
	     (_path = __trans_next_path_with_node((_trans), (_b), &_iter));\
	     _iter++)

btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *, btree_path_idx_t,
					    bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
			 btree_path_idx_t path, bool intent,
			 unsigned long ip)
{
	if (trans->paths[path].ref > 1 ||
	    trans->paths[path].preserve)
		path = __bch2_btree_path_make_mut(trans, path, intent, ip);
	trans->paths[path].should_be_locked = false;
	return path;
}

btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *, btree_path_idx_t,
			  struct bpos, bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
			btree_path_idx_t path, struct bpos new_pos,
			bool intent, unsigned long ip)
{
	return !bpos_eq(new_pos, trans->paths[path].pos)
		? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip)
		: path;
}

int __must_check bch2_btree_path_traverse_one(struct btree_trans *,
					      btree_path_idx_t,
					      unsigned, unsigned long);

static inline void bch2_trans_verify_not_unlocked_or_in_restart(struct btree_trans *);

static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
					  btree_path_idx_t path, unsigned flags)
{
	bch2_trans_verify_not_unlocked_or_in_restart(trans);

	if (trans->paths[path].uptodate < BTREE_ITER_NEED_RELOCK)
		return 0;

	return bch2_btree_path_traverse_one(trans, path, flags, _RET_IP_);
}

btree_path_idx_t bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
				 unsigned, unsigned, unsigned, unsigned long);
btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *, enum btree_id,
					    unsigned, struct bpos);

struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

/*
 * bch2_btree_path_peek_slot() for a cached iterator might return a key in a
 * different snapshot:
 */
static inline struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
{
	struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);

	if (k.k && bpos_eq(path->pos, k.k->p))
		return k;

	bkey_init(u);
	u->p = path->pos;
	return (struct bkey_s_c) { u, NULL };
}

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
					struct btree_iter *, struct bpos);

void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);

int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);

static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
{
	return mutex_trylock(lock)
		? 0
		: __bch2_trans_mutex_lock(trans, lock);
}

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_assert_pos_locked(struct btree_trans *, enum btree_id, struct bpos);
#else
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
					  struct bpos pos) {}
#endif

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *, struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
			      struct btree *, struct btree_node_iter *,
			      struct bkey_packed *, unsigned, unsigned);

int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);

void bch2_path_put(struct btree_trans *, btree_path_idx_t, bool);

int bch2_trans_relock(struct btree_trans *);
int bch2_trans_relock_notrace(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
void bch2_trans_unlock_long(struct btree_trans *);

static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
	return restart_count != trans->restart_count
		? -BCH_ERR_transaction_restart_nested
		: 0;
}

void __noreturn bch2_trans_restart_error(struct btree_trans *, u32);

static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
						   u32 restart_count)
{
	if (trans_was_restarted(trans, restart_count))
		bch2_trans_restart_error(trans, restart_count);
}

void __noreturn bch2_trans_unlocked_or_in_restart_error(struct btree_trans *);

static inline void bch2_trans_verify_not_unlocked_or_in_restart(struct btree_trans *trans)
{
	if (trans->restarted || !trans->locked)
		bch2_trans_unlocked_or_in_restart_error(trans);
}

__always_inline
static int btree_trans_restart_foreign_task(struct btree_trans *trans, int err, unsigned long ip)
{
	BUG_ON(err <= 0);
	BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));

	trans->restarted = err;
	trans->last_restarted_ip = ip;
	return -err;
}

__always_inline
static int btree_trans_restart_ip(struct btree_trans *trans, int err, unsigned long ip)
{
	btree_trans_restart_foreign_task(trans, err, ip);
#ifdef CONFIG_BCACHEFS_DEBUG
	darray_exit(&trans->last_restarted_trace);
	bch2_save_backtrace(&trans->last_restarted_trace, current, 0, GFP_NOWAIT);
#endif
	return -err;
}

__always_inline
static int btree_trans_restart(struct btree_trans *trans, int err)
{
	return btree_trans_restart_ip(trans, err, _THIS_IP_);
}

static inline int trans_maybe_inject_restart(struct btree_trans *trans, unsigned long ip)
{
#ifdef CONFIG_BCACHEFS_INJECT_TRANSACTION_RESTARTS
	if (!(ktime_get_ns() & ~(~0ULL << min(63, (10 + trans->restart_count_this_trans))))) {
		trace_and_count(trans->c, trans_restart_injected, trans, ip);
		return btree_trans_restart_ip(trans,
					BCH_ERR_transaction_restart_fault_inject, ip);
	}
#endif
	return 0;
}

bool bch2_btree_node_upgrade(struct btree_trans *,
			     struct btree_path *, unsigned);

void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);

static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
					     struct btree_path *path)
{
	unsigned new_locks_want = path->level + !!path->intent_ref;

	if (path->locks_want > new_locks_want)
		__bch2_btree_path_downgrade(trans, path, new_locks_want);
}

void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct btree *);
void bch2_trans_node_drop(struct btree_trans *trans, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_trans *, struct btree_iter *);
int __must_check bch2_btree_iter_traverse(struct btree_trans *, struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_trans *, struct btree_iter *);
struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_trans *, struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_trans *, struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *, struct btree_iter *, struct bpos);
struct bkey_s_c bch2_btree_iter_next(struct btree_trans *, struct btree_iter *);

static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_trans *trans,
						   struct btree_iter *iter)
{
	return bch2_btree_iter_peek_max(trans, iter, SPOS_MAX);
}

struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *, struct btree_iter *, struct bpos);

static inline struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_trans *trans, struct btree_iter *iter)
{
	return bch2_btree_iter_peek_prev_min(trans, iter, POS_MIN);
}

struct bkey_s_c bch2_btree_iter_prev(struct btree_trans *, struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *, struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_trans *, struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_trans *, struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_trans *, struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_trans *, struct btree_iter *);

static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	iter->k.type = KEY_TYPE_deleted;
	iter->k.p.inode		= iter->pos.inode	= new_pos.inode;
	iter->k.p.offset	= iter->pos.offset	= new_pos.offset;
	iter->k.p.snapshot	= iter->pos.snapshot	= new_pos.snapshot;
	iter->k.size = 0;
}

static inline void bch2_btree_iter_set_pos(struct btree_trans *trans,
					   struct btree_iter *iter, struct bpos new_pos)
{
	if (unlikely(iter->update_path))
		bch2_path_put(trans, iter->update_path,
			      iter->flags & BTREE_ITER_intent);
	iter->update_path = 0;

	if (!(iter->flags & BTREE_ITER_all_snapshots))
		new_pos.snapshot = iter->snapshot;

	__bch2_btree_iter_set_pos(iter, new_pos);
}

static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_is_extents));
	iter->pos = bkey_start_pos(&iter->k);
}

static inline void bch2_btree_iter_set_snapshot(struct btree_trans *trans,
						struct btree_iter *iter, u32 snapshot)
{
	struct bpos pos = iter->pos;

	iter->snapshot = snapshot;
	pos.snapshot = snapshot;
	bch2_btree_iter_set_pos(trans, iter, pos);
}

void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);

static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
					     unsigned btree_id,
					     unsigned level,
					     unsigned flags)
{
	if (level || !btree_id_cached(trans->c, btree_id)) {
		flags &= ~BTREE_ITER_cached;
		flags &= ~BTREE_ITER_with_key_cache;
	} else if (!(flags & BTREE_ITER_cached))
		flags |= BTREE_ITER_with_key_cache;

	if (!(flags & (BTREE_ITER_all_snapshots|BTREE_ITER_not_extents)) &&
	    btree_id_is_extents(btree_id))
		flags |= BTREE_ITER_is_extents;

	if (!(flags & BTREE_ITER_snapshot_field) &&
	    !btree_type_has_snapshot_field(btree_id))
		flags &= ~BTREE_ITER_all_snapshots;

	if (!(flags & BTREE_ITER_all_snapshots) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_filter_snapshots;

	if (trans->journal_replay_not_finished)
		flags |= BTREE_ITER_with_journal;

	return flags;
}

static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
					  struct btree_iter *iter,
					  unsigned btree_id, struct bpos pos,
					  unsigned locks_want,
					  unsigned depth,
					  unsigned flags,
					  unsigned long ip)
{
	iter->update_path	= 0;
	iter->key_cache_path	= 0;
	iter->btree_id		= btree_id;
	iter->min_depth		= 0;
	iter->flags		= flags;
	iter->snapshot		= pos.snapshot;
	iter->pos		= pos;
	iter->k			= POS_KEY(pos);
	iter->journal_idx	= 0;
#ifdef CONFIG_BCACHEFS_DEBUG
	iter->ip_allocated = ip;
#endif
	iter->path = bch2_path_get(trans, btree_id, iter->pos,
				   locks_want, depth, flags, ip);
}

void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
			  enum btree_id, struct bpos, unsigned);

static inline void bch2_trans_iter_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  unsigned btree_id, struct bpos pos,
			  unsigned flags)
{
	if (__builtin_constant_p(btree_id) &&
	    __builtin_constant_p(flags))
		bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
				bch2_btree_iter_flags(trans, btree_id, 0, flags),
				_THIS_IP_);
	else
		bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
}

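/*
 * Illustrative sketch (not part of this header): the basic iterator
 * lifecycle for a single lookup. BTREE_ID_extents and the position are
 * assumptions for the example:
 *
 *	struct btree_iter iter;
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, POS(inum, 0), 0);
 *
 *	struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, &iter);
 *	int ret = bkey_err(k);
 *	if (!ret) {
 *		...use k...
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 */
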
void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
			       enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_trans *, struct btree_iter *, struct btree_iter *);

void bch2_set_btree_iter_dontneed(struct btree_trans *, struct btree_iter *);

void *__bch2_trans_kmalloc(struct btree_trans *, size_t);

/**
 * bch2_trans_kmalloc - allocate memory for use by the current transaction
 *
 * Must be called after bch2_trans_begin(), which on second and subsequent
 * calls frees all memory allocated in this transaction.
 */
static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size = roundup(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		memset(p, 0, size);
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}

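/*
 * Illustrative sketch (not part of this header): transaction allocations
 * live until the next bch2_trans_begin(), so there is no explicit free.
 * Failure is signalled via ERR_PTR():
 *
 *	struct bkey_i *update = bch2_trans_kmalloc(trans, sizeof(*update));
 *	int ret = PTR_ERR_OR_ZERO(update);
 *	if (ret)
 *		goto err;
 */
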
static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
{
	size = round_up(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}

static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned btree_id, struct bpos pos,
				unsigned flags, unsigned type)
{
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
	k = bch2_btree_iter_peek_slot(trans, iter);

	if (!bkey_err(k) && type && k.k->type != type)
		k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
	if (unlikely(bkey_err(k)))
		bch2_trans_iter_exit(trans, iter);
	return k;
}

static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned btree_id, struct bpos pos,
				unsigned flags)
{
	return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
}

#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
	bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter,			\
				       _btree_id, _pos, _flags, KEY_TYPE_##_type))

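/*
 * Illustrative sketch (not part of this header): looking up a key that must
 * be of a specific type. The btree and key type here are assumptions:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c_snapshot s =
 *		bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots,
 *					 POS(0, id), 0, snapshot);
 *	int ret = bkey_err(s);
 *	if (ret)
 *		return ret;	// the iterator was already exited on error
 *
 *	...use s.v...
 *	bch2_trans_iter_exit(trans, &iter);
 */
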
static inline void __bkey_val_copy(void *dst_v, unsigned dst_size, struct bkey_s_c src_k)
{
	unsigned b = min_t(unsigned, dst_size, bkey_val_bytes(src_k.k));
	memcpy(dst_v, src_k.v, b);
	if (unlikely(b < dst_size))
		memset(dst_v + b, 0, dst_size - b);
}

#define bkey_val_copy(_dst_v, _src_k)					\
do {									\
	BUILD_BUG_ON(!__typecheck(*_dst_v, *_src_k.v));			\
	__bkey_val_copy(_dst_v, sizeof(*_dst_v), _src_k.s_c);		\
} while (0)

static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
				unsigned btree_id, struct bpos pos,
				unsigned flags, unsigned type,
				unsigned val_size, void *val)
{
	struct btree_iter iter;
	struct bkey_s_c k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
	int ret = bkey_err(k);
	if (!ret) {
		__bkey_val_copy(val, val_size, k);
		bch2_trans_iter_exit(trans, &iter);
	}

	return ret;
}

#define bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, _type, _val)\
	__bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags,	\
				  KEY_TYPE_##_type, sizeof(*_val), _val)

void bch2_trans_srcu_unlock(struct btree_trans *);

u32 bch2_trans_begin(struct btree_trans *);

#define __for_each_btree_node(_trans, _iter, _btree_id, _start,			\
			      _locks_want, _depth, _flags, _b, _do)		\
({										\
	bch2_trans_begin((_trans));						\
										\
	struct btree_iter _iter;						\
	bch2_trans_node_iter_init((_trans), &_iter, (_btree_id),		\
				  _start, _locks_want, _depth, _flags);		\
	int _ret3 = 0;								\
	do {									\
		_ret3 = lockrestart_do((_trans), ({				\
			struct btree *_b = bch2_btree_iter_peek_node(_trans, &_iter);\
			if (!_b)						\
				break;						\
										\
			PTR_ERR_OR_ZERO(_b) ?: (_do);				\
		})) ?:								\
		lockrestart_do((_trans),					\
			PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(_trans, &_iter)));\
	} while (!_ret3);							\
										\
	bch2_trans_iter_exit((_trans), &(_iter));				\
	_ret3;									\
})

#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b, _do)				\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,	\
			      0, 0, _flags, _b, _do)

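/*
 * Illustrative sketch (not part of this header): walking the nodes of a
 * btree. The btree ID and the printbuf use are assumptions; _do must
 * evaluate to an int (0 on success):
 *
 *	ret = for_each_btree_node(trans, iter, BTREE_ID_extents, POS_MIN, 0, b, ({
 *		bch2_btree_node_to_text(&buf, c, b);
 *		0;
 *	}));
 */
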
static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_trans *trans,
							     struct btree_iter *iter,
							     unsigned flags)
{
	return  flags & BTREE_ITER_slots      ? bch2_btree_iter_peek_slot(trans, iter) :
						bch2_btree_iter_peek_prev(trans, iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_trans *trans,
							struct btree_iter *iter,
							unsigned flags)
{
	return  flags & BTREE_ITER_slots      ? bch2_btree_iter_peek_slot(trans, iter) :
						bch2_btree_iter_peek(trans, iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_max_type(struct btree_trans *trans,
							    struct btree_iter *iter,
							    struct bpos end,
							    unsigned flags)
{
	if (!(flags & BTREE_ITER_slots))
		return bch2_btree_iter_peek_max(trans, iter, end);

	if (bkey_gt(iter->pos, end))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(trans, iter);
}

int __bch2_btree_trans_too_many_iters(struct btree_trans *);

static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_NORMAL_LIMIT - 8)
		return __bch2_btree_trans_too_many_iters(trans);

	return 0;
}

/*
 * goto instead of loop, so that when used inside for_each_btree_key()
 * break/continue work correctly
 */
#define lockrestart_do(_trans, _do)					\
({									\
	__label__ transaction_restart;					\
	u32 _restart_count;						\
	int _ret2;							\
transaction_restart:							\
	_restart_count = bch2_trans_begin(_trans);			\
	_ret2 = (_do);							\
									\
	if (bch2_err_matches(_ret2, BCH_ERR_transaction_restart))	\
		goto transaction_restart;				\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
	_ret2;								\
})

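/*
 * Illustrative sketch (not part of this header): retrying a transactional
 * operation until it no longer restarts; the helper called here is just an
 * example of a function that runs inside a btree_trans:
 *
 *	ret = lockrestart_do(trans,
 *		bch2_inode_find_by_inum_trans(trans, inum, &inode));
 */
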
/*
 * nested_lockrestart_do(), nested_commit_do():
 *
 * These are like lockrestart_do() and commit_do(), with two differences:
 *
 *  - We don't call bch2_trans_begin() unless we had a transaction restart
 *  - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
 *    transaction restart
 */
#define nested_lockrestart_do(_trans, _do)				\
({									\
	u32 _restart_count, _orig_restart_count;			\
	int _ret2;							\
									\
	_restart_count = _orig_restart_count = (_trans)->restart_count;	\
									\
	while (bch2_err_matches(_ret2 = (_do), BCH_ERR_transaction_restart))\
		_restart_count = bch2_trans_begin(_trans);		\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	_ret2 ?: trans_was_restarted(_trans, _orig_restart_count);		\
})

#define for_each_btree_key_max_continue(_trans, _iter,			\
					 _end, _flags, _k, _do)		\
({									\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter),	\
						_end, (_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_advance(_trans, &(_iter)));	\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do)	\
	for_each_btree_key_max_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do)

#define for_each_btree_key_max(_trans, _iter, _btree_id,		\
				_start, _end, _flags, _k, _do)		\
({									\
	bch2_trans_begin((_trans));					\
									\
	struct btree_iter _iter;					\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	for_each_btree_key_max_continue(_trans, _iter, _end, _flags, _k, _do);\
})

#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _do)			\
	for_each_btree_key_max(_trans, _iter, _btree_id, _start,	\
				 SPOS_MAX, _flags, _k, _do)

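/*
 * Illustrative sketch (not part of this header): iterating a btree with
 * per-key restart handling. The btree, flags and printbuf use are
 * assumptions; _do must evaluate to an int (0 to keep going):
 *
 *	ret = for_each_btree_key(trans, iter, BTREE_ID_extents,
 *				 POS(inum, 0), BTREE_ITER_prefetch, k, ({
 *		bch2_bkey_val_to_text(&buf, c, k);
 *		prt_newline(&buf);
 *		0;
 *	}));
 */
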
#define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
				   _start, _flags, _k, _do)		\
({									\
	struct btree_iter _iter;					\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_prev_type(_trans, &(_iter),	\
							(_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_rewind(_trans, &(_iter)));	\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key_commit(_trans, _iter, _btree_id,		\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_reverse_commit(_trans, _iter, _btree_id,	\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key_reverse(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_max_commit(_trans, _iter, _btree_id,	\
				  _start, _end, _iter_flags, _k,	\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key_max(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *,
							  struct btree_iter *);

#define for_each_btree_key_max_norestart(_trans, _iter, _btree_id,	\
			   _start, _end, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(_trans, &(_iter)))

#define for_each_btree_key_max_continue_norestart(_trans, _iter, _end, _flags, _k, _ret)\
	for (;									\
	     (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), _end, _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;				\
	     bch2_btree_iter_advance(_trans, &(_iter)))

#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
			   _start, _flags, _k, _ret)			\
	for_each_btree_key_max_norestart(_trans, _iter, _btree_id, _start,\
					  SPOS_MAX, _flags, _k, _ret)

#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id,		\
					     _start, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
				  (_start), (_flags));				\
	     (_k) = bch2_btree_iter_peek_prev_type(_trans, &(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;				\
	     bch2_btree_iter_rewind(_trans, &(_iter)))

#define for_each_btree_key_continue_norestart(_trans, _iter, _flags, _k, _ret)	\
	for_each_btree_key_max_continue_norestart(_trans, _iter, SPOS_MAX, _flags, _k, _ret)

/*
 * This should not be used in a fastpath without first trying _do in
 * nonblocking mode: it will cause excessive transaction restarts and
 * potentially livelock.
 */
#define drop_locks_do(_trans, _do)					\
({									\
	bch2_trans_unlock(_trans);					\
	(_do) ?: bch2_trans_relock(_trans);				\
})

#define allocate_dropping_locks_errcode(_trans, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	int _ret = _do;							\
									\
	if (bch2_err_matches(_ret, ENOMEM)) {				\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, _do);			\
	}								\
	_ret;								\
})

#define allocate_dropping_locks(_trans, _ret, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	typeof(_do) _p = _do;						\
									\
	_ret = 0;							\
	if (unlikely(!_p)) {						\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, ((_p = _do), 0));		\
	}								\
	_p;								\
})

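/*
 * Illustrative sketch (not part of this header): allocating memory while
 * holding btree locks. The first attempt is nonblocking; on failure the
 * macro drops locks and retries with GFP_KERNEL. _gfp is supplied by the
 * macro itself; struct foo is a stand-in for the example:
 *
 *	int ret = 0;
 *	struct foo *f = allocate_dropping_locks(trans, ret,
 *				kzalloc(sizeof(*f), _gfp));
 *	if (!f && !ret)
 *		ret = -ENOMEM;
 */
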
#define bch2_trans_run(_c, _do)						\
({									\
	struct btree_trans *trans = bch2_trans_get(_c);			\
	int _ret = (_do);						\
	bch2_trans_put(trans);						\
	_ret;								\
})

#define bch2_trans_do(_c, _do)	bch2_trans_run(_c, lockrestart_do(trans, _do))

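/*
 * Illustrative sketch (not part of this header): running a one-off
 * transaction against the filesystem. bch2_trans_do() names the transaction
 * variable "trans", combining bch2_trans_get()/bch2_trans_put() with
 * lockrestart_do(); the insert helper and key here are assumptions:
 *
 *	ret = bch2_trans_do(c,
 *		bch2_btree_insert_trans(trans, BTREE_ID_extents, &new->k_i, 0));
 */
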
struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
void bch2_trans_put(struct btree_trans *);

bool bch2_current_has_btree_trans(struct bch_fs *);

extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
unsigned bch2_trans_get_fn_idx(const char *);

#define bch2_trans_get(_c)						\
({									\
	static unsigned trans_fn_idx;					\
									\
	if (unlikely(!trans_fn_idx))					\
		trans_fn_idx = bch2_trans_get_fn_idx(__func__);		\
	__bch2_trans_get(_c, trans_fn_idx);				\
})

void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
void bch2_fs_btree_iter_init_early(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */