Lines Matching +full:non +full:- +full:live
1 // SPDX-License-Identifier: GPL-2.0
23 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_##counter]++; \
40 if (!c->btree_roots_known[0].b) in bch2_recalc_btree_reserve()
46 if (r->b) in bch2_recalc_btree_reserve()
47 reserve += min_t(unsigned, 1, r->b->c.level) * 8; in bch2_recalc_btree_reserve()
50 c->btree_cache.nr_reserve = reserve; in bch2_recalc_btree_reserve()
55 struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]); in btree_cache_can_free()
57 size_t can_free = list->nr; in btree_cache_can_free()
58 if (!list->idx) in btree_cache_can_free()
59 can_free = max_t(ssize_t, 0, can_free - bc->nr_reserve); in btree_cache_can_free()
65 BUG_ON(!list_empty(&b->list)); in btree_node_to_freedlist()
67 if (b->c.lock.readers) in btree_node_to_freedlist()
68 list_add(&b->list, &bc->freed_pcpu); in btree_node_to_freedlist()
70 list_add(&b->list, &bc->freed_nonpcpu); in btree_node_to_freedlist()
75 BUG_ON(!list_empty(&b->list)); in __bch2_btree_node_to_freelist()
76 BUG_ON(!b->data); in __bch2_btree_node_to_freelist()
78 bc->nr_freeable++; in __bch2_btree_node_to_freelist()
79 list_add(&b->list, &bc->freeable); in __bch2_btree_node_to_freelist()
84 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_to_freelist()
86 mutex_lock(&bc->lock); in bch2_btree_node_to_freelist()
88 mutex_unlock(&bc->lock); in bch2_btree_node_to_freelist()
90 six_unlock_write(&b->c.lock); in bch2_btree_node_to_freelist()
91 six_unlock_intent(&b->c.lock); in bch2_btree_node_to_freelist()
96 BUG_ON(!list_empty(&b->list)); in __btree_node_data_free()
104 if (b->data) in __btree_node_data_free()
106 if (b->aux_data) in __btree_node_data_free()
113 kvfree(b->data); in __btree_node_data_free()
114 b->data = NULL; in __btree_node_data_free()
116 kvfree(b->aux_data); in __btree_node_data_free()
118 munmap(b->aux_data, btree_aux_data_bytes(b)); in __btree_node_data_free()
120 b->aux_data = NULL; in __btree_node_data_free()
127 BUG_ON(list_empty(&b->list)); in btree_node_data_free()
128 list_del_init(&b->list); in btree_node_data_free()
129 --bc->nr_freeable; in btree_node_data_free()
137 const u64 *v = arg->key; in bch2_btree_cache_cmp_fn()
139 return b->hash_val == *v ? 0 : 1; in bch2_btree_cache_cmp_fn()
152 BUG_ON(b->data || b->aux_data); in btree_node_data_alloc()
156 b->data = kvmalloc(btree_buf_bytes(b), gfp); in btree_node_data_alloc()
157 if (!b->data) in btree_node_data_alloc()
158 return -BCH_ERR_ENOMEM_btree_node_mem_alloc; in btree_node_data_alloc()
160 b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp); in btree_node_data_alloc()
162 b->aux_data = mmap(NULL, btree_aux_data_bytes(b), in btree_node_data_alloc()
165 if (b->aux_data == MAP_FAILED) in btree_node_data_alloc()
166 b->aux_data = NULL; in btree_node_data_alloc()
168 if (!b->aux_data) { in btree_node_data_alloc()
169 kvfree(b->data); in btree_node_data_alloc()
170 b->data = NULL; in btree_node_data_alloc()
171 return -BCH_ERR_ENOMEM_btree_node_mem_alloc; in btree_node_data_alloc()
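The btree_node_data_alloc() lines above allocate two buffers and unwind the first if the second fails: b->data always comes from kvmalloc(), while b->aux_data comes from kvmalloc() in the kernel build and from mmap() in the userspace build, with MAP_FAILED normalized to NULL so a single error path covers both. A minimal userspace sketch of that allocate-both-or-neither pattern, with illustrative names and plain malloc() standing in for kvmalloc():

#include <stdlib.h>
#include <errno.h>
#include <sys/mman.h>

struct node_bufs {
	void *data;
	void *aux_data;
};

/* Illustrative sketch: allocate both buffers or neither. MAP_FAILED is
 * normalized to NULL so the failure check and cleanup are shared. */
static int node_bufs_alloc(struct node_bufs *b, size_t data_bytes, size_t aux_bytes)
{
	b->data = malloc(data_bytes);
	if (!b->data)
		return -ENOMEM;

	b->aux_data = mmap(NULL, aux_bytes, PROT_READ|PROT_WRITE,
			   MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (b->aux_data == MAP_FAILED)
		b->aux_data = NULL;

	if (!b->aux_data) {
		free(b->data);
		b->data = NULL;
		return -ENOMEM;
	}
	return 0;
}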
185 bkey_btree_ptr_init(&b->key); in __btree_node_mem_alloc()
186 INIT_LIST_HEAD(&b->list); in __btree_node_mem_alloc()
187 INIT_LIST_HEAD(&b->write_blocked); in __btree_node_mem_alloc()
188 b->byte_order = ilog2(c->opts.btree_node_size); in __btree_node_mem_alloc()
194 struct btree_cache *bc = &c->btree_cache; in __bch2_btree_node_mem_alloc()
206 bch2_btree_lock_init(&b->c, 0, GFP_KERNEL); in __bch2_btree_node_mem_alloc()
214 struct bbpos pos = BBPOS(b->c.btree_id, b->key.k.p); in __btree_node_pinned()
216 u64 mask = bc->pinned_nodes_mask[!!b->c.level]; in __btree_node_pinned()
218 return ((mask & BIT_ULL(b->c.btree_id)) && in __btree_node_pinned()
219 bbpos_cmp(bc->pinned_nodes_start, pos) < 0 && in __btree_node_pinned()
220 bbpos_cmp(bc->pinned_nodes_end, pos) >= 0); in __btree_node_pinned()
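The __btree_node_pinned() lines above decide whether a node is pinned: its btree id must have a bit set in the per-level mask (pinned_nodes_mask[!!level]) and its position must compare strictly after pinned_nodes_start and at or before pinned_nodes_end. A standalone sketch of the same membership test; the struct definitions and comparison helpers below are simplified stand-ins, not the bcachefs bbpos/bpos types:

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for the bcachefs position types (illustrative only). */
struct pos   { uint64_t inode, offset; };
struct bbpos { uint32_t btree; struct pos pos; };

static int pos_cmp(struct pos a, struct pos b)
{
	if (a.inode != b.inode)
		return a.inode < b.inode ? -1 : 1;
	if (a.offset != b.offset)
		return a.offset < b.offset ? -1 : 1;
	return 0;
}

static int bbpos_cmp(struct bbpos a, struct bbpos b)
{
	if (a.btree != b.btree)
		return a.btree < b.btree ? -1 : 1;
	return pos_cmp(a.pos, b.pos);
}

/* Pinned iff the btree id is in the per-level mask and start < pos <= end. */
static bool node_pinned(uint64_t level_mask, struct bbpos start, struct bbpos end,
			uint32_t btree_id, struct bbpos pos)
{
	return (level_mask & (1ULL << btree_id)) &&
	       bbpos_cmp(start, pos) < 0 &&
	       bbpos_cmp(end, pos) >= 0;
}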
225 struct btree_cache *bc = &c->btree_cache; in bch2_node_pin()
227 mutex_lock(&bc->lock); in bch2_node_pin()
230 list_move(&b->list, &bc->live[1].list); in bch2_node_pin()
231 bc->live[0].nr--; in bch2_node_pin()
232 bc->live[1].nr++; in bch2_node_pin()
234 mutex_unlock(&bc->lock); in bch2_node_pin()
239 struct btree_cache *bc = &c->btree_cache; in bch2_btree_cache_unpin()
242 mutex_lock(&bc->lock); in bch2_btree_cache_unpin()
243 c->btree_cache.pinned_nodes_mask[0] = 0; in bch2_btree_cache_unpin()
244 c->btree_cache.pinned_nodes_mask[1] = 0; in bch2_btree_cache_unpin()
246 list_for_each_entry_safe(b, n, &bc->live[1].list, list) { in bch2_btree_cache_unpin()
248 list_move(&b->list, &bc->live[0].list); in bch2_btree_cache_unpin()
249 bc->live[0].nr++; in bch2_btree_cache_unpin()
250 bc->live[1].nr--; in bch2_btree_cache_unpin()
253 mutex_unlock(&bc->lock); in bch2_btree_cache_unpin()
256 /* Btree in memory cache - hash table */
260 lockdep_assert_held(&bc->lock); in __bch2_btree_node_hash_remove()
262 int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params); in __bch2_btree_node_hash_remove()
266 b->hash_val = 0; in __bch2_btree_node_hash_remove()
268 if (b->c.btree_id < BTREE_ID_NR) in __bch2_btree_node_hash_remove()
269 --bc->nr_by_btree[b->c.btree_id]; in __bch2_btree_node_hash_remove()
270 --bc->live[btree_node_pinned(b)].nr; in __bch2_btree_node_hash_remove()
271 list_del_init(&b->list); in __bch2_btree_node_hash_remove()
282 BUG_ON(!list_empty(&b->list)); in __bch2_btree_node_hash_insert()
283 BUG_ON(b->hash_val); in __bch2_btree_node_hash_insert()
285 b->hash_val = btree_ptr_hash_val(&b->key); in __bch2_btree_node_hash_insert()
286 int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash, in __bch2_btree_node_hash_insert()
291 if (b->c.btree_id < BTREE_ID_NR) in __bch2_btree_node_hash_insert()
292 bc->nr_by_btree[b->c.btree_id]++; in __bch2_btree_node_hash_insert()
295 mod_bit(BTREE_NODE_pinned, &b->flags, p); in __bch2_btree_node_hash_insert()
297 list_add_tail(&b->list, &bc->live[p].list); in __bch2_btree_node_hash_insert()
298 bc->live[p].nr++; in __bch2_btree_node_hash_insert()
305 b->c.level = level; in bch2_btree_node_hash_insert()
306 b->c.btree_id = id; in bch2_btree_node_hash_insert()
308 mutex_lock(&bc->lock); in bch2_btree_node_hash_insert()
310 mutex_unlock(&bc->lock); in bch2_btree_node_hash_insert()
319 struct bch_fs *c = trans->c; in bch2_btree_node_update_key_early()
329 mutex_lock(&c->btree_cache.lock); in bch2_btree_node_update_key_early()
331 __bch2_btree_node_hash_remove(&c->btree_cache, b); in bch2_btree_node_update_key_early()
333 bkey_copy(&b->key, new); in bch2_btree_node_update_key_early()
334 ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); in bch2_btree_node_update_key_early()
337 mutex_unlock(&c->btree_cache.lock); in bch2_btree_node_update_key_early()
338 six_unlock_read(&b->c.lock); in bch2_btree_node_update_key_early()
350 return rhashtable_lookup_fast(&bc->table, &v, bch_btree_cache_params); in btree_cache_find()
359 struct btree_cache *bc = &c->btree_cache; in __btree_node_reclaim()
362 lockdep_assert_held(&bc->lock); in __btree_node_reclaim()
364 if (b->flags & ((1U << BTREE_NODE_dirty)| in __btree_node_reclaim()
374 return -BCH_ERR_ENOMEM_btree_node_reclaim; in __btree_node_reclaim()
382 if (!six_trylock_intent(&b->c.lock)) { in __btree_node_reclaim()
384 return -BCH_ERR_ENOMEM_btree_node_reclaim; in __btree_node_reclaim()
387 if (!six_trylock_write(&b->c.lock)) { in __btree_node_reclaim()
393 if (b->flags & ((1U << BTREE_NODE_read_in_flight)| in __btree_node_reclaim()
402 six_unlock_write(&b->c.lock); in __btree_node_reclaim()
403 six_unlock_intent(&b->c.lock); in __btree_node_reclaim()
428 * - unless btree verify mode is enabled, since it runs out of in __btree_node_reclaim()
438 six_unlock_write(&b->c.lock); in __btree_node_reclaim()
439 six_unlock_intent(&b->c.lock); in __btree_node_reclaim()
443 if (b->hash_val && !ret) in __btree_node_reclaim()
447 six_unlock_write(&b->c.lock); in __btree_node_reclaim()
449 six_unlock_intent(&b->c.lock); in __btree_node_reclaim()
450 ret = -BCH_ERR_ENOMEM_btree_node_reclaim; in __btree_node_reclaim()
467 struct btree_cache_list *list = shrink->private_data; in bch2_btree_cache_scan()
468 struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]); in bch2_btree_cache_scan()
471 unsigned long nr = sc->nr_to_scan; in bch2_btree_cache_scan()
477 bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >= list->nr * 3 / 4; in bch2_btree_cache_scan()
482 mutex_lock(&bc->lock); in bch2_btree_cache_scan()
486 * It's _really_ critical that we don't free too many btree nodes - we in bch2_btree_cache_scan()
496 list_for_each_entry_safe(b, t, &bc->freeable, list) { in bch2_btree_cache_scan()
511 six_unlock_write(&b->c.lock); in bch2_btree_cache_scan()
512 six_unlock_intent(&b->c.lock); in bch2_btree_cache_scan()
514 bc->nr_freed++; in bch2_btree_cache_scan()
518 list_for_each_entry_safe(b, t, &list->list, list) { in bch2_btree_cache_scan()
523 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++; in bch2_btree_cache_scan()
524 --touched; in bch2_btree_cache_scan()
530 bc->nr_freed++; in bch2_btree_cache_scan()
532 six_unlock_write(&b->c.lock); in bch2_btree_cache_scan()
533 six_unlock_intent(&b->c.lock); in bch2_btree_cache_scan()
541 six_trylock_read(&b->c.lock)) { in bch2_btree_cache_scan()
542 list_move(&list->list, &b->list); in bch2_btree_cache_scan()
543 mutex_unlock(&bc->lock); in bch2_btree_cache_scan()
545 six_unlock_read(&b->c.lock); in bch2_btree_cache_scan()
548 mutex_lock(&bc->lock); in bch2_btree_cache_scan()
556 if (&t->list != &list->list) in bch2_btree_cache_scan()
557 list_move_tail(&list->list, &t->list); in bch2_btree_cache_scan()
559 mutex_unlock(&bc->lock); in bch2_btree_cache_scan()
563 trace_and_count(c, btree_cache_scan, sc->nr_to_scan, can_free, ret); in bch2_btree_cache_scan()
570 struct btree_cache_list *list = shrink->private_data; in bch2_btree_cache_count()
580 struct btree_cache *bc = &c->btree_cache; in bch2_fs_btree_cache_exit()
584 shrinker_free(bc->live[1].shrink); in bch2_fs_btree_cache_exit()
585 shrinker_free(bc->live[0].shrink); in bch2_fs_btree_cache_exit()
589 mutex_lock(&bc->lock); in bch2_fs_btree_cache_exit()
591 if (c->verify_data) in bch2_fs_btree_cache_exit()
592 list_move(&c->verify_data->list, &bc->live[0].list); in bch2_fs_btree_cache_exit()
594 kvfree(c->verify_ondisk); in bch2_fs_btree_cache_exit()
599 if (r->b) in bch2_fs_btree_cache_exit()
600 list_add(&r->b->list, &bc->live[0].list); in bch2_fs_btree_cache_exit()
603 list_for_each_entry_safe(b, t, &bc->live[1].list, list) in bch2_fs_btree_cache_exit()
605 list_for_each_entry_safe(b, t, &bc->live[0].list, list) in bch2_fs_btree_cache_exit()
608 list_for_each_entry_safe(b, t, &bc->freeable, list) { in bch2_fs_btree_cache_exit()
615 BUG_ON(!bch2_journal_error(&c->journal) && in bch2_fs_btree_cache_exit()
616 atomic_long_read(&c->btree_cache.nr_dirty)); in bch2_fs_btree_cache_exit()
618 list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu); in bch2_fs_btree_cache_exit()
620 list_for_each_entry_safe(b, t, &bc->freed_nonpcpu, list) { in bch2_fs_btree_cache_exit()
621 list_del(&b->list); in bch2_fs_btree_cache_exit()
622 six_lock_exit(&b->c.lock); in bch2_fs_btree_cache_exit()
626 mutex_unlock(&bc->lock); in bch2_fs_btree_cache_exit()
629 for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++) in bch2_fs_btree_cache_exit()
630 BUG_ON(bc->nr_by_btree[i]); in bch2_fs_btree_cache_exit()
631 BUG_ON(bc->live[0].nr); in bch2_fs_btree_cache_exit()
632 BUG_ON(bc->live[1].nr); in bch2_fs_btree_cache_exit()
633 BUG_ON(bc->nr_freeable); in bch2_fs_btree_cache_exit()
635 if (bc->table_init_done) in bch2_fs_btree_cache_exit()
636 rhashtable_destroy(&bc->table); in bch2_fs_btree_cache_exit()
641 struct btree_cache *bc = &c->btree_cache; in bch2_fs_btree_cache_init()
646 ret = rhashtable_init(&bc->table, &bch_btree_cache_params); in bch2_fs_btree_cache_init()
650 bc->table_init_done = true; in bch2_fs_btree_cache_init()
654 for (i = 0; i < bc->nr_reserve; i++) in bch2_fs_btree_cache_init()
658 list_splice_init(&bc->live[0].list, &bc->freeable); in bch2_fs_btree_cache_init()
660 mutex_init(&c->verify_lock); in bch2_fs_btree_cache_init()
662 shrink = shrinker_alloc(0, "%s-btree_cache", c->name); in bch2_fs_btree_cache_init()
665 bc->live[0].shrink = shrink; in bch2_fs_btree_cache_init()
666 shrink->count_objects = bch2_btree_cache_count; in bch2_fs_btree_cache_init()
667 shrink->scan_objects = bch2_btree_cache_scan; in bch2_fs_btree_cache_init()
668 shrink->seeks = 2; in bch2_fs_btree_cache_init()
669 shrink->private_data = &bc->live[0]; in bch2_fs_btree_cache_init()
672 shrink = shrinker_alloc(0, "%s-btree_cache-pinned", c->name); in bch2_fs_btree_cache_init()
675 bc->live[1].shrink = shrink; in bch2_fs_btree_cache_init()
676 shrink->count_objects = bch2_btree_cache_count; in bch2_fs_btree_cache_init()
677 shrink->scan_objects = bch2_btree_cache_scan; in bch2_fs_btree_cache_init()
678 shrink->seeks = 8; in bch2_fs_btree_cache_init()
679 shrink->private_data = &bc->live[1]; in bch2_fs_btree_cache_init()
684 return -BCH_ERR_ENOMEM_fs_btree_cache_init; in bch2_fs_btree_cache_init()
689 mutex_init(&bc->lock); in bch2_fs_btree_cache_init_early()
690 for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) { in bch2_fs_btree_cache_init_early()
691 bc->live[i].idx = i; in bch2_fs_btree_cache_init_early()
692 INIT_LIST_HEAD(&bc->live[i].list); in bch2_fs_btree_cache_init_early()
694 INIT_LIST_HEAD(&bc->freeable); in bch2_fs_btree_cache_init_early()
695 INIT_LIST_HEAD(&bc->freed_pcpu); in bch2_fs_btree_cache_init_early()
696 INIT_LIST_HEAD(&bc->freed_nonpcpu); in bch2_fs_btree_cache_init_early()
707 struct bch_fs *c = trans->c; in bch2_btree_cache_cannibalize_unlock()
708 struct btree_cache *bc = &c->btree_cache; in bch2_btree_cache_cannibalize_unlock()
710 if (bc->alloc_lock == current) { in bch2_btree_cache_cannibalize_unlock()
712 bc->alloc_lock = NULL; in bch2_btree_cache_cannibalize_unlock()
713 closure_wake_up(&bc->alloc_wait); in bch2_btree_cache_cannibalize_unlock()
719 struct bch_fs *c = trans->c; in bch2_btree_cache_cannibalize_lock()
720 struct btree_cache *bc = &c->btree_cache; in bch2_btree_cache_cannibalize_lock()
724 if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current) in bch2_btree_cache_cannibalize_lock()
729 return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock; in bch2_btree_cache_cannibalize_lock()
732 closure_wait(&bc->alloc_wait, cl); in bch2_btree_cache_cannibalize_lock()
736 if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current) { in bch2_btree_cache_cannibalize_lock()
738 closure_wake_up(&bc->alloc_wait); in bch2_btree_cache_cannibalize_lock()
743 return -BCH_ERR_btree_cache_cannibalize_lock_blocked; in bch2_btree_cache_cannibalize_lock()
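The cannibalize-lock lines above implement a single-owner lock: bc->alloc_lock is claimed with try_cmpxchg() against NULL (claiming it again while already the owner also succeeds), and on contention the caller parks on the bc->alloc_wait closure waitlist until the holder clears the field and wakes waiters. A rough userspace analogue of the claim/release steps using C11 atomics; the type and names are illustrative, and the closure waitlist is only noted in comments rather than modeled:

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative single-owner lock: the owner pointer doubles as the lock word. */
struct cannibalize_lock {
	_Atomic(void *) owner;
};

/* Claim the lock for 'task'; succeeds if it was free or we already own it. */
static bool cannibalize_trylock(struct cannibalize_lock *l, void *task)
{
	void *old = NULL;

	return atomic_compare_exchange_strong(&l->owner, &old, task) || old == task;
}

static void cannibalize_unlock(struct cannibalize_lock *l, void *task)
{
	/* Only the owner releases; in the kernel code this is also the point
	 * where waiters parked on the closure waitlist are woken. */
	if (atomic_load(&l->owner) == task)
		atomic_store(&l->owner, NULL);
}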
752 struct btree_cache *bc = &c->btree_cache; in btree_node_cannibalize()
755 for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) in btree_node_cannibalize()
756 list_for_each_entry_reverse(b, &bc->live[i].list, list) in btree_node_cannibalize()
761 for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) in btree_node_cannibalize()
762 list_for_each_entry_reverse(b, &bc->live[i].list, list) in btree_node_cannibalize()
767 * Rare case: all nodes were intent-locked. in btree_node_cannibalize()
768 * Just busy-wait. in btree_node_cannibalize()
777 struct bch_fs *c = trans->c; in bch2_btree_node_mem_alloc()
778 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_mem_alloc()
780 ? &bc->freed_pcpu in bch2_btree_node_mem_alloc()
781 : &bc->freed_nonpcpu; in bch2_btree_node_mem_alloc()
785 mutex_lock(&bc->lock); in bch2_btree_node_mem_alloc()
793 list_del_init(&b->list); in bch2_btree_node_mem_alloc()
799 bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0, GFP_NOWAIT); in bch2_btree_node_mem_alloc()
801 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
806 bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL); in bch2_btree_node_mem_alloc()
807 mutex_lock(&bc->lock); in bch2_btree_node_mem_alloc()
810 BUG_ON(!six_trylock_intent(&b->c.lock)); in bch2_btree_node_mem_alloc()
811 BUG_ON(!six_trylock_write(&b->c.lock)); in bch2_btree_node_mem_alloc()
818 list_for_each_entry(b2, &bc->freeable, list) in bch2_btree_node_mem_alloc()
820 swap(b->data, b2->data); in bch2_btree_node_mem_alloc()
821 swap(b->aux_data, b2->aux_data); in bch2_btree_node_mem_alloc()
823 list_del_init(&b2->list); in bch2_btree_node_mem_alloc()
824 --bc->nr_freeable; in bch2_btree_node_mem_alloc()
826 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
828 six_unlock_write(&b2->c.lock); in bch2_btree_node_mem_alloc()
829 six_unlock_intent(&b2->c.lock); in bch2_btree_node_mem_alloc()
833 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
842 BUG_ON(!list_empty(&b->list)); in bch2_btree_node_mem_alloc()
847 b->flags = 0; in bch2_btree_node_mem_alloc()
848 b->written = 0; in bch2_btree_node_mem_alloc()
849 b->nsets = 0; in bch2_btree_node_mem_alloc()
850 b->sib_u64s[0] = 0; in bch2_btree_node_mem_alloc()
851 b->sib_u64s[1] = 0; in bch2_btree_node_mem_alloc()
852 b->whiteout_u64s = 0; in bch2_btree_node_mem_alloc()
856 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc], in bch2_btree_node_mem_alloc()
867 mutex_lock(&bc->lock); in bch2_btree_node_mem_alloc()
870 if (bc->alloc_lock == current) { in bch2_btree_node_mem_alloc()
876 swap(b->data, b2->data); in bch2_btree_node_mem_alloc()
877 swap(b->aux_data, b2->aux_data); in bch2_btree_node_mem_alloc()
879 six_unlock_write(&b2->c.lock); in bch2_btree_node_mem_alloc()
880 six_unlock_intent(&b2->c.lock); in bch2_btree_node_mem_alloc()
885 BUG_ON(!list_empty(&b->list)); in bch2_btree_node_mem_alloc()
886 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
892 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
893 return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc); in bch2_btree_node_mem_alloc()
905 struct bch_fs *c = trans->c; in bch2_btree_node_fill()
906 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_fill()
915 if (unlikely(!bkey_is_btree_ptr(&k->k))) { in bch2_btree_node_fill()
919 …int ret = bch2_fs_topology_error(c, "attempting to get btree node with non-btree key %s", buf.buf); in bch2_btree_node_fill()
924 if (unlikely(k->k.u64s > BKEY_BTREE_PTR_U64s_MAX)) { in bch2_btree_node_fill()
948 trans->memory_allocation_failure = true; in bch2_btree_node_fill()
956 bkey_copy(&b->key, k); in bch2_btree_node_fill()
961 b->hash_val = 0; in bch2_btree_node_fill()
963 mutex_lock(&bc->lock); in bch2_btree_node_fill()
965 mutex_unlock(&bc->lock); in bch2_btree_node_fill()
967 six_unlock_write(&b->c.lock); in bch2_btree_node_fill()
968 six_unlock_intent(&b->c.lock); in bch2_btree_node_fill()
973 six_unlock_write(&b->c.lock); in bch2_btree_node_fill()
976 u32 seq = six_lock_seq(&b->c.lock); in bch2_btree_node_fill()
979 six_unlock_intent(&b->c.lock); in bch2_btree_node_fill()
991 if (!six_relock_type(&b->c.lock, lock_type, seq)) in bch2_btree_node_fill()
996 six_lock_downgrade(&b->c.lock); in bch2_btree_node_fill()
1006 if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations) in btree_bad_header()
1011 bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); in btree_bad_header()
1013 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); in btree_bad_header()
1016 bch2_btree_id_level_to_text(&buf, BTREE_NODE_ID(b->data), BTREE_NODE_LEVEL(b->data)); in btree_bad_header()
1018 bch2_bpos_to_text(&buf, b->data->min_key); in btree_bad_header()
1021 bch2_bpos_to_text(&buf, b->data->max_key); in btree_bad_header()
1030 if (b->c.btree_id != BTREE_NODE_ID(b->data) || in btree_check_header()
1031 b->c.level != BTREE_NODE_LEVEL(b->data) || in btree_check_header()
1032 !bpos_eq(b->data->max_key, b->key.k.p) || in btree_check_header()
1033 (b->key.k.type == KEY_TYPE_btree_ptr_v2 && in btree_check_header()
1034 !bpos_eq(b->data->min_key, in btree_check_header()
1035 bkey_i_to_btree_ptr_v2(&b->key)->v.min_key))) in btree_check_header()
1044 struct bch_fs *c = trans->c; in __bch2_btree_node_get()
1045 struct btree_cache *bc = &c->btree_cache; in __bch2_btree_node_get()
1059 b = bch2_btree_node_fill(trans, path, k, path->btree_id, in __bch2_btree_node_get()
1073 ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip); in __bch2_btree_node_get()
1079 if (unlikely(b->hash_val != btree_ptr_hash_val(k) || in __bch2_btree_node_get()
1080 b->c.level != level || in __bch2_btree_node_get()
1082 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1096 u32 seq = six_lock_seq(&b->c.lock); in __bch2_btree_node_get()
1098 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1112 if (!six_relock_type(&b->c.lock, lock_type, seq)) in __bch2_btree_node_get()
1120 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1125 prefetch(b->aux_data); in __bch2_btree_node_get()
1128 void *p = (u64 *) b->aux_data + t->aux_data_offset; in __bch2_btree_node_get()
1136 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1137 return ERR_PTR(-BCH_ERR_btree_node_read_err_cached); in __bch2_btree_node_get()
1140 EBUG_ON(b->c.btree_id != path->btree_id); in __bch2_btree_node_get()
1141 EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); in __bch2_btree_node_get()
1148 * bch2_btree_node_get - find a btree node in the cache and lock it, reading it
1168 struct bch_fs *c = trans->c; in bch2_btree_node_get()
1177 * Check b->hash_val _before_ calling btree_node_lock() - this might not in bch2_btree_node_get()
1181 if (unlikely(!c->opts.btree_node_mem_ptr_optimization || in bch2_btree_node_get()
1183 b->hash_val != btree_ptr_hash_val(k))) in bch2_btree_node_get()
1189 ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip); in bch2_btree_node_get()
1195 if (unlikely(b->hash_val != btree_ptr_hash_val(k) || in bch2_btree_node_get()
1196 b->c.level != level || in bch2_btree_node_get()
1198 six_unlock_type(&b->c.lock, lock_type); in bch2_btree_node_get()
1207 six_unlock_type(&b->c.lock, lock_type); in bch2_btree_node_get()
1211 prefetch(b->aux_data); in bch2_btree_node_get()
1214 void *p = (u64 *) b->aux_data + t->aux_data_offset; in bch2_btree_node_get()
1226 six_unlock_type(&b->c.lock, lock_type); in bch2_btree_node_get()
1227 return ERR_PTR(-BCH_ERR_btree_node_read_err_cached); in bch2_btree_node_get()
1230 EBUG_ON(b->c.btree_id != path->btree_id); in bch2_btree_node_get()
1231 EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); in bch2_btree_node_get()
1243 struct bch_fs *c = trans->c; in bch2_btree_node_get_noiter()
1244 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_get_noiter()
1250 if (c->opts.btree_node_mem_ptr_optimization) { in bch2_btree_node_get_noiter()
1276 ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read, _THIS_IP_); in bch2_btree_node_get_noiter()
1282 if (unlikely(b->hash_val != btree_ptr_hash_val(k) || in bch2_btree_node_get_noiter()
1283 b->c.btree_id != btree_id || in bch2_btree_node_get_noiter()
1284 b->c.level != level)) { in bch2_btree_node_get_noiter()
1285 six_unlock_read(&b->c.lock); in bch2_btree_node_get_noiter()
1293 prefetch(b->aux_data); in bch2_btree_node_get_noiter()
1296 void *p = (u64 *) b->aux_data + t->aux_data_offset; in bch2_btree_node_get_noiter()
1308 six_unlock_read(&b->c.lock); in bch2_btree_node_get_noiter()
1309 b = ERR_PTR(-BCH_ERR_btree_node_read_err_cached); in bch2_btree_node_get_noiter()
1313 EBUG_ON(b->c.btree_id != btree_id); in bch2_btree_node_get_noiter()
1314 EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); in bch2_btree_node_get_noiter()
1326 struct bch_fs *c = trans->c; in bch2_btree_node_prefetch()
1327 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_prefetch()
1342 six_unlock_read(&b->c.lock); in bch2_btree_node_prefetch()
1348 struct bch_fs *c = trans->c; in bch2_btree_node_evict()
1349 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_evict()
1356 BUG_ON(b == btree_node_root(trans->c, b)); in bch2_btree_node_evict()
1366 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); in bch2_btree_node_evict()
1367 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write); in bch2_btree_node_evict()
1368 if (unlikely(b->hash_val != btree_ptr_hash_val(k))) in bch2_btree_node_evict()
1373 six_unlock_write(&b->c.lock); in bch2_btree_node_evict()
1374 six_unlock_intent(&b->c.lock); in bch2_btree_node_evict()
1380 mutex_lock(&bc->lock); in bch2_btree_node_evict()
1383 mutex_unlock(&bc->lock); in bch2_btree_node_evict()
1385 six_unlock_write(&b->c.lock); in bch2_btree_node_evict()
1386 six_unlock_intent(&b->c.lock); in bch2_btree_node_evict()
1416 prt_printf(out, "%u", r->level); in __bch2_btree_pos_to_text()
1426 __bch2_btree_pos_to_text(out, c, b->c.btree_id, b->c.level, bkey_i_to_s_c(&b->key)); in bch2_btree_pos_to_text()
1437 prt_printf(out, "l %u ", b->c.level); in bch2_btree_node_to_text()
1438 bch2_bpos_to_text(out, b->data->min_key); in bch2_btree_node_to_text()
1439 prt_printf(out, " - "); in bch2_btree_node_to_text()
1440 bch2_bpos_to_text(out, b->data->max_key); in bch2_btree_node_to_text()
1443 bch2_val_to_text(out, c, bkey_i_to_s_c(&b->key)); in bch2_btree_node_to_text()
1448 bch2_bkey_format_to_text(out, &b->format); in bch2_btree_node_to_text()
1458 b->unpack_fn_len, in bch2_btree_node_to_text()
1459 b->nr.live_u64s * sizeof(u64), in bch2_btree_node_to_text()
1460 btree_buf_bytes(b) - sizeof(struct btree_node), in bch2_btree_node_to_text()
1461 b->nr.live_u64s * 100 / btree_max_u64s(c), in bch2_btree_node_to_text()
1462 b->sib_u64s[0], in bch2_btree_node_to_text()
1463 b->sib_u64s[1], in bch2_btree_node_to_text()
1464 c->btree_foreground_merge_threshold, in bch2_btree_node_to_text()
1465 b->nr.packed_keys, in bch2_btree_node_to_text()
1466 b->nr.unpacked_keys, in bch2_btree_node_to_text()
1475 prt_human_readable_u64(out, nr * c->opts.btree_node_size); in prt_btree_cache_line()
1490 if (!out->nr_tabstops) in bch2_btree_cache_to_text()
1493 prt_btree_cache_line(out, c, "live:", bc->live[0].nr); in bch2_btree_cache_to_text()
1494 prt_btree_cache_line(out, c, "pinned:", bc->live[1].nr); in bch2_btree_cache_to_text()
1495 prt_btree_cache_line(out, c, "freeable:", bc->nr_freeable); in bch2_btree_cache_to_text()
1496 prt_btree_cache_line(out, c, "dirty:", atomic_long_read(&bc->nr_dirty)); in bch2_btree_cache_to_text()
1497 prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock); in bch2_btree_cache_to_text()
1500 for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++) { in bch2_btree_cache_to_text()
1503 prt_human_readable_u64(out, bc->nr_by_btree[i] * c->opts.btree_node_size); in bch2_btree_cache_to_text()
1504 prt_printf(out, " (%zu)\n", bc->nr_by_btree[i]); in bch2_btree_cache_to_text()
1508 prt_printf(out, "freed:\t%zu\n", bc->nr_freed); in bch2_btree_cache_to_text()
1511 for (unsigned i = 0; i < ARRAY_SIZE(bc->not_freed); i++) in bch2_btree_cache_to_text()
1513 bch2_btree_cache_not_freed_reasons_strs[i], bc->not_freed[i]); in bch2_btree_cache_to_text()