
Searched refs:btree (Results 1 – 25 of 99) sorted by relevance


/linux/fs/hpfs/
anode.c
15 struct bplus_header *btree, unsigned sec, in hpfs_bplus_lookup() argument
24 if (bp_internal(btree)) { in hpfs_bplus_lookup()
25 for (i = 0; i < btree->n_used_nodes; i++) in hpfs_bplus_lookup()
26 if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) { in hpfs_bplus_lookup()
27 a = le32_to_cpu(btree->u.internal[i].down); in hpfs_bplus_lookup()
30 btree = &anode->btree; in hpfs_bplus_lookup()
37 for (i = 0; i < btree->n_used_nodes; i++) in hpfs_bplus_lookup()
38 if (le32_to_cpu(btree->u.external[i].file_secno) <= sec && in hpfs_bplus_lookup()
39 … le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) { in hpfs_bplus_lookup()
40 …a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_sec… in hpfs_bplus_lookup()
[all …]
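
The anode.c matches above outline the HPFS B+tree lookup: internal nodes are scanned for the first entry whose file_secno exceeds the wanted sector and the search descends into that child (re-reading it via hpfs_map_anode and converting fields with le32_to_cpu in the real code), while external (leaf) nodes are scanned for an extent run covering the sector. A minimal standalone sketch of that two-phase pattern, using simplified in-memory types of my own rather than the on-disk structures, might look like this:

#include <stdint.h>

/* Hypothetical in-memory model of the two node kinds seen above:
 * internal nodes route by file_secno, external (leaf) nodes hold extents. */
struct bt_node;

struct internal_entry {
    uint32_t file_secno;      /* keys below this sector live in 'down' */
    struct bt_node *down;     /* child node (a disk reference in the real code) */
};

struct external_entry {
    uint32_t file_secno;      /* first file sector of the run */
    uint32_t disk_secno;      /* first disk sector of the run */
    uint32_t length;          /* run length in sectors */
};

struct bt_node {
    int internal;             /* bp_internal() in the real code */
    unsigned n_used_nodes;
    union {
        struct internal_entry internal_ent[12];
        struct external_entry external[8];
    } u;
};

/* Walk the internal levels, then scan the leaf for an extent covering 'sec'.
 * Returns the mapped disk sector, or 0 if the sector is not mapped. */
static uint32_t bplus_lookup(const struct bt_node *node, uint32_t sec)
{
    while (node->internal) {
        const struct bt_node *next = NULL;

        for (unsigned i = 0; i < node->n_used_nodes; i++)
            if (node->u.internal_ent[i].file_secno > sec) {
                next = node->u.internal_ent[i].down;
                break;
            }
        if (!next)
            return 0;
        node = next;          /* the real code re-reads the child anode here */
    }

    for (unsigned i = 0; i < node->n_used_nodes; i++) {
        const struct external_entry *e = &node->u.external[i];

        if (e->file_secno <= sec && e->file_secno + e->length > sec)
            return e->disk_secno + (sec - e->file_secno);
    }
    return 0;
}
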
map.c
180 if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes != in hpfs_map_fnode()
181 (bp_internal(&fnode->btree) ? 12 : 8)) { in hpfs_map_fnode()
187 if (le16_to_cpu(fnode->btree.first_free) != in hpfs_map_fnode()
188 8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) { in hpfs_map_fnode()
235 if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes != in hpfs_map_anode()
236 (bp_internal(&anode->btree) ? 60 : 40)) { in hpfs_map_anode()
240 if (le16_to_cpu(anode->btree.first_free) != in hpfs_map_anode()
241 8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) { in hpfs_map_anode()
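
The map.c matches are consistency checks on the same B+tree header: used plus free entries must equal the node's capacity (12/8 for an fnode, 60/40 for an anode, internal vs. external), and first_free must sit exactly past the 8-byte header plus the used entries, at 8 bytes per internal entry and 12 per external. A standalone sketch of that arithmetic, with hypothetical field and parameter names:

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical header mirroring the fields checked above. */
struct node_header {
    bool internal;            /* bp_internal() in the real code */
    uint8_t n_used_nodes;
    uint8_t n_free_nodes;
    uint16_t first_free;      /* byte offset of the free area */
};

/* Capacities differ per node type: the matches show 12/8 for an fnode
 * and 60/40 for an anode, so they are passed in here. */
static bool node_header_valid(const struct node_header *h,
                              unsigned capacity_internal,
                              unsigned capacity_external)
{
    unsigned capacity = h->internal ? capacity_internal : capacity_external;
    unsigned entry_size = h->internal ? 8 : 12;   /* bytes per entry */

    if ((unsigned)h->n_used_nodes + h->n_free_nodes != capacity)
        return false;
    if (h->first_free != 8 + h->n_used_nodes * entry_size)
        return false;
    return true;
}
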
/linux/fs/bcachefs/
bset.h
193 static inline size_t btree_keys_cachelines(const struct btree *b) in btree_keys_cachelines()
198 static inline size_t btree_aux_data_bytes(const struct btree *b) in btree_aux_data_bytes()
203 static inline size_t btree_aux_data_u64s(const struct btree *b) in btree_aux_data_u64s()
229 static inline void bch2_bset_set_no_aux_tree(struct btree *b, in bch2_bset_set_no_aux_tree()
241 static inline void btree_node_set_format(struct btree *b, in btree_node_set_format()
257 static inline struct bset *bset_next_set(struct btree *b, in bset_next_set()
267 void bch2_btree_keys_init(struct btree *);
269 void bch2_bset_init_first(struct btree *, struct bset *);
270 void bch2_bset_init_next(struct btree *, struct btree_node_entry *);
271 void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);
[all …]
btree_update_interior.h
13 int bch2_btree_node_check_topology(struct btree_trans *, struct btree *);
73 struct btree *b;
86 struct btree *b[BTREE_UPDATE_NODES_MAX];
101 struct btree *new_nodes[BTREE_UPDATE_NODES_MAX];
104 struct btree *old_nodes[BTREE_UPDATE_NODES_MAX];
125 struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
127 struct btree *,
143 struct btree *b; in bch2_foreground_maybe_merge_sibling()
171 struct btree *, unsigned);
172 void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
[all …]
btree_io.h
14 struct btree;
18 static inline void set_btree_node_dirty_acct(struct bch_fs *c, struct btree *b) in set_btree_node_dirty_acct()
24 static inline void clear_btree_node_dirty_acct(struct bch_fs *c, struct btree *b) in clear_btree_node_dirty_acct()
39 struct btree *b;
58 void bch2_btree_node_io_unlock(struct btree *);
59 void bch2_btree_node_io_lock(struct btree *);
60 void __bch2_btree_node_wait_on_read(struct btree *);
61 void __bch2_btree_node_wait_on_write(struct btree *);
62 void bch2_btree_node_wait_on_read(struct btree *);
63 void bch2_btree_node_wait_on_write(struct btree *);
[all …]
btree_cache.h
15 void bch2_btree_node_to_freelist(struct bch_fs *, struct btree *);
17 void __bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
18 void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
20 int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
21 int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
24 void bch2_node_pin(struct bch_fs *, struct btree *);
33 struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
34 struct btree *bch2_btree_node_mem_alloc(struct btree_trans *, bool);
36 struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_path *,
40 struct btree *bch2_btree_node_get_noiter(struct btree_trans *, const struct bkey_i *,
[all …]
btree_write_buffer.h
42 return cmp_int(l->btree, r->btree) ?: bpos_cmp(l->k.k.p, r->k.k.p); in wb_key_cmp()
49 enum btree_id btree, struct bkey_i_accounting *k) in bch2_accounting_key_to_wb() argument
53 search.btree = btree; in bch2_accounting_key_to_wb()
61 return bch2_accounting_key_to_wb_slowpath(c, btree, k); in bch2_accounting_key_to_wb()
74 enum btree_id btree, struct bkey_i *k) in __bch2_journal_key_to_wb() argument
77 return bch2_journal_key_to_wb_slowpath(c, dst, btree, k); in __bch2_journal_key_to_wb()
81 wb_k->btree = btree; in __bch2_journal_key_to_wb()
90 enum btree_id btree, struct bkey_i *k) in bch2_journal_key_to_wb() argument
95 ? bch2_accounting_key_to_wb(c, btree, bkey_i_to_accounting(k)) in bch2_journal_key_to_wb()
96 : __bch2_journal_key_to_wb(c, dst, btree, k); in bch2_journal_key_to_wb()
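
The wb_key_cmp match shows a comparator idiom that recurs in these headers: compare the btree ID first with cmp_int(), and only on a tie fall through to the position comparison via GCC's binary ?: operator. A standalone sketch of the idiom, with a hypothetical key type whose position is a plain integer rather than a struct bpos:

#include <stdint.h>

/* Hypothetical write-buffer key: btree ID first, then a position in that btree. */
struct wb_key {
    uint32_t btree;
    uint64_t pos;
};

/* Three-way compare helper, equivalent in spirit to the kernel's cmp_int(). */
static int cmp_u64(uint64_t l, uint64_t r)
{
    return (l > r) - (l < r);
}

/* Order keys by btree ID, breaking ties by position.
 * 'a ?: b' is the GNU binary conditional: use 'a' unless it is 0, else 'b'. */
static int wb_key_cmp_sketch(const struct wb_key *l, const struct wb_key *r)
{
    return cmp_u64(l->btree, r->btree) ?: cmp_u64(l->pos, r->pos);
}

The neighbouring matches show bch2_journal_key_to_wb() routing accounting keys through bch2_accounting_key_to_wb() and everything else through __bch2_journal_key_to_wb().
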
btree_gc.h
39 static inline struct gc_pos gc_pos_btree(enum btree_id btree, unsigned level, in gc_pos_btree() argument
44 .btree = btree, in gc_pos_btree()
50 static inline int gc_btree_order(enum btree_id btree) in gc_btree_order() argument
52 if (btree == BTREE_ID_alloc) in gc_btree_order()
54 if (btree == BTREE_ID_stripes) in gc_btree_order()
56 return btree; in gc_btree_order()
62 cmp_int(gc_btree_order(l.btree), in gc_pos_cmp()
63 gc_btree_order(r.btree)) ?: in gc_pos_cmp()
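
gc_pos_btree() and gc_btree_order() suggest that GC compares positions using a remapped btree order in which the alloc and stripes btrees are special-cased (their return values are elided in the listing) while every other btree keeps plain ID order. A rough standalone sketch of that shape, with made-up IDs and sentinel values used purely for illustration:

/* Hypothetical btree IDs; only their relative ordering matters here. */
enum btree_id_sketch { BT_EXTENTS, BT_ALLOC, BT_STRIPES, BT_INODES };

/* Mirror the shape of gc_btree_order(): special-case two btrees, keep plain
 * ID order for the rest. The -2/-1 sentinels are invented for this sketch;
 * the real return values are elided in the listing above. */
static int gc_btree_order_sketch(enum btree_id_sketch id)
{
    if (id == BT_ALLOC)
        return -2;
    if (id == BT_STRIPES)
        return -1;
    return id;
}

/* gc_pos_cmp() then chains on this mapping before any finer-grained fields. */
static int gc_pos_cmp_sketch(enum btree_id_sketch l, enum btree_id_sketch r)
{
    int lo = gc_btree_order_sketch(l), ro = gc_btree_order_sketch(r);

    return (lo > ro) - (lo < ro);
}
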
bset.c
22 struct btree *);
34 struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k) in bch2_bkey_to_bset()
55 void bch2_dump_bset(struct bch_fs *c, struct btree *b, in bch2_dump_bset()
104 void bch2_dump_btree_node(struct bch_fs *c, struct btree *b) in bch2_dump_btree_node()
112 void bch2_dump_btree_node_iter(struct btree *b, in bch2_dump_btree_node_iter()
135 struct btree_nr_keys bch2_btree_node_count_keys(struct btree *b) in bch2_btree_node_count_keys()
149 void __bch2_verify_btree_nr_keys(struct btree *b) in __bch2_verify_btree_nr_keys()
157 struct btree *b) in bch2_btree_node_iter_next_check()
194 struct btree *b) in bch2_btree_node_iter_verify()
240 void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where, in bch2_verify_insert_pos()
[all …]
btree_types.h
72 struct btree { struct
336 struct btree *b;
420 ? container_of(b, struct btree, c)->key.k.p in btree_node_pos()
613 static inline bool btree_node_ ## flag(struct btree *b) \
616 static inline void set_btree_node_ ## flag(struct btree *b) \
619 static inline void clear_btree_node_ ## flag(struct btree *b) \
625 static inline struct btree_write *btree_current_write(struct btree *b) in BTREE_FLAGS()
630 static inline struct btree_write *btree_prev_write(struct btree *b) in btree_prev_write()
635 static inline struct bset_tree *bset_tree_last(struct btree *b) in bset_tree_last()
642 __btree_node_offset_to_ptr(const struct btree *b, u16 offset) in __btree_node_offset_to_ptr()
[all …]
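
Lines 613-619 of btree_types.h (and the near-identical block in bcache's btree.h further down) stamp out per-flag test/set/clear accessors from a single macro. A self-contained sketch of that pattern, with a hypothetical flags word and flag names, and plain bit operations where the kernel uses atomic bit helpers:

#include <stdbool.h>

/* Hypothetical object with a flags word; the real struct btree keeps its
 * state bits in b->flags. */
struct node_sketch {
    unsigned long flags;
};

enum node_flag_sketch {
    NODE_dirty,
    NODE_noevict,
};

/* Stamp out test/set/clear accessors for one flag, mirroring the
 * "btree_node_ ## flag" pattern in the matches (non-atomic here). */
#define NODE_FLAG_SKETCH(flag)                                          \
static inline bool node_ ## flag(const struct node_sketch *n)           \
{                                                                       \
    return n->flags & (1UL << NODE_ ## flag);                           \
}                                                                       \
static inline void set_node_ ## flag(struct node_sketch *n)             \
{                                                                       \
    n->flags |= 1UL << NODE_ ## flag;                                   \
}                                                                       \
static inline void clear_node_ ## flag(struct node_sketch *n)           \
{                                                                       \
    n->flags &= ~(1UL << NODE_ ## flag);                                \
}

NODE_FLAG_SKETCH(dirty)
NODE_FLAG_SKETCH(noevict)
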
btree_cache.c
60 static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b) in btree_node_to_freedlist()
70 static void __bch2_btree_node_to_freelist(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_to_freelist()
79 void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b) in bch2_btree_node_to_freelist()
91 static void __btree_node_data_free(struct btree_cache *bc, struct btree *b) in __btree_node_data_free()
122 static void btree_node_data_free(struct btree_cache *bc, struct btree *b) in btree_node_data_free()
133 const struct btree *b = obj; in bch2_btree_cache_cmp_fn()
140 .head_offset = offsetof(struct btree, hash),
141 .key_offset = offsetof(struct btree, hash_val),
147 static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp) in btree_node_data_alloc()
174 static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp) in __btree_node_mem_alloc()
[all …]
bkey.h
55 struct btree;
58 unsigned bch2_bkey_greatest_differing_bit(const struct btree *,
62 unsigned bch2_bkey_ffs(const struct btree *, const struct bkey_packed *);
67 const struct btree *);
70 int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *,
75 int bch2_bkey_cmp_packed(const struct btree *,
80 int __bch2_bkey_cmp_left_packed(const struct btree *,
85 int bkey_cmp_left_packed(const struct btree *b, in bkey_cmp_left_packed()
97 static inline int bkey_cmp_left_packed_byval(const struct btree *b, in bkey_cmp_left_packed_byval()
383 const struct btree *);
[all …]
bbpos.h
11 return cmp_int(l.btree, r.btree) ?: bpos_cmp(l.pos, r.pos); in bbpos_cmp()
21 if (pos.btree != BTREE_ID_NR) { in bbpos_successor()
22 pos.btree++; in bbpos_successor()
32 prt_str(out, bch2_btree_id_str(pos.btree)); in bch2_bbpos_to_text()
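
struct bbpos pairs a btree ID with a position inside that btree: bbpos_cmp() orders by btree first, and the successor (only partially visible above) advances the position, rolling over to the start of the next btree once one is exhausted. A standalone sketch of that rollover, with a plain integer standing in for struct bpos and a made-up btree count:

#include <stdint.h>

#define POS_MAX_SKETCH  UINT64_MAX
#define BTREE_NR_SKETCH 16        /* hypothetical number of btrees */

/* Hypothetical "btree + position" pair, modelled on struct bbpos. */
struct bbpos_sketch {
    uint32_t btree;
    uint64_t pos;                 /* stands in for struct bpos */
};

/* Advance to the next position, rolling over into the next btree when the
 * current one is exhausted; BTREE_NR_SKETCH acts as the end sentinel. */
static struct bbpos_sketch bbpos_successor_sketch(struct bbpos_sketch p)
{
    if (p.pos != POS_MAX_SKETCH) {
        p.pos++;
        return p;
    }
    if (p.btree != BTREE_NR_SKETCH) {
        p.btree++;
        p.pos = 0;
        return p;
    }
    return p;  /* the real code treats walking past the last btree as a bug */
}
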
debug.h
8 struct btree;
11 void __bch2_btree_verify(struct bch_fs *, struct btree *);
13 const struct btree *);
15 static inline void bch2_btree_verify(struct bch_fs *c, struct btree *b) in bch2_btree_verify()
btree_update.h
9 struct btree;
12 struct btree_path *, struct btree *);
14 struct btree *, struct btree_node_iter *,
19 void bch2_btree_add_journal_pin(struct bch_fs *, struct btree *, u64);
73 enum btree_id btree, struct bpos pos) in bch2_btree_delete_at_buffered() argument
75 return bch2_btree_bit_mod_buffered(trans, btree, pos, false); in bch2_btree_delete_at_buffered()
89 enum btree_id btree, in bch2_insert_snapshot_whiteouts() argument
93 if (!btree_type_has_snapshots(btree) || in bch2_insert_snapshot_whiteouts()
97 return __bch2_insert_snapshot_whiteouts(trans, btree, old_pos, new_pos); in bch2_insert_snapshot_whiteouts()
132 enum btree_id btree, in bch2_trans_update_buffered() argument
[all …]
backpointers.c
340 struct btree *b = bch2_backpointer_get_node(trans, iter, bp_pos, bp); in bch2_backpointer_get_key()
350 struct btree *bch2_backpointer_get_node(struct btree_trans *trans, in bch2_backpointer_get_node()
369 struct btree *b = bch2_btree_iter_peek_node(iter); in bch2_backpointer_get_node()
447 static int drop_dev_and_update(struct btree_trans *trans, enum btree_id btree, in drop_dev_and_update() argument
456 return bch2_btree_insert_trans(trans, btree, n, 0); in drop_dev_and_update()
460 enum btree_id btree, struct bkey_s_c extent, in check_extent_checksum() argument
504 prt_printf(&buf, "\n %s ", bch2_btree_id_str(btree)); in check_extent_checksum()
514 ret = drop_dev_and_update(trans, btree, extent, dev) ?: 1; in check_extent_checksum()
662 enum btree_id btree, unsigned level, in check_extent_to_backpointers() argument
682 bch2_extent_ptr_to_bp(c, ca, btree, level, k, p, entry, &bucket_pos, &bp); in check_extent_to_backpointers()
[all …]
btree_update_interior.c
39 btree_path_idx_t, struct btree *, struct keylist *);
40 static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
45 int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) in bch2_btree_node_check_topology()
161 static void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b) in __bch2_btree_calc_format()
174 static struct bkey_format bch2_btree_calc_format(struct btree *b) in bch2_btree_calc_format()
214 static bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b, in bch2_btree_node_format_fits()
225 static void __btree_node_free(struct btree_trans *trans, struct btree *b) in __btree_node_free()
244 struct btree *b) in bch2_btree_node_free_inmem()
269 struct btree *b) in bch2_btree_node_free_never_used()
303 static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, in __bch2_btree_node_alloc()
[all …]
bbpos_types.h
6 enum btree_id btree; member
10 static inline struct bbpos BBPOS(enum btree_id btree, struct bpos pos) in BBPOS() argument
12 return (struct bbpos) { btree, pos }; in BBPOS()
btree_gc.c
59 bch2_btree_id_to_text(out, p->btree); in bch2_gc_pos_to_text()
87 static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst) in btree_ptr_to_v2()
111 static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min) in set_node_min()
147 static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max) in set_node_max()
194 static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *b, in btree_check_node_boundaries()
195 struct btree *prev, struct btree *cur, in btree_check_node_boundaries()
270 static int btree_repair_node_end(struct btree_trans *trans, struct btree *b, in btree_repair_node_end()
271 struct btree *child, struct bpos *pulled_from_scan) in btree_repair_node_end()
308 static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct btree *b, in bch2_btree_repair_topology_recurse()
315 struct btree *prev = NULL, *cur = NULL; in bch2_btree_repair_topology_recurse()
[all …]
/linux/fs/nilfs2/
btree.c
58 static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree, in nilfs_btree_get_new_block() argument
61 struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode; in nilfs_btree_get_new_block()
112 static int nilfs_btree_node_size(const struct nilfs_bmap *btree) in nilfs_btree_node_size() argument
114 return i_blocksize(btree->b_inode); in nilfs_btree_node_size()
117 static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap *btree) in nilfs_btree_nchildren_per_block() argument
119 return btree->b_nchildren_per_block; in nilfs_btree_nchildren_per_block()
411 nilfs_btree_get_root(const struct nilfs_bmap *btree) in nilfs_btree_get_root() argument
413 return (struct nilfs_btree_node *)btree->b_u.u_data; in nilfs_btree_get_root()
428 static int nilfs_btree_height(const struct nilfs_bmap *btree) in nilfs_btree_height() argument
430 return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1; in nilfs_btree_height()
[all …]
/linux/drivers/md/bcache/
btree.h
117 struct btree { struct
127 struct btree *parent; argument
152 static inline bool btree_node_ ## flag(struct btree *b) \ argument
155 static inline void set_btree_node_ ## flag(struct btree *b) \
170 static inline struct btree_write *btree_current_write(struct btree *b) in btree_current_write()
175 static inline struct btree_write *btree_prev_write(struct btree *b) in btree_prev_write()
180 static inline struct bset *btree_bset_first(struct btree *b) in btree_bset_first()
185 static inline struct bset *btree_bset_last(struct btree *b) in btree_bset_last()
190 static inline unsigned int bset_block_offset(struct btree *b, struct bset *i) in bset_block_offset()
248 static inline void rw_lock(bool w, struct btree *b, int level) in rw_lock()
[all …]
btree.c
107 static inline struct bset *write_block(struct btree *b) in write_block()
112 static void bch_btree_init_next(struct btree *b) in bch_btree_init_next()
139 static uint64_t btree_csum_set(struct btree *b, struct bset *i) in btree_csum_set()
148 void bch_btree_node_read_done(struct btree *b) in bch_btree_node_read_done()
243 static void bch_btree_node_read(struct btree *b) in bch_btree_node_read()
281 static void btree_complete_write(struct btree *b, struct btree_write *w) in btree_complete_write()
298 closure_type(b, struct btree, io); in CLOSURE_CALLBACK()
305 closure_type(b, struct btree, io); in CLOSURE_CALLBACK()
320 closure_type(b, struct btree, io); in CLOSURE_CALLBACK()
329 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_endio()
[all …]
extents.c
129 struct btree *b = container_of(keys, struct btree, keys); in bch_bkey_dump()
169 struct btree *b = container_of(bk, struct btree, keys); in bch_btree_ptr_invalid()
174 static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k) in btree_ptr_bad_expensive()
208 struct btree *b = container_of(bk, struct btree, keys); in bch_btree_ptr_bad()
233 struct btree *b = container_of(bk, struct btree, keys); in bch_btree_ptr_insert_fixup()
335 struct cache_set *c = container_of(b, struct btree, keys)->c; in bch_extent_insert_fixup()
509 struct btree *b = container_of(bk, struct btree, keys); in bch_extent_invalid()
514 static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k, in bch_extent_bad_expensive()
546 struct btree *b = container_of(bk, struct btree, keys); in bch_extent_bad()
592 struct btree *b = container_of(bk, struct btree, keys); in bch_extent_merge()
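
The extents.c matches repeatedly recover the enclosing struct btree from a pointer to an embedded member (keys) via container_of(). A standalone sketch of that recovery with hypothetical outer/inner types, using a minimal offsetof-based version of the macro (the kernel's adds type checking):

#include <stddef.h>

/* Minimal offsetof-based container_of, in the spirit of the kernel macro. */
#define container_of_sketch(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-ins for struct btree_keys embedded in struct btree. */
struct keys_sketch {
    unsigned nr;
};

struct btree_sketch {
    int level;
    struct keys_sketch keys;   /* embedded member, like b->keys */
};

/* Given only the embedded keys pointer, recover the owning node. */
static struct btree_sketch *keys_to_btree(struct keys_sketch *k)
{
    return container_of_sketch(k, struct btree_sketch, keys);
}
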
/linux/fs/xfs/libxfs/
xfs_da_btree.c
154 to->btree = from3->__btree; in xfs_da3_node_hdr_from_disk()
162 to->btree = from->__btree; in xfs_da3_node_hdr_from_disk()
729 struct xfs_da_node_entry *btree; in xfs_da3_root_split() local
764 btree = icnodehdr.btree; in xfs_da3_root_split()
765 size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot); in xfs_da3_root_split()
803 btree = nodehdr.btree; in xfs_da3_root_split()
804 btree[0].hashval = cpu_to_be32(blk1->hashval); in xfs_da3_root_split()
805 btree[0].before = cpu_to_be32(blk1->blkno); in xfs_da3_root_split()
806 btree[1].hashval = cpu_to_be32(blk2->hashval); in xfs_da3_root_split()
807 btree[1].before = cpu_to_be32(blk2->blkno); in xfs_da3_root_split()
[all …]
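
The xfs_da3_root_split() matches show the end of a root split: the old root's entries are copied out by size, and the fresh root is left with exactly two entries, each recording a child block's highest hashval and its block number. A standalone sketch of that final step, with hypothetical host-endian types in place of the on-disk big-endian fields:

#include <stdint.h>

/* Hypothetical in-memory version of xfs_da_node_entry: hash + child block. */
struct da_entry_sketch {
    uint32_t hashval;   /* highest hash stored under this child */
    uint32_t before;    /* child block number */
};

struct da_child_sketch {
    uint32_t hashval;   /* highest hash in the child */
    uint32_t blkno;     /* the child's block number */
};

/* After the split, the new root simply points at the two halves,
 * mirroring the btree[0]/btree[1] assignments in the matches. */
static void root_point_at_children(struct da_entry_sketch *btree,
                                   const struct da_child_sketch *blk1,
                                   const struct da_child_sketch *blk2)
{
    btree[0].hashval = blk1->hashval;
    btree[0].before  = blk1->blkno;
    btree[1].hashval = blk2->hashval;
    btree[1].before  = blk2->blkno;
}
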
/linux/Documentation/admin-guide/device-mapper/
persistent-data.rst
14 - Another btree-based caching target posted to dm-devel
72 dm-btree.[hc]
73 dm-btree-remove.c
74 dm-btree-spine.c
75 dm-btree-internal.h
77 Currently there is only one data structure, a hierarchical btree.
81 The btree is 'hierarchical' in that you can define it to be composed
83 thin-provisioning target uses a btree with two levels of nesting.
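
The persistent-data document calls the btree "hierarchical" because a value stored in one btree can itself be the root of another; the thin-provisioning example nests two levels, roughly device id -> (virtual block -> mapping). A standalone sketch of that nested-lookup idea, with a trivial flat map standing in for dm-btree and an array index standing in for the inner tree's root block:

#include <stdint.h>
#include <stddef.h>

/* Trivial stand-in for one btree: a flat (key, value) array. In dm-btree
 * the value is a 64-bit quantity, often the block number of another
 * btree's root node. */
struct flat_map_sketch {
    const uint64_t *keys;
    const uint64_t *values;
    size_t nr;
};

static int flat_map_lookup(const struct flat_map_sketch *m,
                           uint64_t key, uint64_t *value)
{
    for (size_t i = 0; i < m->nr; i++)
        if (m->keys[i] == key) {
            *value = m->values[i];
            return 0;
        }
    return -1;
}

/* Two levels of nesting, as the thin-provisioning example describes:
 * dev_id indexes an outer map whose value identifies an inner map, and
 * the inner map then takes the virtual block to its mapping. */
static int nested_lookup(const struct flat_map_sketch *outer,
                         const struct flat_map_sketch *inner_maps,
                         uint64_t dev_id, uint64_t vblock, uint64_t *mapping)
{
    uint64_t inner_idx;

    if (flat_map_lookup(outer, dev_id, &inner_idx))
        return -1;
    return flat_map_lookup(&inner_maps[inner_idx], vblock, mapping);
}
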
