Matches for "u64s" under /linux/fs/bcachefs/ (line numbers are per file; "[all …]" marks further matches not shown):
journal.h
  169: static inline unsigned jset_u64s(unsigned u64s)
  171:     return u64s + sizeof(struct jset_entry) / sizeof(u64);
  180: bch2_journal_add_entry_noreservation(struct journal_buf *buf, size_t u64s)
  183:     struct jset_entry *entry = vstruct_idx(jset, le32_to_cpu(jset->u64s));
  186:     entry->u64s = cpu_to_le16(u64s);
  188:     le32_add_cpu(&jset->u64s, jset_u64s(u64s));
  201:     unsigned u64s)  // in journal_entry_init()
  203:     entry->u64s = cpu_to_le16(u64s);
  210:     return jset_u64s(u64s);
  215:     const void *data, unsigned u64s)  // in journal_entry_set()
  [all …]
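Throughout bcachefs, sizes are counted in 64-bit words ("u64s"), and jset_u64s() at line 169 converts a payload size into the space a journal entry occupies by adding its header, which the division shows is exactly one u64. A minimal userspace sketch of that arithmetic; jset_entry_stub is an illustrative stand-in assumed to pack into one u64, consistent with the sizeof() expression above:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct jset_entry; assumed to pack into exactly
 * one u64, which is what makes jset_u64s() add 1 to the payload size. */
struct jset_entry_stub {
	uint16_t u64s;		/* payload size in 64-bit words */
	uint8_t  btree_id;
	uint8_t  level;
	uint8_t  type;
	uint8_t  pad[3];
};

/* Mirrors jset_u64s() at line 169: payload words plus header words. */
static unsigned jset_u64s_sketch(unsigned u64s)
{
	return u64s + sizeof(struct jset_entry_stub) / sizeof(uint64_t);
}

int main(void)
{
	/* A 3-word payload consumes 4 words of journal buffer space. */
	printf("jset_u64s(3) = %u\n", jset_u64s_sketch(3));
	return 0;
}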
util.h
  410:     unsigned u64s)  // in memcpy_u64s_small()
  415:     while (u64s--)
  420:     unsigned u64s)  // in __memcpy_u64s()
  427:     : "0" (u64s), "1" (dst), "2" (src)
  433:     while (u64s--)
  439:     unsigned u64s)  // in memcpy_u64s()
  441:     EBUG_ON(!(dst >= src + u64s * sizeof(u64) ||
  442:               dst + u64s * sizeof(u64) <= src));
  444:     __memcpy_u64s(dst, src, u64s);
  448:     unsigned u64s)  // in __memmove_u64s_down()
  [all …]
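memcpy_u64s() is a plain forward word copy that is only safe when source and destination do not partially overlap, which is exactly what the EBUG_ON() at lines 441-442 asserts; overlapping moves go through the __memmove_u64s_*() variants instead. A portable sketch of the non-asm fallback path, keeping the same non-overlap contract (the x86 path at line 427 uses inline assembly):

#include <assert.h>
#include <stdint.h>

/* Portable sketch of the fallback path of __memcpy_u64s(): copy whole
 * 64-bit words, forward. Callers must guarantee the regions don't partially
 * overlap, mirroring the EBUG_ON() check in memcpy_u64s(). */
static void memcpy_u64s_sketch(void *dst, const void *src, unsigned u64s)
{
	uint64_t *d = dst;
	const uint64_t *s = src;

	assert((const char *)dst >= (const char *)src + u64s * sizeof(uint64_t) ||
	       (const char *)dst + u64s * sizeof(uint64_t) <= (const char *)src);

	while (u64s--)
		*d++ = *s++;
}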
vstructs.h
  13: ( type_is((_s)->u64s, u64) ? le64_to_cpu((__force __le64) (_s)->u64s) \
  14: : type_is((_s)->u64s, u32) ? le32_to_cpu((__force __le32) (_s)->u64s) \
  15: : type_is((_s)->u64s, u16) ? le16_to_cpu((__force __le16) (_s)->u64s) \
  16: : ((__force u8) ((_s)->u64s))); \
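This macro picks the right endian conversion for a structure's u64s field based on its declared width, using the kernel's type_is() (built on __builtin_types_compatible_p), so the same macro works on a u8, __le16 or __le32 field. A rough userspace approximation using C11 _Generic, with the le*_to_cpu() conversions stubbed out as identity casts, which is only correct on a little-endian host:

#include <stdint.h>

/* Userspace approximation of the vstructs.h dispatch: select behaviour by
 * the declared type of the u64s field. Byte swapping is omitted, so this
 * sketch assumes a little-endian host. */
#define vstruct_u64s_sketch(_s) _Generic((_s)->u64s,	\
	uint64_t: (uint64_t)((_s)->u64s),		\
	uint32_t: (uint32_t)((_s)->u64s),		\
	uint16_t: (uint16_t)((_s)->u64s),		\
	default:  (uint8_t)((_s)->u64s))

/* usage: works on any struct with a u64s field, whatever its width */
struct jset_like { uint32_t u64s; };
struct bkey_like { uint8_t  u64s; };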
btree_trans_commit.c
  165:     EBUG_ON(insert->k.u64s > bch2_btree_keys_u64s_remaining(b));  // in bch2_btree_bset_insert_key()
  189:     clobber_u64s = k->u64s;
  208:     clobber_u64s = k->u64s;
  218:     new_u64s = k->u64s;
  371:     struct btree *b, unsigned u64s)  // in btree_key_can_insert()
  373:     if (!bch2_btree_node_insert_fits(b, u64s))
  404:     memcpy(new_k, ck->k, ck->u64s * sizeof(u64));  // in btree_key_can_insert_cached_slowpath()
  411:     ck->u64s = new_u64s;
  417:     struct btree_path *path, unsigned u64s)  // in btree_key_can_insert_cached()
  436:     u64s += 1;
  [all …]
bkey_buf.h
  14:     struct bch_fs *c, unsigned u64s)  // in bch2_bkey_buf_realloc()
  17:     u64s > ARRAY_SIZE(s->onstack)) {
  27:     bch2_bkey_buf_realloc(s, c, k.k->u64s);  // in bch2_bkey_buf_reassemble()
  35:     bch2_bkey_buf_realloc(s, c, src->k.u64s);  // in bch2_bkey_buf_copy()
sb-clean.c
  38:      le16_to_cpu(entry->u64s), le32_to_cpu(clean->field.u64s),  // in bch2_sb_clean_validate_late()
  78:      if (!entry->u64s)  // in btree_root_find()
  132:     k1->k.u64s != k2->k.u64s ||  // in bch2_verify_superblock_clean()
  253:     !entry->u64s)  // in bch2_sb_clean_to_text()
  289:     unsigned u64s;  // in bch2_fs_mark_clean()
  303:     u64s = sizeof(*sb_clean) / sizeof(u64) + c->journal.entry_u64s_reserved;
  305:     sb_clean = bch2_sb_field_resize(&c->disk_sb, clean, u64s);
super-io.c
  111:     unsigned u64s)  // in __bch2_sb_field_resize()
  113:     unsigned old_u64s = f ? le32_to_cpu(f->u64s) : 0;
  114:     unsigned sb_u64s = le32_to_cpu(sb->sb->u64s) + u64s - old_u64s;
  118:     if (!f && !u64s) {
  122:     memset(f, 0, sizeof(u64) * u64s);
  123:     f->u64s = cpu_to_le32(u64s);
  130:     if (u64s) {
  131:     f->u64s = cpu_to_le32(u64s);
  143:     sb->sb->u64s = cpu_to_le32(sb_u64s);
  145:     return u64s ? f : NULL;
  [all …]
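Line 114 is the whole-superblock bookkeeping: resizing one field changes sb->u64s by the difference between the field's new and old sizes. A trivial sketch of just that accounting (names illustrative; the real function also moves the trailing fields and handles the delete case at line 118):

#include <stdio.h>

/* Mirrors the arithmetic at super-io.c line 114: total superblock size in
 * u64s after one field grows or shrinks. */
static unsigned sb_u64s_after_resize(unsigned sb_u64s,
				     unsigned old_field_u64s,
				     unsigned new_field_u64s)
{
	return sb_u64s + new_field_u64s - old_field_u64s;
}

int main(void)
{
	/* Growing a 10-word field to 16 words grows a 100-word sb to 106. */
	printf("%u\n", sb_u64s_after_resize(100, 10, 16));
	return 0;
}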
journal_io.h
  80:     unsigned u64s = DIV_ROUND_UP(size, sizeof(u64));  // in jset_entry_init()
  82:     memset(entry, 0, u64s * sizeof(u64));
  87:     entry->u64s = cpu_to_le16(u64s - 1);
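jset_entry_init() rounds a byte size up to whole 64-bit words, zeroes that many words, then stores u64s - 1 in the entry; the minus one suggests the byte size passed in covers the one-word entry header as well as the payload, since entry->u64s counts payload words only. A sketch of just the size math, under that assumption:

#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Sketch of the size logic in jset_entry_init(), assuming "size" counts
 * bytes for header + payload: round up to whole u64s, then subtract the
 * one-word header to get the payload count stored in entry->u64s. */
static uint16_t jset_entry_payload_u64s(unsigned size)
{
	unsigned u64s = DIV_ROUND_UP(size, (unsigned)sizeof(uint64_t));

	return (uint16_t)(u64s - 1);
}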
btree_io.c
  244:     le16_to_cpu(src->keys.u64s) *  // in bch2_drop_whiteouts()
  274:     i->u64s = cpu_to_le16((u64 *) out - i->_data);
  303:     unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;  // in btree_node_sort()
  312:     u64s += le16_to_cpu(bset(b, t)->u64s);
  320:     : __vstruct_bytes(struct btree_node, u64s);
  326:     u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter);
  328:     out->keys.u64s = cpu_to_le16(u64s);
  342:     u64s = le16_to_cpu(out->keys.u64s);
  352:     out->keys.u64s = cpu_to_le16(u64s);
  356:     start_bset->u64s = out->keys.u64s;
  [all …]
bkey_types.h
  17:     return (struct bkey_i *) ((u64 *) k->_data + k->k.u64s);  // in bkey_next()
  20: #define bkey_val_u64s(_k) ((_k)->u64s - BKEY_U64s)
  29:     unsigned u64s = BKEY_U64s + val_u64s;  // in set_bkey_val_u64s()
  31:     BUG_ON(u64s > U8_MAX);
  32:     k->u64s = u64s;
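These lines are the core key-walking arithmetic: a key occupies k.u64s 64-bit words in total, so the next key begins that many words later, the value is whatever follows the BKEY_U64s-word header, and because u64s is stored in a single byte the total must never exceed U8_MAX. A self-contained sketch; BKEY_U64S_STUB and the struct layout are illustrative stand-ins, not the real definitions:

#include <assert.h>
#include <stdint.h>

#define BKEY_U64S_STUB 4u	/* illustrative header size, in u64s */

struct bkey_stub {
	uint8_t u64s;		/* total key size in u64s: header + value */
	/* ... remainder of the header ... */
};

/* Next key starts k->u64s words after this one (cf. bkey_next()).
 * Assumes keys are 8-byte aligned, as in the real on-disk format. */
static inline struct bkey_stub *bkey_next_sketch(struct bkey_stub *k)
{
	return (struct bkey_stub *)((uint64_t *)k + k->u64s);
}

/* Value size is total minus header (cf. bkey_val_u64s()). */
static inline unsigned bkey_val_u64s_sketch(const struct bkey_stub *k)
{
	return k->u64s - BKEY_U64S_STUB;
}

/* Setting the value size must keep the total within the 8-bit field
 * (cf. the BUG_ON() at line 31). */
static inline void set_bkey_val_u64s_sketch(struct bkey_stub *k, unsigned val)
{
	unsigned u64s = BKEY_U64S_STUB + val;

	assert(u64s <= UINT8_MAX);
	k->u64s = (uint8_t)u64s;
}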
btree_update.h
  116: bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s)
  119:     trans->journal_entries_u64s + u64s > trans->journal_entries_size)
  120:     return __bch2_trans_jset_entry_alloc(trans, u64s);
  123:     trans->journal_entries_u64s += u64s;
  148:     struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(k->k.u64s));  // in bch2_trans_update_buffered()
  153:     journal_entry_init(e, BCH_JSET_ENTRY_write_buffer_keys, btree, 0, k->k.u64s);
  230:     mut->k.u64s = DIV_ROUND_UP(bytes, sizeof(u64));  // in __bch2_bkey_make_mut_noupdate()
journal_io.c
  292:     le32_to_cpu(jset->u64s));  // in journal_entry_err_msg()
  340:     if (journal_entry_err_on(!k->k.u64s,  // in journal_validate_key()
  344:     entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
  354:     entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
  363:     le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
  375:     le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
  445:     if (journal_entry_err_on(!entry->u64s ||  // in journal_entry_btree_root_validate()
  446:         le16_to_cpu(entry->u64s) != k->k.u64s,
  456:     entry->u64s = 0;
  497:     if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1,  // in journal_entry_blacklist_validate()
  [all …]
bkey.c
  61:      BUG_ON(packed->u64s < bkeyp_key_u64s(format, packed));  // in bch2_bkey_pack_verify()
  242:     EBUG_ON(in->u64s - in_f->key_u64s + out_f->key_u64s > U8_MAX);  // in bch2_bkey_transform_key()
  245:     out->u64s = out_f->key_u64s + in->u64s - in_f->key_u64s;
  262:     (in->u64s - in_f->key_u64s));  // in bch2_bkey_transform()
  273:     EBUG_ON(in->u64s < format->key_u64s);  // in __bch2_bkey_unpack_key()
  275:     EBUG_ON(in->u64s - format->key_u64s + BKEY_U64s > U8_MAX);
  277:     out.u64s = BKEY_U64s + in->u64s - format->key_u64s;
  298:     EBUG_ON(in->u64s < format->key_u64s);  // in __bkey_unpack_pos()
  333:     out->u64s = format->key_u64s + in->u64s - BKEY_U64s;  // in bch2_bkey_pack_key()
  526:     out->u64s = f->key_u64s;  // in bch2_bkey_pack_pos_lossy()
btree_update.c
  441:     i->old_btree_u64s = !bkey_deleted(&i->old_k) ? i->old_k.u64s : 0;  // in bch2_trans_update_by_path()
  562: struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s)
  564:     unsigned new_top = trans->journal_entries_u64s + u64s;
  828:     unsigned u64s = DIV_ROUND_UP(buf->pos, sizeof(u64));  // in bch2_trans_log_msg()
  829:     prt_chars(buf, '\0', u64s * sizeof(u64) - buf->pos);
  835:     struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(u64s));
  841:     journal_entry_init(e, BCH_JSET_ENTRY_log, 0, 1, u64s);
  854:     unsigned u64s = DIV_ROUND_UP(buf.pos, sizeof(u64));  // in __bch2_fs_log_msg()
  855:     prt_chars(&buf, '\0', u64s * sizeof(u64) - buf.pos);
  862:     ret = darray_make_room(&c->journal.early_journal_entries, jset_u64s(u64s));
  [all …]
acl.c
  199:     unsigned nr_short = 0, nr_long = 0, acl_len, u64s;  // in bch2_acl_to_xattr()
  219:     u64s = BKEY_U64s + xattr_val_u64s(0, acl_len);
  221:     if (u64s > U8_MAX)
  224:     xattr = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
  229:     xattr->k.u64s = u64s;
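The same pattern recurs in str_hash.c and dirent.c below: size the key as header plus value words, reject anything that would overflow the 8-bit u64s field, then allocate exactly u64s * sizeof(u64) bytes. A stripped-down sketch of the pattern; the kernel paths return an error code rather than NULL and allocate from the transaction, not the heap:

#include <stdint.h>
#include <stdlib.h>

#define BKEY_U64S_STUB 4u	/* illustrative header size, in u64s */

/* Sketch: allocate space for a key with the given value size, enforcing the
 * U8_MAX cap that acl.c line 221, str_hash.c line 55 and dirent.c line 176
 * all check before writing k.u64s. */
static void *alloc_key_sketch(unsigned val_u64s)
{
	unsigned u64s = BKEY_U64S_STUB + val_u64s;

	if (u64s > UINT8_MAX)
		return NULL;

	return calloc(u64s, sizeof(uint64_t));
}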
bcachefs_format.h
  201:     __u8 u64s;
  265:     __u8 u64s;
  346:     .u64s = BKEY_U64s, \
  353:     .u64s = BKEY_U64s, \
  364: #define bkey_bytes(_k) ((_k)->u64s * sizeof(__u64))
  477:     __le32 u64s;
  603:     __le16 u64s;
  750:     __le32 u64s;
  1268:    __le32 u64s; /* size of d[] in u64s */
  1436:    __le16 u64s; /* count of d[] in u64s */
  [all …]
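Worth noting in this listing: the on-disk width of u64s scales with what it describes. A single key (struct bkey) stores it as __u8, while larger containers such as bsets, journal entries, journal sets and superblock fields use 16- or 32-bit little-endian counts. The 8-bit key field is what every `u64s > U8_MAX` check in this section guards; a quick illustration of the resulting bound:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* k.u64s is a __u8 (lines 201/265), so a key, header included,
	 * can never exceed 255 64-bit words. */
	unsigned max_key_u64s = UINT8_MAX;

	printf("max bkey size: %u u64s = %zu bytes\n",
	       max_key_u64s, max_key_u64s * sizeof(uint64_t));
	return 0;
}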
bset.c
  63:      if (!i->u64s)  // in bch2_dump_bset()
  71:      if (!_k->u64s) {
  997:     if (src->u64s != clobber_u64s) {  // in bch2_bset_insert()
  999:     u64 *dst_p = (u64 *) where->_data + src->u64s;
  1001:    EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
  1002:            (int) clobber_u64s - src->u64s);
  1005:    le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
  1014:    if (src->u64s != clobber_u64s)
  1015:    bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
  1030:    EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);  // in bch2_bset_delete()
  [all …]
btree_update_interior.c
  212:     size_t u64s = btree_node_u64s_with_format(nr, &b->format, new_f);  // in bch2_btree_node_format_fits()
  214:     return __vstruct_bytes(struct btree_node, u64s) < btree_buf_bytes(b);
  586:     BUG_ON(bch2_keylist_u64s(keys) + k->k.u64s >  // in btree_update_add_key()
  915:     BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >  // in btree_update_updated_root()
  922:     insert, insert->k.u64s);
  1366:    BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >  // in bch2_insert_fixup_btree_ptr()
  1373:    insert, insert->k.u64s);
  1459:    unsigned u64s, n1_u64s = (b->nr.live_u64s * 3) / 5;  // in __btree_split_node()
  1475:    u64s = 0;
  1483:    u64s < n1_u64s &&
  [all …]
str_hash.c
  53:     unsigned u64s = BKEY_U64s + dirent_val_u64s(len);  // in fsck_rename_dirent()
  55:     if (u64s > U8_MAX)
  58:     new->k.u64s = u64s;
btree_update_interior.h
  313:     b->whiteout_u64s += k.u64s;  // in push_whiteout()
  321: static inline bool bch2_btree_node_insert_fits(struct btree *b, unsigned u64s)
  326:     return u64s <= bch2_btree_keys_u64s_remaining(b);
journal.c
  263:     buf->data->u64s = cpu_to_le32(old.cur_entry_offset);  // in __journal_entry_close()
  373:     int u64s;  // in journal_entry_open()
  413:     u64s = (int) (buf->sectors << 9) / sizeof(u64) -
  415:     u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
  417:     if (u64s <= (ssize_t) j->early_journal_entries.nr)
  446:     buf->data->u64s = 0;
  451:     le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
  457:     j->cur_entry_u64s = u64s;
  472:     new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
  681:     int d = new_u64s - res->u64s;  // in bch2_journal_entry_res_resize()
  [all …]
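Lines 413-415 size a journal entry from the buffer's sector count: 512-byte sectors converted to 64-bit words, minus header and reserved-entry overhead (on the continuation line elided at 413), clamped to the representable range. A sketch of that computation with the overhead folded into one parameter:

#include <stdint.h>

/* Sketch of the capacity math in journal_entry_open(): a buffer of
 * "sectors" 512-byte sectors holds (sectors << 9) / 8 u64s, less whatever
 * the jset header and reserved entries consume (overhead_u64s here). */
static int journal_entry_u64s_sketch(unsigned sectors, unsigned overhead_u64s,
				     int max_u64s)
{
	int u64s = (int)((sectors << 9) / sizeof(uint64_t)) - (int)overhead_u64s;

	/* equivalent of clamp_t(int, u64s, 0, max_u64s) */
	if (u64s < 0)
		u64s = 0;
	if (u64s > max_u64s)
		u64s = max_u64s;
	return u64s;
}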
bkey.h
  40:      memcpy_u64s_small(dst, src, src->u64s);  // in bkey_p_copy()
  45:      memcpy_u64s_small(dst, src, src->k.u64s);  // in bkey_copy()
  321:     return ((unsigned) k->u64s - bkeyp_key_u64s(f, k) <= U8_MAX - BKEY_U64s);  // in bkeyp_u64s_valid()
  333:     return k->u64s - bkeyp_key_u64s(format, k);  // in bkeyp_val_u64s()
  345:     k->u64s = bkeyp_key_u64s(format, k) + val_u64s;  // in set_bkeyp_val_u64s()
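The check at line 321 is the packed-key version of the U8_MAX rule: a packed key's value is its total size minus the format-dependent packed-header size, and the key must still fit in the 8-bit u64s field once that header expands back to BKEY_U64s words on unpack. A sketch of the predicate (constants illustrative):

#include <stdint.h>

#define BKEY_U64S_STUB 4u	/* unpacked header size, in u64s (illustrative) */

/* Mirrors bkeyp_u64s_valid(): the value must be small enough that
 * BKEY_U64s + value still fits in an 8-bit u64s after unpacking. */
static int bkeyp_u64s_valid_sketch(unsigned total_u64s,
				   unsigned packed_key_u64s)
{
	unsigned val_u64s = total_u64s - packed_key_u64s;

	return val_u64s <= UINT8_MAX - BKEY_U64S_STUB;
}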
btree_key_cache.c
  109:     ck->u64s = 0;  // in bkey_cached_free()
  131:     ck->u64s = key_u64s;  // in __bkey_cached_alloc()
  212:     unsigned key_u64s = k.k->u64s + 1;  // in btree_key_cache_create()
  242:     if (unlikely(key_u64s > ck->u64s)) {
  258:     ck->u64s = key_u64s;
  558:     BUG_ON(insert->k.u64s > ck->u64s);  // in bch2_btree_insert_key_cached()
sb-members.c
  77:      unsigned u64s = DIV_ROUND_UP((sizeof(*mi) + sizeof(mi->_members[0]) *  // in sb_members_v2_resize_entries()
  80:      mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
  491:     unsigned u64s;  // in bch2_sb_member_alloc()
  523:     u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
  526:     mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
dirent.c
  171:     unsigned u64s = BKEY_U64s + dirent_val_u64s(name->len);  // in dirent_create_key()
  176:     BUG_ON(u64s > U8_MAX);
  178:     dirent = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
  183:     dirent->k.u64s = u64s;