/linux/fs/btrfs/

tree-checker.c
     10  * The objective is to do leaf/node validation checks when tree block is read
     38  * @type: leaf or node
     39  * @identifier: the necessary info to locate the leaf/node.
     51  * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt.
     71  btrfs_header_level(eb) == 0 ? "leaf" : "node",   in generic_err()
     99  btrfs_header_level(eb) == 0 ? "leaf" : "node",   in file_extent_err()
    109  #define CHECK_FE_ALIGNED(leaf, slot, fi, name, alignment) \   argument
    111  if (unlikely(!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), \
    113  file_extent_err((leaf), (slot), \
    115  (#name), btrfs_file_extent_##name((leaf), (fi)), \
    [all …]
|
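The CHECK_FE_ALIGNED() hits above reduce to a power-of-two alignment test on file-extent fields. A minimal userspace sketch of that check follows, reusing the IS_ALIGNED() semantics; the extent field value and the sector size are made up for illustration:

#include <stdio.h>

/* Same semantics as the kernel macro: true when x is a multiple of a (a must be a power of two). */
#define IS_ALIGNED(x, a)  (((x) & ((__typeof__(x))(a) - 1)) == 0)

int main(void)
{
    unsigned long long disk_bytenr = 1073741824ULL;  /* made-up extent field */
    unsigned int sectorsize = 4096;                  /* made-up fs sector size */

    if (!IS_ALIGNED(disk_bytenr, sectorsize))
        printf("corrupt leaf: invalid disk_bytenr, have %llu, should be aligned to %u\n",
               disk_bytenr, sectorsize);
    else
        printf("disk_bytenr %llu is %u-byte aligned\n", disk_bytenr, sectorsize);
    return 0;
}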
dir-item.c
     32  struct extent_buffer *leaf;   in insert_with_overflow() local
     44  leaf = path->nodes[0];   in insert_with_overflow()
     45  ptr = btrfs_item_ptr(leaf, path->slots[0], char);   in insert_with_overflow()
     46  ASSERT(data_size <= btrfs_item_size(leaf, path->slots[0]));   in insert_with_overflow()
     47  ptr += btrfs_item_size(leaf, path->slots[0]) - data_size;   in insert_with_overflow()
     66  struct extent_buffer *leaf;   in btrfs_insert_xattr_item() local
     83  leaf = path->nodes[0];   in btrfs_insert_xattr_item()
     85  btrfs_set_dir_item_key(leaf, dir_item, &disk_key);   in btrfs_insert_xattr_item()
     86  btrfs_set_dir_flags(leaf, dir_item, BTRFS_FT_XATTR);   in btrfs_insert_xattr_item()
     87  btrfs_set_dir_name_len(leaf, dir_item, name_len);   in btrfs_insert_xattr_item()
    [all …]
|
inode-item.c
     17  struct btrfs_inode_ref *btrfs_find_name_in_backref(const struct extent_buffer *leaf,   in btrfs_find_name_in_backref() argument
     28  item_size = btrfs_item_size(leaf, slot);   in btrfs_find_name_in_backref()
     29  ptr = btrfs_item_ptr_offset(leaf, slot);   in btrfs_find_name_in_backref()
     32  len = btrfs_inode_ref_name_len(leaf, ref);   in btrfs_find_name_in_backref()
     37  if (memcmp_extent_buffer(leaf, name->name, name_ptr,   in btrfs_find_name_in_backref()
     45  const struct extent_buffer *leaf, int slot, u64 ref_objectid,   in btrfs_find_name_in_ext_backref() argument
     55  item_size = btrfs_item_size(leaf, slot);   in btrfs_find_name_in_ext_backref()
     56  ptr = btrfs_item_ptr_offset(leaf, slot);   in btrfs_find_name_in_ext_backref()
     67  ref_name_len = btrfs_inode_extref_name_len(leaf, extref);   in btrfs_find_name_in_ext_backref()
     70  btrfs_inode_extref_parent(leaf, extref) == ref_objectid &&   in btrfs_find_name_in_ext_backref()
    [all …]
|
file-item.c
    167  struct extent_buffer *leaf;   in btrfs_insert_hole_extent() local
    180  leaf = path->nodes[0];   in btrfs_insert_hole_extent()
    181  item = btrfs_item_ptr(leaf, path->slots[0],   in btrfs_insert_hole_extent()
    183  btrfs_set_file_extent_disk_bytenr(leaf, item, 0);   in btrfs_insert_hole_extent()
    184  btrfs_set_file_extent_disk_num_bytes(leaf, item, 0);   in btrfs_insert_hole_extent()
    185  btrfs_set_file_extent_offset(leaf, item, 0);   in btrfs_insert_hole_extent()
    186  btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);   in btrfs_insert_hole_extent()
    187  btrfs_set_file_extent_ram_bytes(leaf, item, num_bytes);   in btrfs_insert_hole_extent()
    188  btrfs_set_file_extent_generation(leaf, item, trans->transid);   in btrfs_insert_hole_extent()
    189  btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);   in btrfs_insert_hole_extent()
    [all …]
|
ctree.c
     41  * The leaf data grows from end-to-front in the node. this returns the address
     42  * of the start of the last item, which is the stop of the leaf data stack.
     44  static unsigned int leaf_data_end(const struct extent_buffer *leaf)   in leaf_data_end() argument
     46  u32 nr = btrfs_header_nritems(leaf);   in leaf_data_end()
     49  return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);   in leaf_data_end()
     50  return btrfs_item_offset(leaf, nr - 1);   in leaf_data_end()
     54  * Move data in a @leaf (using memmove, safe for overlapping ranges).
     56  * @leaf: leaf that we're doing a memmove on
     62  * the leaf. The btrfs_item offset's start directly after the header, so we
     63  * have to adjust any offsets to account for the header in the leaf. This
    [all …]
|
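The leaf_data_end() hits above rely on btrfs packing item data from the back of the leaf toward the front while item headers grow forward, so the "end" of the data is simply the offset of the most recently added item (or the full data area for an empty leaf). A self-contained toy model of that convention; the structure and constant names here are invented for illustration, not btrfs's:

#include <stdio.h>

#define LEAF_DATA_SIZE 4096u   /* hypothetical usable bytes in one leaf */

struct toy_item {
    unsigned int offset;       /* start of this item's data in the data area */
    unsigned int size;         /* length of this item's data */
};

struct toy_leaf {
    unsigned int nritems;
    struct toy_item items[64];
};

static unsigned int toy_leaf_data_end(const struct toy_leaf *leaf)
{
    if (leaf->nritems == 0)
        return LEAF_DATA_SIZE;                    /* no data yet: the "stack" is empty */
    return leaf->items[leaf->nritems - 1].offset; /* last item starts lowest in the data area */
}

/* Append an item of @size bytes; its data is carved off the low end of the free space. */
static void toy_leaf_add(struct toy_leaf *leaf, unsigned int size)
{
    unsigned int end = toy_leaf_data_end(leaf);

    leaf->items[leaf->nritems].offset = end - size;
    leaf->items[leaf->nritems].size = size;
    leaf->nritems++;
}

int main(void)
{
    struct toy_leaf leaf = { 0 };

    toy_leaf_add(&leaf, 100);
    toy_leaf_add(&leaf, 60);
    printf("data end after two items: %u\n", toy_leaf_data_end(&leaf));  /* 4096 - 160 = 3936 */
    return 0;
}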
raid-stripe-tree.c
     23  struct extent_buffer *leaf;   in btrfs_partially_delete_raid_extent() local
     36  leaf = path->nodes[0];   in btrfs_partially_delete_raid_extent()
     38  item_size = btrfs_item_size(leaf, slot);   in btrfs_partially_delete_raid_extent()
     44  extent = btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent);   in btrfs_partially_delete_raid_extent()
     50  phys = btrfs_raid_stride_physical(leaf, stride) + frontpad;   in btrfs_partially_delete_raid_extent()
     72  struct extent_buffer *leaf;   in btrfs_delete_raid_extent() local
    111  leaf = path->nodes[0];   in btrfs_delete_raid_extent()
    113  btrfs_item_key_to_cpu(leaf, &key, slot);   in btrfs_delete_raid_extent()
    141  leaf = path->nodes[0];   in btrfs_delete_raid_extent()
    143  btrfs_item_key_to_cpu(leaf, &key, slot);   in btrfs_delete_raid_extent()
    [all …]
|
xattr.c
     33  struct extent_buffer *leaf;   in btrfs_getxattr() local
     52  leaf = path->nodes[0];   in btrfs_getxattr()
     55  ret = btrfs_dir_data_len(leaf, di);   in btrfs_getxattr()
     60  if (btrfs_dir_data_len(leaf, di) > size) {   in btrfs_getxattr()
     66  * The way things are packed into the leaf is like this   in btrfs_getxattr()
     70  * where the data starts in the in memory leaf   in btrfs_getxattr()
     73  btrfs_dir_name_len(leaf, di));   in btrfs_getxattr()
     74  read_extent_buffer(leaf, buffer, data_ptr,   in btrfs_getxattr()
     75  btrfs_dir_data_len(leaf, di));   in btrfs_getxattr()
     76  ret = btrfs_dir_data_len(leaf, di);   in btrfs_getxattr()
    [all …]
|
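The packing comment in btrfs_getxattr() above describes an xattr item laid out as a header, then the attribute name, then the value, so the value starts exactly where the name ends. A hedged userspace sketch of pulling the value out of such a flat layout; the structure here is a stand-in for illustration, not btrfs's btrfs_dir_item:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in header; the real item also carries a key, a transid, type flags, etc. */
struct toy_dir_item {
    unsigned short name_len;
    unsigned short data_len;
    unsigned char payload[];   /* name bytes immediately followed by data bytes */
};

/* Copy the value ("data") of the item into buffer; return its length, or -1 if buffer is too small. */
static int toy_getxattr(const struct toy_dir_item *di, void *buffer, size_t size)
{
    const unsigned char *data_ptr = di->payload + di->name_len;  /* data starts where the name ends */

    if (di->data_len > size)
        return -1;             /* the kernel returns -ERANGE in this case */
    memcpy(buffer, data_ptr, di->data_len);
    return di->data_len;
}

int main(void)
{
    struct toy_dir_item *di = malloc(sizeof(*di) + 9 + 5);
    char value[16];
    int len;

    if (!di)
        return 1;
    di->name_len = 9;                       /* "user.test" */
    di->data_len = 5;                       /* "hello" */
    memcpy(di->payload, "user.test", 9);
    memcpy(di->payload + 9, "hello", 5);

    len = toy_getxattr(di, value, sizeof(value));
    printf("got %d bytes: %.*s\n", len, len, value);
    free(di);
    return 0;
}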
file.c
    144  struct extent_buffer *leaf;   in btrfs_drop_extents() local
    193  leaf = path->nodes[0];   in btrfs_drop_extents()
    194  btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);   in btrfs_drop_extents()
    201  leaf = path->nodes[0];   in btrfs_drop_extents()
    202  if (path->slots[0] >= btrfs_header_nritems(leaf)) {   in btrfs_drop_extents()
    204  btrfs_print_leaf(leaf);   in btrfs_drop_extents()
    215  leaf = path->nodes[0];   in btrfs_drop_extents()
    219  btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);   in btrfs_drop_extents()
    232  fi = btrfs_item_ptr(leaf, path->slots[0],   in btrfs_drop_extents()
    234  extent_type = btrfs_file_extent_type(leaf, fi);   in btrfs_drop_extents()
    [all …]
|
extent-tree.c
     52  struct extent_buffer *leaf,
    152  struct extent_buffer *leaf = path->nodes[0];   in btrfs_lookup_extent_info() local
    154  const u32 item_size = btrfs_item_size(leaf, path->slots[0]);   in btrfs_lookup_extent_info()
    165  ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);   in btrfs_lookup_extent_info()
    166  num_refs = btrfs_extent_refs(leaf, ei);   in btrfs_lookup_extent_info()
    175  extent_flags = btrfs_extent_flags(leaf, ei);   in btrfs_lookup_extent_info()
    176  owner = btrfs_get_extent_owner_root(fs_info, leaf, path->slots[0]);   in btrfs_lookup_extent_info()
    301  * - number of pointers in the tree leaf
    304  * the tree leaf
    314  * (btrfs_header_owner(leaf), inode objectid, offset in file)
    [all …]
|
/linux/fs/xfs/libxfs/

xfs_iext_tree.c
     98  * There are two types of blocks in the btree: leaf and inner (non-leaf) blocks.
    100  * The leaf blocks are made up by %KEYS_PER_NODE extent records, which each
    103  * leaf blocks (if there are any).
    105  * The inner (non-leaf) blocks first contain KEYS_PER_NODE lookup keys, followed
    109  * Leaf: | rec 1 | rec 2 | rec 3 | rec 4 | rec N | prev-ptr | next-ptr |
    142  return &cur->leaf->recs[cur->pos];   in cur_rec()
    148  if (!cur->leaf)   in xfs_iext_valid()
    202  cur->leaf = xfs_iext_find_first_leaf(ifp);   in xfs_iext_first()
    212  cur->leaf = xfs_iext_find_last_leaf(ifp);   in xfs_iext_last()
    213  if (!cur->leaf) {   in xfs_iext_last()
    [all …]
|
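The xfs_iext_tree.c comments above describe two block shapes: leaves holding packed extent records plus prev/next pointers that chain the leaves together, and inner nodes holding lookup keys followed by child pointers. A sketch of those shapes and of a key-based child pick; field names and the fan-out constant are made up for illustration, not xfs_iext_tree.c's:

#include <stdint.h>
#include <stdio.h>

#define KEYS_PER_NODE 16          /* hypothetical fan-out */

struct iext_rec {                 /* one packed extent record */
    uint64_t lo;
    uint64_t hi;
};

/* Leaf: | rec 1 | ... | rec N | prev-ptr | next-ptr | */
struct iext_leaf {
    struct iext_rec recs[KEYS_PER_NODE];
    struct iext_leaf *prev;       /* doubly linked list of leaves for in-order walks */
    struct iext_leaf *next;
};

/* Inner: | key 1 | ... | key N | ptr 1 | ... | ptr N | */
struct iext_node {
    uint64_t keys[KEYS_PER_NODE]; /* lowest file offset covered by each child */
    void *ptrs[KEYS_PER_NODE];    /* child is another node or, at the lowest level, a leaf */
};

/* Pick the child whose key range covers @offset (last populated key <= offset). */
static int iext_node_pick(const struct iext_node *node, uint64_t offset)
{
    int i;

    for (i = 1; i < KEYS_PER_NODE; i++) {
        if (!node->ptrs[i] || node->keys[i] > offset)
            break;
    }
    return i - 1;
}

int main(void)
{
    static struct iext_leaf leaf0, leaf1;
    static struct iext_node root;

    leaf0.next = &leaf1;
    leaf1.prev = &leaf0;
    root.keys[0] = 0;    root.ptrs[0] = &leaf0;   /* extents starting at offset 0.. */
    root.keys[1] = 1000; root.ptrs[1] = &leaf1;   /* ..and at offset 1000 and up */

    printf("offset 1500 -> child %d\n", iext_node_pick(&root, 1500));   /* prints 1 */
    return 0;
}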
xfs_attr_leaf.c
     38  * Routines to implement leaf blocks of attributes as Btrees of hashed names.
     76  STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index);
     82  * of an attr leaf block. The region starts at the tail of the block and expands
     84  * size for an empty leaf block and is reduced from there.
    242  struct xfs_attr_leafblock *leaf,   in xfs_attr3_leaf_verify_entry() argument
    270  lentry = xfs_attr3_leaf_name_local(leaf, idx);   in xfs_attr3_leaf_verify_entry()
    277  rentry = xfs_attr3_leaf_name_remote(leaf, idx);   in xfs_attr3_leaf_verify_entry()
    294  * Validate an attribute leaf block.
    296  * Empty leaf blocks can occur under the following circumstances:
    301  * 4. The attribute is small enough to fit in a leaf block;
    [all …]
|
xfs_attr.h
     15  * elements are in the leaf nodes. Attribute names are hashed into an int,
    106  * are we leaf form? ──y──> xfs_attr_leaf_removename ──> done
    143  * remove leaf and
    154  * join leaf │
    215  * │ transform to leaf
    218  * │ hold the leaf buffer
    223  * │ leaf form
    225  * └─> release leaf buffer
    260  * fits in a node leaf? ────n─────┐ │
    262  * │ │ single leaf node? │
    [all …]
|
/linux/tools/arch/x86/kcpuid/

kcpuid.c
     56  /* Represent one leaf (basic or extended) */
     60  * then the leafs[0] is the main leaf
    114  static void leaf_print_raw(struct subleaf *leaf)   in leaf_print_raw() argument
    116  if (has_subleafs(leaf->index)) {   in leaf_print_raw()
    117  if (leaf->sub == 0)   in leaf_print_raw()
    118  printf("0x%08x: subleafs:\n", leaf->index);   in leaf_print_raw()
    121  leaf->sub, leaf->eax, leaf->ebx, leaf->ecx, leaf->edx);   in leaf_print_raw()
    124  leaf->index, leaf->eax, leaf->ebx, leaf->ecx, leaf->edx);   in leaf_print_raw()
    133  struct subleaf *leaf;   in cpuid_store() local
    148  perror("malloc func leaf");   in cpuid_store()
    [all …]
|
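kcpuid's leaf_print_raw() above just dumps the four registers returned for a leaf/subleaf pair. The same dump can be reproduced in a few lines of userspace C with the compiler's <cpuid.h> helpers; this is a sketch, not kcpuid's own wrapper around the instruction:

#include <stdio.h>
#include <cpuid.h>   /* GCC/Clang helpers for the CPUID instruction */

static void print_leaf_raw(unsigned int leaf, unsigned int subleaf)
{
    unsigned int eax, ebx, ecx, edx;

    /* __get_cpuid_count() returns 0 when the leaf is above the CPU's maximum. */
    if (!__get_cpuid_count(leaf, subleaf, &eax, &ebx, &ecx, &edx)) {
        fprintf(stderr, "leaf 0x%08x not supported\n", leaf);
        return;
    }
    printf("0x%08x.%u: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x\n",
           leaf, subleaf, eax, ebx, ecx, edx);
}

int main(void)
{
    print_leaf_raw(0x0, 0);          /* basic leaf 0: max basic leaf + vendor string */
    print_leaf_raw(0x80000000u, 0);  /* extended leaf 0: max extended leaf */
    return 0;
}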
/linux/drivers/gpu/drm/nouveau/nvkm/core/

intr.c
     30  enum nvkm_intr_type type, int *leaf, u32 *mask)   in nvkm_intr_xlat() argument
     46  *leaf = data->leaf;   in nvkm_intr_xlat()
     54  *leaf = data->leaf;   in nvkm_intr_xlat()
     66  *leaf = type / 32;   in nvkm_intr_xlat()
     76  nvkm_intr_find(struct nvkm_subdev *subdev, enum nvkm_intr_type type, int *leaf, u32 *mask)   in nvkm_intr_find() argument
     82  ret = nvkm_intr_xlat(subdev, intr, type, leaf, mask);   in nvkm_intr_find()
     91  nvkm_intr_allow_locked(struct nvkm_intr *intr, int leaf, u32 mask)   in nvkm_intr_allow_locked() argument
     93  intr->mask[leaf] |= mask;   in nvkm_intr_allow_locked()
     96  intr->func->reset(intr, leaf, mask);   in nvkm_intr_allow_locked()
     97  intr->func->allow(intr, leaf, mask);   in nvkm_intr_allow_locked()
    [all …]
|
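The fallback path above (*leaf = type / 32) treats an interrupt type as a flat bit index across an array of 32-bit "leaf" registers: the quotient selects the register, the remainder selects the bit. A hedged sketch of that mapping and of the allow-side bookkeeping; the names here are illustrative, not nouveau's:

#include <stdint.h>
#include <stdio.h>

#define LEAVES 8

struct toy_intr {
    uint32_t mask[LEAVES];        /* software copy of which sources are allowed, per leaf */
};

static void intr_xlat(int type, int *leaf, uint32_t *mask)
{
    *leaf = type / 32;            /* which 32-bit leaf register */
    *mask = 1u << (type % 32);    /* which bit inside it */
}

static void intr_allow(struct toy_intr *intr, int type)
{
    int leaf;
    uint32_t mask;

    intr_xlat(type, &leaf, &mask);
    intr->mask[leaf] |= mask;     /* real code would also poke the hardware enable register */
}

int main(void)
{
    struct toy_intr intr = { { 0 } };

    intr_allow(&intr, 70);        /* type 70 -> leaf 2, bit 6 */
    printf("leaf 2 mask: 0x%08x\n", intr.mask[2]);
    return 0;
}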
/linux/arch/loongarch/mm/

cache.c
     44  static void flush_cache_leaf(unsigned int leaf)   in flush_cache_leaf() argument
     48  struct cache_desc *cdesc = current_cpu_data.cache_leaves + leaf;   in flush_cache_leaf()
     55  flush_cache_line(leaf, addr);   in flush_cache_leaf()
     68  int leaf;   in __flush_cache_all() local
     72  leaf = cache_present - 1;   in __flush_cache_all()
     73  if (cache_inclusive(cdesc + leaf)) {   in __flush_cache_all()
     74  flush_cache_leaf(leaf);   in __flush_cache_all()
     78  for (leaf = 0; leaf < cache_present; leaf++)   in __flush_cache_all()
     79  flush_cache_leaf(leaf);   in __flush_cache_all()
     94  #define populate_cache_properties(cfg0, cdesc, level, leaf) \   argument
    [all …]
|
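flush_cache_leaf() above walks every line of the cache described by one cache_desc. A hedged sketch of that walk over a sets x ways geometry; the flush primitive is stubbed out, whereas the real code issues a cache-maintenance operation per line:

#include <stdio.h>

struct cache_desc {                 /* minimal stand-in for the per-leaf descriptor */
    unsigned int sets;
    unsigned int ways;
    unsigned int linesz;            /* bytes per cache line */
};

static void flush_cache_line(unsigned int leaf, unsigned long addr)
{
    /* Stub: real code issues an index/address cache op for this leaf here. */
    (void)leaf;
    (void)addr;
}

static void flush_cache_leaf_sketch(unsigned int leaf, const struct cache_desc *cdesc,
                                    unsigned long base)
{
    unsigned long addr = base;
    unsigned int i, nr_lines = cdesc->sets * cdesc->ways;

    for (i = 0; i < nr_lines; i++) {
        flush_cache_line(leaf, addr);   /* one op per line of this cache leaf */
        addr += cdesc->linesz;
    }
}

int main(void)
{
    struct cache_desc l1d = { .sets = 256, .ways = 4, .linesz = 64 };

    flush_cache_leaf_sketch(0, &l1d, 0);
    printf("walked %u lines (%u bytes)\n", l1d.sets * l1d.ways,
           l1d.sets * l1d.ways * l1d.linesz);
    return 0;
}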
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mc/

nv04.c
     75  int leaf;   in nv04_mc_intr_rearm() local
     77  for (leaf = 0; leaf < intr->leaves; leaf++)   in nv04_mc_intr_rearm()
     78  nvkm_wr32(mc->subdev.device, 0x000140 + (leaf * 4), 0x00000001);   in nv04_mc_intr_rearm()
     85  int leaf;   in nv04_mc_intr_unarm() local
     87  for (leaf = 0; leaf < intr->leaves; leaf++)   in nv04_mc_intr_unarm()
     88  nvkm_wr32(mc->subdev.device, 0x000140 + (leaf * 4), 0x00000000);   in nv04_mc_intr_unarm()
     98  int leaf;   in nv04_mc_intr_pending() local
    100  for (leaf = 0; leaf < intr->leaves; leaf++) {   in nv04_mc_intr_pending()
    101  intr->stat[leaf] = nvkm_rd32(mc->subdev.device, 0x000100 + (leaf * 4));   in nv04_mc_intr_pending()
    102  if (intr->stat[leaf])   in nv04_mc_intr_pending()
|
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/

tu102.c
     29  tu102_vfn_intr_reset(struct nvkm_intr *intr, int leaf, u32 mask)   in tu102_vfn_intr_reset() argument
     33  nvkm_wr32(vfn->subdev.device, vfn->addr.priv + 0x1000 + (leaf * 4), mask);   in tu102_vfn_intr_reset()
     37  tu102_vfn_intr_allow(struct nvkm_intr *intr, int leaf, u32 mask)   in tu102_vfn_intr_allow() argument
     41  nvkm_wr32(vfn->subdev.device, vfn->addr.priv + 0x1200 + (leaf * 4), mask);   in tu102_vfn_intr_allow()
     45  tu102_vfn_intr_block(struct nvkm_intr *intr, int leaf, u32 mask)   in tu102_vfn_intr_block() argument
     49  nvkm_wr32(vfn->subdev.device, vfn->addr.priv + 0x1400 + (leaf * 4), mask);   in tu102_vfn_intr_block()
     74  int pending = 0, leaf;   in tu102_vfn_intr_pending() local
     76  for (leaf = 0; leaf < 8; leaf++) {   in tu102_vfn_intr_pending()
     77  if (intr_top & BIT(leaf / 2)) {   in tu102_vfn_intr_pending()
     78  intr->stat[leaf] = nvkm_rd32(device, vfn->addr.priv + 0x1000 + (leaf * 4));   in tu102_vfn_intr_pending()
    [all …]
|
/linux/drivers/net/can/usb/

Kconfig
     71  Leaf Light, Kvaser USBcan II and Kvaser Memorator Pro 5xHS.
     74  - Kvaser Leaf Light
     75  - Kvaser Leaf Professional HS
     76  - Kvaser Leaf SemiPro HS
     77  - Kvaser Leaf Professional LS
     78  - Kvaser Leaf Professional SWC
     79  - Kvaser Leaf Professional LIN
     80  - Kvaser Leaf SemiPro LS
     81  - Kvaser Leaf SemiPro SWC
     84  - Kvaser Leaf Light GI
    [all …]
|
/linux/arch/x86/kernel/cpu/

topology_ext.c
     45  static inline bool topo_subleaf(struct topo_scan *tscan, u32 leaf, u32 subleaf,   in topo_subleaf() argument
     59  u32 level : 8, // Current topology level. Same as sub leaf number   in topo_subleaf()
     66  switch (leaf) {   in topo_subleaf()
     73  cpuid_subleaf(leaf, subleaf, &sl);   in topo_subleaf()
     79  pr_err_once("Topology: leaf 0x%x:%d Unknown domain type %u\n",   in topo_subleaf()
     80  leaf, subleaf, sl.type);   in topo_subleaf()
     96  pr_warn_once(FW_BUG "CPUID leaf 0x%x subleaf %d APIC ID mismatch %x != %x\n",   in topo_subleaf()
     97  leaf, subleaf, tscan->c->topo.initial_apicid, sl.x2apic_id);   in topo_subleaf()
    104  static bool parse_topology_leaf(struct topo_scan *tscan, u32 leaf)   in parse_topology_leaf() argument
    110  for (subleaf = 0, last_dom = 0; topo_subleaf(tscan, leaf, subleaf, &last_dom); subleaf++);   in parse_topology_leaf()
    [all …]
|
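topo_subleaf() above iterates the subleaves of the extended topology leaf until it runs out of levels. A userspace sketch of the same walk for leaf 0x1f, using the commonly documented register layout (level type in ECX[15:8], shift width in EAX[4:0], x2APIC ID in EDX); treat this as a summary of the CPUID encoding, not a copy of the kernel's parser:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int leaf = 0x1f;   /* CPUs without 0x1f expose the older leaf 0xb instead */
    unsigned int subleaf, eax, ebx, ecx, edx;

    for (subleaf = 0; ; subleaf++) {
        if (!__get_cpuid_count(leaf, subleaf, &eax, &ebx, &ecx, &edx))
            break;
        unsigned int type = (ecx >> 8) & 0xff;   /* 0 means "no more levels" */
        if (!type)
            break;
        printf("subleaf %u: level type %u, shift %u, x2apic id 0x%x\n",
               subleaf, type, eax & 0x1f, edx);
    }
    return 0;
}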
/linux/arch/x86/include/asm/xen/

cpuid.h
     46  * Leaf 1 (0x40000x00)
     47  * EAX: Largest Xen-information leaf. All leaves up to an including @EAX
     57  * Leaf 2 (0x40000x01)
     64  * Leaf 3 (0x40000x02)
     77  * Leaf 4 (0x40000x03)
     78  * Sub-leaf 0: EAX: bit 0: emulated tsc
     85  * Sub-leaf 1: EAX: tsc offset low part
     89  * Sub-leaf 2: EAX: host tsc frequency in kHz
    102  * Leaf 5 (0x40000x04)
    104  * Sub-leaf 0: EAX: Features
    [all …]
|
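The "0x40000x00" notation above reflects that the Xen leaves may sit at a base offset in steps of 0x100 within the hypervisor CPUID range. A hedged userspace sketch of locating that base by its well-known "XenVMMXenVMM" signature; it only reports something useful when run inside a Xen guest:

#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
    unsigned int base, eax, ebx, ecx, edx;
    char sig[13];

    for (base = 0x40000000u; base < 0x40010000u; base += 0x100) {
        __cpuid(base, eax, ebx, ecx, edx);   /* hypervisor leaves sit above the basic maximum */
        memcpy(sig + 0, &ebx, 4);
        memcpy(sig + 4, &ecx, 4);
        memcpy(sig + 8, &edx, 4);
        sig[12] = '\0';
        if (!strcmp(sig, "XenVMMXenVMM")) {
            printf("Xen leaves at 0x%08x, largest leaf 0x%08x\n", base, eax);
            return 0;
        }
    }
    printf("no Xen signature found\n");
    return 0;
}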
/linux/fs/unicode/

mkutf8data.c
    123  * node, otherwise it is a leaf node
    148  * leaf[0]: The unicode version, stored as a generation number that is
    152  * leaf[1]: Canonical Combining Class. During normalization, we need
    160  * leaf[2]: Decomposition. If leaf[1] == 255, then leaf[2] is the
    175  #define LEAF_GEN(LEAF) ((LEAF)[0])   argument
    176  #define LEAF_CCC(LEAF) ((LEAF)[1])   argument
    177  #define LEAF_STR(LEAF) ((const char*)((LEAF) + 2))   argument
    343  #define LEAF 0   macro
    383  void *leaf = NULL;   in lookup() local
    386  while (!leaf && node) {   in lookup()
    [all …]
|
utf8-norm.c
    128  * node, otherwise it is a leaf node
    153  * leaf[0]: The unicode version, stored as a generation number that is
    157  * leaf[1]: Canonical Combining Class. During normalization, we need
    165  * leaf[2]: Decomposition. If leaf[1] == 255, then leaf[2] is the
    186  #define LEAF_GEN(LEAF) ((LEAF)[0])   argument
    187  #define LEAF_CCC(LEAF) ((LEAF)[1])   argument
    188  #define LEAF_STR(LEAF) ((const char *)((LEAF) + 2))   argument
    197  /* Size of the synthesized leaf used for Hangul syllable decomposition. */
    272  /* Fill in base of leaf. */   in utf8hangul()
    296  * Returns the leaf if one exists, NULL otherwise.
    [all …]
|
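Both files above describe the same trie leaf layout: byte 0 is the Unicode generation, byte 1 the canonical combining class, and when byte 1 is 255 the bytes from offset 2 on are a NUL-terminated decomposition string. A small sketch of decoding such a leaf with the macros quoted above; the sample leaf bytes are invented:

#include <stdio.h>

#define LEAF_GEN(LEAF)  ((LEAF)[0])
#define LEAF_CCC(LEAF)  ((LEAF)[1])
#define LEAF_STR(LEAF)  ((const char *)((LEAF) + 2))

#define DECOMPOSE 255   /* value of leaf[1] meaning "leaf[2..] is a decomposition string" */

static void dump_leaf(const unsigned char *leaf)
{
    printf("generation %u: ", LEAF_GEN(leaf));
    if (LEAF_CCC(leaf) == DECOMPOSE)
        printf("decomposes to \"%s\"\n", LEAF_STR(leaf));
    else
        printf("ccc %u, no decomposition\n", LEAF_CCC(leaf));
}

int main(void)
{
    /* Invented examples: one plain leaf, one carrying a decomposition string. */
    static const unsigned char plain[]  = { 12, 0 };
    static const unsigned char decomp[] = { 12, 255, 'a', 0xcc, 0x81, 0 }; /* "a" + U+0301 in UTF-8 */

    dump_leaf(plain);
    dump_leaf(decomp);
    return 0;
}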
/linux/drivers/gpu/drm/i915/selftests/

i915_syncmap.c
    146  static int check_seqno(struct i915_syncmap *leaf, unsigned int idx, u32 seqno)   in check_seqno() argument
    148  if (leaf->height) {   in check_seqno()
    149  pr_err("%s: not a leaf, height is %d\n",   in check_seqno()
    150  __func__, leaf->height);   in check_seqno()
    154  if (__sync_seqno(leaf)[idx] != seqno) {   in check_seqno() local
    156  __func__, idx, __sync_seqno(leaf)[idx], seqno);   in check_seqno()
    172  pr_err("Inserting first context=%llx did not return leaf (height=%d, prefix=%llx\n",   in check_one()
    211  * Check that inserting a new id, creates a leaf and only that leaf.   in igt_syncmap_one()
    247  pr_err("Inserting context=%llx did not return leaf (height=%d, prefix=%llx\n",   in check_leaf()
    253  …pr_err("First entry into leaf (context=%llx) does not contain a single entry, found %x (count=%d)!…   in check_leaf()
    [all …]
|
/linux/arch/mips/kernel/

cacheinfo.c
      7  /* Populates leaf and increments to next leaf */
      8  #define populate_cache(cache, leaf, c_level, c_type) \   argument
     10  leaf->type = c_type; \
     11  leaf->level = c_level; \
     12  leaf->coherency_line_size = c->cache.linesz; \
     13  leaf->number_of_sets = c->cache.sets; \
     14  leaf->ways_of_associativity = c->cache.ways; \
     15  leaf->size = c->cache.linesz * c->cache.sets * \
     17  leaf++; \
|
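The populate_cache() macro above fills one cacheinfo leaf straight from the probed geometry, with size = line size x sets x ways. A trivial sketch of that arithmetic; the struct below mirrors only the fields the macro assigns, not the kernel's full struct cacheinfo:

#include <stdio.h>

struct cache_leaf {
    unsigned int level;
    unsigned int coherency_line_size;
    unsigned int number_of_sets;
    unsigned int ways_of_associativity;
    unsigned int size;
};

static void populate_leaf(struct cache_leaf *leaf, unsigned int level,
                          unsigned int linesz, unsigned int sets, unsigned int ways)
{
    leaf->level = level;
    leaf->coherency_line_size = linesz;
    leaf->number_of_sets = sets;
    leaf->ways_of_associativity = ways;
    leaf->size = linesz * sets * ways;   /* total capacity in bytes */
}

int main(void)
{
    struct cache_leaf l1d;

    populate_leaf(&l1d, 1, 32, 256, 4);
    printf("L%u: %u bytes (%u-byte lines, %u sets, %u ways)\n",
           l1d.level, l1d.size, l1d.coherency_line_size,
           l1d.number_of_sets, l1d.ways_of_associativity);
    return 0;
}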
/linux/fs/gfs2/

dir.c
     34  * beginning of the leaf block. The dirents reside in leaves when
     41  * used as an array of 64-bit block pointers pointing to the leaf blocks. The
     43  * block pointer in the array that points to the same leaf. In fact, when a
     45  * point to the same leaf.
     47  * When a leaf is completely full, the size of the hash table can be
    773  * get_leaf_nr - Get a leaf number associated with the index
    775  * @index: hash table index of the targeted leaf
    776  * @leaf_out: Resulting leaf block number
    819  struct gfs2_leaf *leaf;   in gfs2_dirent_search() local
    837  leaf = (struct gfs2_leaf *)bh->b_data;   in gfs2_dirent_search()
    [all …]
|
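The dir.c comments above describe an extendible-hash directory: the hash table is an array of 2^depth block pointers, several consecutive slots may point at the same leaf, and the table grows when a full leaf has to split. A generic sketch of the lookup side of that scheme (this is the textbook technique the comments describe, with invented names, not gfs2's on-disk code):

#include <stdint.h>
#include <stdio.h>

struct dir_leaf {
    unsigned int depth;        /* how many hash bits this leaf actually distinguishes */
    /* dirents would follow here */
};

struct dir_table {
    unsigned int depth;        /* table has 1 << depth slots; assumed >= 1 here */
    struct dir_leaf **slots;   /* consecutive slots may share one leaf */
};

/* Index with the top 'depth' bits of the 32-bit name hash, as in extendible hashing. */
static struct dir_leaf *dir_lookup_leaf(const struct dir_table *tbl, uint32_t hash)
{
    uint32_t index = hash >> (32 - tbl->depth);

    return tbl->slots[index];
}

int main(void)
{
    static struct dir_leaf leaf_a = { .depth = 1 }, leaf_b = { .depth = 1 };
    static struct dir_leaf *slots[4] = { &leaf_a, &leaf_a, &leaf_b, &leaf_b };
    struct dir_table tbl = { .depth = 2, .slots = slots };

    /* Hashes whose top bits differ land in different leaves; adjacent slots share a leaf. */
    printf("hash 0x10000000 -> leaf %p\n", (void *)dir_lookup_leaf(&tbl, 0x10000000u));
    printf("hash 0xc0000000 -> leaf %p\n", (void *)dir_lookup_leaf(&tbl, 0xc0000000u));
    return 0;
}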