| /linux/fs/ocfs2/ |
| ocfs2.h |
  776  u32 clusters) in ocfs2_clusters_to_blocks() argument
  781  return (u64)clusters << c_to_b_bits; in ocfs2_clusters_to_blocks()
  807  unsigned int clusters; in ocfs2_clusters_for_bytes() local
  811  clusters = (unsigned int)(bytes >> cl_bits); in ocfs2_clusters_for_bytes()
  813  return clusters; in ocfs2_clusters_for_bytes()
  820  unsigned int clusters; in ocfs2_bytes_to_clusters() local
  822  clusters = (unsigned int)(bytes >> cl_bits); in ocfs2_bytes_to_clusters()
  823  return clusters; in ocfs2_bytes_to_clusters()
  834  u32 clusters) in ocfs2_clusters_to_bytes() argument
  836  return (u64)clusters << OCFS2_SB(sb)->s_clustersize_bits; in ocfs2_clusters_to_bytes()
  [all …]
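The ocfs2.h matches above are all unit-conversion helpers: cluster, block, and byte sizes are powers of two, so the conversions reduce to shifts by bit counts taken from the superblock. A small self-contained sketch of the same idea follows; the struct, its field names, and the round-up in the bytes-to-clusters direction are illustrative assumptions, not ocfs2's actual definitions.

/*
 * Sketch of shift-based cluster/block/byte conversions, modelled loosely on
 * the ocfs2 helpers matched above.  sb_geometry and its fields are
 * hypothetical stand-ins for the real superblock fields.
 */
#include <stdint.h>
#include <stdio.h>

struct sb_geometry {
	unsigned int cluster_bits;	/* log2 of the cluster size in bytes */
	unsigned int block_bits;	/* log2 of the block size in bytes */
};

/* Clusters -> blocks: shift by the difference between the two log2 sizes. */
static uint64_t clusters_to_blocks(const struct sb_geometry *sb, uint32_t clusters)
{
	unsigned int c_to_b_bits = sb->cluster_bits - sb->block_bits;

	return (uint64_t)clusters << c_to_b_bits;
}

/* Clusters -> bytes: a plain left shift by the cluster size in bits. */
static uint64_t clusters_to_bytes(const struct sb_geometry *sb, uint32_t clusters)
{
	return (uint64_t)clusters << sb->cluster_bits;
}

/* Bytes -> clusters, rounded up: how many whole clusters cover @bytes. */
static uint32_t clusters_for_bytes(const struct sb_geometry *sb, uint64_t bytes)
{
	bytes += (1ULL << sb->cluster_bits) - 1;

	return (uint32_t)(bytes >> sb->cluster_bits);
}

int main(void)
{
	/* 1 MiB clusters, 4 KiB blocks */
	struct sb_geometry sb = { .cluster_bits = 20, .block_bits = 12 };

	printf("8 clusters = %llu blocks = %llu bytes; 5000000 bytes need %u clusters\n",
	       (unsigned long long)clusters_to_blocks(&sb, 8),
	       (unsigned long long)clusters_to_bytes(&sb, 8),
	       (unsigned)clusters_for_bytes(&sb, 5000000ULL));
	return 0;
}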
|
| resize.c |
  178  static int update_backups(struct inode * inode, u32 clusters, char *data) in update_backups() argument
  191  if (cluster >= clusters) in update_backups()
  221  u32 clusters = 0; in ocfs2_update_super_and_backups() local
  239  clusters = le32_to_cpu(super_di->i_clusters); in ocfs2_update_super_and_backups()
  248  ret = update_backups(inode, clusters, super_bh->b_data); in ocfs2_update_super_and_backups()
  397  else if (le16_to_cpu(gd->bg_bits) != input->clusters * cl_bpc) in ocfs2_check_new_group()
  401  le16_to_cpu(gd->bg_bits), input->clusters); in ocfs2_check_new_group()
  434  else if (total_clusters + input->clusters < total_clusters) in ocfs2_verify_group_and_input()
  436  else if (input->clusters > cl_cpg) in ocfs2_verify_group_and_input()
  438  else if (input->frees > input->clusters) in ocfs2_verify_group_and_input()
  [all …]
|
| ocfs2_trace.h |
  505  unsigned int e_cpos, unsigned int clusters),
  506  TP_ARGS(owner, cpos, len, index, e_cpos, clusters),
  513  __field(unsigned int, clusters)
  521  __entry->clusters = clusters;
  525  __entry->e_cpos, __entry->clusters)
  530  unsigned int clusters, unsigned int depth),
  531  TP_ARGS(ino, new_cpos, clusters, depth),
  535  __field(unsigned int, clusters)
  541  __entry->clusters = clusters;
  546  __entry->clusters, __entry->depth)
  [all …]
|
| file.c |
  1447  u32 cpos, phys_cpos, clusters, alloc_size; in ocfs2_allocate_unwritten_extents() local
  1476  clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len); in ocfs2_allocate_unwritten_extents()
  1477  clusters -= cpos; in ocfs2_allocate_unwritten_extents()
  1479  while (clusters) { in ocfs2_allocate_unwritten_extents()
  1491  if (alloc_size > clusters) in ocfs2_allocate_unwritten_extents()
  1492  alloc_size = clusters; in ocfs2_allocate_unwritten_extents()
  1511  clusters -= alloc_size; in ocfs2_allocate_unwritten_extents()
  2153  u32 cpos, clusters, extent_len, phys_cpos; in ocfs2_check_range_for_refcount() local
  2162  clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos; in ocfs2_check_range_for_refcount()
  2164  while (clusters) { in ocfs2_check_range_for_refcount()
  [all …]
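The file.c matches show a pattern that recurs throughout ocfs2: a byte range is converted to a cluster count with ocfs2_clusters_for_bytes(), then consumed in a while (clusters) loop in chunks whose size is capped by whatever the allocator or extent lookup returns. A minimal sketch of that loop shape, with a hypothetical alloc_chunk() standing in for the real per-iteration call:

/*
 * Sketch of the "convert byte range to clusters, then consume in chunks"
 * loop visible in ocfs2_allocate_unwritten_extents().  alloc_chunk() is a
 * made-up stand-in for the real allocation/lookup step.
 */
#include <stdint.h>
#include <stdio.h>

/* Pretend allocator: grants at most 8 contiguous clusters per call. */
static uint32_t alloc_chunk(uint32_t cpos, uint32_t wanted)
{
	uint32_t granted = wanted > 8 ? 8 : wanted;

	printf("allocated %u clusters at cpos %u\n", (unsigned)granted, (unsigned)cpos);
	return granted;
}

static void allocate_range(unsigned int cluster_bits, uint64_t start, uint64_t len)
{
	/* Start cluster of the range, and the rounded-up cluster count. */
	uint32_t cpos = (uint32_t)(start >> cluster_bits);
	uint32_t clusters = (uint32_t)((start + len + (1ULL << cluster_bits) - 1)
				       >> cluster_bits) - cpos;

	while (clusters) {
		uint32_t alloc_size = alloc_chunk(cpos, clusters);

		cpos += alloc_size;
		clusters -= alloc_size;
	}
}

int main(void)
{
	allocate_range(20, 0, 20ULL << 20);	/* 1 MiB clusters, 20 MiB range */
	return 0;
}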
|
| refcounttree.c |
  2367  u32 clusters, in ocfs2_calc_refcount_meta_credits() argument
  2378  while (clusters) { in ocfs2_calc_refcount_meta_credits()
  2380  cpos, clusters, &rec, in ocfs2_calc_refcount_meta_credits()
  2410  recs_add, (unsigned long long)cpos, clusters, in ocfs2_calc_refcount_meta_credits()
  2415  len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) + in ocfs2_calc_refcount_meta_credits()
  2444  if (cpos + clusters < le64_to_cpu(rec.r_cpos) + in ocfs2_calc_refcount_meta_credits()
  2452  clusters -= len; in ocfs2_calc_refcount_meta_credits()
  2495  (unsigned long long)start_cpos, clusters, in ocfs2_calc_refcount_meta_credits()
  2517  u32 clusters, in ocfs2_prepare_refcount_change_for_del() argument
  2551  start_cpos, clusters, in ocfs2_prepare_refcount_change_for_del()
  [all …]
|
| alloc.c |
  95  u32 clusters);
  144  u32 clusters);
  148  u32 clusters);
  189  u32 clusters) in ocfs2_dinode_update_clusters() argument
  194  le32_add_cpu(&di->i_clusters, clusters); in ocfs2_dinode_update_clusters()
  209  u32 clusters) in ocfs2_dinode_extent_map_truncate() argument
  213  ocfs2_extent_map_trunc(inode, clusters); in ocfs2_dinode_extent_map_truncate()
  275  u32 clusters) in ocfs2_xattr_value_update_clusters() argument
  279  le32_add_cpu(&vb->vb_xv->xr_clusters, clusters); in ocfs2_xattr_value_update_clusters()
  321  u32 clusters) in ocfs2_xattr_tree_update_clusters() argument
  [all …]
|
| refcounttree.h |
  42  u32 clusters,
|
| suballoc.c |
  326  u64 p_blkno, unsigned int clusters) in ocfs2_bg_discontig_add_extent() argument
  338  rec->e_leaf_clusters = cpu_to_le16(clusters); in ocfs2_bg_discontig_add_extent()
  339  le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc)); in ocfs2_bg_discontig_add_extent()
  341  clusters * le16_to_cpu(cl->cl_bpc)); in ocfs2_bg_discontig_add_extent()
  499  u32 p_cpos, clusters; in ocfs2_block_group_grow_discontig() local
  518  &clusters); in ocfs2_block_group_grow_discontig()
  526  clusters); in ocfs2_block_group_grow_discontig()
  528  min_bits = clusters; in ocfs2_block_group_grow_discontig()
|
| xattr.c |
  1139  u32 cpos, p_cluster, num_clusters, bpc, clusters; in ocfs2_xattr_get_value_outside() local
  1147  clusters = le32_to_cpu(xv->xr_clusters); in ocfs2_xattr_get_value_outside()
  1152  while (cpos < clusters) { in ocfs2_xattr_get_value_outside()
  1390  u32 clusters = ocfs2_clusters_for_bytes(inode->i_sb, value_len); in __ocfs2_xattr_set_value_outside() local
  1396  BUG_ON(clusters > le32_to_cpu(xv->xr_clusters)); in __ocfs2_xattr_set_value_outside()
  1398  while (cpos < clusters) { in __ocfs2_xattr_set_value_outside()
  4001  u32 clusters, in ocfs2_iterate_xattr_buckets() argument
  4007  u32 num_buckets = clusters * bpc; in ocfs2_iterate_xattr_buckets()
  4018  (unsigned long long)blkno, clusters); in ocfs2_iterate_xattr_buckets()
  5921  u32 clusters = le32_to_cpu(xv->xr_clusters); in ocfs2_xattr_value_attach_refcount() local
  [all …]
|
| /linux/arch/arm/common/ |
| mcpm_entry.c |
  36  mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; in __mcpm_cpu_going_down()
  37  sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_going_down()
  50  mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; in __mcpm_cpu_down()
  51  sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_down()
  66  mcpm_sync.clusters[cluster].cluster = state; in __mcpm_outbound_leave_critical()
  67  sync_cache_w(&mcpm_sync.clusters[cluster].cluster); in __mcpm_outbound_leave_critical()
  85  struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster]; in __mcpm_outbound_enter_critical()
  137  sync_cache_r(&mcpm_sync.clusters[cluster].cluster); in __mcpm_cluster_state()
  138  return mcpm_sync.clusters[cluster].cluster; in __mcpm_cluster_state()
  436  mcpm_sync.clusters[i].cluster = CLUSTER_DOWN; in mcpm_sync_init()
  [all …]
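The mcpm_entry.c matches all follow one discipline: a CPU updates its own state word (or the cluster state word) in the shared mcpm_sync structure and immediately pushes it to main memory with sync_cache_w(), while readers pull a fresh copy with sync_cache_r() before trusting it, because peer CPUs may be running with caches disabled during power transitions. The sketch below mirrors only that write-then-sync / sync-then-read shape; the sync_cache_* stubs here are plain compiler barriers, not real cache maintenance, and the layout is simplified from the real mcpm_sync_struct.

/*
 * Illustrative state-publishing pattern, loosely after mcpm_entry.c.
 * The sync_cache_w()/sync_cache_r() stubs below only stop compiler
 * reordering; the kernel versions clean/invalidate the cache lines
 * covering the object, which this sketch does not attempt.
 */
#include <stdio.h>

#define MAX_CPUS_PER_CLUSTER	4
#define MAX_NR_CLUSTERS		2

enum { CPU_DOWN, CPU_GOING_DOWN, CPU_UP };
enum { CLUSTER_DOWN, CLUSTER_UP, CLUSTER_GOING_DOWN };

struct cluster_sync {
	struct { volatile int cpu; } cpus[MAX_CPUS_PER_CLUSTER];
	volatile int cluster;
};

static struct cluster_sync sync_state[MAX_NR_CLUSTERS];

#define sync_cache_w(ptr)	__asm__ __volatile__("" ::: "memory")
#define sync_cache_r(ptr)	__asm__ __volatile__("" ::: "memory")

/* Publish "this CPU is on its way down" so other CPUs can see it. */
static void cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	sync_state[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&sync_state[cluster].cpus[cpu].cpu);
}

/* Read the cluster state only after refreshing it from memory. */
static int cluster_state(unsigned int cluster)
{
	sync_cache_r(&sync_state[cluster].cluster);
	return sync_state[cluster].cluster;
}

int main(void)
{
	cpu_going_down(0, 0);
	printf("cpu0 state %d, cluster0 state %d\n",
	       sync_state[0].cpus[0].cpu, cluster_state(0));
	return 0;
}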
|
| /linux/fs/ntfs3/ |
| Kconfig |
  18  bool "64 bits per NTFS clusters"
  21  Windows implementation of ntfs.sys uses 32 bits per clusters.
  22  If activated 64 bits per clusters you will be able to use 4k cluster
|
| /linux/drivers/gpu/drm/msm/adreno/ |
| a6xx_gpu_state.c |
  39  struct a6xx_gpu_state_obj *clusters; member
  819  a6xx_state->clusters = state_kcalloc(a6xx_state, in a6xx_get_clusters()
  820  ARRAY_SIZE(a6xx_clusters), sizeof(*a6xx_state->clusters)); in a6xx_get_clusters()
  822  if (!a6xx_state->clusters) in a6xx_get_clusters()
  829  &a6xx_state->clusters[i], dumper); in a6xx_get_clusters()
  838  const struct gen7_cluster_registers *clusters; in a7xx_get_clusters() local
  842  clusters = gen7_0_0_clusters; in a7xx_get_clusters()
  845  clusters = gen7_2_0_clusters; in a7xx_get_clusters()
  849  clusters = gen7_9_0_clusters; in a7xx_get_clusters()
  853  a6xx_state->clusters = state_kcalloc(a6xx_state, in a7xx_get_clusters()
  [all …]
|
| /linux/Documentation/admin-guide/perf/ |
| qcom_l2_pmu.rst |
  5  This driver supports the L2 cache clusters found in Qualcomm Technologies
  6  Centriq SoCs. There are multiple physical L2 cache clusters, each with their
|
| starfive_starlink_pmu.rst |
  7  clusters with an L3 memory system.
|
| hisi-pmu.rst |
  102  clusters (SICL) containing multiple I/O clusters (ICLs). Each CCL/ICL in the
|
| /linux/Documentation/filesystems/ext4/ |
| bigalloc.rst |
  27  on, the block bitmaps track clusters, not individual blocks. This means
  31  units of clusters instead of blocks” to the extent tree, though it is
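The bigalloc.rst lines describe ext4's cluster allocation unit: with the feature enabled, one bitmap bit covers a whole cluster of blocks rather than a single block. A tiny worked example of the ratio involved is below; the "size = 1024 << value" encoding mirrors how ext4 stores its log sizes, but treat the field names and the chosen values as illustrative assumptions.

/*
 * Worked example of the block/cluster ratio under bigalloc-style
 * allocation.  The log sizes follow the "size = 1024 << value" encoding;
 * the values here (4 KiB blocks, 64 KiB clusters) are just examples.
 */
#include <stdio.h>

int main(void)
{
	unsigned int log_block_size = 2;	/* 1024 << 2 = 4 KiB blocks */
	unsigned int log_cluster_size = 6;	/* 1024 << 6 = 64 KiB clusters */
	unsigned int blocks_per_cluster = 1u << (log_cluster_size - log_block_size);

	/* 16 blocks per cluster: each bitmap bit now tracks 64 KiB, so one
	 * block group's bitmap describes 16 times as much disk space. */
	printf("blocks per cluster: %u\n", blocks_per_cluster);
	return 0;
}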
|
| /linux/Documentation/admin-guide/blockdev/drbd/ |
| index.rst |
  10  clusters and in this context, is a "drop-in" replacement for shared
|
| /linux/arch/arm/include/asm/ |
| mcpm.h |
  298  struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS]; member
|
| /linux/sound/soc/sdca/ |
| sdca_functions.c |
  1965  struct sdca_cluster *clusters; in find_sdca_clusters() local
  1981  clusters = devm_kcalloc(dev, num_clusters, sizeof(*clusters), GFP_KERNEL); in find_sdca_clusters()
  1982  if (!clusters) in find_sdca_clusters()
  1993  clusters[i].id = cluster_list[i]; in find_sdca_clusters()
  2002  "mipi-sdca-cluster-id-0x%X-subproperties", clusters[i].id); in find_sdca_clusters()
  2011  ret = find_sdca_cluster_channels(dev, cluster_node, &clusters[i]); in find_sdca_clusters()
  2018  function->clusters = clusters; in find_sdca_clusters()
  2270  struct sdca_cluster *cluster = &function->clusters[i]; in sdca_id_find_cluster()
|
| /linux/drivers/platform/mellanox/ |
| Kconfig |
  38  Centers (EDC) for building Ethernet based clusters, High-Performance
  72  Centers (EDC) for building Ethernet based clusters, High-Performance
|
| /linux/Documentation/driver-api/xilinx/ |
| eemi.rst |
  15  components running across different processing clusters on a chip or
|
| /linux/drivers/net/hippi/ |
| Kconfig |
  13  single-mode). HIPPI networks are commonly used for clusters and to
|
| /linux/Documentation/devicetree/bindings/cpu/ |
| cpu-capacity.txt |
  61  Example 1 (ARM 64-bit, 6-cpu system, two clusters):
  195  Example 2 (ARM 32-bit, 4-cpu system, two clusters,
|
| /linux/Documentation/arch/arm/ |
| cluster-pm-race-avoidance.rst |
  22  In a system containing multiple clusters of CPUs, it is also desirable
  23  to have the ability to turn off entire clusters.
  25  Turning entire clusters off and on is a risky business, because it
  519  clusters of clusters are not supported). The algorithm could be
|
| /linux/drivers/block/drbd/ |
| Kconfig |
  21  clusters and in this context, is a "drop-in" replacement for shared
|