/linux/drivers/net/ethernet/marvell/prestera/ |
H A D | prestera_counter.c |
   45  prestera_counter_is_ready(struct prestera_counter_block *block, u32 id)
   47  	return block->counter_flag[id - block->offset] == COUNTER_FLAG_READY;
   60  static void prestera_counter_block_lock(struct prestera_counter_block *block)
   62  	mutex_lock(&block->mtx);
   65  static void prestera_counter_block_unlock(struct prestera_counter_block *block)
   67  	mutex_unlock(&block->mtx);
   70  static bool prestera_counter_block_incref(struct prestera_counter_block *block)
   72  	return refcount_inc_not_zero(&block->refcnt);
   75  static bool prestera_counter_block_decref(struct prestera_counter_block *block)
   77  	return refcount_dec_and_test(&block->refcnt);
   [all …]
|
H A D | prestera_flow.c |
   16  static int prestera_flow_block_mall_cb(struct prestera_flow_block *block,
   21  		return prestera_mall_replace(block, f);
   23  		prestera_mall_destroy(block);
   30  static int prestera_flow_block_flower_cb(struct prestera_flow_block *block,
   35  		return prestera_flower_replace(block, f);
   37  		prestera_flower_destroy(block, f);
   40  		return prestera_flower_stats(block, f);
   42  		return prestera_flower_tmplt_create(block, f);
   44  		prestera_flower_tmplt_destroy(block, f);
   54  	struct prestera_flow_block *block = cb_priv;
   [all …]
|
H A D | prestera_matchall.c |
   14  static int prestera_mall_prio_check(struct prestera_flow_block *block,
   21  	err = prestera_flower_prio_get(block, f->common.chain_index,
   32  	if (f->common.prio <= flower_prio_max && !block->ingress) {
   36  	if (f->common.prio >= flower_prio_min && block->ingress) {
   44  int prestera_mall_prio_get(struct prestera_flow_block *block,
   47  	if (!block->mall.bound)
   50  	*prio_min = block->mall.prio_min;
   51  	*prio_max = block->mall.prio_max;
   55  static void prestera_mall_prio_update(struct prestera_flow_block *block,
   58  	block->mall.prio_min = min(block->mall.prio_min, f->common.prio);
   [all …]
|
/linux/drivers/gpu/drm/ |
H A D | drm_buddy.c |
   19  	struct drm_buddy_block *block;
   23  	block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
   24  	if (!block)
   27  	block->header = offset;
   28  	block->header |= order;
   29  	block->parent = parent;
   31  	BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
   32  	return block;
   36  			   struct drm_buddy_block *block)
   38  	kmem_cache_free(slab_blocks, block);
   [all …]
|
/linux/sound/isa/gus/ |
H A D | gus_mem.c |
   28  snd_gf1_mem_xalloc(struct snd_gf1_mem *alloc, struct snd_gf1_mem_block *block,
   36  	*nblock = *block;
   70  int snd_gf1_mem_xfree(struct snd_gf1_mem * alloc, struct snd_gf1_mem_block * block)
   72  	if (block->share) {	/* ok.. shared block */
   73  		block->share--;
   77  	if (alloc->first == block) {
   78  		alloc->first = block->next;
   79  		if (block->next)
   80  			block->next->prev = NULL;
   82  		block->prev->next = block->next;
   [all …]
|
H A D | gus_dma.c |
   86  	struct snd_gf1_dma_block *block;
   88  	/* PCM block have bigger priority than synthesizer one */
   90  		block = gus->gf1.dma_data_pcm;
   91  		if (gus->gf1.dma_data_pcm_last == block) {
   95  			gus->gf1.dma_data_pcm = block->next;
   98  		block = gus->gf1.dma_data_synth;
   99  		if (gus->gf1.dma_data_synth_last == block) {
  103  			gus->gf1.dma_data_synth = block->next;
  106  		block = NULL;
  108  	if (block) {
   [all …]
|
/linux/Documentation/filesystems/ext4/ |
H A D | blockgroup.rst |
    6  The layout of a standard block group is approximately as follows (each
   14  - ext4 Super Block
   17  - Data Block Bitmap
   22  - 1 block
   25  - 1 block
   26  - 1 block
   30  For the special case of block group 0, the first 1024 bytes are unused,
   32  The superblock will start at offset 1024 bytes, whichever block that
   33  happens to be (usually 0). However, if for some reason the block size =
   34  1024, then block 0 is marked in use and the superblock goes in block 1.
   [all …]
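
As a quick illustration of the rule quoted above (assuming nothing beyond what the .rst text states): the primary superblock always starts at byte offset 1024, so the block number that holds it follows from a one-line division. The helper name below is invented for this sketch.

    #include <stdio.h>

    /* Which block contains the primary ext4 superblock?  It starts at byte
     * offset 1024, so only a 1024-byte block size puts it in block 1; any
     * larger block size leaves it inside block 0.
     */
    static unsigned long ext4_primary_sb_block(unsigned long block_size)
    {
            return 1024 / block_size;       /* 1024 -> 1, 2048/4096/... -> 0 */
    }

    int main(void)
    {
            printf("1024-byte blocks: superblock in block %lu\n",
                   ext4_primary_sb_block(1024));
            printf("4096-byte blocks: superblock in block %lu\n",
                   ext4_primary_sb_block(4096));
            return 0;
    }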
|
/linux/drivers/iio/buffer/ |
H A D | industrialio-buffer-dma.c |
   25   * For DMA buffers the storage is sub-divided into so called blocks. Each block
   26   * has its own memory buffer. The size of the block is the granularity at which
   28   * basic unit of data exchange from one sample to one block decreases the
   31   * sample the overhead will be x for each sample. Whereas when using a block
   39   * them with data. Block on the outgoing queue have been filled with data and
   42   * A block can be in one of the following states:
   44   *   the block.
   47   * * Owned by the DMA controller: The DMA controller is processing the block
   52   * * Dead: A block that is dead has been marked as to be freed. It might still
   55   *   incoming or outgoing queue the block will be freed.
   [all …]
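
The states listed in that comment form a small lifecycle; the sketch below models it in plain userspace C. The enum values, struct fields, and helpers are hypothetical stand-ins chosen to mirror the comment, not the driver's actual definitions.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical block lifecycle, mirroring the states described above. */
    enum demo_block_state {
            DEMO_BLOCK_QUEUED,      /* on the incoming queue, waiting to be filled */
            DEMO_BLOCK_ACTIVE,      /* owned by the DMA controller */
            DEMO_BLOCK_DONE,        /* on the outgoing queue, data ready for userspace */
            DEMO_BLOCK_DEAD,        /* marked to be freed once the DMA lets go of it */
    };

    struct demo_block {
            enum demo_block_state state;
            void *buf;              /* each block has its own memory buffer */
            size_t size;            /* granularity of data exchange with the hardware */
    };

    /* Transfer complete: dead blocks are freed, live ones move to the
     * outgoing queue for userspace to consume.
     */
    static void demo_block_done(struct demo_block *block)
    {
            if (block->state == DEMO_BLOCK_DEAD) {
                    free(block->buf);
                    free(block);
                    return;
            }
            block->state = DEMO_BLOCK_DONE;
    }

    int main(void)
    {
            struct demo_block *block = calloc(1, sizeof(*block));

            block->buf = malloc(4096);
            block->size = 4096;
            block->state = DEMO_BLOCK_ACTIVE;       /* handed to the DMA controller */
            demo_block_done(block);                 /* DMA reports completion */
            printf("state after completion: %d\n", block->state);
            free(block->buf);
            free(block);
            return 0;
    }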
|
/linux/drivers/mtd/ |
H A D | nftlmount.c |
   28  	unsigned int block, boot_record_count = 0;
   48  	for (block = 0; block < nftl->nb_blocks; block++) {
   53  		ret = mtd_read(mtd, block * nftl->EraseSize, SECTORSIZE,
   61  				printk(KERN_WARNING "Block read at 0x%x of mtd%d failed: %d\n",
   62  				       block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
   64  				printk(KERN_WARNING "Further failures for this block will not be printed\n");
   73  			       block * nftl->EraseSize, nftl->mbd.mtd->index);
   79  		ret = nftl_read_oob(mtd, block * nftl->EraseSize +
   84  			       block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
   94  			       block * nftl->EraseSize, nftl->mbd.mtd->index,
   [all …]
|
H A D | inftlmount.c |
   35  	unsigned int i, block;
   55  	for (block = 0; block < inftl->nb_blocks; block++) {
   62  		ret = mtd_read(mtd, block * inftl->EraseSize, SECTORSIZE,
   70  			    printk(KERN_WARNING "INFTL: block read at 0x%x "
   72  				   block * inftl->EraseSize,
   76  				   "failures for this block will "
   89  			block * inftl->EraseSize + SECTORSIZE + 8,
   94  			    "(err %d)\n", block * inftl->EraseSize,
  107  		mtd_read(mtd, block * inftl->EraseSize + 4096, SECTORSIZE,
  172  	block >>= mh->BlockMultiplierBits;
   [all …]
|
/linux/drivers/gpio/ |
H A D | gpio-sch311x.c |
   39  struct sch311x_gpio_block {		/* one GPIO block runtime data */
   44  	spinlock_t lock;		/* lock for this GPIO block */
  134  	struct sch311x_gpio_block *block = gpiochip_get_data(chip);
  136  	if (block->config_regs[offset] == 0) /* GPIO is not available */
  139  	if (!request_region(block->runtime_reg + block->config_regs[offset],
  142  			block->runtime_reg + block->config_regs[offset]);
  150  	struct sch311x_gpio_block *block = gpiochip_get_data(chip);
  152  	if (block->config_regs[offset] == 0) /* GPIO is not available */
  155  	release_region(block->runtime_reg + block->config_regs[offset], 1);
  160  	struct sch311x_gpio_block *block = gpiochip_get_data(chip);
   [all …]
|
/linux/drivers/w1/ |
H A D | w1_netlink.c |
   41  	struct w1_cb_block *block;
   49   * @block: block to calculate
   55  static u16 w1_reply_len(struct w1_cb_block *block)
   57  	if (!block->cn)
   59  	return (u8 *)block->cn - (u8 *)block->first_cn + block->cn->len;
   62  static void w1_unref_block(struct w1_cb_block *block)
   64  	if (atomic_sub_return(1, &block->refcnt) == 0) {
   65  		u16 len = w1_reply_len(block);
   67  		cn_netlink_send_mult(block->first_cn, len,
   68  				     block->portid, 0,
   [all …]
|
/linux/drivers/md/dm-vdo/ |
H A D | recovery-journal.h |
   24   * The recovery_journal provides a log of all block mapping and reference count changes which have
   25   * not yet been stably written to the block map or slab journals. This log helps to reduce the
   26   * write amplification of writes by providing amortization of slab journal and block map page
   35   * journal. The 'head' is the oldest active block in the journal. The 'tail' is the end of the
   36   * half-open interval containing the active blocks. 'active' is the number of the block actively
   42   * less than the on-disk size. Each in-memory block is also a vdo_completion. Each in-memory block
   43   * has a vio which is used to commit that block to disk. The vio's data is the on-disk
   44   * representation of the journal block. In addition each in-memory block has a buffer which is used
   45   * to accumulate entries while a partial commit of the block is in progress. In-memory blocks are
   46   * kept on two rings. Free blocks live on the 'free_tail_blocks' ring. When a block becomes active
   [all …]
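
The 'head'/'tail' bookkeeping described there is a half-open interval, so membership of a block in the active set reduces to a single comparison. A minimal sketch, assuming head <= n < tail is the intended reading; the type and helper names are made up for illustration and are not the dm-vdo structures.

    #include <stdbool.h>
    #include <stdio.h>

    /* 'head' is the oldest active journal block, 'tail' is one past the
     * newest, so block n is active exactly when head <= n < tail.
     */
    struct demo_journal {
            unsigned long long head;        /* oldest active block */
            unsigned long long tail;        /* end of the half-open interval */
    };

    static bool demo_journal_block_is_active(const struct demo_journal *j,
                                             unsigned long long n)
    {
            return n >= j->head && n < j->tail;
    }

    int main(void)
    {
            struct demo_journal j = { .head = 10, .tail = 14 };

            /* blocks 10..13 are active; 14 is where the next block will go */
            printf("block 13 active: %d\n", demo_journal_block_is_active(&j, 13));
            printf("block 14 active: %d\n", demo_journal_block_is_active(&j, 14));
            return 0;
    }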
|
/linux/fs/afs/ |
H A D | dir_edit.c |
   17   * Find a number of contiguous clear bits in a directory block bitmask.
   20   * variable.  The first bit doesn't count as it corresponds to the block header
   23  static int afs_find_contig_bits(union afs_xdr_dir_block *block, unsigned int nr_slots)
   29  	bitmap  = (u64)block->hdr.bitmap[0] << 0 * 8;
   30  	bitmap |= (u64)block->hdr.bitmap[1] << 1 * 8;
   31  	bitmap |= (u64)block->hdr.bitmap[2] << 2 * 8;
   32  	bitmap |= (u64)block->hdr.bitmap[3] << 3 * 8;
   33  	bitmap |= (u64)block->hdr.bitmap[4] << 4 * 8;
   34  	bitmap |= (u64)block->hdr.bitmap[5] << 5 * 8;
   35  	bitmap |= (u64)block->hdr.bitmap[6] << 6 * 8;
   [all …]
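
The comment describes a search for a run of clear bits that skips bit 0, since that bit stands for the directory block header. Below is one straightforward way to express such a search over a 64-bit bitmap; it is a sketch of the idea under those assumptions, not the AFS implementation, and all names are invented.

    #include <stdint.h>
    #include <stdio.h>

    /* Return the first bit position (>= 1) where nr_slots contiguous clear
     * bits start, or -1 if no such run exists.
     */
    static int demo_find_contig_clear(uint64_t bitmap, unsigned int nr_slots)
    {
            uint64_t mask = (nr_slots >= 64) ? ~0ULL : (1ULL << nr_slots) - 1;
            unsigned int bit;

            for (bit = 1; bit + nr_slots <= 64; bit++) {
                    if (((bitmap >> bit) & mask) == 0)
                            return bit;     /* first free slot of the run */
            }
            return -1;                      /* no room in this block */
    }

    int main(void)
    {
            /* bits 0-4 used, everything above is free */
            printf("run of 3 starts at bit %d\n",
                   demo_find_contig_clear(0x1f, 3));
            return 0;
    }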
|
/linux/sound/pci/ctxfi/ |
H A D | ctvmem.c |
   26   * Find or create vm block based on requested @size.
   32  	struct ct_vm_block *block = NULL, *entry;
   46  			break; /* found a block that is big enough */
   55  		block = entry;
   59  	block = kzalloc(sizeof(*block), GFP_KERNEL);
   60  	if (!block)
   63  	block->addr = entry->addr;
   64  	block->size = size;
   65  	list_add(&block->list, &vm->used);
   72  	return block;
   [all …]
|
/linux/drivers/pinctrl/qcom/ |
H A D | Kconfig.msm |
    9  	  Qualcomm TLMM block found in the Qualcomm APQ8064 platform.
   16  	  Qualcomm TLMM block found in the Qualcomm APQ8084 platform.
   23  	  Qualcomm TLMM block found in the Qualcomm IPQ4019 platform.
   30  	  the Qualcomm Technologies Inc. TLMM block found on the
   39  	  Qualcomm TLMM block found in the Qualcomm IPQ8064 platform.
   46  	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
   54  	  the Qualcomm Technologies Inc. TLMM block found on the
   63  	  the Qualcomm Technologies Inc. TLMM block found on the
   72  	  the Qualcomm Technologies Inc. TLMM block found on the
   81  	  the Qualcomm Technologies Inc. TLMM block found on the
   [all …]
|
/linux/drivers/misc/ |
H A D | sram.c |
   55  static int sram_add_pool(struct sram_dev *sram, struct sram_reserve *block,
   61  					  NUMA_NO_NODE, block->label);
   66  			block->size, NUMA_NO_NODE);
   75  static int sram_add_export(struct sram_dev *sram, struct sram_reserve *block,
   88  	part->battr.size = block->size;
   93  static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block,
  105  		virt_base = devm_ioremap_resource(sram->dev, &block->res);
  107  		virt_base = devm_ioremap_resource_wc(sram->dev, &block->res);
  110  		dev_err(sram->dev, "could not map SRAM at %pr\n", &block->res);
  116  	part->base = sram->virt_base + block->start;
   [all …]
|
/linux/fs/xfs/libxfs/ |
H A D | xfs_btree.c |
  121  	struct xfs_btree_block	*block,
  128  	if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
  130  	if (block->bb_u.l.bb_blkno !=
  133  	if (block->bb_u.l.bb_pad != cpu_to_be32(0))
  137  	if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops))
  139  	if (be16_to_cpu(block->bb_level) != level)
  141  	if (be16_to_cpu(block->bb_numrecs) >
  149   * Check a long btree block header.  Return the address of the failing check,
  155  	struct xfs_btree_block	*block,
  163  	fa = __xfs_btree_check_lblock_hdr(cur, block, level, bp);
   [all …]
|
/linux/drivers/net/ethernet/mellanox/mlxsw/ |
H A D | spectrum_flow.c |
   15  	struct mlxsw_sp_flow_block *block;
   17  	block = kzalloc(sizeof(*block), GFP_KERNEL);
   18  	if (!block)
   20  	INIT_LIST_HEAD(&block->binding_list);
   21  	INIT_LIST_HEAD(&block->mall.list);
   22  	block->mlxsw_sp = mlxsw_sp;
   23  	block->net = net;
   24  	return block;
   27  void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block)
   29  	WARN_ON(!list_empty(&block->binding_list));
   [all …]
|
H A D | core_acl_flex_actions.c |
  364  /* Block structure holds a list of action sets. One action block
  375   * in this block.
  381  	void (*destructor)(struct mlxsw_afa_block *block,
  385  static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
  388  	list_add(&resource->list, &block->resource_list);
  396  static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
  400  	list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
  401  		resource->destructor(block, resource);
  407  	struct mlxsw_afa_block *block;
  409  	block = kzalloc(sizeof(*block), GFP_KERNEL);
   [all …]
|
/linux/drivers/video/fbdev/ |
H A D | edid.h |
   72  #define PIXEL_CLOCK_LO		(unsigned)block[ 0 ]
   73  #define PIXEL_CLOCK_HI		(unsigned)block[ 1 ]
   75  #define H_ACTIVE_LO		(unsigned)block[ 2 ]
   76  #define H_BLANKING_LO		(unsigned)block[ 3 ]
   77  #define H_ACTIVE_HI		UPPER_NIBBLE( (unsigned)block[ 4 ] )
   79  #define H_BLANKING_HI		LOWER_NIBBLE( (unsigned)block[ 4 ] )
   82  #define V_ACTIVE_LO		(unsigned)block[ 5 ]
   83  #define V_BLANKING_LO		(unsigned)block[ 6 ]
   84  #define V_ACTIVE_HI		UPPER_NIBBLE( (unsigned)block[ 7 ] )
   86  #define V_BLANKING_HI		LOWER_NIBBLE( (unsigned)block[ 7 ] )
   [all …]
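
These macros split each timing field of an EDID detailed timing descriptor into a low byte and a high nibble, so reassembling a field is a shift-and-or. A small sketch, assuming only the byte layout implied by the macros above (byte 2 = low 8 bits of horizontal active, upper nibble of byte 4 = high 4 bits); the helper name and the sample bytes are fabricated for the example.

    #include <stdio.h>

    static unsigned int demo_edid_h_active(const unsigned char *block)
    {
            unsigned int lo = block[2];                     /* H_ACTIVE_LO */
            unsigned int hi = (block[4] >> 4) & 0xf;        /* UPPER_NIBBLE of byte 4 */

            return (hi << 8) | lo;
    }

    int main(void)
    {
            /* Fabricated 18-byte detailed timing descriptor: 1920 = 0x780 */
            unsigned char dtd[18] = { 0 };

            dtd[2] = 0x80;          /* low 8 bits of horizontal active */
            dtd[4] = 0x70;          /* high 4 bits in the upper nibble */
            printf("horizontal active: %u pixels\n", demo_edid_h_active(dtd));
            return 0;
    }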
|
/linux/block/ |
H A D | Kconfig |
    3  # Block layer core configuration
    5  menuconfig BLOCK
    6  	bool "Enable the block layer" if EXPERT
   11  	  Provide block layer support for the kernel.
   13  	  Disable this option to remove the block layer support from the
   18  	     - block device files will become unusable
   22  	  they make use of various block layer definitions and facilities.
   27  if BLOCK
   33  	  Enable loading modules and creating block device instances based on
   55  	bool "Block layer SG support v4 helper lib"
   [all …]
|
/linux/drivers/mtd/nand/raw/ |
H A D | nand_bbt.c |
    4   * Bad block table support for the NAND driver
   10   * When nand_scan_bbt is called, then it tries to find the bad block table
   14   * Once a new bad block is discovered then the "factory" information is updated
   21   * If the tables are not versioned, then we "or" the bad block information.
   24   * good / bad blocks and the bad block tables are created.
   29   * The auto generated bad block table is located in the last good blocks
   38   * The table uses 2 bits per block
   39   * 11b:		block is good
   40   * 00b:		block is factory marked bad
   41   * 01b, 10b:	block is marked bad due to wear
   [all …]
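
With 2 bits per block, four block descriptors pack into each table byte and decoding is a shift and a mask. A hedged sketch of that decoding follows; the packing order, helper, and sample table are assumptions made for this example, not code taken from nand_bbt.c.

    #include <stdio.h>
    #include <stdint.h>

    /* 11b = good, 00b = factory marked bad, 01b/10b = marked bad due to wear. */
    static const char *demo_bbt_status(const uint8_t *table, unsigned int block)
    {
            unsigned int byte = block / 4;
            unsigned int shift = (block % 4) * 2;   /* assumed: block 0 in the low bits */
            uint8_t bits = (table[byte] >> shift) & 0x3;

            switch (bits) {
            case 0x3: return "good";
            case 0x0: return "factory bad";
            default:  return "worn bad";
            }
    }

    int main(void)
    {
            /* blocks 0..3: good, good, worn bad, factory bad -> 0x1f */
            uint8_t table[] = { 0x1f };
            unsigned int block;

            for (block = 0; block < 4; block++)
                    printf("block %u: %s\n", block, demo_bbt_status(table, block));
            return 0;
    }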
|
/linux/Documentation/admin-guide/device-mapper/ |
H A D | dm-dust.rst |
   15  in the "bad block list" will fail with EIO ("Input/output error").
   17  Writes of blocks in the "bad block list" will result in the following:
   19  1. Remove the block from the "bad block list".
   40  Path to the block device.
   46  Block size in bytes
   59  (For a device with a block size of 512 bytes)
   65  (For a device with a block size of 4096 bytes)
   73  bad block additions, removals, and remaps will be verbosely logged)::
   89  At any time (i.e.: whether the device has the "bad block" emulation
   94  kernel: device-mapper: dust: badblock added at block 60
   [all …]
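
The behaviour described there, reads of a listed block fail with EIO while a write first removes the block from the list and then proceeds, can be modelled in a few lines. This is a toy userspace model of the semantics only, not dm-dust code; every name here is invented and a fixed-size array stands in for the target's real bad-block store.

    #include <stdio.h>
    #include <errno.h>
    #include <stdbool.h>

    #define DEMO_MAX_BAD 16

    static unsigned long long demo_bad[DEMO_MAX_BAD];
    static unsigned int demo_nr_bad;

    static bool demo_is_bad(unsigned long long block, unsigned int *idx)
    {
            unsigned int i;

            for (i = 0; i < demo_nr_bad; i++) {
                    if (demo_bad[i] == block) {
                            if (idx)
                                    *idx = i;
                            return true;
                    }
            }
            return false;
    }

    static int demo_read(unsigned long long block)
    {
            return demo_is_bad(block, NULL) ? -EIO : 0;     /* listed blocks fail */
    }

    static int demo_write(unsigned long long block)
    {
            unsigned int idx;

            if (demo_is_bad(block, &idx))                   /* step 1: un-list it */
                    demo_bad[idx] = demo_bad[--demo_nr_bad];
            return 0;                                       /* then let the write through */
    }

    int main(void)
    {
            demo_bad[demo_nr_bad++] = 60;
            printf("read block 60:  %d\n", demo_read(60));  /* -EIO */
            printf("write block 60: %d\n", demo_write(60)); /* clears the entry */
            printf("read block 60:  %d\n", demo_read(60));  /* now succeeds */
            return 0;
    }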
|
/linux/drivers/s390/block/ |
H A D | dasd_genhd.c |
   35  int dasd_gendisk_alloc(struct dasd_block *block)
   51  	base = block->base;
   55  	block->tag_set.ops = &dasd_mq_ops;
   56  	block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
   57  	block->tag_set.nr_hw_queues = nr_hw_queues;
   58  	block->tag_set.queue_depth = queue_depth;
   59  	block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
   60  	block->tag_set.numa_node = NUMA_NO_NODE;
   61  	rc = blk_mq_alloc_tag_set(&block->tag_set);
   65  	gdp = blk_mq_alloc_disk(&block->tag_set, &lim, block);
   [all …]
|