
Searched refs:pool_size (Results 1 – 25 of 71) sorted by relevance

/linux/arch/m68k/atari/
stram.c
48 static unsigned long pool_size = 1024*1024; variable
57 pool_size = memparse(arg, NULL); in atari_stram_setup()
97 stram_pool.start = (resource_size_t)memblock_alloc_low(pool_size, in atari_stram_reserve_pages()
101 __func__, pool_size, PAGE_SIZE); in atari_stram_reserve_pages()
103 stram_pool.end = stram_pool.start + pool_size - 1; in atari_stram_reserve_pages()
107 pool_size, &stram_pool); in atari_stram_reserve_pages()
126 stram_pool.end = stram_pool.start + pool_size - 1; in atari_stram_map_pages()
131 pool_size, &stram_pool); in atari_stram_map_pages()
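The stram.c hits above show the pool size coming from a boot parameter via memparse() and then being handed to memblock for reservation. Below is a minimal userspace sketch of that size-parsing step; parse_size() is a hypothetical stand-in for memparse() and only handles the K/M/G suffixes.

    /* Sketch of memparse()-style size parsing, as used for the ST-RAM pool
     * size above.  Not the kernel implementation; the suffix set is an
     * assumption for illustration. */
    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long parse_size(const char *arg)
    {
        char *end;
        unsigned long val = strtoul(arg, &end, 0);

        switch (*end) {
        case 'G': case 'g': val <<= 10; /* fall through */
        case 'M': case 'm': val <<= 10; /* fall through */
        case 'K': case 'k': val <<= 10; break;
        default: break;
        }
        return val;
    }

    int main(void)
    {
        /* Default pool size of 1 MiB, overridable from the command line. */
        unsigned long pool_size = 1024 * 1024;

        pool_size = parse_size("2M");
        printf("pool_size = %lu bytes\n", pool_size);
        return 0;
    }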
/linux/drivers/net/ethernet/mellanox/mlxsw/
spectrum_cnt.c
22 u64 pool_size; member
139 &pool->pool_size); in mlxsw_sp_counter_pool_init()
145 pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL); in mlxsw_sp_counter_pool_init()
173 WARN_ON(find_first_bit(pool->usage, pool->pool_size) != in mlxsw_sp_counter_pool_fini()
174 pool->pool_size); in mlxsw_sp_counter_pool_fini()
231 if (WARN_ON(counter_index >= pool->pool_size)) in mlxsw_sp_counter_free()
250 u64 pool_size; in mlxsw_sp_counter_resources_register() local
259 pool_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_POOL_SIZE); in mlxsw_sp_counter_resources_register()
262 devlink_resource_size_params_init(&size_params, pool_size, in mlxsw_sp_counter_resources_register()
263 pool_size, bank_size, in mlxsw_sp_counter_resources_register()
[all …]
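The spectrum_cnt.c hits outline a bitmap-backed counter pool: pool_size bits track which counter indexes are in use, and freeing validates the index against pool_size. The sketch below reproduces that pattern in plain userspace C; the struct and function names are illustrative, not the mlxsw API, and a byte array stands in for the kernel bitmap helpers.

    #include <stdio.h>
    #include <stdlib.h>

    struct counter_pool {
        size_t pool_size;
        unsigned char *usage;        /* one byte per index for simplicity */
    };

    static int pool_init(struct counter_pool *pool, size_t pool_size)
    {
        pool->pool_size = pool_size;
        pool->usage = calloc(pool_size, 1);
        return pool->usage ? 0 : -1;
    }

    static int counter_alloc(struct counter_pool *pool, size_t *index)
    {
        for (size_t i = 0; i < pool->pool_size; i++) {
            if (!pool->usage[i]) {
                pool->usage[i] = 1;
                *index = i;
                return 0;
            }
        }
        return -1;                   /* pool exhausted */
    }

    static void counter_free(struct counter_pool *pool, size_t index)
    {
        if (index >= pool->pool_size) /* mirrors the WARN_ON() bound check */
            return;
        pool->usage[index] = 0;
    }

    int main(void)
    {
        struct counter_pool pool;
        size_t idx;

        pool_init(&pool, 8);
        if (!counter_alloc(&pool, &idx))
            printf("allocated counter %zu of %zu\n", idx, pool.pool_size);
        counter_free(&pool, idx);
        free(pool.usage);
        return 0;
    }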
/linux/kernel/dma/
pool.c
79 static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size, in atomic_pool_expand() argument
88 order = min(get_order(pool_size), MAX_PAGE_ORDER); in atomic_pool_expand()
91 pool_size = 1 << (PAGE_SHIFT + order); in atomic_pool_expand()
101 arch_dma_prep_coherent(page, pool_size); in atomic_pool_expand()
104 addr = dma_common_contiguous_remap(page, pool_size, in atomic_pool_expand()
121 pool_size, NUMA_NO_NODE); in atomic_pool_expand()
125 dma_atomic_pool_size_add(gfp, pool_size); in atomic_pool_expand()
137 dma_common_free_remap(addr, pool_size); in atomic_pool_expand()
162 static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size, in __dma_atomic_pool_init() argument
174 ret = atomic_pool_expand(pool, pool_size, gf in __dma_atomic_pool_init()
[all …]
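In kernel/dma/pool.c the requested pool_size is first converted to a page allocation order and then rewritten to the size that order actually yields. A small standalone sketch of that rounding, assuming a 4 KiB page size and a hypothetical maximum order of 10:

    #include <stdio.h>

    #define PAGE_SHIFT     12
    #define MAX_PAGE_ORDER 10

    /* Smallest order such that (1 << order) pages cover `size` bytes. */
    static unsigned int get_order(size_t size)
    {
        unsigned int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
            order++;
            size >>= 1;
        }
        return order;
    }

    int main(void)
    {
        size_t pool_size = 200 * 1024;          /* requested: 200 KiB */
        unsigned int order = get_order(pool_size);

        if (order > MAX_PAGE_ORDER)
            order = MAX_PAGE_ORDER;
        pool_size = 1UL << (PAGE_SHIFT + order); /* size the order really gives */
        printf("order %u -> pool_size %zu bytes\n", order, pool_size);
        return 0;
    }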
/linux/drivers/net/ethernet/mscc/
ocelot_devlink.c
359 buf_shr_i = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] - in ocelot_setup_sharing_watermarks()
361 buf_shr_e = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] - in ocelot_setup_sharing_watermarks()
363 ref_shr_i = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] - in ocelot_setup_sharing_watermarks()
365 ref_shr_e = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] - in ocelot_setup_sharing_watermarks()
391 if (buf_rsrv_i > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING]) { in ocelot_watermark_validate()
396 if (buf_rsrv_e > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR]) { in ocelot_watermark_validate()
401 if (ref_rsrv_i > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING]) { in ocelot_watermark_validate()
406 if (ref_rsrv_e > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR]) { in ocelot_watermark_validate()
546 pool_info->size = ocelot->pool_size[sb_index][pool_index]; in ocelot_sb_pool_get()
585 old_pool_size = ocelot->pool_size[sb_index][pool_index]; in ocelot_sb_pool_set()
[all …]
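The ocelot_devlink.c hits compute each shared watermark as the pool size minus the summed per-port reservations, and reject a configuration whose reservations exceed the pool. A minimal sketch of that arithmetic for a single ingress buffer pool; the values and names are illustrative, not the driver's.

    #include <stdio.h>

    int main(void)
    {
        unsigned int pool_size_ing = 1000;      /* ingress buffer pool */
        unsigned int buf_rsrv_i = 300;          /* summed per-port reservations */
        unsigned int buf_shr_i;

        if (buf_rsrv_i > pool_size_ing) {
            fprintf(stderr, "reserved space exceeds ingress pool\n");
            return 1;
        }
        buf_shr_i = pool_size_ing - buf_rsrv_i; /* shared (dynamic) watermark */
        printf("ingress shared watermark: %u\n", buf_shr_i);
        return 0;
    }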
/linux/arch/mips/cavium-octeon/executive/
cvmx-cmd-queue.c
110 int pool_size) in cvmx_cmd_queue_initialize() argument
133 if ((pool_size < 128) || (pool_size > 65536)) in cvmx_cmd_queue_initialize()
152 if ((pool_size >> 3) - 1 != qstate->pool_size_m1) { in cvmx_cmd_queue_initialize()
181 qstate->pool_size_m1 = (pool_size >> 3) - 1; in cvmx_cmd_queue_initialize()
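cvmx-cmd-queue.c accepts a command-buffer pool_size only in the range 128–65536 bytes and stores it as the number of 8-byte words minus one (pool_size_m1), which is also what the re-initialization consistency check compares against. A standalone sketch of that validation and encoding:

    #include <stdio.h>

    int main(void)
    {
        int pool_size = 4096;                   /* bytes per command buffer */
        unsigned int pool_size_m1;

        if (pool_size < 128 || pool_size > 65536) {
            fprintf(stderr, "invalid pool_size %d\n", pool_size);
            return 1;
        }
        pool_size_m1 = (pool_size >> 3) - 1;    /* 8-byte words, minus one */
        printf("pool_size %d bytes -> pool_size_m1 %u\n", pool_size, pool_size_m1);
        return 0;
    }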
/linux/arch/csky/mm/
tcm.c
133 u32 pool_size = (u32) (TCM_NR_PAGES * PAGE_SIZE) local
139 u32 pool_size = (u32) (CONFIG_DTCM_NR_PAGES * PAGE_SIZE)
149 ret = gen_pool_add(tcm_pool, tcm_pool_start, pool_size, -1);
156 __func__, pool_size, tcm_pool_start);
/linux/block/
blk-zoned.c
1395 unsigned int pool_size) in disk_alloc_zone_resources() argument
1400 min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS); in disk_alloc_zone_resources()
1411 disk->zone_wplugs_pool = mempool_create_kmalloc_pool(pool_size, in disk_alloc_zone_resources()
1418 pool_size, disk->disk_name); in disk_alloc_zone_resources()
1505 unsigned int pool_size; in disk_revalidate_zone_resources() local
1514 pool_size = max(lim->max_open_zones, lim->max_active_zones); in disk_revalidate_zone_resources()
1515 if (!pool_size) in disk_revalidate_zone_resources()
1516 pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_zones); in disk_revalidate_zone_resources()
1519 return disk_alloc_zone_resources(disk, pool_size); in disk_revalidate_zone_resources()
1542 unsigned int pool_size; in disk_update_zone_resources() local
[all …]
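blk-zoned.c sizes the zone write plug mempool from the larger of the open/active zone limits, falls back to a default capped by the zone count when no limit is reported, and derives the hash-table size from ilog2(pool_size) + 1. The sketch below walks through that calculation; the two constants stand in for the kernel's BLK_ZONE_WPLUG_* values and are assumptions.

    #include <stdio.h>

    #define DEFAULT_POOL_SIZE 128
    #define MAX_HASH_BITS     16

    static unsigned int ilog2_u32(unsigned int v)
    {
        unsigned int log = 0;

        while (v >>= 1)
            log++;
        return log;
    }

    int main(void)
    {
        unsigned int max_open_zones = 0, max_active_zones = 0, nr_zones = 96;
        unsigned int pool_size, hash_bits;

        pool_size = max_open_zones > max_active_zones ?
                    max_open_zones : max_active_zones;
        if (!pool_size)         /* no limits reported: use a bounded default */
            pool_size = DEFAULT_POOL_SIZE < nr_zones ?
                        DEFAULT_POOL_SIZE : nr_zones;

        hash_bits = ilog2_u32(pool_size) + 1;
        if (hash_bits > MAX_HASH_BITS)
            hash_bits = MAX_HASH_BITS;

        printf("pool_size %u, hash table of %u buckets\n",
               pool_size, 1U << hash_bits);
        return 0;
    }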
bio-integrity.c
570 int bioset_integrity_create(struct bio_set *bs, int pool_size) in bioset_integrity_create() argument
576 pool_size, bip_slab)) in bioset_integrity_create()
579 if (biovec_init_pool(&bs->bvec_integrity_pool, pool_size)) { in bioset_integrity_create()
/linux/include/linux/
bio-integrity.h
81 int bioset_integrity_create(struct bio_set *bs, int pool_size);
92 static inline int bioset_integrity_create(struct bio_set *bs, int pool_size) in bioset_integrity_create() argument
/linux/drivers/md/dm-vdo/
vio.c
311 int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id, in make_vio_pool() argument
319 result = vdo_allocate_extended(struct vio_pool, pool_size, struct pooled_vio, in make_vio_pool()
328 result = vdo_allocate(pool_size * VDO_BLOCK_SIZE, char, in make_vio_pool()
336 for (pool->size = 0; pool->size < pool_size; pool->size++, ptr += VDO_BLOCK_SIZE) { in make_vio_pool()
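make_vio_pool() in vio.c allocates the pool header together with pool_size entries, plus one backing buffer of pool_size * VDO_BLOCK_SIZE bytes that is then carved into per-entry blocks. A userspace sketch of that layout, with illustrative struct names and a placeholder BLOCK_SIZE:

    #include <stdio.h>
    #include <stdlib.h>

    #define BLOCK_SIZE 4096

    struct pooled_entry {
        char *data;                  /* this entry's slice of the shared buffer */
    };

    struct vio_pool {
        size_t size;
        char *buffer;
        struct pooled_entry entries[]; /* flexible array, pool_size long */
    };

    static struct vio_pool *make_pool(size_t pool_size)
    {
        struct vio_pool *pool;
        char *ptr;

        pool = calloc(1, sizeof(*pool) + pool_size * sizeof(pool->entries[0]));
        if (!pool)
            return NULL;
        pool->buffer = malloc(pool_size * BLOCK_SIZE);
        if (!pool->buffer) {
            free(pool);
            return NULL;
        }
        ptr = pool->buffer;
        for (pool->size = 0; pool->size < pool_size;
             pool->size++, ptr += BLOCK_SIZE)
            pool->entries[pool->size].data = ptr;
        return pool;
    }

    int main(void)
    {
        struct vio_pool *pool = make_pool(8);

        if (pool) {
            printf("pool of %zu blocks, %d bytes each\n",
                   pool->size, BLOCK_SIZE);
            free(pool->buffer);
            free(pool);
        }
        return 0;
    }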
vio.h
191 int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
/linux/include/linux/platform_data/
dma-iop32x.h
103 size_t pool_size; member
/linux/drivers/mtd/ubi/
fastmap.c
401 __be32 *pebs, int pool_size, unsigned long long *max_sqnum, in scan_pool() argument
422 dbg_bld("scanning fastmap pool: size = %i", pool_size); in scan_pool()
428 for (i = 0; i < pool_size; i++) { in scan_pool()
561 int ret, i, j, pool_size, wl_pool_size; in ubi_attach_fastmap() local
607 pool_size = be16_to_cpu(fmpl->size); in ubi_attach_fastmap()
612 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) { in ubi_attach_fastmap()
613 ubi_err(ubi, "bad pool size: %i", pool_size); in ubi_attach_fastmap()
765 ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free); in ubi_attach_fastmap()
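The fastmap hits read the pool size as a big-endian 16-bit field from flash and bounds-check it before scanning that many PEBs. A small sketch of the decode-and-validate step; FM_MAX_POOL_SIZE is a placeholder for the UBI constant, and ntohs() stands in for be16_to_cpu():

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>           /* ntohs() as a stand-in for be16_to_cpu() */

    #define FM_MAX_POOL_SIZE 256

    int main(void)
    {
        uint16_t on_flash_size = htons(120); /* as read from the fastmap */
        int pool_size = ntohs(on_flash_size);

        /* The negative check mirrors the kernel's defensive test. */
        if (pool_size > FM_MAX_POOL_SIZE || pool_size < 0) {
            fprintf(stderr, "bad pool size: %i\n", pool_size);
            return 1;
        }
        printf("scanning fastmap pool: size = %i\n", pool_size);
        /* ... iterate over pool_size PEB numbers here ... */
        return 0;
    }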
/linux/drivers/dma/ppc4xx/
adma.h
67 size_t pool_size; member
/linux/drivers/net/ethernet/intel/i40e/
i40e_dcb.c
1580 u32 pool_size[I40E_MAX_TRAFFIC_CLASS]; in i40e_dcb_hw_calculate_pool_sizes() local
1612 pool_size[i] = high_wm[i]; in i40e_dcb_hw_calculate_pool_sizes()
1613 pool_size[i] += I40E_BT2B(I40E_STD_DV_TC(mfs_max, in i40e_dcb_hw_calculate_pool_sizes()
1617 pool_size[i] = (I40E_DCB_WATERMARK_START_FACTOR * in i40e_dcb_hw_calculate_pool_sizes()
1619 high_wm[i] = pool_size[i]; in i40e_dcb_hw_calculate_pool_sizes()
1621 total_pool_size += pool_size[i]; in i40e_dcb_hw_calculate_pool_sizes()
1632 pb_cfg->tc_pool_size[i] = pool_size[i]; in i40e_dcb_hw_calculate_pool_sizes()
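i40e_dcb.c builds a per-traffic-class pool size: enabled classes get their high watermark plus headroom, disabled classes get a small multiple of the maximum frame size, and the results are summed. The sketch below mirrors that shape only; the headroom term and constants are illustrative, not the i40e formulas.

    #include <stdio.h>

    #define NUM_TC                 8
    #define WATERMARK_START_FACTOR 2

    int main(void)
    {
        unsigned int mfs_max = 1522;            /* largest frame size */
        unsigned int high_wm[NUM_TC] = { 9000, 9000, 0, 0, 0, 0, 0, 0 };
        int enabled[NUM_TC] = { 1, 1, 0, 0, 0, 0, 0, 0 };
        unsigned int pool_size[NUM_TC];
        unsigned int total_pool_size = 0;

        for (int i = 0; i < NUM_TC; i++) {
            if (enabled[i])
                pool_size[i] = high_wm[i] + 2 * mfs_max; /* headroom */
            else
                pool_size[i] = WATERMARK_START_FACTOR * mfs_max;
            total_pool_size += pool_size[i];
        }
        printf("total pool size: %u bytes\n", total_pool_size);
        return 0;
    }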
/linux/drivers/md/
dm-table.c
1050 unsigned int min_pool_size = 0, pool_size; in dm_table_alloc_md_mempools() local
1065 pool_size = dm_get_reserved_rq_based_ios(); in dm_table_alloc_md_mempools()
1081 pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); in dm_table_alloc_md_mempools()
1087 if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags)) in dm_table_alloc_md_mempools()
1090 bioset_integrity_create(&pools->io_bs, pool_size)) in dm_table_alloc_md_mempools()
1093 if (bioset_init(&pools->bs, pool_size, front_pad, 0)) in dm_table_alloc_md_mempools()
1096 bioset_integrity_create(&pools->bs, pool_size)) in dm_table_alloc_md_mempools()
/linux/drivers/misc/
ibmvmc.h
111 __be16 pool_size; /* Maximum number of buffers supported per HMC member
/linux/include/net/page_pool/
types.h
77 unsigned int pool_size;
/linux/drivers/net/ethernet/ibm/
ibmveth.h
105 static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 }; variable
ibmvnic.c
1060 u64 pool_size; /* # of buffers in one pool */ in init_rx_pools() local
1064 pool_size = adapter->req_rx_add_entries_per_subcrq; in init_rx_pools()
1094 i, pool_size, buff_size); in init_rx_pools()
1096 rx_pool->size = pool_size; in init_rx_pools()
1118 adapter->prev_rx_pool_size = pool_size; in init_rx_pools()
1219 int pool_size, int buf_size) in init_one_tx_pool() argument
1223 tx_pool->tx_buff = kcalloc(pool_size, in init_one_tx_pool()
1229 tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL); in init_one_tx_pool()
1236 for (i = 0; i < pool_size; i++) in init_one_tx_pool()
1241 tx_pool->num_buffers = pool_size; in init_one_tx_pool()
[all …]
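init_one_tx_pool() in ibmvnic.c allocates pool_size buffer descriptors and a free_map of the same length whose entries are initialised to their own index, marking every slot free. A userspace sketch of that setup with illustrative struct names:

    #include <stdio.h>
    #include <stdlib.h>

    struct tx_buff {
        void *skb;                   /* placeholder for the queued packet */
    };

    struct tx_pool {
        struct tx_buff *tx_buff;
        int *free_map;
        int num_buffers;
    };

    static int init_one_tx_pool(struct tx_pool *tx_pool, int pool_size)
    {
        tx_pool->tx_buff = calloc(pool_size, sizeof(*tx_pool->tx_buff));
        if (!tx_pool->tx_buff)
            return -1;

        tx_pool->free_map = calloc(pool_size, sizeof(int));
        if (!tx_pool->free_map) {
            free(tx_pool->tx_buff);
            return -1;
        }

        for (int i = 0; i < pool_size; i++)
            tx_pool->free_map[i] = i;     /* every slot starts free */

        tx_pool->num_buffers = pool_size;
        return 0;
    }

    int main(void)
    {
        struct tx_pool pool;

        if (!init_one_tx_pool(&pool, 64)) {
            printf("tx pool with %d buffers ready\n", pool.num_buffers);
            free(pool.free_map);
            free(pool.tx_buff);
        }
        return 0;
    }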
/linux/mm/kasan/
kasan_test_c.c
1111 int pool_size = 4; in mempool_prepare_kmalloc() local
1116 ret = mempool_init_kmalloc_pool(pool, pool_size, size); in mempool_prepare_kmalloc()
1134 int pool_size = 4; in mempool_prepare_slab() local
1141 ret = mempool_init_slab_pool(pool, pool_size, cache); in mempool_prepare_slab()
1154 int pool_size = 4; in mempool_prepare_page() local
1159 ret = mempool_init_page_pool(pool, pool_size, order); in mempool_prepare_page()
/linux/drivers/net/ethernet/intel/libeth/
rx.c
153 .pool_size = fq->count, in libeth_rx_fq_create()
/linux/arch/powerpc/platforms/pseries/
lparcfg.c
291 if (mpp_data.pool_size != -1) in parse_mpp_data()
293 mpp_data.pool_size); in parse_mpp_data()
/linux/arch/mips/include/asm/octeon/
cvmx-cmd-queue.h
172 int pool_size);
/linux/drivers/net/ethernet/ti/
cpsw_priv.c
1155 pp_params.pool_size = size; in cpsw_create_page_pool()
1170 int ret = 0, pool_size; in cpsw_create_rx_pool() local
1172 pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch); in cpsw_create_rx_pool()
1173 pool = cpsw_create_page_pool(cpsw, pool_size); in cpsw_create_rx_pool()
