Searched refs:pool (Results 1 – 25 of 646) sorted by relevance

/linux/net/core/
page_pool.c
46 #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++) argument
48 #define recycle_stat_inc(pool, __stat) \ argument
50 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
54 #define recycle_stat_add(pool, __stat, val) \ argument
56 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
75 * page_pool_get_stats() - fetch page pool stats
76 * @pool: pool from which page was allocated
85 bool page_pool_get_stats(const struct page_pool *pool, in page_pool_get_stats() argument
161 page_pool_producer_lock(struct page_pool * pool) page_pool_producer_lock() argument
174 page_pool_producer_unlock(struct page_pool * pool,bool in_softirq) page_pool_producer_unlock() argument
193 page_pool_init(struct page_pool * pool,const struct page_pool_params * params,int cpuid) page_pool_init() argument
317 page_pool_uninit(struct page_pool * pool) page_pool_uninit() argument
336 struct page_pool *pool; page_pool_create_percpu() local
374 page_pool_refill_alloc_cache(struct page_pool * pool) page_pool_refill_alloc_cache() argument
427 __page_pool_get_cached(struct page_pool * pool) __page_pool_get_cached() argument
443 __page_pool_dma_sync_for_device(const struct page_pool * pool,netmem_ref netmem,u32 dma_sync_size) __page_pool_dma_sync_for_device() argument
457 page_pool_dma_sync_for_device(const struct page_pool * pool,netmem_ref netmem,u32 dma_sync_size) page_pool_dma_sync_for_device() argument
471 page_pool_register_dma_index(struct page_pool * pool,netmem_ref netmem,gfp_t gfp) page_pool_register_dma_index() argument
496 page_pool_release_dma_index(struct page_pool * pool,netmem_ref netmem) page_pool_release_dma_index() argument
521 page_pool_dma_map(struct page_pool * pool,netmem_ref netmem,gfp_t gfp) page_pool_dma_map() argument
560 __page_pool_alloc_page_order(struct page_pool * pool,gfp_t gfp) __page_pool_alloc_page_order() argument
586 __page_pool_alloc_netmems_slow(struct page_pool * pool,gfp_t gfp) __page_pool_alloc_netmems_slow() argument
650 page_pool_alloc_netmems(struct page_pool * pool,gfp_t gfp) page_pool_alloc_netmems() argument
669 page_pool_alloc_pages(struct page_pool * pool,gfp_t gfp) page_pool_alloc_pages() argument
680 page_pool_inflight(const struct page_pool * pool,bool strict) page_pool_inflight() argument
699 page_pool_set_pp_info(struct page_pool * pool,netmem_ref netmem) page_pool_set_pp_info() argument
721 __page_pool_release_netmem_dma(struct page_pool * pool,netmem_ref netmem) __page_pool_release_netmem_dma() argument
749 page_pool_return_netmem(struct page_pool * pool,netmem_ref netmem) page_pool_return_netmem() argument
776 page_pool_recycle_in_ring(struct page_pool * pool,netmem_ref netmem) page_pool_recycle_in_ring() argument
796 page_pool_recycle_in_cache(netmem_ref netmem,struct page_pool * pool) page_pool_recycle_in_cache() argument
823 __page_pool_put_page(struct page_pool * pool,netmem_ref netmem,unsigned int dma_sync_size,bool allow_direct) __page_pool_put_page() argument
868 page_pool_napi_local(const struct page_pool * pool) page_pool_napi_local() argument
895 page_pool_put_unrefed_netmem(struct page_pool * pool,netmem_ref netmem,unsigned int dma_sync_size,bool allow_direct) page_pool_put_unrefed_netmem() argument
911 page_pool_put_unrefed_page(struct page_pool * pool,struct page * page,unsigned int dma_sync_size,bool allow_direct) page_pool_put_unrefed_page() argument
919 page_pool_recycle_ring_bulk(struct page_pool * pool,netmem_ref * bulk,u32 bulk_len) page_pool_recycle_ring_bulk() argument
980 struct page_pool *pool = NULL; page_pool_put_netmem_bulk() local
1018 page_pool_drain_frag(struct page_pool * pool,netmem_ref netmem) page_pool_drain_frag() argument
1036 page_pool_free_frag(struct page_pool * pool) page_pool_free_frag() argument
1049 page_pool_alloc_frag_netmem(struct page_pool * pool,unsigned int * offset,unsigned int size,gfp_t gfp) page_pool_alloc_frag_netmem() argument
1094 page_pool_alloc_frag(struct page_pool * pool,unsigned int * offset,unsigned int size,gfp_t gfp) page_pool_alloc_frag() argument
1102 page_pool_empty_ring(struct page_pool * pool) page_pool_empty_ring() argument
1117 __page_pool_destroy(struct page_pool * pool) __page_pool_destroy() argument
1133 page_pool_empty_alloc_cache_once(struct page_pool * pool) page_pool_empty_alloc_cache_once() argument
1150 page_pool_scrub(struct page_pool * pool) page_pool_scrub() argument
1182 page_pool_release(struct page_pool * pool) page_pool_release() argument
1201 struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw); page_pool_release_retry() local
1230 page_pool_use_xdp_mem(struct page_pool * pool,void (* disconnect)(void *),const struct xdp_mem_info * mem) page_pool_use_xdp_mem() argument
1254 page_pool_enable_direct_recycling(struct page_pool * pool,struct napi_struct * napi) page_pool_enable_direct_recycling() argument
1267 page_pool_disable_direct_recycling(struct page_pool * pool) page_pool_disable_direct_recycling() argument
1285 page_pool_destroy(struct page_pool * pool) page_pool_destroy() argument
1309 page_pool_update_nid(struct page_pool * pool,int new_nid) page_pool_update_nid() argument
1332 net_mp_niov_set_page_pool(struct page_pool * pool,struct net_iov * niov) net_mp_niov_set_page_pool() argument
[all …]
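
The snippets above are the recycler's internals; drivers consume them through a small create/alloc/recycle API. A minimal, hedged sketch of driver-side usage — the ring depth and device pointer are hypothetical, and the header is <net/page_pool/helpers.h> on recent kernels (<net/page_pool.h> before v6.6):

    #include <linux/dma-direction.h>
    #include <net/page_pool/helpers.h>

    /* Hypothetical RX setup: with PP_FLAG_DMA_MAP the pool maps pages
     * for the device itself (the page_pool_dma_map() path above). */
    static struct page_pool *my_rx_pool_create(struct device *dev)
    {
            struct page_pool_params pp_params = {
                    .order          = 0,            /* single 4 KiB pages */
                    .pool_size      = 256,          /* hypothetical ring depth */
                    .nid            = NUMA_NO_NODE,
                    .dev            = dev,
                    .dma_dir        = DMA_FROM_DEVICE,
                    .flags          = PP_FLAG_DMA_MAP,
            };

            return page_pool_create(&pp_params);    /* ERR_PTR() on failure */
    }

    static void my_rx_refill(struct page_pool *pool)
    {
            /* fast path: per-CPU alloc cache, then the ring, then the
             * page allocator (the _slow path seen above) */
            struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);

            if (!page)
                    return;
            /* ... post to hardware; on completion, recycle instead of free: */
            page_pool_put_full_page(pool, page, false);
    }

page_pool_destroy() then tears the pool down once in-flight pages drain; the release_dw delayed work in page_pool_release_retry() above is that wait.
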
page_pool_user.c
36 typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
42 struct page_pool *pool; in netdev_nl_page_pool_get_do() local
47 pool = xa_load(&page_pools, id); in netdev_nl_page_pool_get_do()
48 if (!pool || hlist_unhashed(&pool->user.list) || in netdev_nl_page_pool_get_do()
49 !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) { in netdev_nl_page_pool_get_do()
60 err = fill(rsp, pool, info); in netdev_nl_page_pool_get_do()
88 struct page_pool *pool; in netdev_nl_page_pool_get_dump() local
94 hlist_for_each_entry(pool, &netdev->page_pools, user.list) { in netdev_nl_page_pool_get_dump()
95 if (state->pp_id && state->pp_id < pool->user.id) in netdev_nl_page_pool_get_dump()
98 state->pp_id = pool->user.id; in netdev_nl_page_pool_get_dump()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/
pool.c
9 switch (resource->pool->type) { in hws_pool_free_one_resource()
11 mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id); in hws_pool_free_one_resource()
14 mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id); in hws_pool_free_one_resource()
23 static void hws_pool_resource_free(struct mlx5hws_pool *pool) in hws_pool_resource_free() argument
25 hws_pool_free_one_resource(pool->resource); in hws_pool_resource_free()
26 pool->resource = NULL; in hws_pool_resource_free()
28 if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) { in hws_pool_resource_free()
29 hws_pool_free_one_resource(pool->mirror_resource); in hws_pool_resource_free()
30 pool->mirror_resource = NULL; in hws_pool_resource_free()
35 hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range, in hws_pool_create_one_resource() argument
[all …]
/linux/net/xdp/
xsk_buff_pool.c
13 void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) in xp_add_xsk() argument
18 spin_lock(&pool->xsk_tx_list_lock); in xp_add_xsk()
19 list_add_rcu(&xs->tx_list, &pool->xsk_tx_list); in xp_add_xsk()
20 spin_unlock(&pool->xsk_tx_list_lock); in xp_add_xsk()
23 void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) in xp_del_xsk() argument
28 spin_lock(&pool->xsk_tx_list_lock); in xp_del_xsk()
30 spin_unlock(&pool->xsk_tx_list_lock); in xp_del_xsk()
33 void xp_destroy(struct xsk_buff_pool *pool) in xp_destroy() argument
35 if (!pool) in xp_destroy()
38 kvfree(pool->tx_descs); in xp_destroy()
[all …]
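
xp_add_xsk()/xp_destroy() above are the pool's internal face; zero-copy drivers use the xsk_buff_* wrappers from <net/xdp_sock_drv.h>. A hedged refill sketch — my_rx_slot is hypothetical:

    #include <net/xdp_sock_drv.h>

    struct my_rx_slot {                     /* hypothetical shadow of a HW descriptor */
            dma_addr_t dma;
            struct xdp_buff *xdp;
    };

    static int my_refill_one(struct xsk_buff_pool *pool, struct my_rx_slot *slot)
    {
            struct xdp_buff *xdp = xsk_buff_alloc(pool); /* NULL when the fill ring is empty */

            if (!xdp)
                    return -ENOMEM;
            slot->dma = xsk_buff_xdp_get_dma(xdp);  /* umem was DMA-mapped at pool setup */
            slot->xdp = xdp;
            return 0;
    }
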
/linux/drivers/net/ethernet/ti/
k3-cppi-desc-pool.c
28 void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool) in k3_cppi_desc_pool_destroy() argument
30 if (!pool) in k3_cppi_desc_pool_destroy()
33 WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool), in k3_cppi_desc_pool_destroy()
35 gen_pool_size(pool->gen_pool), in k3_cppi_desc_pool_destroy()
36 gen_pool_avail(pool->gen_pool)); in k3_cppi_desc_pool_destroy()
37 if (pool->cpumem) in k3_cppi_desc_pool_destroy()
38 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem, in k3_cppi_desc_pool_destroy()
39 pool->dma_addr); in k3_cppi_desc_pool_destroy()
41 kfree(pool->desc_infos); in k3_cppi_desc_pool_destroy()
43 gen_pool_destroy(pool->gen_pool); /* frees pool->name */ in k3_cppi_desc_pool_destroy()
[all …]
/linux/mm/
mempool.c
41 static void poison_error(struct mempool *pool, void *element, size_t size, in poison_error() argument
44 const int nr = pool->curr_nr; in poison_error()
50 pr_err("Mempool %p size %zu\n", pool, size); in poison_error()
58 static void __check_element(struct mempool *pool, void *element, size_t size) in __check_element() argument
67 poison_error(pool, element, size, i); in __check_element()
74 static void check_element(struct mempool *pool, void *element) in check_element() argument
81 if (pool->free == mempool_kfree) { in check_element()
82 __check_element(pool, element, (size_t)pool->pool_data); in check_element()
83 } else if (pool->free == mempool_free_slab) { in check_element()
84 __check_element(pool, element, kmem_cache_size(pool->pool_data)); in check_element()
[all …]
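
check_element()/poison_error() above are debug-only poisoning; the API itself is small. A minimal sketch of a kmalloc-backed pool that guarantees forward progress under memory pressure (element count and size are hypothetical):

    #include <linux/mempool.h>

    static mempool_t *my_pool;

    static int my_pool_setup(void)
    {
            /* keep >= 16 preallocated 128-byte elements in reserve */
            my_pool = mempool_create_kmalloc_pool(16, 128);
            return my_pool ? 0 : -ENOMEM;
    }

    static void my_pool_use(void)
    {
            /* with a gfp mask that can sleep this does not return NULL:
             * it dips into the reserve, then waits for an element */
            void *e = mempool_alloc(my_pool, GFP_NOIO);

            mempool_free(e, my_pool);       /* tops the reserve back up first */
    }

The mempool_kfree/mempool_free_slab comparisons in check_element() above are how the debug code infers the element size for exactly this kind of pool.
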
/linux/drivers/md/
dm-thin.c
232 struct pool { struct
290 static void metadata_operation_failed(struct pool *pool, const char *op, int r); argument
292 static enum pool_mode get_pool_mode(struct pool *pool) in get_pool_mode() argument
294 return pool->pf.mode; in get_pool_mode()
297 static void notify_of_pool_mode_change(struct pool *pool) in notify_of_pool_mode_change() argument
307 enum pool_mode mode = get_pool_mode(pool); in notify_of_pool_mode_change()
310 if (!pool->pf.error_if_no_space) in notify_of_pool_mode_change()
316 dm_table_event(pool->ti->table); in notify_of_pool_mode_change()
318 dm_device_name(pool->pool_md), in notify_of_pool_mode_change()
327 struct pool *pool; member
[all …]
/linux/kernel/cgroup/
dmem.c
113 static void dmemcg_pool_get(struct dmem_cgroup_pool_state *pool) in dmemcg_pool_get() argument
115 refcount_inc(&pool->ref); in dmemcg_pool_get()
118 static bool dmemcg_pool_tryget(struct dmem_cgroup_pool_state *pool) in dmemcg_pool_tryget() argument
120 return refcount_inc_not_zero(&pool->ref); in dmemcg_pool_tryget()
123 static void dmemcg_pool_put(struct dmem_cgroup_pool_state *pool) in dmemcg_pool_put() argument
125 if (!refcount_dec_and_test(&pool->ref)) in dmemcg_pool_put()
128 call_rcu(&pool->rcu, dmemcg_pool_free_rcu); in dmemcg_pool_put()
133 struct dmem_cgroup_pool_state *pool = container_of(rcu, typeof(*pool), rcu); in dmemcg_pool_free_rcu() local
135 if (pool->parent) in dmemcg_pool_free_rcu()
136 dmemcg_pool_put(pool->parent); in dmemcg_pool_free_rcu()
[all …]
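
The get/tryget/put trio above is the stock refcount-plus-RCU lifetime pattern. A generic, hedged restatement (struct obj is hypothetical):

    #include <linux/refcount.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct obj {
            refcount_t ref;
            struct rcu_head rcu;
    };

    static void obj_free_rcu(struct rcu_head *rcu)
    {
            kfree(container_of(rcu, struct obj, rcu));
    }

    static void obj_put(struct obj *o)
    {
            if (!refcount_dec_and_test(&o->ref))
                    return;
            call_rcu(&o->rcu, obj_free_rcu);        /* free only after RCU readers drain */
    }

    /* lookup side: take a reference only if the object is still live */
    static bool obj_tryget(struct obj *o)
    {
            return refcount_inc_not_zero(&o->ref);
    }
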
/linux/net/ceph/
msgpool.c
14 struct ceph_msgpool *pool = arg; in msgpool_alloc() local
17 msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items, in msgpool_alloc()
20 dout("msgpool_alloc %s failed\n", pool->name); in msgpool_alloc()
22 dout("msgpool_alloc %s %p\n", pool->name, msg); in msgpool_alloc()
23 msg->pool = pool; in msgpool_alloc()
30 struct ceph_msgpool *pool = arg; in msgpool_free() local
33 dout("msgpool_release %s %p\n", pool->name, msg); in msgpool_free()
34 msg->pool = NULL; in msgpool_free()
38 int ceph_msgpool_init(struct ceph_msgpool *pool, int type, in ceph_msgpool_init() argument
43 pool->type = type; in ceph_msgpool_init()
[all …]
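
ceph_msgpool is a thin wrapper: msgpool_alloc()/msgpool_free() above are exactly the element callbacks that mempool_create() accepts. The general shape, hedged — my_elem_* and the sizes are hypothetical; pool_data smuggles the element size, the same trick mempool_kmalloc uses:

    #include <linux/mempool.h>
    #include <linux/slab.h>

    static void *my_elem_alloc(gfp_t gfp_mask, void *pool_data)
    {
            return kzalloc((size_t)pool_data, gfp_mask);
    }

    static void my_elem_free(void *element, void *pool_data)
    {
            kfree(element);
    }

    static mempool_t *my_msgpool_like_create(void)
    {
            /* 8 preallocated 128-byte elements */
            return mempool_create(8, my_elem_alloc, my_elem_free, (void *)128);
    }
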
/linux/sound/core/seq/
seq_memory.c
22 static inline int snd_seq_pool_available(struct snd_seq_pool *pool) in snd_seq_pool_available() argument
24 return pool->total_elements - atomic_read(&pool->counter); in snd_seq_pool_available()
27 static inline int snd_seq_output_ok(struct snd_seq_pool *pool) in snd_seq_output_ok() argument
29 return snd_seq_pool_available(pool) >= pool->room; in snd_seq_output_ok()
225 static inline void free_cell(struct snd_seq_pool *pool, in free_cell() argument
228 cell->next = pool->free; in free_cell()
229 pool->free = cell; in free_cell()
230 atomic_dec(&pool->counter); in free_cell()
235 struct snd_seq_pool *pool; in snd_seq_cell_free() local
239 pool = cell->pool; in snd_seq_cell_free()
[all …]
/linux/drivers/gpu/drm/panthor/
panthor_heap.c
114 static int panthor_get_heap_ctx_offset(struct panthor_heap_pool *pool, int id) in panthor_get_heap_ctx_offset() argument
116 return panthor_heap_ctx_stride(pool->ptdev) * id; in panthor_get_heap_ctx_offset()
119 static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id) in panthor_get_heap_ctx() argument
121 return pool->gpu_contexts->kmap + in panthor_get_heap_ctx()
122 panthor_get_heap_ctx_offset(pool, id); in panthor_get_heap_ctx()
125 static void panthor_free_heap_chunk(struct panthor_heap_pool *pool, in panthor_free_heap_chunk() argument
134 atomic_sub(heap->chunk_size, &pool->size); in panthor_free_heap_chunk()
140 static int panthor_alloc_heap_chunk(struct panthor_heap_pool *pool, in panthor_alloc_heap_chunk() argument
152 chunk->bo = panthor_kernel_bo_create(pool->ptdev, pool->vm, heap->chunk_size, in panthor_alloc_heap_chunk()
189 atomic_add(heap->chunk_size, &pool->size); in panthor_alloc_heap_chunk()
[all …]
/linux/lib/
objpool.c
19 objpool_init_percpu_slot(struct objpool_head *pool, in objpool_init_percpu_slot() argument
24 void *obj = (void *)&slot->entries[pool->capacity]; in objpool_init_percpu_slot()
28 slot->mask = pool->capacity - 1; in objpool_init_percpu_slot()
37 obj = obj + pool->obj_size; in objpool_init_percpu_slot()
40 pool->nr_objs++; in objpool_init_percpu_slot()
48 objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs, in objpool_init_percpu_slots() argument
63 nodes = nr_objs / pool->nr_possible_cpus; in objpool_init_percpu_slots()
64 if (cpu_count < (nr_objs % pool->nr_possible_cpus)) in objpool_init_percpu_slots()
68 size = struct_size(slot, entries, pool->capacity) + in objpool_init_percpu_slots()
69 pool->obj_size * nodes; in objpool_init_percpu_slots()
[all …]
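
objpool is a scalable lockless pool of preallocated objects spread across per-CPU ring slots (kretprobes/fprobe use it). A hedged usage sketch; the objpool_init() signature is from memory, so check include/linux/objpool.h on your tree:

    #include <linux/objpool.h>

    static struct objpool_head my_pool;

    static int my_objpool_setup(void)
    {
            /* 128 objects of 64 bytes; no context, no per-object callbacks */
            return objpool_init(&my_pool, 128, 64, GFP_KERNEL, NULL, NULL, NULL);
    }

    static void my_objpool_use(void)
    {
            void *obj = objpool_pop(&my_pool);      /* lockless; IRQ/NMI safe */

            if (obj)
                    objpool_push(obj, &my_pool);    /* back to this CPU's slot */
    }
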
genalloc.c
155 struct gen_pool *pool; in gen_pool_create() local
157 pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid); in gen_pool_create()
158 if (pool != NULL) { in gen_pool_create()
159 spin_lock_init(&pool->lock); in gen_pool_create()
160 INIT_LIST_HEAD(&pool->chunks); in gen_pool_create()
161 pool->min_alloc_order = min_alloc_order; in gen_pool_create()
162 pool->algo = gen_pool_first_fit; in gen_pool_create()
163 pool->data = NULL; in gen_pool_create()
164 pool->name = NULL; in gen_pool_create()
166 return pool; in gen_pool_create()
[all …]
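
gen_pool is the general-purpose allocator for driver-owned memory regions (the TI k3-cppi descriptor pool earlier in these results is built on it). A hedged sketch carving a 64 KiB SRAM window into 32-byte granules — sram_virt/sram_phys are hypothetical:

    #include <linux/genalloc.h>
    #include <linux/sizes.h>

    static struct gen_pool *my_sram_pool(void *sram_virt, phys_addr_t sram_phys)
    {
            struct gen_pool *p = gen_pool_create(5, -1);    /* order 5 => 32-byte granules */

            if (!p)
                    return NULL;
            if (gen_pool_add_virt(p, (unsigned long)sram_virt, sram_phys,
                                  SZ_64K, -1)) {
                    gen_pool_destroy(p);
                    return NULL;
            }
            return p;
    }

Allocations then come from gen_pool_alloc(p, size), which returns 0 on failure and uses gen_pool_first_fit unless another algorithm is set (the default assigned in gen_pool_create() above); gen_pool_free() returns them, and the pool must be empty before gen_pool_destroy().
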
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/
dr_icm_pool.c
82 u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type); in mlx5dr_icm_pool_get_chunk_mr_addr()
94 u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type); in mlx5dr_icm_pool_get_chunk_icm_addr()
102 chunk->buddy_mem->pool->icm_type); in mlx5dr_icm_pool_get_chunk_byte_size()
111 dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool) in dr_icm_pool_mr_create() argument
113 struct mlx5_core_dev *mdev = pool->dmn->mdev; in dr_icm_pool_mr_create()
123 icm_mr->dmn = pool->dmn; in dr_icm_pool_mr_create()
125 icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, in dr_icm_pool_mr_create()
126 pool->icm_type); in dr_icm_pool_mr_create()
128 switch (pool->icm_type) { in dr_icm_pool_mr_create()
144 WARN_ON(pool->icm_type); in dr_icm_pool_mr_create()
[all …]
dr_arg.c
31 static int dr_arg_pool_alloc_objs(struct dr_arg_pool *pool) in dr_arg_pool_alloc_objs() argument
43 pool->dmn->info.caps.log_header_modify_argument_granularity; in dr_arg_pool_alloc_objs()
46 max_t(u32, pool->dmn->info.caps.log_header_modify_argument_granularity, in dr_arg_pool_alloc_objs()
49 min_t(u32, pool->dmn->info.caps.log_header_modify_argument_max_alloc, in dr_arg_pool_alloc_objs()
52 if (pool->log_chunk_size > object_range) { in dr_arg_pool_alloc_objs()
53 mlx5dr_err(pool->dmn, "Required chunk size (%d) is not supported\n", in dr_arg_pool_alloc_objs()
54 pool->log_chunk_size); in dr_arg_pool_alloc_objs()
58 num_of_objects = (1 << (object_range - pool->log_chunk_size)); in dr_arg_pool_alloc_objs()
60 ret = mlx5dr_cmd_create_modify_header_arg(pool->dmn->mdev, in dr_arg_pool_alloc_objs()
62 pool->dmn->pdn, in dr_arg_pool_alloc_objs()
[all …]
/linux/drivers/gpu/drm/amd/display/dc/resource/dce60/
dce60_resource.c
826 static void dce60_resource_destruct(struct dce110_resource_pool *pool) in dce60_resource_destruct() argument
830 for (i = 0; i < pool->base.pipe_count; i++) { in dce60_resource_destruct()
831 if (pool->base.opps[i] != NULL) in dce60_resource_destruct()
832 dce110_opp_destroy(&pool->base.opps[i]); in dce60_resource_destruct()
834 if (pool->base.transforms[i] != NULL) in dce60_resource_destruct()
835 dce60_transform_destroy(&pool->base.transforms[i]); in dce60_resource_destruct()
837 if (pool->base.ipps[i] != NULL) in dce60_resource_destruct()
838 dce_ipp_destroy(&pool->base.ipps[i]); in dce60_resource_destruct()
840 if (pool->base.mis[i] != NULL) { in dce60_resource_destruct()
841 kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); in dce60_resource_destruct()
[all …]
/linux/drivers/gpu/drm/amd/display/dc/resource/dce80/
dce80_resource.c
832 static void dce80_resource_destruct(struct dce110_resource_pool *pool) in dce80_resource_destruct() argument
836 for (i = 0; i < pool->base.pipe_count; i++) { in dce80_resource_destruct()
837 if (pool->base.opps[i] != NULL) in dce80_resource_destruct()
838 dce110_opp_destroy(&pool->base.opps[i]); in dce80_resource_destruct()
840 if (pool->base.transforms[i] != NULL) in dce80_resource_destruct()
841 dce80_transform_destroy(&pool->base.transforms[i]); in dce80_resource_destruct()
843 if (pool->base.ipps[i] != NULL) in dce80_resource_destruct()
844 dce_ipp_destroy(&pool->base.ipps[i]); in dce80_resource_destruct()
846 if (pool->base.mis[i] != NULL) { in dce80_resource_destruct()
847 kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); in dce80_resource_destruct()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/
irq_affinity.c
8 static void cpu_put(struct mlx5_irq_pool *pool, int cpu) in cpu_put() argument
10 pool->irqs_per_cpu[cpu]--; in cpu_put()
13 static void cpu_get(struct mlx5_irq_pool *pool, int cpu) in cpu_get() argument
15 pool->irqs_per_cpu[cpu]++; in cpu_get()
19 static int cpu_get_least_loaded(struct mlx5_irq_pool *pool, in cpu_get_least_loaded() argument
27 if (!pool->irqs_per_cpu[cpu]) { in cpu_get_least_loaded()
33 if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu]) in cpu_get_least_loaded()
38 mlx5_core_err(pool->dev, "NO online CPUs in req_mask (%*pbl)\n", in cpu_get_least_loaded()
42 pool->irqs_per_cpu[best_cpu]++; in cpu_get_least_loaded()
48 irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc) in irq_pool_request_irq() argument
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/
crypto.c
19 #define MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) MLX5_CRYPTO_DEK_CALC_FREED(pool) argument
288 mlx5_crypto_dek_bulk_create(struct mlx5_crypto_dek_pool *pool) in mlx5_crypto_dek_bulk_create() argument
290 struct mlx5_crypto_dek_priv *dek_priv = pool->mdev->mlx5e_res.dek_priv; in mlx5_crypto_dek_bulk_create()
291 struct mlx5_core_dev *mdev = pool->mdev; in mlx5_crypto_dek_bulk_create()
313 err = mlx5_crypto_create_dek_bulk(mdev, pool->key_purpose, in mlx5_crypto_dek_bulk_create()
334 mlx5_crypto_dek_pool_add_bulk(struct mlx5_crypto_dek_pool *pool) in mlx5_crypto_dek_pool_add_bulk() argument
338 bulk = mlx5_crypto_dek_bulk_create(pool); in mlx5_crypto_dek_pool_add_bulk()
342 pool->avail_deks += bulk->num_deks; in mlx5_crypto_dek_pool_add_bulk()
343 pool->num_deks += bulk->num_deks; in mlx5_crypto_dek_pool_add_bulk()
344 list_add(&bulk->entry, &pool->partial_list); in mlx5_crypto_dek_pool_add_bulk()
[all …]
/linux/net/rds/
ib_rdma.c
194 struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool) in rds_ib_reuse_mr() argument
200 spin_lock_irqsave(&pool->clean_lock, flags); in rds_ib_reuse_mr()
201 ret = llist_del_first(&pool->clean_list); in rds_ib_reuse_mr()
202 spin_unlock_irqrestore(&pool->clean_lock, flags); in rds_ib_reuse_mr()
205 if (pool->pool_type == RDS_IB_MR_8K_POOL) in rds_ib_reuse_mr()
271 struct rds_ib_mr_pool *pool = ibmr->pool; in rds_ib_teardown_mr() local
273 atomic_sub(pinned, &pool->free_pinned); in rds_ib_teardown_mr()
277 static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all) in rds_ib_flush_goal() argument
281 item_count = atomic_read(&pool->item_count); in rds_ib_flush_goal()
338 int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, in rds_ib_flush_mr_pool() argument
[all …]
/linux/include/net/
xsk_buff_pool.h
30 struct xsk_buff_pool *pool; member
66 /* For performance reasons, each buff pool has its own array of dma_pages
106 int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
108 int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
110 int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
111 void xp_destroy(struct xsk_buff_pool *pool);
112 void xp_get_pool(struct xsk_buff_pool *pool);
113 bool xp_put_pool(struct xsk_buff_pool *pool);
114 void xp_clear_dev(struct xsk_buff_pool *pool);
115 void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
121 xp_init_xskb_addr(struct xdp_buff_xsk * xskb,struct xsk_buff_pool * pool,u64 addr) xp_init_xskb_addr() argument
127 xp_init_xskb_dma(struct xdp_buff_xsk * xskb,struct xsk_buff_pool * pool,dma_addr_t * dma_pages,u64 addr) xp_init_xskb_dma() argument
171 xp_dma_sync_for_device(struct xsk_buff_pool * pool,dma_addr_t dma,size_t size) xp_dma_sync_for_device() argument
184 xp_desc_crosses_non_contig_pg(struct xsk_buff_pool * pool,u64 addr,u32 len) xp_desc_crosses_non_contig_pg() argument
201 xp_aligned_extract_addr(struct xsk_buff_pool * pool,u64 addr) xp_aligned_extract_addr() argument
222 xp_aligned_extract_idx(struct xsk_buff_pool * pool,u64 addr) xp_aligned_extract_idx() argument
234 xp_get_handle(struct xdp_buff_xsk * xskb,struct xsk_buff_pool * pool) xp_get_handle() argument
248 xp_tx_metadata_enabled(const struct xsk_buff_pool * pool) xp_tx_metadata_enabled() argument
[all …]
/linux/kernel/
workqueue.c
261 struct worker_pool *pool; /* I: the associated pool */ member
537 static void show_one_worker_pool(struct worker_pool *pool);
547 #define for_each_bh_worker_pool(pool, cpu) \ argument
548 for ((pool) = &per_cpu(bh_worker_pools, cpu)[0]; \
549 (pool) < &per_cpu(bh_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
550 (pool)++)
552 #define for_each_cpu_worker_pool(pool, cpu) \ argument
553 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
554 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
555 (pool)++)
[all …]
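
worker_pool is internal machinery: the per-CPU and unbound pools walked by the for_each_*_worker_pool() iterators above execute work items queued through the public workqueue API. Minimal hedged usage:

    #include <linux/printk.h>
    #include <linux/workqueue.h>

    static void my_fn(struct work_struct *work)
    {
            pr_info("running on a worker borrowed from a shared pool\n");
    }
    static DECLARE_WORK(my_work, my_fn);

    static void my_kick(void)
    {
            schedule_work(&my_work);        /* system_wq -> a per-CPU worker_pool */
            flush_work(&my_work);           /* wait for the worker to finish it */
    }
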
/linux/drivers/net/ethernet/mellanox/mlxsw/
spectrum_cnt.c
54 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; in mlxsw_sp_counter_sub_pools_init() local
62 for (i = 0; i < pool->sub_pools_count; i++) { in mlxsw_sp_counter_sub_pools_init()
63 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_init()
89 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_init()
99 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; in mlxsw_sp_counter_sub_pools_fini() local
104 for (i = 0; i < pool->sub_pools_count; i++) { in mlxsw_sp_counter_sub_pools_fini()
105 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_fini()
115 const struct mlxsw_sp_counter_pool *pool = priv; in mlxsw_sp_counter_pool_occ_get() local
117 return atomic_read(&pool->active_entries_count); in mlxsw_sp_counter_pool_occ_get()
124 struct mlxsw_sp_counter_pool *pool; in mlxsw_sp_counter_pool_init() local
[all …]
/linux/drivers/gpu/drm/amd/display/dc/resource/dcn302/
dcn302_resource.c
711 static bool dcn302_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) in dcn302_dwbc_create() argument
714 uint32_t pipe_count = pool->res_cap->num_dwb; in dcn302_dwbc_create()
726 pool->dwbc[i] = &dwbc30->base; in dcn302_dwbc_create()
746 static bool dcn302_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) in dcn302_mmhubbub_create() argument
749 uint32_t pipe_count = pool->res_cap->num_dwb; in dcn302_mmhubbub_create()
761 pool->mcif_wb[i] = &mcif_wb30->base; in dcn302_mmhubbub_create()
957 static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool) in init_soc_bounding_box() argument
969 loaded_ip->max_num_otg = pool->pipe_count; in init_soc_bounding_box()
970 loaded_ip->max_num_dpp = pool->pipe_count; in init_soc_bounding_box()
991 static void dcn302_resource_destruct(struct resource_pool *pool) in dcn302_resource_destruct() argument
[all …]
/linux/drivers/gpu/drm/i915/gt/
intel_gt_buffer_pool.c
15 bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz) in bucket_for_size() argument
25 if (n >= ARRAY_SIZE(pool->cache_list)) in bucket_for_size()
26 n = ARRAY_SIZE(pool->cache_list) - 1; in bucket_for_size()
28 return &pool->cache_list[n]; in bucket_for_size()
38 static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep) in pool_free_older_than() argument
45 for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) { in pool_free_older_than()
46 struct list_head *list = &pool->cache_list[n]; in pool_free_older_than()
51 if (spin_trylock_irq(&pool->lock)) { in pool_free_older_than()
74 spin_unlock_irq(&pool->lock); in pool_free_older_than()
90 struct intel_gt_buffer_pool *pool = in pool_free_work() local
[all …]
