
Searched full:pool (Results 1 – 25 of 1289) sorted by relevance


/linux/net/xdp/
xsk_buff_pool.c
13 void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) in xp_add_xsk() argument
18 spin_lock(&pool->xsk_tx_list_lock); in xp_add_xsk()
19 list_add_rcu(&xs->tx_list, &pool->xsk_tx_list); in xp_add_xsk()
20 spin_unlock(&pool->xsk_tx_list_lock); in xp_add_xsk()
23 void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) in xp_del_xsk()
28 spin_lock(&pool->xsk_tx_list_lock); in xp_del_xsk()
30 spin_unlock(&pool->xsk_tx_list_lock); in xp_del_xsk()
33 void xp_destroy(struct xsk_buff_pool *pool) in xp_destroy()
35 if (!pool) in xp_destroy()
38 kvfree(pool in xp_destroy()
25 xp_del_xsk(struct xsk_buff_pool * pool,struct xdp_sock * xs) xp_del_xsk() argument
37 xp_destroy(struct xsk_buff_pool * pool) xp_destroy() argument
47 xp_alloc_tx_descs(struct xsk_buff_pool * pool,struct xdp_sock * xs) xp_alloc_tx_descs() argument
61 struct xsk_buff_pool *pool; xp_create_and_assign_umem() local
121 xp_set_rxq_info(struct xsk_buff_pool * pool,struct xdp_rxq_info * rxq) xp_set_rxq_info() argument
130 xp_fill_cb(struct xsk_buff_pool * pool,struct xsk_cb_desc * desc) xp_fill_cb() argument
142 xp_disable_drv_zc(struct xsk_buff_pool * pool) xp_disable_drv_zc() argument
165 xp_assign_dev(struct xsk_buff_pool * pool,struct net_device * netdev,u16 queue_id,u16 flags) xp_assign_dev() argument
251 xp_assign_dev_shared(struct xsk_buff_pool * pool,struct xdp_sock * umem_xs,struct net_device * dev,u16 queue_id) xp_assign_dev_shared() argument
268 xp_clear_dev(struct xsk_buff_pool * pool) xp_clear_dev() argument
285 struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool, xp_release_deferred() local
306 xp_get_pool(struct xsk_buff_pool * pool) xp_get_pool() argument
311 xp_put_pool(struct xsk_buff_pool * pool) xp_put_pool() argument
325 xp_find_dma_map(struct xsk_buff_pool * pool) xp_find_dma_map() argument
385 xp_dma_unmap(struct xsk_buff_pool * pool,unsigned long attrs) xp_dma_unmap() argument
420 xp_init_dma_info(struct xsk_buff_pool * pool,struct xsk_dma_map * dma_map) xp_init_dma_info() argument
446 xp_dma_map(struct xsk_buff_pool * pool,struct device * dev,unsigned long attrs,struct page ** pages,u32 nr_pages) xp_dma_map() argument
491 xp_addr_crosses_non_contig_pg(struct xsk_buff_pool * pool,u64 addr) xp_addr_crosses_non_contig_pg() argument
497 xp_check_unaligned(struct xsk_buff_pool * pool,u64 * addr) xp_check_unaligned() argument
507 xp_check_aligned(struct xsk_buff_pool * pool,u64 * addr) xp_check_aligned() argument
513 xp_get_xskb(struct xsk_buff_pool * pool,u64 addr) xp_get_xskb() argument
529 __xp_alloc(struct xsk_buff_pool * pool) __xp_alloc() argument
560 xp_alloc(struct xsk_buff_pool * pool) xp_alloc() argument
586 xp_alloc_new_from_fq(struct xsk_buff_pool * pool,struct xdp_buff ** xdp,u32 max) xp_alloc_new_from_fq() argument
622 xp_alloc_reused(struct xsk_buff_pool * pool,struct xdp_buff ** xdp,u32 nb_entries) xp_alloc_reused() argument
642 xp_alloc_slow(struct xsk_buff_pool * pool,struct xdp_buff ** xdp,u32 max) xp_alloc_slow() argument
660 xp_alloc_batch(struct xsk_buff_pool * pool,struct xdp_buff ** xdp,u32 max) xp_alloc_batch() argument
684 xp_can_alloc(struct xsk_buff_pool * pool,u32 count) xp_can_alloc() argument
710 __xp_raw_get_addr(const struct xsk_buff_pool * pool,u64 addr) __xp_raw_get_addr() argument
715 __xp_raw_get_data(const struct xsk_buff_pool * pool,u64 addr) __xp_raw_get_data() argument
720 xp_raw_get_data(struct xsk_buff_pool * pool,u64 addr) xp_raw_get_data() argument
726 __xp_raw_get_dma(const struct xsk_buff_pool * pool,u64 addr) __xp_raw_get_dma() argument
733 xp_raw_get_dma(struct xsk_buff_pool * pool,u64 addr) xp_raw_get_dma() argument
751 xp_raw_get_ctx(const struct xsk_buff_pool * pool,u64 addr) xp_raw_get_ctx() argument
[all...]
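
The xp_add_xsk()/xp_del_xsk() pair above is the standard writer side of an RCU-protected list: mutate under a spinlock with the _rcu list ops so lockless readers can traverse concurrently. A minimal sketch of the same pattern with illustrative names (not from the kernel):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

struct demo_pool {
	struct list_head items;		/* walked locklessly under RCU */
	spinlock_t list_lock;		/* serializes writers only */
};

struct demo_item {
	struct list_head node;
};

/* Writers mutate the list under the spinlock with the _rcu list ops,
 * exactly as xp_add_xsk()/xp_del_xsk() do above; pool->items and
 * pool->list_lock are assumed to be initialized elsewhere. */
static void demo_pool_add(struct demo_pool *pool, struct demo_item *it)
{
	spin_lock(&pool->list_lock);
	list_add_rcu(&it->node, &pool->items);
	spin_unlock(&pool->list_lock);
}

static void demo_pool_del(struct demo_pool *pool, struct demo_item *it)
{
	spin_lock(&pool->list_lock);
	list_del_rcu(&it->node);
	spin_unlock(&pool->list_lock);
	/* wait an RCU grace period (e.g. kfree_rcu()) before freeing 'it' */
}
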
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/
pool.c
9 switch (resource->pool->type) { in hws_pool_free_one_resource()
11 mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id); in hws_pool_free_one_resource()
14 mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id); in hws_pool_free_one_resource()
23 static void hws_pool_resource_free(struct mlx5hws_pool *pool) in hws_pool_resource_free() argument
25 hws_pool_free_one_resource(pool->resource); in hws_pool_resource_free()
26 pool->resource = NULL; in hws_pool_resource_free()
28 if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) { in hws_pool_resource_free()
29 hws_pool_free_one_resource(pool->mirror_resource); in hws_pool_resource_free()
30 pool->mirror_resource = NULL; in hws_pool_resource_free()
35 hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range, in hws_pool_create_one_resource() argument
[all …]
/linux/net/core/
page_pool.c
46 #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++) argument
48 #define recycle_stat_inc(pool, __stat) \ argument
50 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
54 #define recycle_stat_add(pool, __stat, val) \ argument
56 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
75 * page_pool_get_stats() - fetch page pool stats
76 * @pool: pool from which page was allocated
85 bool page_pool_get_stats(const struct page_pool *pool, in page_pool_get_stats() argument
161 page_pool_producer_lock(struct page_pool * pool) page_pool_producer_lock() argument
174 page_pool_producer_unlock(struct page_pool * pool,bool in_softirq) page_pool_producer_unlock() argument
193 page_pool_init(struct page_pool * pool,const struct page_pool_params * params,int cpuid) page_pool_init() argument
317 page_pool_uninit(struct page_pool * pool) page_pool_uninit() argument
336 struct page_pool *pool; page_pool_create_percpu() local
374 page_pool_refill_alloc_cache(struct page_pool * pool) page_pool_refill_alloc_cache() argument
427 __page_pool_get_cached(struct page_pool * pool) __page_pool_get_cached() argument
443 __page_pool_dma_sync_for_device(const struct page_pool * pool,netmem_ref netmem,u32 dma_sync_size) __page_pool_dma_sync_for_device() argument
457 page_pool_dma_sync_for_device(const struct page_pool * pool,netmem_ref netmem,u32 dma_sync_size) page_pool_dma_sync_for_device() argument
471 page_pool_register_dma_index(struct page_pool * pool,netmem_ref netmem,gfp_t gfp) page_pool_register_dma_index() argument
496 page_pool_release_dma_index(struct page_pool * pool,netmem_ref netmem) page_pool_release_dma_index() argument
521 page_pool_dma_map(struct page_pool * pool,netmem_ref netmem,gfp_t gfp) page_pool_dma_map() argument
560 __page_pool_alloc_page_order(struct page_pool * pool,gfp_t gfp) __page_pool_alloc_page_order() argument
586 __page_pool_alloc_netmems_slow(struct page_pool * pool,gfp_t gfp) __page_pool_alloc_netmems_slow() argument
650 page_pool_alloc_netmems(struct page_pool * pool,gfp_t gfp) page_pool_alloc_netmems() argument
669 page_pool_alloc_pages(struct page_pool * pool,gfp_t gfp) page_pool_alloc_pages() argument
680 page_pool_inflight(const struct page_pool * pool,bool strict) page_pool_inflight() argument
699 page_pool_set_pp_info(struct page_pool * pool,netmem_ref netmem) page_pool_set_pp_info() argument
721 __page_pool_release_netmem_dma(struct page_pool * pool,netmem_ref netmem) __page_pool_release_netmem_dma() argument
749 page_pool_return_netmem(struct page_pool * pool,netmem_ref netmem) page_pool_return_netmem() argument
776 page_pool_recycle_in_ring(struct page_pool * pool,netmem_ref netmem) page_pool_recycle_in_ring() argument
796 page_pool_recycle_in_cache(netmem_ref netmem,struct page_pool * pool) page_pool_recycle_in_cache() argument
823 __page_pool_put_page(struct page_pool * pool,netmem_ref netmem,unsigned int dma_sync_size,bool allow_direct) __page_pool_put_page() argument
868 page_pool_napi_local(const struct page_pool * pool) page_pool_napi_local() argument
895 page_pool_put_unrefed_netmem(struct page_pool * pool,netmem_ref netmem,unsigned int dma_sync_size,bool allow_direct) page_pool_put_unrefed_netmem() argument
911 page_pool_put_unrefed_page(struct page_pool * pool,struct page * page,unsigned int dma_sync_size,bool allow_direct) page_pool_put_unrefed_page() argument
919 page_pool_recycle_ring_bulk(struct page_pool * pool,netmem_ref * bulk,u32 bulk_len) page_pool_recycle_ring_bulk() argument
980 struct page_pool *pool = NULL; page_pool_put_netmem_bulk() local
1018 page_pool_drain_frag(struct page_pool * pool,netmem_ref netmem) page_pool_drain_frag() argument
1036 page_pool_free_frag(struct page_pool * pool) page_pool_free_frag() argument
1049 page_pool_alloc_frag_netmem(struct page_pool * pool,unsigned int * offset,unsigned int size,gfp_t gfp) page_pool_alloc_frag_netmem() argument
1094 page_pool_alloc_frag(struct page_pool * pool,unsigned int * offset,unsigned int size,gfp_t gfp) page_pool_alloc_frag() argument
1102 page_pool_empty_ring(struct page_pool * pool) page_pool_empty_ring() argument
1117 __page_pool_destroy(struct page_pool * pool) __page_pool_destroy() argument
1133 page_pool_empty_alloc_cache_once(struct page_pool * pool) page_pool_empty_alloc_cache_once() argument
1150 page_pool_scrub(struct page_pool * pool) page_pool_scrub() argument
1182 page_pool_release(struct page_pool * pool) page_pool_release() argument
1201 struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw); page_pool_release_retry() local
1230 page_pool_use_xdp_mem(struct page_pool * pool,void (* disconnect)(void *),const struct xdp_mem_info * mem) page_pool_use_xdp_mem() argument
1254 page_pool_enable_direct_recycling(struct page_pool * pool,struct napi_struct * napi) page_pool_enable_direct_recycling() argument
1267 page_pool_disable_direct_recycling(struct page_pool * pool) page_pool_disable_direct_recycling() argument
1285 page_pool_destroy(struct page_pool * pool) page_pool_destroy() argument
1309 page_pool_update_nid(struct page_pool * pool,int new_nid) page_pool_update_nid() argument
1332 net_mp_niov_set_page_pool(struct page_pool * pool,struct net_iov * niov) net_mp_niov_set_page_pool() argument
[all...]
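
Taken together, the entry points above form the consumer API: create a pool per RX queue, allocate in the fast path, recycle on free. A hedged sketch from memory (page_pool_params field names and PP_FLAG_DMA_MAP may vary across kernel versions):

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/numa.h>
#include <net/page_pool/helpers.h>

static int demo_page_pool(struct device *dev)
{
	struct page_pool_params params = {
		.flags		= PP_FLAG_DMA_MAP,	/* pool DMA-maps pages */
		.order		= 0,			/* single pages */
		.pool_size	= 256,			/* arbitrary ring size */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,	/* typical RX direction */
	};
	struct page_pool *pool;
	struct page *page;

	pool = page_pool_create(&params);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	page = page_pool_alloc_pages(pool, GFP_KERNEL);
	if (page)
		page_pool_put_full_page(pool, page, false);	/* recycle */

	page_pool_destroy(pool);
	return 0;
}
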
page_pool_user.c
18 /* Protects: page_pools, netdevice->page_pools, pool->p.napi, pool->slow.netdev,
19 * pool->user.
25 * linked to a netdev at creation time. Following page pool "visibility"
32 * to error, or (c) the entire namespace which owned this pool disappeared
36 typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
42 struct page_pool *pool; in netdev_nl_page_pool_get_do() local
47 pool = xa_load(&page_pools, id); in netdev_nl_page_pool_get_do()
48 if (!pool || hlist_unhashed(&pool->user.list) || in netdev_nl_page_pool_get_do()
49 !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) { in netdev_nl_page_pool_get_do()
60 err = fill(rsp, pool, info); in netdev_nl_page_pool_get_do()
[all …]
/linux/drivers/net/ethernet/ti/
k3-cppi-desc-pool.c
2 /* TI K3 CPPI5 descriptors pool API
15 #include "k3-cppi-desc-pool.h"
28 void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool) in k3_cppi_desc_pool_destroy() argument
30 if (!pool) in k3_cppi_desc_pool_destroy()
33 WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool), in k3_cppi_desc_pool_destroy()
35 gen_pool_size(pool->gen_pool), in k3_cppi_desc_pool_destroy()
36 gen_pool_avail(pool->gen_pool)); in k3_cppi_desc_pool_destroy()
37 if (pool->cpumem) in k3_cppi_desc_pool_destroy()
38 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem, in k3_cppi_desc_pool_destroy()
39 pool->dma_addr); in k3_cppi_desc_pool_destroy()
[all …]
/linux/mm/
mempool.c
3 * memory buffer pool support. Such pools are mostly used
41 static void poison_error(struct mempool *pool, void *element, size_t size, in poison_error() argument
44 const int nr = pool->curr_nr; in poison_error()
50 pr_err("Mempool %p size %zu\n", pool, size); in poison_error()
58 static void __check_element(struct mempool *pool, void *element, size_t size) in __check_element() argument
67 poison_error(pool, element, size, i); in __check_element()
74 static void check_element(struct mempool *pool, void *element) in check_element() argument
81 if (pool->free == mempool_kfree) { in check_element()
82 __check_element(pool, element, (size_t)pool->pool_data); in check_element()
83 } else if (pool->free == mempool_free_slab) { in check_element()
[all …]
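
poison_error()/check_element() above are debug plumbing; the public mempool surface is tiny. A minimal usage sketch (the 4-element minimum and 128-byte size are arbitrary):

#include <linux/errno.h>
#include <linux/mempool.h>

static int demo_mempool(void)
{
	/* at least 4 preallocated 128-byte elements survive memory pressure */
	mempool_t *pool = mempool_create_kmalloc_pool(4, 128);
	void *buf;

	if (!pool)
		return -ENOMEM;

	/* with GFP_KERNEL this sleeps rather than fail once the reserve
	 * is exhausted, which is the point of a mempool */
	buf = mempool_alloc(pool, GFP_KERNEL);
	mempool_free(buf, pool);

	mempool_destroy(pool);
	return 0;
}
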
zsmalloc.c
21 * pool->lock
270 struct zs_pool *pool; member
364 static void kick_deferred_free(struct zs_pool *pool);
365 static void init_deferred_free(struct zs_pool *pool);
366 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
368 static void kick_deferred_free(struct zs_pool *pool) {} in kick_deferred_free() argument
369 static void init_deferred_free(struct zs_pool *pool) {} in init_deferred_free() argument
370 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {} in SetZsPageMovable() argument
373 static int create_cache(struct zs_pool *pool) in create_cache() argument
377 name = kasprintf(GFP_KERNEL, "zs_handle-%s", pool->name); in create_cache()
[all …]
/linux/lib/
H A Dgenalloc.c16 * available. If new memory is added to the pool a lock has to be
146 * gen_pool_create - create a new special memory pool
148 * @nid: node id of the node the pool structure should be allocated on, or -1
150 * Create a new special memory pool that can be used to manage special purpose
155 struct gen_pool *pool; in gen_pool_create() local
157 pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid); in gen_pool_create()
158 if (pool != NULL) { in gen_pool_create()
159 spin_lock_init(&pool->lock); in gen_pool_create()
160 INIT_LIST_HEAD(&pool->chunks); in gen_pool_create()
161 pool->min_alloc_order = min_alloc_order; in gen_pool_create()
[all …]
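
gen_pool_create() only allocates the descriptor; the pool becomes useful once memory is donated with gen_pool_add(). A short sketch of the usual sequence (the order-5 granularity and 64-byte allocation are arbitrary):

#include <linux/errno.h>
#include <linux/genalloc.h>

/* Manage a caller-provided region (e.g. SRAM already mapped at vaddr). */
static int demo_genpool(unsigned long vaddr, size_t size)
{
	/* order 5: every allocation is a multiple of 32 bytes */
	struct gen_pool *pool = gen_pool_create(5, -1);
	unsigned long chunk;

	if (!pool)
		return -ENOMEM;

	if (gen_pool_add(pool, vaddr, size, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	chunk = gen_pool_alloc(pool, 64);	/* returns 0 when no space */
	if (chunk)
		gen_pool_free(pool, chunk, 64);

	gen_pool_destroy(pool);	/* warns if allocations are outstanding */
	return 0;
}
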
objpool.c
19 objpool_init_percpu_slot(struct objpool_head *pool, in objpool_init_percpu_slot() argument
24 void *obj = (void *)&slot->entries[pool->capacity]; in objpool_init_percpu_slot()
28 slot->mask = pool->capacity - 1; in objpool_init_percpu_slot()
37 obj = obj + pool->obj_size; in objpool_init_percpu_slot()
40 pool->nr_objs++; in objpool_init_percpu_slot()
48 objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs, in objpool_init_percpu_slots() argument
63 nodes = nr_objs / pool->nr_possible_cpus; in objpool_init_percpu_slots()
64 if (cpu_count < (nr_objs % pool->nr_possible_cpus)) in objpool_init_percpu_slots()
68 size = struct_size(slot, entries, pool->capacity) + in objpool_init_percpu_slots()
69 pool->obj_size * nodes; in objpool_init_percpu_slots()
[all …]
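
The slot-initialization code above spreads nr_objs objects across per-CPU rings; consumers then pop and push objects locklessly. A sketch from memory of the public calls (the objpool_init() parameter order is an assumption, check include/linux/objpool.h):

#include <linux/objpool.h>
#include <linux/slab.h>

struct demo_obj {
	int id;
};

static int demo_objpool(void)
{
	struct objpool_head pool;
	struct demo_obj *obj;
	int rc;

	/* 64 objects, no context, no per-object init, no release callback;
	 * parameter order is an assumption, check include/linux/objpool.h */
	rc = objpool_init(&pool, 64, sizeof(struct demo_obj),
			  GFP_KERNEL, NULL, NULL, NULL);
	if (rc)
		return rc;

	obj = objpool_pop(&pool);		/* lock-free pop from a per-CPU slot */
	if (obj)
		objpool_push(obj, &pool);	/* return it to the pool */

	objpool_fini(&pool);
	return 0;
}
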
/linux/net/ceph/
msgpool.c
14 struct ceph_msgpool *pool = arg; in msgpool_alloc() local
17 msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items, in msgpool_alloc()
20 dout("msgpool_alloc %s failed\n", pool->name); in msgpool_alloc()
22 dout("msgpool_alloc %s %p\n", pool->name, msg); in msgpool_alloc()
23 msg->pool = pool; in msgpool_alloc()
30 struct ceph_msgpool *pool = arg; in msgpool_free() local
33 dout("msgpool_release %s %p\n", pool->name, msg); in msgpool_free()
34 msg->pool = NULL; in msgpool_free()
38 int ceph_msgpool_init(struct ceph_msgpool *pool, int type, in ceph_msgpool_init() argument
43 pool->type = type; in ceph_msgpool_init()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/
H A Dcrypto.c13 * (for example, TLS) after last revalidation in a pool or a bulk.
19 #define MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) MLX5_CRYPTO_DEK_CALC_FREED(pool) argument
33 int num_deks; /* the total number of keys in this pool */
34 int avail_deks; /* the number of available keys in this pool */
35 int in_use_deks; /* the number of keys currently in use in this pool */
288 mlx5_crypto_dek_bulk_create(struct mlx5_crypto_dek_pool *pool) in mlx5_crypto_dek_bulk_create() argument
290 struct mlx5_crypto_dek_priv *dek_priv = pool->mdev->mlx5e_res.dek_priv; in mlx5_crypto_dek_bulk_create()
291 struct mlx5_core_dev *mdev = pool->mdev; in mlx5_crypto_dek_bulk_create()
313 err = mlx5_crypto_create_dek_bulk(mdev, pool->key_purpose, in mlx5_crypto_dek_bulk_create()
334 mlx5_crypto_dek_pool_add_bulk(struct mlx5_crypto_dek_pool *pool) in mlx5_crypto_dek_pool_add_bulk() argument
[all …]
/linux/tools/testing/selftests/drivers/net/mlxsw/
sharedbuffer_configuration.py
16 objects, pool, tcbind and portpool. Provide an interface to get random
18 1. Pool:
22 - random pool number
30 for pool in pools:
31 self._pools.append(pool)
47 def _get_th(self, pool): argument
50 if pool["thtype"] == "dynamic":
58 for pool in self._pools:
59 if pool["type"] == "ingress":
60 ing_pools.append(pool)
[all …]
/linux/drivers/staging/octeon/
ethernet-mem.c
17 * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
18 * @pool: Pool to allocate an skbuff for
19 * @size: Size of the buffer needed for the pool
24 static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements) in cvm_oct_fill_hw_skbuff() argument
35 cvmx_fpa_free(skb->data, pool, size / 128); in cvm_oct_fill_hw_skbuff()
42 * cvm_oct_free_hw_skbuff - free hardware pool skbuffs
43 * @pool: Pool to free skbuffs from
44 * @size: Size of the buffer needed for the pool
47 static void cvm_oct_free_hw_skbuff(int pool, int size, int elements) in cvm_oct_free_hw_skbuff() argument
52 memory = cvmx_fpa_alloc(pool); in cvm_oct_free_hw_skbuff()
[all …]
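
cvmx_fpa_free() hands a buffer to the hardware pool and cvmx_fpa_alloc() takes one back, so "filling" a pool is just freeing buffers into it, as the loop above shows. A simplified sketch of that seeding loop (alignment handling omitted):

#include <linux/slab.h>
#include <asm/octeon/cvmx-fpa.h>

/* Seed hardware FPA pool 'pool' with 'elements' buffers of 'size' bytes,
 * mirroring the loop in cvm_oct_fill_hw_skbuff() above minus the skb
 * bookkeeping. Real code must guarantee 128-byte-aligned buffers. */
static int demo_fill_fpa_pool(int pool, int size, int elements)
{
	int freed;

	for (freed = 0; freed < elements; freed++) {
		void *mem = kmalloc(size, GFP_KERNEL);

		if (!mem)
			break;
		/* the hardware counts buffer size in 128-byte cache lines */
		cvmx_fpa_free(mem, pool, size / 128);
	}
	return freed;
}
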
/linux/drivers/gpu/drm/amd/display/dc/resource/dce60/
dce60_resource.c
823 static void dce60_resource_destruct(struct dce110_resource_pool *pool) in dce60_resource_destruct()
827 for (i = 0; i < pool->base.pipe_count; i++) { in dce60_resource_destruct()
828 if (pool->base.opps[i] != NULL) in dce60_resource_destruct()
829 dce110_opp_destroy(&pool->base.opps[i]); in dce60_resource_destruct()
831 if (pool->base.transforms[i] != NULL) in dce60_resource_destruct()
832 dce60_transform_destroy(&pool->base.transforms[i]); in dce60_resource_destruct()
834 if (pool->base.ipps[i] != NULL) in dce60_resource_destruct()
835 dce_ipp_destroy(&pool->base.ipps[i]); in dce60_resource_destruct()
837 if (pool->base.mis[i] != NULL) { in dce60_resource_destruct()
838 kfree(TO_DCE_MEM_INPUT(pool in dce60_resource_destruct()
798 dce60_resource_destruct(struct dce110_resource_pool * pool) dce60_resource_destruct() argument
867 dce60_destroy_resource_pool(struct resource_pool ** pool) dce60_destroy_resource_pool() argument
890 dce60_construct(uint8_t num_virtual_links,struct dc * dc,struct dce110_resource_pool * pool) dce60_construct() argument
1071 struct dce110_resource_pool *pool = dce60_create_resource_pool() local
1088 dce61_construct(uint8_t num_virtual_links,struct dc * dc,struct dce110_resource_pool * pool) dce61_construct() argument
1269 struct dce110_resource_pool *pool = dce61_create_resource_pool() local
1286 dce64_construct(uint8_t num_virtual_links,struct dc * dc,struct dce110_resource_pool * pool) dce64_construct() argument
1466 struct dce110_resource_pool *pool = dce64_create_resource_pool() local
[all...]
/linux/drivers/gpu/drm/ttm/tests/
ttm_pool_test.c
80 struct ttm_pool *pool; in ttm_pool_pre_populated()
87 pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); in ttm_pool_pre_populated()
88 KUNIT_ASSERT_NOT_NULL(test, pool); in ttm_pool_pre_populated()
90 ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC); in ttm_pool_pre_populated()
92 err = ttm_pool_alloc(pool, tt, &simple_ctx); in ttm_pool_pre_populated()
95 ttm_pool_free(pool, tt); in ttm_pool_pre_populated()
98 return pool; in ttm_pool_pre_populated()
141 struct ttm_pool *pool; in ttm_pool_alloc_basic()
151 pool in ttm_pool_alloc_basic()
79 struct ttm_pool *pool; ttm_pool_pre_populated() local
140 struct ttm_pool *pool; ttm_pool_alloc_basic() local
201 struct ttm_pool *pool; ttm_pool_alloc_basic_dma_addr() local
241 struct ttm_pool *pool; ttm_pool_alloc_order_caching_match() local
269 struct ttm_pool *pool; ttm_pool_alloc_caching_mismatch() local
303 struct ttm_pool *pool; ttm_pool_alloc_order_mismatch() local
339 struct ttm_pool *pool; ttm_pool_free_dma_alloc() local
370 struct ttm_pool *pool; ttm_pool_free_no_dma_alloc() local
398 struct ttm_pool *pool; ttm_pool_fini_basic() local
[all...]
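
Condensing the lifecycle the test above exercises, and mirroring the four-argument ttm_pool_init() form this tree uses (TTM_ALLOCATION_POOL_USE_DMA_ALLOC is taken from the snippet; the header locations are from memory):

#include <linux/numa.h>
#include <drm/ttm/ttm_bo.h>	/* struct ttm_operation_ctx, assumed location */
#include <drm/ttm/ttm_pool.h>

/* Condensed alloc/free cycle from ttm_pool_pre_populated() above. */
static int demo_ttm_pool_cycle(struct device *dev, struct ttm_tt *tt)
{
	struct ttm_operation_ctx ctx = {};
	struct ttm_pool pool;
	int err;

	ttm_pool_init(&pool, dev, NUMA_NO_NODE,
		      TTM_ALLOCATION_POOL_USE_DMA_ALLOC);

	err = ttm_pool_alloc(&pool, tt, &ctx);	/* backs tt with pages */
	if (!err)
		ttm_pool_free(&pool, tt);	/* returns pages to the pool */

	ttm_pool_fini(&pool);
	return err;
}
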
/linux/include/linux/
genalloc.h
16 * available. If new memory is added to the pool a lock has to be
46 * @pool: the pool being allocated from
52 void *data, struct gen_pool *pool,
56 * General purpose special memory pool descriptor.
60 struct list_head chunks; /* list of chunks in this pool */
70 * General purpose special memory pool chunk descriptor.
73 struct list_head next_chunk; /* next chunk in pool */
97 extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
101 static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr, in gen_pool_add_virt() argument
104 return gen_pool_add_owner(pool, addr, phys, size, nid, NULL); in gen_pool_add_virt()
[all …]
/linux/include/net/
xsk_buff_pool.h
30 struct xsk_buff_pool *pool; member
66 /* For performance reasons, each buff pool has its own array of dma_pages
111 int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
113 int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
115 int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
116 void xp_destroy(struct xsk_buff_pool *pool);
117 void xp_get_pool(struct xsk_buff_pool *pool);
118 bool xp_put_pool(struct xsk_buff_pool *pool);
119 void xp_clear_dev(struct xsk_buff_pool *pool);
120 void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
121 xp_init_xskb_addr(struct xdp_buff_xsk * xskb,struct xsk_buff_pool * pool,u64 addr) xp_init_xskb_addr() argument
127 xp_init_xskb_dma(struct xdp_buff_xsk * xskb,struct xsk_buff_pool * pool,dma_addr_t * dma_pages,u64 addr) xp_init_xskb_dma() argument
171 xp_dma_sync_for_device(struct xsk_buff_pool * pool,dma_addr_t dma,size_t size) xp_dma_sync_for_device() argument
184 xp_desc_crosses_non_contig_pg(struct xsk_buff_pool * pool,u64 addr,u32 len) xp_desc_crosses_non_contig_pg() argument
201 xp_aligned_extract_addr(struct xsk_buff_pool * pool,u64 addr) xp_aligned_extract_addr() argument
222 xp_aligned_extract_idx(struct xsk_buff_pool * pool,u64 addr) xp_aligned_extract_idx() argument
234 xp_get_handle(struct xdp_buff_xsk * xskb,struct xsk_buff_pool * pool) xp_get_handle() argument
248 xp_tx_metadata_enabled(const struct xsk_buff_pool * pool) xp_tx_metadata_enabled() argument
[all...]
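
xp_can_alloc() and xp_alloc_batch() from the xsk_buff_pool.c listing above give a zero-copy driver a one-call RX refill. A hedged sketch built on those two signatures; demo_post_rx_desc() is a hypothetical stand-in for driver ring writes:

#include <net/xsk_buff_pool.h>

#define DEMO_RX_BATCH 64

/* Hypothetical driver hook standing in for ring-descriptor writes. */
static void demo_post_rx_desc(struct xdp_buff *xdp) { }

/* Refill up to DEMO_RX_BATCH RX slots from an XSK buffer pool. */
static u32 demo_refill_rx(struct xsk_buff_pool *pool)
{
	struct xdp_buff *bufs[DEMO_RX_BATCH];
	u32 i, n;

	if (!xp_can_alloc(pool, DEMO_RX_BATCH))
		return 0;

	n = xp_alloc_batch(pool, bufs, DEMO_RX_BATCH);
	for (i = 0; i < n; i++)
		demo_post_rx_desc(bufs[i]);

	return n;
}
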
/linux/arch/mips/include/asm/octeon/
cvmx-fpa.h
31 * Interface to the hardware Free Pool Allocator.
79 * Structure describing the current state of a FPA pool.
88 /* The number of elements in the pool at creation */
101 * Return the name of the pool
103 * @pool: Pool to get the name of
106 static inline const char *cvmx_fpa_get_name(uint64_t pool) in cvmx_fpa_get_name() argument
108 return cvmx_fpa_pool_info[pool].name; in cvmx_fpa_get_name()
112 * Return the base of the pool
114 * @pool: Pool to get the base of
117 static inline void *cvmx_fpa_get_base(uint64_t pool) in cvmx_fpa_get_base() argument
[all …]
/linux/sound/core/seq/
seq_memory.h
34 struct snd_seq_pool *pool; /* used pool */ member
38 /* design note: the pool is a contiguous block of memory; if we dynamically
39 want to add additional cells to the pool, we had better store them in another
40 pool, as we need to know the base address of the pool when releasing
47 int total_elements; /* pool size actually allocated */
50 int size; /* pool size to be allocated */
64 /* Pool lock */
70 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
75 static inline int snd_seq_unused_cells(struct snd_seq_pool *pool) in snd_seq_unused_cells() argument
77 return pool ? pool->total_elements - atomic_read(&pool->counter) : 0; in snd_seq_unused_cells()
[all …]
/linux/drivers/crypto/hisilicon/
sgl.c
49 * hisi_acc_create_sgl_pool() - Create a hw sgl pool.
50 * @dev: The device which hw sgl pool belongs to.
51 * @count: Count of hisi_acc_hw_sgl in pool.
54 * This function creates a hw sgl pool, after this user can get hw sgl memory
61 struct hisi_acc_sgl_pool *pool; in hisi_acc_create_sgl_pool() local
73 * the pool may allocate a block of memory of size PAGE_SIZE * 2^MAX_PAGE_ORDER, in hisi_acc_create_sgl_pool()
86 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in hisi_acc_create_sgl_pool()
87 if (!pool) in hisi_acc_create_sgl_pool()
89 block = pool->mem_block; in hisi_acc_create_sgl_pool()
115 pool->sgl_num_per_block = sgl_num_per_block; in hisi_acc_create_sgl_pool()
[all …]
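
For context, a hedged sketch of how a driver might create and tear down such a pool; hisi_acc_free_sgl_pool() as the counterpart call and the declaring header are from memory rather than from this snippet:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hisi_acc_qm.h>	/* assumed location of the sgl pool API */

static int demo_sgl_pool(struct device *dev)
{
	/* room for 256 hw sgl entries; the count choice is arbitrary */
	struct hisi_acc_sgl_pool *pool = hisi_acc_create_sgl_pool(dev, 256);

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	/* ... map scatterlists to hw sgl entries, run accelerator jobs ... */

	hisi_acc_free_sgl_pool(dev, pool);
	return 0;
}
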
/linux/drivers/staging/media/ipu3/
ipu3-css-pool.c
7 #include "ipu3-css-pool.h"
25 void imgu_css_pool_cleanup(struct imgu_device *imgu, struct imgu_css_pool *pool) in imgu_css_pool_cleanup() argument
30 imgu_dmamap_free(imgu, &pool->entry[i].param); in imgu_css_pool_cleanup()
33 int imgu_css_pool_init(struct imgu_device *imgu, struct imgu_css_pool *pool, in imgu_css_pool_init() argument
39 pool->entry[i].valid = false; in imgu_css_pool_init()
41 pool->entry[i].param.vaddr = NULL; in imgu_css_pool_init()
45 if (!imgu_dmamap_alloc(imgu, &pool->entry[i].param, size)) in imgu_css_pool_init()
49 pool->last = IPU3_CSS_POOL_SIZE; in imgu_css_pool_init()
54 imgu_css_pool_cleanup(imgu, pool); in imgu_css_pool_init()
59 * Allocate a new parameter via recycling the oldest entry in the pool.
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/
pci_irq.c
34 struct mlx5_irq_pool *pool; member
73 * has some available in the pool. This will allow the user to increase in mlx5_get_default_msix_vec_count()
156 struct mlx5_irq_pool *pool = irq->pool; in mlx5_system_free_irq() local
167 rmap = mlx5_eq_table_get_rmap(pool->dev); in mlx5_system_free_irq()
173 if (irq->map.index && pci_msix_can_alloc_dyn(pool->dev->pdev)) in mlx5_system_free_irq()
174 pci_msix_free_irq(pool->dev->pdev, irq->map); in mlx5_system_free_irq()
179 struct mlx5_irq_pool *pool = irq->pool; in irq_release() local
181 xa_erase(&pool->irqs, irq->pool_index); in irq_release()
189 struct mlx5_irq_pool *pool = irq->pool; in mlx5_irq_put() local
192 mutex_lock(&pool->lock); in mlx5_irq_put()
[all …]
/linux/drivers/gpu/drm/amd/display/dc/resource/dcn301/
dcn301_resource.c
92 #define TO_DCN301_RES_POOL(pool)\ argument
93 container_of(pool, struct dcn301_resource_pool, base)
1039 static void dcn301_destruct(struct dcn301_resource_pool *pool) in dcn301_destruct()
1043 for (i = 0; i < pool->base.stream_enc_count; i++) { in dcn301_destruct()
1044 if (pool->base.stream_enc[i] != NULL) { in dcn301_destruct()
1045 if (pool->base.stream_enc[i]->vpg != NULL) { in dcn301_destruct()
1046 kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); in dcn301_destruct()
1047 pool->base.stream_enc[i]->vpg = NULL; in dcn301_destruct()
1049 if (pool->base.stream_enc[i]->afmt != NULL) { in dcn301_destruct()
1050 kfree(DCN30_AFMT_FROM_AFMT(pool in dcn301_destruct()
1036 dcn301_destruct(struct dcn301_resource_pool * pool) dcn301_destruct() argument
1175 dcn301_dwbc_create(struct dc_context * ctx,struct resource_pool * pool) dcn301_dwbc_create() argument
1200 dcn301_mmhubbub_create(struct dc_context * ctx,struct resource_pool * pool) dcn301_mmhubbub_create() argument
1241 dcn301_destroy_resource_pool(struct resource_pool ** pool) dcn301_destroy_resource_pool() argument
1290 init_soc_bounding_box(struct dc * dc,struct dcn301_resource_pool * pool) init_soc_bounding_box() argument
1410 dcn301_resource_construct(uint8_t num_virtual_links,struct dc * dc,struct dcn301_resource_pool * pool) dcn301_resource_construct() argument
1726 struct dcn301_resource_pool *pool = dcn301_create_resource_pool() local
[all...]
/linux/include/trace/events/
page_pool.h
16 TP_PROTO(const struct page_pool *pool,
19 TP_ARGS(pool, inflight, hold, release),
22 __field(const struct page_pool *, pool)
30 __entry->pool = pool;
34 __entry->cnt = pool->destroy_cnt;
38 __entry->pool, __entry->inflight, __entry->hold,
44 TP_PROTO(const struct page_pool *pool,
47 TP_ARGS(pool, netmem, release),
50 __field(const struct page_pool *, pool)
57 __entry->pool = pool;
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ktls_tx.c
96 struct list_head list_node; /* member of the pool */
260 /* Recycling pool API */
269 struct mutex lock; /* Protects access to the pool */
280 struct mlx5e_tls_tx_pool *pool = in create_work() local
287 bulk_async = mlx5e_bulk_async_init(pool->mdev, MLX5E_TLS_TX_POOL_BULK); in create_work()
292 obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async->arr[i]); in create_work()
306 atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc); in create_work()
311 mutex_lock(&pool->lock); in create_work()
312 if (pool->size + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH) { in create_work()
313 mutex_unlock(&pool->lock); in create_work()
[all …]
