
Search results for "pool": results 1–25 of 1189, sorted by relevance.


/linux/net/xdp/
xsk_buff_pool.c

void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
        spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
        list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
        spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);

void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
        spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
        spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);

void xp_destroy(struct xsk_buff_pool *pool)
        if (!pool)
        kvfree(pool->tx_descs);
[all …]
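The xp_add_xsk()/xp_del_xsk() excerpt shows the usual writer side of an RCU-protected list: mutations happen under an IRQ-safe spinlock while readers may traverse locklessly. A minimal sketch of that pattern; the names (my_pool, my_sock) are hypothetical and not taken from xsk_buff_pool.c:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

struct my_pool {
        spinlock_t list_lock;           /* serializes writers */
        struct list_head items;         /* traversed under RCU */
};

struct my_sock {
        struct list_head node;
};

static void my_pool_add(struct my_pool *pool, struct my_sock *s)
{
        unsigned long flags;

        /* IRQ-safe lock: the list may also be written from IRQ context */
        spin_lock_irqsave(&pool->list_lock, flags);
        list_add_rcu(&s->node, &pool->items);
        spin_unlock_irqrestore(&pool->list_lock, flags);
}
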
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/
pool.c

switch (resource->pool->type) {
        mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id);
        mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id);

static void hws_pool_resource_free(struct mlx5hws_pool *pool)
        hws_pool_free_one_resource(pool->resource);
        pool->resource = NULL;
        if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
                hws_pool_free_one_resource(pool->mirror_resource);
                pool->mirror_resource = NULL;

hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range,
[all …]
/linux/net/core/
page_pool.c

#define alloc_stat_inc(pool, __stat)    (pool->alloc_stats.__stat++)
#define recycle_stat_inc(pool, __stat) \
        struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
#define recycle_stat_add(pool, __stat, val) \
        struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \

 * page_pool_get_stats() - fetch page pool stats
 * @pool: pool from which page was allocated

bool page_pool_get_stats(const struct page_pool *pool,
        stats->alloc_stats.fast += pool->alloc_stats.fast;
        stats->alloc_stats.slow += pool->alloc_stats.slow;
[all …]
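page_pool_get_stats() accumulates a pool's allocation and recycle counters into a caller-supplied structure. A minimal usage sketch, assuming a kernel built with CONFIG_PAGE_POOL_STATS; my_dump_pool_stats() is a hypothetical helper:

#include <linux/printk.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

static void my_dump_pool_stats(const struct page_pool *pool)
{
        struct page_pool_stats stats = { };

        /* Counters are added into *stats, so several pools can be
         * aggregated by reusing the same structure. Returns false
         * when stats support is not available. */
        if (!page_pool_get_stats(pool, &stats))
                return;

        pr_info("alloc fast=%llu slow=%llu\n",
                stats.alloc_stats.fast, stats.alloc_stats.slow);
}
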
page_pool_user.c

/* Protects: page_pools, netdevice->page_pools, pool->p.napi, pool->slow.netdev,
 * pool->user. */
/* … linked to a netdev at creation time. Following page pool "visibility" …
 * … to error, or (c) the entire namespace which owned this pool disappeared … */

typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,

        struct page_pool *pool;
        pool = xa_load(&page_pools, id);
        if (!pool || hlist_unhashed(&pool->user.list) ||
            !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) {
        err = fill(rsp, pool, info);
[all …]
/linux/drivers/net/ethernet/ti/
k3-cppi-desc-pool.c

/* TI K3 CPPI5 descriptors pool API */
#include "k3-cppi-desc-pool.h"

void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
        if (!pool)
        WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
             gen_pool_size(pool->gen_pool),
             gen_pool_avail(pool->gen_pool));
        if (pool->cpumem)
                dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
                                  pool->dma_addr);
[all …]
/linux/mm/
mempool.c

/* memory buffer pool support. Such pools are mostly used … */

static void poison_error(mempool_t *pool, void *element, size_t size,
        const int nr = pool->curr_nr;
        pr_err("Mempool %p size %zu\n", pool, size);

static void __check_element(mempool_t *pool, void *element, size_t size)
        poison_error(pool, element, size, i);

static void check_element(mempool_t *pool, void *element)
        if (pool->free == mempool_kfree) {
                __check_element(pool, element, (size_t)pool->pool_data);
        } else if (pool->free == mempool_free_slab) {
[all …]
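mempool.c implements pools that reserve a minimum number of elements so allocations can always make forward progress under memory pressure; the excerpt above is its element-poisoning debug support. A minimal usage sketch of the public API; the 16-element/256-byte sizing is illustrative:

#include <linux/mempool.h>

static mempool_t *my_pool;

static int my_init(void)
{
        /* 16 guaranteed elements, each a 256-byte kmalloc() buffer */
        my_pool = mempool_create_kmalloc_pool(16, 256);
        return my_pool ? 0 : -ENOMEM;
}

static void my_use(void)
{
        /* With GFP_KERNEL this waits for an element instead of failing */
        void *buf = mempool_alloc(my_pool, GFP_KERNEL);

        /* ... use buf ... */
        mempool_free(buf, my_pool);
}
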
dmapool.c

/* DMA Pool allocator …
 * The current design of this allocator is fairly simple. The pool is …
 */

struct dma_pool {       /* the pool */

        struct dma_pool *pool;  (in pools_show)
        list_for_each_entry(pool, &dev->dma_pools, pools) {
                /* per-pool info, no real statistics yet */
                pool->name, pool->nr_active,
                pool->nr_blocks, pool…

The remaining matches, collapsed to declarations (pool_check_block, pool_block_err and pool_init_page each appear twice, once per DMAPOOL_DEBUG branch):

pool_check_block(struct dma_pool *pool, struct dma_block *block, gfp_t mem_flags)
pool_find_page(struct dma_pool *pool, dma_addr_t dma)
pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
pool_init_page(struct dma_pool *pool, struct dma_page *page)
pool_check_block(struct dma_pool *pool, struct dma_block *block, gfp_t mem_flags)
pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
pool_init_page(struct dma_pool *pool, struct dma_page *page)
pool_block_pop(struct dma_pool *pool)
pool_block_push(struct dma_pool *pool, struct dma_block *block, dma_addr_t dma)
pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
dma_pool_destroy(struct dma_pool *pool)
dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
struct dma_pool *pool = *(struct dma_pool **)res;  (in dmam_pool_release)
struct dma_pool **ptr, *pool;  (in dmam_pool_create)
dmam_pool_destroy(struct dma_pool *pool)
[all...]
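The dma_pool API hands out fixed-size, alignment-guaranteed DMA blocks carved out of coherent pages. A minimal create/alloc/free sketch; the device pointer, the 64-byte sizing and the "my-descs" name are illustrative:

#include <linux/dmapool.h>

static int my_setup(struct device *dev)
{
        struct dma_pool *pool;
        dma_addr_t dma;
        void *vaddr;

        /* 64-byte blocks, 64-byte aligned, no boundary-crossing rule */
        pool = dma_pool_create("my-descs", dev, 64, 64, 0);
        if (!pool)
                return -ENOMEM;

        vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
        if (vaddr)
                dma_pool_free(pool, vaddr, dma);  /* dma is the bus address */

        dma_pool_destroy(pool);
        return 0;
}
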
/linux/lib/
genalloc.c

/* … available. If new memory is added to the pool a lock has to be … */

 * gen_pool_create - create a new special memory pool
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose

        struct gen_pool *pool;  (in gen_pool_create)
        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
[all …]
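genalloc manages caller-owned address ranges (on-chip SRAM, IOMEM and similar) rather than kernel heap memory. A minimal sketch of the typical flow; sram_virt and sram_size are illustrative:

#include <linux/genalloc.h>

static int my_sram_init(unsigned long sram_virt, size_t sram_size)
{
        struct gen_pool *pool;
        unsigned long addr;

        pool = gen_pool_create(5, -1);  /* 2^5 = 32-byte granule, any node */
        if (!pool)
                return -ENOMEM;

        /* Hand the externally owned region to the pool */
        if (gen_pool_add(pool, sram_virt, sram_size, -1)) {
                gen_pool_destroy(pool);
                return -ENOMEM;
        }

        addr = gen_pool_alloc(pool, 128);  /* returns 0 on failure */
        if (addr)
                gen_pool_free(pool, addr, 128);

        return 0;
}
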
objpool.c

objpool_init_percpu_slot(struct objpool_head *pool,
        void *obj = (void *)&slot->entries[pool->capacity];
        slot->mask = pool->capacity - 1;
        obj = obj + pool->obj_size;
        pool->nr_objs++;

objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
        nodes = nr_objs / pool->nr_possible_cpus;
        if (cpu_count < (nr_objs % pool->nr_possible_cpus))
        size = struct_size(slot, entries, pool->capacity) +
               pool->obj_size * nodes;
[all …]
/linux/net/ceph/
msgpool.c

        struct ceph_msgpool *pool = arg;  (in msgpool_alloc)
        msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items,
        dout("msgpool_alloc %s failed\n", pool->name);
        dout("msgpool_alloc %s %p\n", pool->name, msg);
        msg->pool = pool;

        struct ceph_msgpool *pool = arg;  (in msgpool_free)
        dout("msgpool_release %s %p\n", pool->name, msg);
        msg->pool = NULL;

int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
        pool->type = type;
[all …]
/linux/include/net/
xdp_sock_drv.h

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)

The remaining matches collapse to the inline helpers below; most appear twice in the header, once for CONFIG_XDP_SOCKETS builds and once as a no-op stub:

xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
xsk_pool_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
xsk_pool_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc)
xsk_pool_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
xsk_pool_dma_map(struct xsk_buff_pool *pool, struct device *dev, unsigned long attrs)
xsk_buff_alloc(struct xsk_buff_pool *pool)
xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
xsk_buff_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data)
xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool, dma_addr_t dma, size_t size)
xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
xsk_tx_release(struct xsk_buff_pool *pool)
xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
xsk_pool_get_headroom(struct xsk_buff_pool *pool)
[all...]
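These helpers form the driver-facing side of AF_XDP zero-copy: RX buffers come from the socket's fill ring via the pool, and TX descriptors are peeked from its TX ring. A hedged sketch of an RX refill loop; my_hw_post_rx() is a hypothetical hardware hook, not part of this API:

#include <net/xdp_sock_drv.h>

/* Hypothetical: post one DMA address + buffer cookie to the NIC RX ring */
static int my_hw_post_rx(dma_addr_t dma, struct xdp_buff *xdp);

static int my_rx_refill(struct xsk_buff_pool *pool, int budget)
{
        int n = 0;

        while (n < budget) {
                struct xdp_buff *xdp = xsk_buff_alloc(pool);
                dma_addr_t dma;

                if (!xdp)
                        break;          /* fill ring is empty */

                dma = xsk_buff_xdp_get_dma(xdp);
                if (my_hw_post_rx(dma, xdp) < 0) {
                        xsk_buff_free(xdp);  /* return buffer to the pool */
                        break;
                }
                n++;
        }
        return n;
}
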
xsk_buff_pool.h

struct xsk_buff_pool *pool;     /* member */
/* For performance reasons, each buff pool has its own array of dma_pages … */

int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
[all …]
/linux/kernel/cgroup/
dmem.c

static void free_cg_pool(struct dmem_cgroup_pool_state *pool)
        list_del(&pool->region_node);
        kfree(pool);

set_resource_min(struct dmem_cgroup_pool_state *pool, u64 val)
        page_counter_set_min(&pool->cnt, val);

set_resource_low(struct dmem_cgroup_pool_state *pool, u64 val)
        page_counter_set_low(&pool->cnt, val);

set_resource_max(struct dmem_cgroup_pool_state *pool, u64 val)
        page_counter_set_max(&pool->cnt, val);

static u64 get_resource_low(struct dmem_cgroup_pool_state *pool)

The remaining matches, collapsed to declarations:

get_resource_min(struct dmem_cgroup_pool_state *pool)
get_resource_max(struct dmem_cgroup_pool_state *pool)
get_resource_current(struct dmem_cgroup_pool_state *pool)
struct dmem_cgroup_pool_state *pool;  (in dmemcs_offline)
struct dmem_cgroup_pool_state *pool, *next;  (in dmemcs_free)
struct dmem_cgroup_pool_state *pool;  (in find_cg_pool_locked)
pool_parent(struct dmem_cgroup_pool_state *pool)
struct dmem_cgroup_pool_state *pool, *found_pool;  (in dmem_cgroup_calculate_protection)
struct dmem_cgroup_pool_state *pool = test_pool;  (in dmem_cgroup_state_evict_valuable)
struct dmem_cgroup_pool_state *pool, *ppool = NULL;  (in alloc_pool_single)
struct dmem_cgroup_pool_state *pool, *ppool, *retpool;  (in get_cg_pool_locked)
struct dmem_cgroup_pool_state *pool, *next;  (in dmemcg_free_rcu)
struct dmem_cgroup_pool_state *pool = …  (in dmem_cgroup_unregister_region)
dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool)
struct dmem_cgroup_pool_state *pool, *allocpool = NULL;  (in get_cg_pool_unlocked)
dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size)
struct dmem_cgroup_pool_state *pool;  (in dmem_cgroup_try_charge)
struct dmem_cgroup_pool_state *pool = NULL;  (in dmemcg_limit_write)
struct dmem_cgroup_pool_state *pool = find_cg_pool_locked(dmemcs, region);  (in dmemcg_limit_show)
[all...]
/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/
crypto.c

/* … (for example, TLS) after last revalidation in a pool or a bulk. */
#define MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) MLX5_CRYPTO_DEK_CALC_FREED(pool)

int num_deks;           /* total number of keys in this pool */
int avail_deks;         /* number of available keys in this pool */
int in_use_deks;        /* number of keys from this pool currently in use */

mlx5_crypto_dek_bulk_create(struct mlx5_crypto_dek_pool *pool)
        struct mlx5_crypto_dek_priv *dek_priv = pool->mdev->mlx5e_res.dek_priv;
        struct mlx5_core_dev *mdev = pool->mdev;
        err = mlx5_crypto_create_dek_bulk(mdev, pool->key_purpose,

mlx5_crypto_dek_pool_add_bulk(struct mlx5_crypto_dek_pool *pool)
[all …]
/linux/tools/testing/selftests/drivers/net/mlxsw/
sharedbuffer_configuration.py

objects, pool, tcbind and portpool. Provide an interface to get random …
1. Pool:
   - random pool number

for pool in pools:
    self._pools.append(pool)

def _get_th(self, pool):
    if pool["thtype"] == "dynamic":

for pool in self._pools:
    if pool["type"] == "ingress":
        ing_pools.append(pool)
[all …]
/linux/drivers/staging/octeon/
ethernet-mem.c

/*
 * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
 * @pool: Pool to allocate an skbuff for
 * @size: Size of the buffer needed for the pool
 */
static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
        cvmx_fpa_free(skb->data, pool, size / 128);

/*
 * cvm_oct_free_hw_skbuff - free hardware pool skbuffs
 * @pool: Pool to free skbuffs from
 * @size: Size of the buffer needed for the pool
 */
static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
        memory = cvmx_fpa_alloc(pool);
[all …]
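Buffers in Octeon's Free Pool Allocator are owned by hardware, so filling a pool means freeing buffers into it and draining it means allocating them back, which is exactly what the two functions above do. A minimal sketch of one alloc/free round trip; MY_POOL and MY_SIZE are illustrative, and the size is expressed in 128-byte cache lines as in the excerpt:

#include <asm/octeon/cvmx-fpa.h>

#define MY_POOL 0
#define MY_SIZE 2048

static void my_fpa_round_trip(void)
{
        void *buf = cvmx_fpa_alloc(MY_POOL);  /* NULL when pool is empty */

        if (buf)
                cvmx_fpa_free(buf, MY_POOL, MY_SIZE / 128);
}
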
/linux/include/net/page_pool/
helpers.h

/* … allocated from page pool. There is no cache line dirtying for 'struct page'
 * when a page is recycled back to the page pool. …
 * … page allocated from page pool. Page splitting enables memory saving and thus …
 * … the same page when a page is split. The API user must setup pool->p.max_len
 * and pool->p.offset correctly and ensure that page_pool_put_page() is called …
 */
bool page_pool_get_stats(const struct page_pool *pool,

 * @pool: pool from which to allocate
static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
        return page_pool_alloc_pages(pool, gf…

The remaining matches collapse to the inline helpers below:

page_pool_dev_alloc_frag(struct page_pool *pool, unsigned int *offset, unsigned int size)
page_pool_alloc_netmem(struct page_pool *pool, unsigned int *offset, unsigned int *size, gfp_t gfp)
page_pool_dev_alloc_netmem(struct page_pool *pool, unsigned int *offset, unsigned int *size)
page_pool_dev_alloc_netmems(struct page_pool *pool)
page_pool_alloc(struct page_pool *pool, unsigned int *offset, unsigned int *size, gfp_t gfp)
page_pool_dev_alloc(struct page_pool *pool, unsigned int *offset, unsigned int *size)
page_pool_alloc_va(struct page_pool *pool, unsigned int *size, gfp_t gfp)
page_pool_dev_alloc_va(struct page_pool *pool, unsigned int *size)
page_pool_get_dma_dir(const struct page_pool *pool)
page_pool_put_netmem(struct page_pool *pool, netmem_ref netmem, unsigned int dma_sync_size, bool allow_direct)
page_pool_put_page(struct page_pool *pool, struct page *page, unsigned int dma_sync_size, bool allow_direct)
page_pool_put_full_netmem(struct page_pool *pool, netmem_ref netmem, bool allow_direct)
page_pool_put_full_page(struct page_pool *pool, struct page *page, bool allow_direct)
page_pool_recycle_direct(struct page_pool *pool, struct page *page)
page_pool_recycle_direct_netmem(struct page_pool *pool, netmem_ref netmem)
page_pool_free_va(struct page_pool *pool, void *va, bool allow_direct)
__page_pool_dma_sync_for_cpu(const struct page_pool *pool, const dma_addr_t dma_addr, u32 offset, u32 dma_sync_size)
page_pool_dma_sync_for_cpu(const struct page_pool *pool, const struct page *page, u32 offset, u32 dma_sync_size)
page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool, const netmem_ref netmem, u32 offset, u32 dma_sync_size)
page_pool_put(struct page_pool *pool)
page_pool_nid_changed(struct page_pool *pool, int new_nid)
page_pool_is_unreadable(struct page_pool *pool)
[all...]
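Taken together, these helpers wrap the basic driver flow: create a pool bound to a device, allocate pages for RX, and return them through the pool so they can be recycled instead of going back to the page allocator. A hedged sketch under those assumptions; the parameter values are illustrative:

#include <linux/dma-mapping.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

static struct page_pool *my_create_pool(struct device *dev)
{
        struct page_pool_params pp = {
                .flags          = PP_FLAG_DMA_MAP,  /* pool maps pages for us */
                .order          = 0,                /* single pages */
                .pool_size      = 256,
                .nid            = NUMA_NO_NODE,
                .dev            = dev,
                .dma_dir        = DMA_FROM_DEVICE,
        };

        return page_pool_create(&pp);               /* ERR_PTR() on failure */
}

static void my_rx_one(struct page_pool *pool)
{
        struct page *page = page_pool_dev_alloc_pages(pool);

        if (!page)
                return;
        /* ... attach to an skb; on error, give the page back: */
        page_pool_put_full_page(pool, page, false);
}
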
/linux/drivers/gpu/drm/amd/display/dc/resource/dce60/
dce60_resource.c

static void dce60_resource_destruct(struct dce110_resource_pool *pool)
        for (i = 0; i < pool->base.pipe_count; i++) {
                if (pool->base.opps[i] != NULL)
                        dce110_opp_destroy(&pool->base.opps[i]);
                if (pool->base.transforms[i] != NULL)
                        dce60_transform_destroy(&pool->base.transforms[i]);
                if (pool->base.ipps[i] != NULL)
                        dce_ipp_destroy(&pool->base.ipps[i]);
                if (pool->base.mis[i] != NULL) {
                        kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
[all …]
/linux/drivers/gpu/drm/ttm/tests/
ttm_pool_test.c

        struct ttm_pool *pool;  (in ttm_pool_pre_populated)
        pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, pool);
        ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
        err = ttm_pool_alloc(pool, tt, &simple_ctx);
        ttm_pool_free(pool, tt);
        return pool;

        struct ttm_pool *pool;  (in ttm_pool_alloc_basic)
        pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, pool);
[all …]
/linux/include/linux/
genalloc.h

/* … available. If new memory is added to the pool a lock has to be … */

 * @pool: the pool being allocated from
        void *data, struct gen_pool *pool,

/* General purpose special memory pool descriptor. */
        struct list_head chunks;        /* list of chunks in this pool */

/* General purpose special memory pool chunk descriptor. */
        struct list_head next_chunk;    /* next chunk in pool */

extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);

static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr,
        return gen_pool_add_owner(pool, addr, phys, size, nid, NULL);
[all …]
/linux/arch/mips/include/asm/octeon/
cvmx-fpa.h

/* Interface to the hardware Free Pool Allocator. */

/* Structure describing the current state of a FPA pool. */
        /* The number of elements in the pool at creation */

/*
 * Return the name of the pool
 * @pool: Pool to get the name of
 */
static inline const char *cvmx_fpa_get_name(uint64_t pool)
        return cvmx_fpa_pool_info[pool].name;

/*
 * Return the base of the pool
 * @pool: Pool to get the base of
 */
static inline void *cvmx_fpa_get_base(uint64_t pool)
[all …]
/linux/sound/core/seq/
seq_memory.h

struct snd_seq_pool *pool;      /* used pool */

/* design note: the pool is a contiguous block of memory; if we dynamically
 * want to add additional cells to the pool, they had better be stored in
 * another pool, as we need to know the base address of the pool when
 * releasing …
 */

int total_elements;     /* pool size actually allocated */
int size;               /* pool size to be allocated */

/* Pool lock */

int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,

static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
        return pool ? pool->total_elements - atomic_read(&pool->counter) : 0;
[all …]
/linux/drivers/staging/media/ipu3/
ipu3-css-pool.c

#include "ipu3-css-pool.h"

void imgu_css_pool_cleanup(struct imgu_device *imgu, struct imgu_css_pool *pool)
        imgu_dmamap_free(imgu, &pool->entry[i].param);

int imgu_css_pool_init(struct imgu_device *imgu, struct imgu_css_pool *pool,
        pool->entry[i].valid = false;
        pool->entry[i].param.vaddr = NULL;
        if (!imgu_dmamap_alloc(imgu, &pool->entry[i].param, size))
        pool->last = IPU3_CSS_POOL_SIZE;
        imgu_css_pool_cleanup(imgu, pool);

/* Allocate a new parameter via recycling the oldest entry in the pool. */
[all …]
/linux/include/trace/events/
page_pool.h

TP_PROTO(const struct page_pool *pool,
TP_ARGS(pool, inflight, hold, release),
        __field(const struct page_pool *, pool)
        __entry->pool = pool;
        __entry->cnt = pool->destroy_cnt;
        __entry->pool, __entry->inflight, __entry->hold,

TP_PROTO(const struct page_pool *pool,
TP_ARGS(pool, netmem, release),
        __field(const struct page_pool *, pool)
        __entry->pool = pool;
[all …]
/linux/drivers/gpu/drm/amd/display/dc/resource/dcn301/
dcn301_resource.c

#define TO_DCN301_RES_POOL(pool)\
        container_of(pool, struct dcn301_resource_pool, base)

static void dcn301_destruct(struct dcn301_resource_pool *pool)
        for (i = 0; i < pool->base.stream_enc_count; i++) {
                if (pool->base.stream_enc[i] != NULL) {
                        if (pool->base.stream_enc[i]->vpg != NULL) {
                                kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
                                pool->base.stream_enc[i]->vpg = NULL;
                        }
                        if (pool->base.stream_enc[i]->afmt != NULL) {
                                kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
[all …]
