/linux/net/xdp/xsk_buff_pool.c
    13: void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
    20: spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
    21: list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
    22: spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
    25: void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
    32: spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
    34: spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
    37: void xp_destroy(struct xsk_buff_pool *pool)
    39: if (!pool)
    42: kvfree(pool->tx_descs);
    [all …]
/linux/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
    2: /* TI K3 CPPI5 descriptors pool API
    15: #include "k3-cppi-desc-pool.h"
    28: void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
    30: if (!pool)
    33: WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
    35: gen_pool_size(pool->gen_pool),
    36: gen_pool_avail(pool->gen_pool));
    37: if (pool->cpumem)
    38: dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
    39: pool->dma_addr);
    [all …]
/linux/net/core/page_pool.c
    46: #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
    48: #define recycle_stat_inc(pool, __stat) \
    50: struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
    54: #define recycle_stat_add(pool, __stat, val) \
    56: struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
    75: * page_pool_get_stats() - fetch page pool stats
    76: * @pool: pool from which page was allocated
    85: bool page_pool_get_stats(const struct page_pool *pool,
    94: stats->alloc_stats.fast += pool->alloc_stats.fast;
    95: stats->alloc_stats.slow += pool->alloc_stats.slow;
    [all …]
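The hits above land in the page_pool statistics code: the alloc/recycle stat macros and page_pool_get_stats(). As a rough illustration of how a driver might consume that API, here is a minimal sketch, assuming CONFIG_PAGE_POOL_STATS is enabled; the function name and the choice of printed counters are mine, not taken from this file:

#include <linux/printk.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

/* Hypothetical helper: dump a few counters for a driver's RX pool.
 * page_pool_get_stats() is only available with CONFIG_PAGE_POOL_STATS=y;
 * it adds the pool's counters into the caller-provided structure, so the
 * same 'stats' object can be passed for several pools to sum them up. */
static void my_drv_report_pool_stats(struct page_pool *pool)
{
        struct page_pool_stats stats = {};

        if (!page_pool_get_stats(pool, &stats))
                return;

        pr_info("rx pool: alloc fast=%llu slow=%llu, recycle cached=%llu\n",
                stats.alloc_stats.fast, stats.alloc_stats.slow,
                stats.recycle_stats.cached);
}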
/linux/net/core/page_pool_user.c
    18: /* Protects: page_pools, netdevice->page_pools, pool->p.napi, pool->slow.netdev,
    19: * pool->user.
    25: * linked to a netdev at creation time. Following page pool "visibility"
    32: * to error, or (c) the entire namespace which owned this pool disappeared
    36: typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
    42: struct page_pool *pool;
    47: pool = xa_load(&page_pools, id);
    48: if (!pool || hlist_unhashed(&pool->user.list) ||
    49: !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) {
    60: err = fill(rsp, pool, info);
    [all …]
/linux/mm/mempool.c
    5: * memory buffer pool support. Such pools are mostly used
    24: static void poison_error(mempool_t *pool, void *element, size_t size,
    27: const int nr = pool->curr_nr;
    33: pr_err("Mempool %p size %zu\n", pool, size);
    41: static void __check_element(mempool_t *pool, void *element, size_t size)
    50: poison_error(pool, element, size, i);
    57: static void check_element(mempool_t *pool, void *element)
    64: if (pool->free == mempool_kfree) {
    65: __check_element(pool, element, (size_t)pool
    86: poison_element(mempool_t *pool, void *element)
    107: check_element(mempool_t *pool, void *element)
    110: poison_element(mempool_t *pool, void *element)
    115: kasan_poison_element(mempool_t *pool, void *element)
    125: kasan_unpoison_element(mempool_t *pool, void *element)
    137: add_element(mempool_t *pool, void *element)
    145: remove_element(mempool_t *pool)
    166: mempool_exit(mempool_t *pool)
    185: mempool_destroy(mempool_t *pool)
    195: mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int node_id)
    243: mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data)
    280: mempool_t *pool;
    314: mempool_resize(mempool_t *pool, int new_min_nr)
    390: mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
    472: mempool_alloc_preallocated(mempool_t *pool)
    504: mempool_free(void *element, mempool_t *pool)
    [all …]
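The definitions listed above (mempool_init(), mempool_alloc(), mempool_free(), mempool_destroy(), ...) make up the mempool API. A minimal usage sketch, assuming a driver-private slab cache; the structure and function names here are illustrative only:

#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/slab.h>

/* Illustrative: guarantee that at least 16 'struct my_io' objects can
 * always be allocated, even under memory pressure, by backing a mempool
 * with a slab cache via the stock mempool_create_slab_pool() helper. */
struct my_io {
        int id;
};

static struct kmem_cache *my_io_cache;
static mempool_t *my_io_pool;

static int my_io_pool_setup(void)
{
        my_io_cache = KMEM_CACHE(my_io, 0);
        if (!my_io_cache)
                return -ENOMEM;

        my_io_pool = mempool_create_slab_pool(16, my_io_cache);
        if (!my_io_pool) {
                kmem_cache_destroy(my_io_cache);
                return -ENOMEM;
        }
        return 0;
}

static void my_io_pool_teardown(void)
{
        mempool_destroy(my_io_pool);            /* NULL-safe */
        kmem_cache_destroy(my_io_cache);
}

/* With a sleeping gfp mask, mempool_alloc() falls back to the preallocated
 * reserve and waits for an element to be freed rather than failing. */
static struct my_io *my_io_get(void)
{
        return mempool_alloc(my_io_pool, GFP_NOIO);
}

static void my_io_put(struct my_io *io)
{
        mempool_free(io, my_io_pool);
}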
/linux/mm/dmapool.c
    3: * DMA Pool allocator
    14: * The current design of this allocator is fairly simple. The pool is
    48: struct dma_pool { /* the pool */
    75: struct dma_pool *pool;
    81: list_for_each_entry(pool, &dev->dma_pools, pools) {
    82: /* per-pool info, no real statistics yet */
    84: pool->name, pool->nr_active,
    85: pool->nr_blocks, pool
    95: pool_check_block(struct dma_pool *pool, struct dma_block *block, gfp_t mem_flags)
    120: pool_find_page(struct dma_pool *pool, dma_addr_t dma)
    133: pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
    159: pool_init_page(struct dma_pool *pool, struct dma_page *page)
    164: pool_check_block(struct dma_pool *pool, struct dma_block *block, gfp_t mem_flags)
    169: pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
    176: pool_init_page(struct dma_pool *pool, struct dma_page *page)
    181: pool_block_pop(struct dma_pool *pool)
    192: pool_block_push(struct dma_pool *pool, struct dma_block *block, dma_addr_t dma)
    300: pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
    334: pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
    360: dma_pool_destroy(struct dma_pool *pool)
    404: dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
    450: dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
    469: struct dma_pool *pool = *(struct dma_pool **)res;
    496: struct dma_pool **ptr, *pool;
    518: dmam_pool_destroy(struct dma_pool *pool)
    [all …]
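dma_pool_create(), dma_pool_alloc(), dma_pool_free() and dma_pool_destroy() from the list above are the public entry points; the pool_* helpers are internal. A short sketch of the public API, assuming a device that needs many small, fixed-size coherent descriptors (all names and sizes here are illustrative):

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative: carve 64-byte, 64-byte-aligned DMA descriptors out of one
 * dma_pool instead of calling dma_alloc_coherent() per descriptor. */
static int my_desc_demo(struct device *dev)
{
        struct dma_pool *pool;
        dma_addr_t dma;
        void *desc;

        pool = dma_pool_create("my-descs", dev, 64, 64, 0);
        if (!pool)
                return -ENOMEM;

        desc = dma_pool_zalloc(pool, GFP_KERNEL, &dma);
        if (!desc) {
                dma_pool_destroy(pool);
                return -ENOMEM;
        }

        /* ... program 'dma' into the hardware ring, do I/O ... */

        dma_pool_free(pool, desc, dma);
        dma_pool_destroy(pool);         /* all blocks must be freed first */
        return 0;
}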
/linux/drivers/md/dm-thin.c
    42: * The block size of the device holding pool data must be
    194: * A pool device ties together a metadata device and a data device. It
    201: * The pool runs in various modes. Ordered in degraded order for comparisons.
    232: struct pool {
    234: struct dm_target *ti; /* Only set if a pool target is bound */
    290: static void metadata_operation_failed(struct pool *pool, const char *op, int r);
    292: static enum pool_mode get_pool_mode(struct pool *pool)
    294: return pool->pf.mode;
    297: static void notify_of_pool_mode_change(struct pool *pool)
    307: enum pool_mode mode = get_pool_mode(pool);
    [all …]
/linux/lib/genalloc.c
    16: * available. If new memory is added to the pool a lock has to be
    146: * gen_pool_create - create a new special memory pool
    148: * @nid: node id of the node the pool structure should be allocated on, or -1
    150: * Create a new special memory pool that can be used to manage special purpose
    155: struct gen_pool *pool;
    157: pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
    158: if (pool != NULL) {
    159: spin_lock_init(&pool->lock);
    160: INIT_LIST_HEAD(&pool->chunks);
    161: pool->min_alloc_order = min_alloc_order;
    [all …]
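gen_pool_create() above allocates the pool descriptor; memory is then handed to it with gen_pool_add()/gen_pool_add_virt() and carved up with gen_pool_alloc()/gen_pool_free(). A small sketch, assuming a chunk of on-chip SRAM that is already mapped; the function and parameter names are placeholders:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/genalloc.h>

/* Illustrative: manage an already-mapped SRAM region with gen_pool,
 * handing out 32-byte-aligned blocks (min_alloc_order = 5). */
static int my_sram_pool_demo(struct device *dev, void *sram_virt,
                             phys_addr_t sram_phys, size_t sram_size)
{
        struct gen_pool *pool;
        unsigned long vaddr;

        pool = gen_pool_create(5 /* ilog2(32) */, dev_to_node(dev));
        if (!pool)
                return -ENOMEM;

        if (gen_pool_add_virt(pool, (unsigned long)sram_virt, sram_phys,
                              sram_size, dev_to_node(dev))) {
                gen_pool_destroy(pool);
                return -ENOMEM;
        }

        vaddr = gen_pool_alloc(pool, 256);
        if (vaddr) {
                /* gen_pool_virt_to_phys() would give the bus address back */
                gen_pool_free(pool, vaddr, 256);
        }

        gen_pool_destroy(pool);         /* outstanding allocations are a bug */
        return 0;
}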
/linux/lib/objpool.c
    19: objpool_init_percpu_slot(struct objpool_head *pool,
    24: void *obj = (void *)&slot->entries[pool->capacity];
    28: slot->mask = pool->capacity - 1;
    37: obj = obj + pool->obj_size;
    40: pool->nr_objs++;
    48: objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
    63: nodes = nr_objs / pool->nr_possible_cpus;
    64: if (cpu_count < (nr_objs % pool->nr_possible_cpus))
    68: size = struct_size(slot, entries, pool->capacity) +
    69: pool->obj_size * nodes;
    [all …]
/linux/include/net/xdp_sock_drv.h
    23: void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
    24: bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
    25: u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
    26: void xsk_tx_release(struct xsk_buff_pool *pool);
    29: void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
    30: void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
    31: void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
    32: void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
    33: bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
    35: static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
    [all …]
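These prototypes are the driver-facing side of AF_XDP zero-copy transmit. A rough sketch of the peek/release/completed pattern a netdev driver follows, using the functions declared above plus two DMA helpers assumed to come from the same header (xsk_buff_raw_get_dma() and xsk_buff_raw_dma_sync_for_device()); the hardware-ring step is a placeholder:

#include <net/xdp_sock_drv.h>

/* Rough sketch: drain up to 'budget' descriptors from the XSK TX ring,
 * hand them to hardware, and later report completions to user space. */
static void my_drv_xsk_tx(struct xsk_buff_pool *pool, unsigned int budget)
{
        struct xdp_desc desc;
        unsigned int sent = 0;

        while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
                dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);

                xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
                /* ... post 'dma' / 'desc.len' to the hardware TX ring ... */
                sent++;
        }

        if (sent) {
                /* Tell the pool the peeked descriptors have been consumed. */
                xsk_tx_release(pool);
        }

        /* From the TX completion handler, once the hardware is done:
         *      xsk_tx_completed(pool, completed);
         */
}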
/linux/include/net/xsk_buff_pool.h
    30: struct xsk_buff_pool *pool;
    66: /* For performance reasons, each buff pool has its own array of dma_pages
    106: int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
    108: int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
    110: int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
    111: void xp_destroy(struct xsk_buff_pool *pool);
    112: void xp_get_pool(struct xsk_buff_pool *pool);
    113: bool xp_put_pool(struct xsk_buff_pool *pool);
    114: void xp_clear_dev(struct xsk_buff_pool *pool);
    115: void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
    [all …]
/linux/net/ceph/msgpool.c
    14: struct ceph_msgpool *pool = arg;
    17: msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items,
    20: dout("msgpool_alloc %s failed\n", pool->name);
    22: dout("msgpool_alloc %s %p\n", pool->name, msg);
    23: msg->pool = pool;
    30: struct ceph_msgpool *pool = arg;
    33: dout("msgpool_release %s %p\n", pool->name, msg);
    34: msg->pool = NULL;
    38: int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
    43: pool->type = type;
    [all …]
/linux/kernel/cgroup/dmem.c
    106: static void free_cg_pool(struct dmem_cgroup_pool_state *pool)
    108: list_del(&pool->region_node);
    109: kfree(pool);
    113: set_resource_min(struct dmem_cgroup_pool_state *pool, u64 val)
    115: page_counter_set_min(&pool->cnt, val);
    119: set_resource_low(struct dmem_cgroup_pool_state *pool, u64 val)
    121: page_counter_set_low(&pool->cnt, val);
    125: set_resource_max(struct dmem_cgroup_pool_state *pool, u64 val)
    127: page_counter_set_max(&pool->cnt, val);
    130: static u64 get_resource_low(struct dmem_cgroup_pool_state *pool)
    [all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
    13: * (for example, TLS) after last revalidation in a pool or a bulk.
    19: #define MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) MLX5_CRYPTO_DEK_CALC_FREED(pool)
    33: int num_deks; /* the total number of keys in this pool */
    34: int avail_deks; /* the number of available keys in this pool */
    35: int in_use_deks; /* the number of being used keys in this pool */
    288: mlx5_crypto_dek_bulk_create(struct mlx5_crypto_dek_pool *pool)
    290: struct mlx5_crypto_dek_priv *dek_priv = pool->mdev->mlx5e_res.dek_priv;
    291: struct mlx5_core_dev *mdev = pool->mdev;
    313: err = mlx5_crypto_create_dek_bulk(mdev, pool->key_purpose,
    334: mlx5_crypto_dek_pool_add_bulk(struct mlx5_crypto_dek_pool *pool)
    [all …]
/linux/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer_configuration.py
    16: objects, pool, tcbind and portpool. Provide an interface to get random
    18: 1. Pool:
    22: - random pool number
    30: for pool in pools:
    31: self._pools.append(pool)
    47: def _get_th(self, pool):
    50: if pool["thtype"] == "dynamic":
    58: for pool in self._pools:
    59: if pool["type"] == "ingress":
    60: ing_pools.append(pool)
    [all …]
/linux/drivers/staging/octeon/ethernet-mem.c
    17: * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
    18: * @pool: Pool to allocate an skbuff for
    19: * @size: Size of the buffer needed for the pool
    24: static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
    35: cvmx_fpa_free(skb->data, pool, size / 128);
    42: * cvm_oct_free_hw_skbuff- free hardware pool skbuffs
    43: * @pool: Pool to allocate an skbuff for
    44: * @size: Size of the buffer needed for the pool
    47: static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
    52: memory = cvmx_fpa_alloc(pool);
    [all …]
/linux/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
    79: struct ttm_pool *pool;
    86: pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
    87: KUNIT_ASSERT_NOT_NULL(test, pool);
    89: ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
    91: err = ttm_pool_alloc(pool, tt, &simple_ctx);
    94: ttm_pool_free(pool, tt);
    97: return pool;
    140: struct ttm_pool *pool;
    150: pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
    151: KUNIT_ASSERT_NOT_NULL(test, pool);
    [all …]
/linux/include/linux/genalloc.h
    16: * available. If new memory is added to the pool a lock has to be
    46: * @pool: the pool being allocated from
    52: void *data, struct gen_pool *pool,
    56: * General purpose special memory pool descriptor.
    60: struct list_head chunks; /* list of chunks in this pool */
    70: * General purpose special memory pool chunk descriptor.
    73: struct list_head next_chunk; /* next chunk in pool */
    97: extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
    101: static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr,
    104: return gen_pool_add_owner(pool, addr, phys, size, nid, NULL);
    [all …]
/linux/include/net/page_pool/helpers.h
    24: * allocated from page pool. There is no cache line dirtying for 'struct page'
    25: * when a page is recycled back to the page pool.
    29: * page allocated from page pool. Page splitting enables memory saving and thus
    48: * the same page when a page is split. The API user must setup pool->p.max_len
    49: * and pool->p.offset correctly and ensure that page_pool_put_page() is called
    67: bool page_pool_get_stats(const struct page_pool *pool,
    88: * @pool: pool from which to allocate
    92: static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
    96: return page_pool_alloc_pages(pool, gf
    109: page_pool_dev_alloc_frag(struct page_pool *pool, unsigned int *offset, unsigned int size)
    118: page_pool_alloc_netmem(struct page_pool *pool, unsigned int *offset, unsigned int *size, gfp_t gfp)
    147: page_pool_dev_alloc_netmem(struct page_pool *pool, unsigned int *offset, unsigned int *size)
    156: page_pool_alloc(struct page_pool *pool, unsigned int *offset, unsigned int *size, gfp_t gfp)
    175: page_pool_dev_alloc(struct page_pool *pool, unsigned int *offset, unsigned int *size)
    184: page_pool_alloc_va(struct page_pool *pool, unsigned int *size, gfp_t gfp)
    209: page_pool_dev_alloc_va(struct page_pool *pool, unsigned int *size)
    225: page_pool_get_dma_dir(const struct page_pool *pool)
    324: page_pool_put_netmem(struct page_pool *pool, netmem_ref netmem, unsigned int dma_sync_size, bool allow_direct)
    353: page_pool_put_page(struct page_pool *pool, struct page *page, unsigned int dma_sync_size, bool allow_direct)
    362: page_pool_put_full_netmem(struct page_pool *pool, netmem_ref netmem, bool allow_direct)
    378: page_pool_put_full_page(struct page_pool *pool, struct page *page, bool allow_direct)
    392: page_pool_recycle_direct(struct page_pool *pool, struct page *page)
    409: page_pool_free_va(struct page_pool *pool, void *va, bool allow_direct)
    442: __page_pool_dma_sync_for_cpu(const struct page_pool *pool, const dma_addr_t dma_addr, u32 offset, u32 dma_sync_size)
    463: page_pool_dma_sync_for_cpu(const struct page_pool *pool, const struct page *page, u32 offset, u32 dma_sync_size)
    472: page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool, const netmem_ref netmem, u32 offset, u32 dma_sync_size)
    484: page_pool_put(struct page_pool *pool)
    489: page_pool_nid_changed(struct page_pool *pool, int new_nid)
    [all …]
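These helpers are the driver-facing allocation and recycling API of the page pool. A minimal sketch of creating a pool and cycling one page through it; the parameter values and the surrounding function are illustrative, not taken from the header:

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/numa.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

/* Illustrative: a small RX pool that also handles DMA mapping for us
 * (PP_FLAG_DMA_MAP), followed by one allocate/recycle round trip. */
static int my_rx_pool_demo(struct device *dev, struct napi_struct *napi)
{
        struct page_pool_params pp = {
                .flags          = PP_FLAG_DMA_MAP,
                .order          = 0,
                .pool_size      = 256,
                .nid            = NUMA_NO_NODE,
                .dev            = dev,
                .napi           = napi,
                .dma_dir        = DMA_FROM_DEVICE,
        };
        struct page_pool *pool;
        struct page *page;

        pool = page_pool_create(&pp);
        if (IS_ERR(pool))
                return PTR_ERR(pool);

        page = page_pool_dev_alloc_pages(pool);
        if (page) {
                dma_addr_t dma = page_pool_get_dma_addr(page);

                (void)dma;      /* ... give to hardware, receive, etc. ... */
                page_pool_put_full_page(pool, page, false);
        }

        page_pool_destroy(pool);
        return 0;
}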
/linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
    24: spinlock_t counter_pool_lock; /* Protects counter pool allocations */
    54: struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
    62: for (i = 0; i < pool->sub_pools_count; i++) {
    63: sub_pool = &pool->sub_pools[i];
    89: sub_pool = &pool->sub_pools[i];
    99: struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
    104: for (i = 0; i < pool->sub_pools_count; i++) {
    105: sub_pool = &pool->sub_pools[i];
    115: const struct mlxsw_sp_counter_pool *pool = priv;
    117: return atomic_read(&pool->active_entries_count);
    [all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
    8: static void cpu_put(struct mlx5_irq_pool *pool, int cpu)
    10: pool->irqs_per_cpu[cpu]--;
    13: static void cpu_get(struct mlx5_irq_pool *pool, int cpu)
    15: pool->irqs_per_cpu[cpu]++;
    19: static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
    27: if (!pool->irqs_per_cpu[cpu]) {
    33: if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu])
    38: mlx5_core_err(pool->dev, "NO online CPUs in req_mask (%*pbl)\n",
    42: pool->irqs_per_cpu[best_cpu]++;
    48: irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
    [all …]
/linux/arch/mips/include/asm/octeon/cvmx-fpa.h
    31: * Interface to the hardware Free Pool Allocator.
    79: * Structure describing the current state of a FPA pool.
    88: /* The number of elements in the pool at creation */
    101: * Return the name of the pool
    103: * @pool: Pool to get the name of
    106: static inline const char *cvmx_fpa_get_name(uint64_t pool)
    108: return cvmx_fpa_pool_info[pool].name;
    112: * Return the base of the pool
    114: * @pool: Pool to get the base of
    117: static inline void *cvmx_fpa_get_base(uint64_t pool)
    [all …]
/linux/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
    14: bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
    24: if (n >= ARRAY_SIZE(pool->cache_list))
    25: n = ARRAY_SIZE(pool->cache_list) - 1;
    27: return &pool->cache_list[n];
    37: static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
    44: for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
    45: struct list_head *list = &pool->cache_list[n];
    50: if (spin_trylock_irq(&pool->lock)) {
    73: spin_unlock_irq(&pool->lock);
    89: struct intel_gt_buffer_pool *pool =
    [all …]
/linux/drivers/md/dm-vdo/vio.c
    23: /* The number of objects managed by the pool */
    27: /* The queue of requestors waiting for objects from the pool */
    33: /* The ID of the thread on which this pool may be used */
    35: /* The buffer backing the pool's vios */
    37: /* The pool entries */
    129: * Metadata vios should use direct allocation and not use the buffer pool, which is
    309: * make_vio_pool() - Create a new vio pool.
    311: * @pool_size: The number of vios in the pool.
    313: * @thread_id: The ID of the thread using this pool.
    314: * @vio_type: The type of vios in the pool.
    [all …]
/linux/drivers/firmware/qcom/qcom_tzmem.c
    151: static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool,
    175: ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr,
    183: scoped_guard(spinlock_irqsave, &pool->lock)
    184: list_add_tail(&area->list, &pool->areas);
    191: * qcom_tzmem_pool_new() - Create a new TZ memory pool.
    192: * @config: Pool configuration.
    194: * Create a new pool of memory suitable for sharing with the TrustZone.
    198: * Return: New memory pool address or ERR_PTR() on error.
    222: struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool),
    224: if (!pool)
    [all …]