/linux/net/core/
page_pool.c: per-pool statistics. alloc_stat_inc() bumps counters in pool->alloc_stats, recycle_stat_inc() and recycle_stat_add() update the per-CPU pool->recycle_stats, and page_pool_get_stats() accumulates the pool's allocation counters (fast, slow, slow_high_order, empty, ...) into a caller-supplied buffer.
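For orientation, a minimal sketch of how a driver could read these counters, assuming CONFIG_PAGE_POOL_STATS=y and a pool the driver already owns; the function name and log text are illustrative:

#include <linux/printk.h>
#include <net/page_pool/helpers.h>

/* Hypothetical debug helper: dump a pool's allocation/recycle counters. */
static void drv_dump_pp_stats(const struct page_pool *pool)
{
	struct page_pool_stats stats = {};

	/* page_pool_get_stats() adds into the buffer, so zero it first. */
	if (!page_pool_get_stats(pool, &stats))
		return;

	pr_info("pp alloc: fast=%llu slow=%llu empty=%llu\n",
		stats.alloc_stats.fast, stats.alloc_stats.slow,
		stats.alloc_stats.empty);
	pr_info("pp recycle: cached=%llu ring=%llu\n",
		stats.recycle_stats.cached, stats.recycle_stats.ring);
}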
page_pool_user.c: netlink introspection of page pools. A pp_nl_fill_cb callback fills a response message for a given pool; netdev_nl_page_pool_get_do() looks the pool up with xa_load(&page_pools, id), rejects pools that are unhashed from their user list or belong to a different network namespace, and then invokes the fill callback; netdev_nl_page_pool_get_dump() walks each netdev's page_pools list, resuming from state->pp_id.
/linux/net/xdp/
xsk_buff_pool.c: the AF_XDP buffer pool. xp_add_xsk() and xp_del_xsk() add and remove sockets on pool->xsk_tx_list under xsk_tx_list_lock, xp_destroy() and xp_create_and_assign_umem() manage pool lifetime, xp_assign_dev(), xp_assign_dev_shared() and xp_clear_dev() bind the pool to a device queue (with xp_disable_drv_zc() to back out of driver zero-copy), xp_dma_map() and xp_dma_unmap() handle DMA mappings, and xp_alloc(), xp_alloc_batch(), xp_alloc_slow(), xp_can_alloc(), xp_raw_get_data() and xp_raw_get_dma() service buffer allocation and address translation.
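As a rough illustration, a zero-copy driver that has already bound a pool with xp_assign_dev() typically allocates RX buffers through the wrappers in <net/xdp_sock_drv.h>; this sketch assumes such a driver, and drv_refill_rx_ring() is a hypothetical name:

#include <linux/printk.h>
#include <net/xdp_sock_drv.h>

/* Hypothetical refill loop for a zero-copy AF_XDP RX ring. */
static int drv_refill_rx_ring(struct xsk_buff_pool *pool, int budget)
{
	int filled = 0;

	while (filled < budget) {
		struct xdp_buff *xdp = xsk_buff_alloc(pool);
		dma_addr_t dma;

		if (!xdp)	/* fill queue is empty, try again later */
			break;

		dma = xsk_buff_xdp_get_dma(xdp);
		/* A real driver would write 'dma' into an RX descriptor. */
		pr_debug("posted xsk buffer, dma=%pad\n", &dma);
		filled++;
	}

	return filled;
}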
/linux/mm/
mempool.c: memory buffer pool support; a mempool keeps a minimum number of preallocated elements so that allocations can always make forward progress. The file contains the debug poisoning helpers (poison_error(), check_element(), poison_element() and the KASAN hooks), the add_element()/remove_element() free-element stack, and the public API: mempool_init(), mempool_init_node(), mempool_create_node(), mempool_resize(), mempool_alloc(), mempool_alloc_preallocated(), mempool_free(), mempool_exit() and mempool_destroy().
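A minimal usage sketch of this API, backing the pool with a slab cache; the cache, element type and function names are illustrative:

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/slab.h>

struct my_io_req {
	int tag;
};

static struct kmem_cache *my_req_cache;
static mempool_t *my_req_pool;

static int __init my_pool_setup(void)
{
	my_req_cache = KMEM_CACHE(my_io_req, 0);
	if (!my_req_cache)
		return -ENOMEM;

	/* Keep at least four requests preallocated for forward progress. */
	my_req_pool = mempool_create_slab_pool(4, my_req_cache);
	if (!my_req_pool) {
		kmem_cache_destroy(my_req_cache);
		return -ENOMEM;
	}

	return 0;
}

static void my_submit_one(void)
{
	/* With a reclaiming gfp mask this waits instead of returning NULL. */
	struct my_io_req *req = mempool_alloc(my_req_pool, GFP_NOIO);

	req->tag = 0;
	/* ... hand the request to the hardware, then on completion: ... */
	mempool_free(req, my_req_pool);
}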
dmapool.c: DMA-coherent block pools. pools_show() lists every pool on dev->dma_pools with its name, nr_active, nr_blocks, size and nr_pages; pool_check_block() scans a block past sizeof(struct dma_block) for corrupted poison bytes, reports offenders through dev_err() and dumps the block contents.
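For orientation, a minimal sketch of the dma_pool API for fixed-size hardware descriptors; the device pointer, sizes and names are illustrative:

#include <linux/dmapool.h>
#include <linux/gfp.h>

/* Hypothetical setup: 64-byte, 64-byte-aligned DMA descriptors. */
static int my_setup_descs(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *desc;

	pool = dma_pool_create("my-descs", dev, 64, 64, 0);
	if (!pool)
		return -ENOMEM;

	desc = dma_pool_zalloc(pool, GFP_KERNEL, &dma);
	if (!desc) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand 'dma' to the device, access 'desc' from the CPU ... */

	dma_pool_free(pool, desc, dma);
	dma_pool_destroy(pool);
	return 0;
}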
zbud.c: the zbud compressed-page allocator, which stores up to two compressed objects ("buddies") per page. An NCHUNKS_ORDER of 6 yields 63 freelists per pool; struct zbud_pool holds a lock protecting all pool fields (and the first/last chunk fields of any zbud page in the pool), the unbuddied/buddied lists and pages_nr, the number of zbud pages in the pool, and is allocated at pool creation time. The API consists of zbud_create_pool(), zbud_destroy_pool(), zbud_alloc(), zbud_free(), zbud_map(), zbud_unmap() and zbud_get_pool_size(), plus the zbud_zpool_*() callbacks that expose the allocator through the zpool interface.
/linux/drivers/net/ethernet/ti/
k3-cppi-desc-pool.c: CPPI descriptor pools for TI K3 Ethernet. k3_cppi_desc_pool_destroy() warns when gen_pool_size() and gen_pool_avail() disagree (descriptors are still outstanding), frees the coherent descriptor memory with dma_free_coherent(), kfree()s pool->desc_infos and finally calls gen_pool_destroy(), which also frees pool->name.
/linux/net/ceph/
msgpool.c: Ceph message pools. msgpool_alloc() creates a message with ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items, ...), logs failures with dout() and points msg->pool back at the pool; msgpool_free() clears msg->pool before the message is released; ceph_msgpool_init() records the message type and sizing parameters for the pool.
/linux/sound/core/seq/
seq_memory.c: the ALSA sequencer event pool. snd_seq_pool_available() returns total_elements minus the atomic in-use counter, snd_seq_output_ok() checks that at least pool->room elements remain, free_cell() pushes a cell back onto pool->free and decrements the counter, and snd_seq_cell_free() finds the owning pool through cell->pool.
/linux/lib/
objpool.c: the per-CPU object pool. objpool_init_percpu_slot() places objects right after slot->entries[pool->capacity], sets slot->mask = pool->capacity - 1 and advances by pool->obj_size while counting pool->nr_objs; objpool_init_percpu_slots() spreads nr_objs across the possible CPUs and sizes each slot as struct_size(slot, entries, pool->capacity) plus room for that CPU's objects.
genalloc.c: the general-purpose special-memory pool allocator. gen_pool_create() allocates a struct gen_pool on the requested node, initialises its spinlock and chunk list, records min_alloc_order and defaults the allocation algorithm to gen_pool_first_fit with no private data and no name.
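A minimal usage sketch of this allocator, carving allocations out of a kmalloc'ed region purely for illustration; the function name and sizes are made up:

#include <linux/genalloc.h>
#include <linux/slab.h>

static int my_carve_out(void)
{
	struct gen_pool *pool;
	unsigned long chunk;
	void *region;

	/* 32-byte allocation granularity (order 5), any NUMA node. */
	pool = gen_pool_create(5, -1);
	if (!pool)
		return -ENOMEM;

	region = kmalloc(4096, GFP_KERNEL);
	if (!region)
		goto err_pool;

	if (gen_pool_add(pool, (unsigned long)region, 4096, -1))
		goto err_region;

	chunk = gen_pool_alloc(pool, 128);
	if (chunk)
		gen_pool_free(pool, chunk, 128);

	/* All allocations must be returned before the pool is destroyed. */
	gen_pool_destroy(pool);
	kfree(region);
	return 0;

err_region:
	kfree(region);
err_pool:
	gen_pool_destroy(pool);
	return -ENOMEM;
}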
/linux/drivers/gpu/drm/panthor/
panthor_heap.c: Panthor GPU heap pools. panthor_get_heap_ctx_offset() multiplies the per-device heap-context stride by the heap id, and panthor_get_heap_ctx() adds that offset to pool->gpu_contexts->kmap; panthor_heap_destroy_locked() erases a heap from pool->xa and frees its chunks on the pool's VM, while panthor_heap_destroy() wraps it with down_write(&pool->lock).
/linux/drivers/net/ethernet/mellanox/mlx5/core/
irq_affinity.c: mlx5 IRQ-pool affinity accounting. cpu_put() and cpu_get() decrement and increment pool->irqs_per_cpu[cpu]; cpu_get_least_loaded() picks the online CPU in the requested mask with the fewest IRQs (preferring completely unused CPUs), reports an error if the mask contains no online CPUs, and charges the chosen CPU; irq_pool_request_irq() requests an IRQ from the pool for a given affinity descriptor.
/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/
crypto.c: mlx5 DEK (data encryption key) pools. MLX5_CRYPTO_DEK_POOL_CALC_FREED() forwards to the per-DEK freed calculation; mlx5_crypto_dek_bulk_create() allocates a bulk of keys for the pool's key purpose via mlx5_crypto_create_dek_bulk(); mlx5_crypto_dek_pool_add_bulk() creates such a bulk, adds its keys to pool->avail_deks and pool->num_deks and queues it on the pool's partial_list.
/linux/arch/arm64/kvm/hyp/nvhe/
page_alloc.c: the pKVM hypervisor buddy allocator. __find_buddy_nocheck() computes a page's buddy and rejects addresses outside [pool->range_start, pool->range_end); __find_buddy_avail() additionally requires the buddy to be free; __hyp_attach_page() coalesces a freed page with its available buddies up to pool->max_order and places the result on pool->free_area[order]; __hyp_extract_page() pulls a page back out for allocation.
/linux/include/net/page_pool/
helpers.h: the page_pool consumer API. It declares page_pool_get_stats(); page_pool_dev_alloc_pages() and page_pool_dev_alloc_frag() wrap page_pool_alloc_pages() and page_pool_alloc_frag() with a default gfp mask; page_pool_alloc() chooses between a full page and a fragment by comparing the requested size against PAGE_SIZE << pool->p.order and checking whether pool->frag_offset plus the size would overflow the page.
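For orientation, a minimal sketch of these helpers from a driver's RX path; the page_pool_params shown are a bare minimum and the names are illustrative (real drivers also set dev, dma_dir, napi and so on):

#include <linux/numa.h>
#include <net/page_pool/helpers.h>

/* Hypothetical pool creation for a 256-entry RX ring of single pages. */
static struct page_pool *my_create_rx_pool(void)
{
	struct page_pool_params pp = {
		.order		= 0,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
	};

	return page_pool_create(&pp);	/* ERR_PTR() on failure */
}

static void my_rx_use_one(struct page_pool *pool)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return;

	/* ... post the page to the RX ring; when done with it: ... */
	page_pool_put_full_page(pool, page, false);
}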
/linux/drivers/net/ethernet/mellanox/mlxsw/
spectrum_cnt.c: the mlxsw Spectrum counter pool. mlxsw_sp_counter_sub_pools_init() and mlxsw_sp_counter_sub_pools_fini() walk the pool's sub_pools_count sub-pools to set them up and tear them down; mlxsw_sp_counter_pool_occ_get() reports occupancy as atomic_read(&pool->active_entries_count); mlxsw_sp_counter_pool_init() creates the pool itself.
/linux/drivers/gpu/drm/ttm/tests/
ttm_pool_test.c: KUnit tests for the TTM page pool. ttm_pool_pre_populated() allocates a pool with kunit_kzalloc(), calls ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false) and performs one ttm_pool_alloc()/ttm_pool_free() round trip so later tests start from a populated pool; ttm_pool_alloc_basic() exercises the allocation path.
/linux/kernel/
workqueue.c: the workqueue implementation's worker pools. Each struct worker records the worker_pool it is associated with; show_one_worker_pool() dumps a pool's state; the for_each_bh_worker_pool() and for_each_cpu_worker_pool() macros iterate the NR_STD_WORKER_POOLS per-CPU BH and per-CPU worker pools.
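These pools are internal; drivers reach them indirectly through the workqueue API, as in this minimal sketch (the queue and work names are illustrative):

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* Runs on a worker thread drawn from a per-CPU worker_pool. */
}

static DECLARE_WORK(my_work, my_work_fn);
static struct workqueue_struct *my_wq;

static int my_start(void)
{
	/* A WQ_UNBOUND queue would be served by the unbound pools instead. */
	my_wq = alloc_workqueue("my_wq", 0, 0);
	if (!my_wq)
		return -ENOMEM;

	queue_work(my_wq, &my_work);
	return 0;
}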
/linux/drivers/gpu/drm/i915/gt/
intel_gt_buffer_pool.c: the i915 GT buffer pool. bucket_for_size() maps an allocation size to one of the pool->cache_list buckets, clamping to the last bucket; pool_free_older_than() walks every bucket under pool->lock (taken with spin_trylock_irq()) and releases cached nodes older than the given age; pool_free_work() is the deferred worker that drives it.
/linux/include/linux/
genalloc.h: gen_pool API declarations. The allocation-algorithm callback receives the pool and its private data; gen_pool_virt_to_phys() translates a pool address to a physical address; gen_pool_add_virt() is an inline wrapper around gen_pool_add_owner(), gen_pool_add() calls it with the physical address set to -1 (unknown), and gen_pool_alloc_owner() and gen_pool_alloc_algo() forward to gen_pool_alloc_algo_owner() using the pool's configured algorithm and data.
/linux/drivers/media/platform/renesas/vsp1/
vsp1_dl.c: VSP1 display-list body pools. Display lists and bodies keep a pointer to their vsp1_dl_body_pool; vsp1_dl_body_pool_create() kzalloc()s the pool, records the owning vsp1 device, sizes the backing area as dlb_size * num_bodies and kcalloc()s the pool->bodies array, freeing the pool again if that allocation fails.
/linux/include/trace/events/
page_pool.h: page_pool tracepoints. One event takes the pool together with inflight, hold and release counts and records pool->destroy_cnt alongside them; another takes the pool, a netmem reference and its release count.
/linux/drivers/firmware/qcom/
qcom_tzmem.c: Qualcomm TrustZone memory pools. qcom_tzmem_pool_add_memory() registers an area's virtual address range with pool->genpool via gen_pool_add_virt() and links the area onto pool->areas under the pool spinlock; qcom_tzmem_pool_new() allocates the pool (with automatic cleanup on the error paths), creates a PAGE_SHIFT-order gen_pool, selects gen_pool_best_fit as its algorithm and stores the configured growth policy.
/linux/drivers/infiniband/sw/rxe/
rxe_pool.c: object pools for the RXE soft-RoCE driver. rxe_pool_init() zeroes the pool and fills in the owning rxe device, name, type, max_elem, elem_size (aligned to RXE_POOL_ALIGN), elem_offset and cleanup callback from the per-type info table, then resets the element counter to zero.