/linux/net/core/
  page_pool.c — the page pool allocator core. Hits cover the stat helpers (alloc_stat_inc, recycle_stat_inc, recycle_stat_add, page_pool_get_stats), pool setup and teardown (page_pool_init, page_pool_uninit, page_pool_create_percpu, __page_pool_destroy, page_pool_destroy, page_pool_scrub, page_pool_release, page_pool_release_retry), the allocation paths (page_pool_refill_alloc_cache, __page_pool_get_cached, __page_pool_alloc_page_order, __page_pool_alloc_netmems_slow, page_pool_alloc_netmems, page_pool_alloc_pages, page_pool_alloc_frag_netmem, page_pool_alloc_frag), DMA handling (page_pool_dma_map, page_pool_dma_sync_for_device, page_pool_register_dma_index, page_pool_release_dma_index, __page_pool_release_netmem_dma), and the recycle/return paths (__page_pool_put_page, page_pool_put_unrefed_netmem, page_pool_put_unrefed_page, page_pool_put_netmem_bulk, page_pool_recycle_in_ring, page_pool_recycle_in_cache, page_pool_recycle_ring_bulk, page_pool_return_netmem), plus page_pool_inflight, page_pool_napi_local, page_pool_use_xdp_mem, page_pool_enable_direct_recycling, page_pool_disable_direct_recycling, page_pool_update_nid and net_mp_niov_set_page_pool.
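
The listing above is the in-kernel implementation; a driver consumes it through a small API. A minimal sketch of that consumer side, assuming a recent kernel (the net/page_pool/ header split and the flag names below may differ on older trees); the sizing and headroom values are purely illustrative:

    #include <linux/dma-direction.h>
    #include <net/page_pool/helpers.h>
    #include <net/page_pool/types.h>

    /* Hypothetical per-RX-ring pool; values are not taken from any driver. */
    static struct page_pool *rx_ring_pool_create(struct device *dma_dev)
    {
            struct page_pool_params pp = {
                    .order     = 0,                   /* single pages */
                    .pool_size = 256,                 /* roughly the RX ring size */
                    .nid       = NUMA_NO_NODE,
                    .dev       = dma_dev,             /* device used for DMA mapping */
                    .dma_dir   = DMA_FROM_DEVICE,
                    .flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                    .offset    = 256,                 /* headroom before the frame */
                    .max_len   = PAGE_SIZE - 256,     /* bytes the device may write */
            };

            return page_pool_create(&pp);             /* ERR_PTR() on failure */
    }

    static void rx_ring_refill_one(struct page_pool *pool)
    {
            /* Hits the recycle cache/ring first, the page allocator last. */
            struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);

            if (!page)
                    return;

            /* Post page_pool_get_dma_addr(page) + offset to the RX ring; when
             * the frame is dropped or copied out, hand the page back with
             * page_pool_put_full_page(pool, page, allow_direct). */
    }

page_pool_destroy() is the matching teardown; the scrub/release/retry entries listed above are what it relies on to wait out pages still in flight.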

  page_pool_user.c — netlink introspection of page pools. netdev_nl_page_pool_get_do() looks a pool up by ID in the page_pools xarray and rejects pools that are unhashed from the user list or belong to a different netns; netdev_nl_page_pool_get_dump() walks each netdev's page_pools list (tracking state->pp_id for dump resumption) and fills responses through a pp_nl_fill_cb callback.

/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/
  pool.c — mlx5 hardware-steering (HWS) pools. hws_pool_free_one_resource() destroys the STE or STC firmware object depending on resource->pool->type; hws_pool_resource_free() clears pool->resource and, for FDB-type tables, the mirror resource as well; hws_pool_create_one_resource() creates a resource for a given log range.

/linux/net/xdp/
  xsk_buff_pool.c — AF_XDP buffer pool core. Hits cover socket list management (xp_add_xsk, xp_del_xsk under xsk_tx_list_lock), pool lifetime (xp_create_and_assign_umem, xp_destroy, xp_get_pool, xp_put_pool, xp_release_deferred, xp_alloc_tx_descs), device binding (xp_assign_dev, xp_assign_dev_shared, xp_clear_dev, xp_disable_drv_zc, xp_set_rxq_info, xp_fill_cb), DMA mapping (xp_find_dma_map, xp_dma_map, xp_dma_unmap, xp_init_dma_info), the allocation fast and slow paths (__xp_alloc, xp_alloc, xp_alloc_new_from_fq, xp_alloc_reused, xp_alloc_slow, xp_alloc_batch, xp_can_alloc), the aligned/unaligned address checks, and the raw address helpers (xp_raw_get_data, xp_raw_get_dma, xp_raw_get_ctx).

/linux/drivers/net/ethernet/ti/
  k3-cppi-desc-pool.c — TI K3 CPPI descriptor pools built on genalloc. k3_cppi_desc_pool_destroy() warns if gen_pool_size() != gen_pool_avail() (descriptors still in use), frees the coherent descriptor memory and desc_infos, and destroys the gen_pool (which also frees pool->name).

/linux/mm/
  mempool.c — the mempool reserve allocator. The matched lines are in the element-poisoning checks: poison_error() dumps the pool and the offending element, and __check_element()/check_element() verify the poison pattern, treating pool->pool_data either as a raw size (mempool_kfree backends) or as a kmem_cache queried via kmem_cache_size() (mempool_free_slab backends).
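
Those checks guard the element reserve that a mempool maintains. A minimal usage sketch; the request structure, reserve size and function names (my_request, MIN_RESERVED, req_pool_setup) are illustrative, not from the source:

    #include <linux/mempool.h>
    #include <linux/slab.h>

    struct my_request {
            int op;
    };

    #define MIN_RESERVED    16      /* elements kept in reserve */

    static struct kmem_cache *req_cache;
    static mempool_t *req_pool;

    static int req_pool_setup(void)
    {
            req_cache = KMEM_CACHE(my_request, 0);
            if (!req_cache)
                    return -ENOMEM;

            /* Falls back to the reserved elements only when the slab
             * allocation fails, so forward progress is guaranteed. */
            req_pool = mempool_create_slab_pool(MIN_RESERVED, req_cache);
            if (!req_pool) {
                    kmem_cache_destroy(req_cache);
                    return -ENOMEM;
            }
            return 0;
    }

    static void issue_request(void)
    {
            /* With a reclaim-capable gfp mask this may sleep but won't fail. */
            struct my_request *req = mempool_alloc(req_pool, GFP_NOIO);

            req->op = 0;
            /* ... submit; on completion: ... */
            mempool_free(req, req_pool);
    }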

  zswap.c — zswap's compressed-memory pools. zswap_pool_create() allocates the struct zswap_pool, creates the backing zsmalloc pool with zs_create_pool(), copies the compressor name into tfm_name, and allocates the per-CPU acomp_ctx, initializing one mutex per CPU.

  zsmalloc.c — the zsmalloc allocator. Hits include the zspage back-pointer to its zs_pool, the deferred-free and movable-page hooks (kick_deferred_free, init_deferred_free, SetZsPageMovable) with their no-op stubs, and create_cache(), which creates the per-pool "zs_handle-<name>" kmem_cache used for handle allocation.
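
A minimal sketch of the allocator's external API as zswap and zram consume it; the object size is illustrative, and the error-checking convention for zs_malloc() has varied across kernel versions (older kernels returned 0 on failure), so treat that detail as an assumption:

    #include <linux/err.h>
    #include <linux/gfp.h>
    #include <linux/zsmalloc.h>

    static int zsmalloc_demo(void)
    {
            struct zs_pool *pool = zs_create_pool("demo");
            unsigned long handle;

            if (!pool)
                    return -ENOMEM;

            /* Handles are opaque tokens, not pointers; the object is reached
             * through the pool's mapping/copy helpers. */
            handle = zs_malloc(pool, 300 /* compressed object size */, GFP_KERNEL);
            if (IS_ERR_VALUE(handle)) {
                    zs_destroy_pool(pool);
                    return -ENOMEM;
            }

            /* ... copy the compressed data in, read it back later ... */

            zs_free(pool, handle);
            zs_destroy_pool(pool);
            return 0;
    }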

/linux/drivers/md/
  dm-thin.c — the dm-thin provisioning pool. Hits include the struct pool definition, metadata_operation_failed(), get_pool_mode() (returns pool->pf.mode), and notify_of_pool_mode_change(), which reports the new mode (taking error_if_no_space into account) and raises a dm_table_event() for the pool device.

/linux/net/ceph/
  msgpool.c — Ceph message pools. msgpool_alloc() builds a message with ceph_msg_new2() from the pool's type, front_len and max_data_items and ties it back via msg->pool; msgpool_free() clears msg->pool before returning the message; ceph_msgpool_init() records the pool parameters.

/linux/lib/
  objpool.c — the lockless per-CPU object pool. objpool_init_percpu_slot() lays objects out after the entries ring, sets slot->mask = capacity - 1 and bumps pool->nr_objs per object; objpool_init_percpu_slots() spreads nr_objs across the possible CPUs and sizes each slot with struct_size() plus the per-slot object storage.
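
A minimal sketch of the objpool API, under the assumption that the init/pop/push/fini signatures match the file above; the object type and counts are illustrative:

    #include <linux/gfp.h>
    #include <linux/objpool.h>

    struct my_node {
            unsigned long hits;
    };

    static struct objpool_head my_pool;

    static int my_pool_setup(void)
    {
            /* 128 preallocated objects spread over the per-CPU slots;
             * no context pointer, object-init or release callbacks. */
            return objpool_init(&my_pool, 128, sizeof(struct my_node),
                                GFP_KERNEL, NULL, NULL, NULL);
    }

    static void my_pool_use(void)
    {
            struct my_node *node = objpool_pop(&my_pool);

            if (!node)
                    return;                 /* pool temporarily empty */

            node->hits++;
            objpool_push(node, &my_pool);   /* hand it back for reuse */
    }

    static void my_pool_teardown(void)
    {
            objpool_fini(&my_pool);
    }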

  genalloc.c — the general-purpose special-memory allocator. gen_pool_create() allocates the struct gen_pool on the requested node, initializes the lock and chunk list, records min_alloc_order and defaults the algorithm to gen_pool_first_fit with no private data or name.

/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/
  crypto.c — mlx5 DEK (data encryption key) pools. Hits include the MLX5_CRYPTO_DEK_POOL_CALC_FREED macro, mlx5_crypto_dek_bulk_create() (creates a DEK bulk for the pool's key purpose via mlx5_crypto_create_dek_bulk()), and mlx5_crypto_dek_pool_add_bulk(), which accounts the new DEKs in avail_deks/num_deks and adds the bulk to the partial list.

/linux/include/net/
  xsk_buff_pool.h — AF_XDP buffer pool declarations: an xsk_buff_pool back-pointer member, the per-pool dma_pages array kept for performance, the xp_* pool API prototypes (xp_assign_dev, xp_assign_dev_shared, xp_alloc_tx_descs, xp_destroy, xp_get_pool, xp_put_pool, xp_clear_dev, xp_add_xsk), and inline helpers such as xp_init_xskb_addr, xp_init_xskb_dma, xp_dma_sync_for_device, xp_desc_crosses_non_contig_pg, xp_aligned_extract_addr/idx, xp_get_handle and xp_tx_metadata_enabled.

/linux/kernel/
  workqueue.c — the worker pools behind workqueues. Hits include the worker's back-pointer to its worker_pool, show_one_worker_pool(), and the for_each_bh_worker_pool()/for_each_cpu_worker_pool() macros that iterate the NR_STD_WORKER_POOLS per-CPU BH and process-context pools.
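
These pools sit behind the ordinary workqueue API: work items queued by drivers are executed by workers drawn from them. A minimal consumer sketch; the names are illustrative:

    #include <linux/workqueue.h>

    static void my_work_fn(struct work_struct *work)
    {
            /* Executes in process context on a worker borrowed from a pool. */
    }

    static DECLARE_WORK(my_work, my_work_fn);
    static struct workqueue_struct *my_wq;

    static int my_wq_setup(void)
    {
            my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
            if (!my_wq)
                    return -ENOMEM;

            queue_work(my_wq, &my_work);
            return 0;
    }

    static void my_wq_teardown(void)
    {
            destroy_workqueue(my_wq);       /* drains pending work first */
    }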

/linux/include/linux/
  genalloc.h — the gen_pool API: the allocation-algorithm callback signature (which receives the pool and its private data), gen_pool_virt_to_phys(), the inline gen_pool_add_virt()/gen_pool_add() wrappers around gen_pool_add_owner() (gen_pool_add() passes -1 as the physical address), and the gen_pool_alloc_owner()/gen_pool_alloc_algo() wrappers around gen_pool_alloc_algo_owner().
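
A minimal sketch of using these wrappers to carve allocations out of a fixed region (for example on-chip SRAM); the base address, size and 32-byte granularity are illustrative:

    #include <linux/genalloc.h>

    static struct gen_pool *sram_pool_create(unsigned long vaddr, size_t size)
    {
            struct gen_pool *pool = gen_pool_create(5 /* 2^5 = 32-byte units */, -1);

            if (!pool)
                    return NULL;

            if (gen_pool_add(pool, vaddr, size, -1)) {  /* phys defaults to -1 */
                    gen_pool_destroy(pool);
                    return NULL;
            }
            return pool;
    }

    static void sram_pool_demo(struct gen_pool *pool)
    {
            unsigned long chunk = gen_pool_alloc(pool, 128);

            if (!chunk)
                    return;         /* pool exhausted */

            /* ... use the 128 bytes at 'chunk' ... */
            gen_pool_free(pool, chunk, 128);
    }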

/linux/include/trace/events/
  page_pool.h — page pool tracepoints. One event records the pool pointer together with inflight/hold/release counters and the pool's destroy_cnt; another records the pool, a netmem reference and the release count.

/linux/drivers/staging/octeon/
  ethernet-mem.c — Octeon FPA pool fills. cvm_oct_fill_hw_skbuff()/cvm_oct_free_hw_skbuff() push skb data buffers into and pull them out of a hardware FPA pool via cvmx_fpa_free()/cvmx_fpa_alloc(); cvm_oct_fill_hw_memory()/cvm_oct_free_hw_memory() do the same with raw memory blocks.

/linux/drivers/staging/media/ipu3/
  ipu3-css-pool.c — IPU3 CSS parameter pools. imgu_css_pool_init() marks every entry invalid and DMA-maps a buffer per entry (cleaning up via imgu_css_pool_cleanup() on failure) and sets pool->last to IPU3_CSS_POOL_SIZE; imgu_css_pool_get() advances last modulo the pool size.

/linux/drivers/net/ethernet/mellanox/mlx5/core/
  pci_irq.c — mlx5 IRQ pools. Hits include the irq back-pointer to its mlx5_irq_pool, mlx5_system_free_irq() (drops the rmap entry and frees dynamically allocated MSI-X vectors with pci_msix_free_irq()), irq_release() (erases the irq from the pool's xarray), and mlx5_irq_put(), which releases under pool->lock.

/linux/kernel/dma/
  swiotlb.c — software IO-TLB pools. add_mem_pool() links a new io_tlb_pool into the io_tlb_mem pool list under RCU and accumulates its nslabs; swiotlb_alloc_pool() allocates the pool structure together with its areas array and sizes the slot array by page order.

/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
  pool.c — mlx5e AF_XDP pool plumbing. mlx5e_xsk_map_pool()/mlx5e_xsk_unmap_pool() wrap xsk_pool_dma_map()/xsk_pool_dma_unmap() with DMA_ATTR_SKIP_CPU_SYNC, mlx5e_xsk_add_pool() stores the pool in the per-channel pools array, mlx5e_xsk_is_pool_sane() checks that headroom and chunk size fit in 16 bits, and mlx5e_build_xsk_param() derives the XSK parameters from the pool.
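
The same driver-facing helpers appear in any zero-copy AF_XDP implementation. A minimal sketch of the setup and RX-refill side, assuming the xsk_pool_*/xsk_buff_* helpers from net/xdp_sock_drv.h; the function names here are made up:

    #include <linux/dma-mapping.h>
    #include <net/xdp.h>
    #include <net/xdp_sock_drv.h>

    /* The pool arrives via ndo_bpf (XDP_SETUP_XSK_POOL). */
    static int drv_xsk_enable(struct device *dma_dev, struct xsk_buff_pool *pool,
                              struct xdp_rxq_info *rxq)
    {
            int err;

            /* Map the UMEM once for this device, as mlx5e does above. */
            err = xsk_pool_dma_map(pool, dma_dev, DMA_ATTR_SKIP_CPU_SYNC);
            if (err)
                    return err;

            xsk_pool_set_rxq_info(pool, rxq);
            return 0;
    }

    static void drv_rx_refill_one(struct xsk_buff_pool *pool)
    {
            struct xdp_buff *xdp = xsk_buff_alloc(pool);
            dma_addr_t dma;

            if (!xdp)
                    return;         /* fill ring is empty */

            dma = xsk_buff_xdp_get_dma(xdp);
            /* ... post dma to the hardware RX ring; on error or teardown,
             * return the buffer with xsk_buff_free(xdp) ... */
            (void)dma;
    }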

/linux/drivers/md/dm-vdo/
  vio.c — dm-vdo VIO pools. make_vio_pool() allocates the pool for a given thread, initializes the available and busy lists, allocates one "VIO pool buffer" and carves it into per-VIO slices, initializing a pooled_vio for each.

  data-vio.c — dm-vdo data_vio pools. Hits include the limiter's back-pointer to its data_vio_pool, check_for_drain_complete_locked() (drained once nothing is busy and no new waiters are queued on the limiter or discard limiter), get_available_data_vio() (pops the first entry off pool->available), and the assign_data_vio()/launch_bio() path that hands a data_vio to a waiting bio.

/linux/drivers/tee/
  tee_shm_pool.c — TEE shared-memory pools backed by genalloc. pool_op_gen_alloc()/pool_op_gen_free() allocate and free tee_shm buffers from the gen_pool kept in pool->private_data, pool_op_gen_destroy_pool() destroys the gen_pool and frees the pool, and tee_shm_pool_alloc_res_mem() builds such a pool over a reserved memory region.
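
A hedged sketch of wrapping a static shared-memory carveout in such a pool. Both the header (tee_core.h versus the older tee_drv.h) and the four-argument form of tee_shm_pool_alloc_res_mem() are assumptions based on recent kernels; the vaddr/paddr/size parameters and the function name are placeholders:

    #include <linux/tee_core.h>

    static struct tee_shm_pool *my_shm_pool_create(void *vaddr, phys_addr_t paddr,
                                                   size_t size)
    {
            /* Allocations come from a gen_pool over the carveout, here with
             * page-sized granularity. */
            return tee_shm_pool_alloc_res_mem((unsigned long)vaddr, paddr, size,
                                              PAGE_SHIFT);
    }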