| /linux/net/core/ |
| page_pool.c | Core page pool implementation. Defines the statistics macros alloc_stat_inc(), recycle_stat_inc() and recycle_stat_add() (the recycle counters live in a __percpu struct page_pool_recycle_stats) and page_pool_get_stats(), which accumulates a pool's alloc_stats counters (fast, slow, slow_high_order, empty, …) into a caller-supplied stats structure. |
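A minimal sketch of how a driver might read these counters, assuming CONFIG_PAGE_POOL_STATS is enabled; only page_pool_get_stats() and the alloc_stats fields named above come from the file, while the helper name and the printing are illustrative:

    #include <linux/printk.h>
    #include <net/page_pool/helpers.h>      /* page_pool_get_stats(), struct page_pool_stats */

    /* Hypothetical helper: report a pool's allocation counters. */
    static void dump_pool_alloc_stats(const struct page_pool *pool)
    {
            struct page_pool_stats stats = { };

            if (!page_pool_get_stats(pool, &stats))
                    return;                 /* nothing was filled in */

            pr_info("fast=%llu slow=%llu slow_high_order=%llu empty=%llu\n",
                    stats.alloc_stats.fast, stats.alloc_stats.slow,
                    stats.alloc_stats.slow_high_order, stats.alloc_stats.empty);
    }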
|
| page_pool_user.c | Netlink (netdev genetlink) introspection of page pools. netdev_nl_page_pool_get_do() looks a pool up by id in the page_pools xarray via xa_load(), rejects it if it is unhashed from the user list or belongs to a different network namespace (net_eq() on dev_net(pool->slow.netdev)), then invokes a pp_nl_fill_cb callback to build the response; netdev_nl_page_pool_get_dump() walks each netdev's page_pools list with hlist_for_each_entry(), resuming from the previously dumped pool->user.id. |
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/ |
| pool.c | Resource pool for mlx5 HWS (hardware steering). hws_pool_free_one_resource() destroys the underlying STE or STC object depending on the pool type, hws_pool_resource_free() releases the pool's resource and, for FDB table types, its mirror resource, and hws_pool_create_one_resource() allocates a single resource of a given log_range. |
|
| /linux/mm/ |
| mempool.c | The generic mempool allocator. The matched lines are the element-poisoning debug helpers: poison_error() reports a corrupted element together with the pool pointer, its size and pool->curr_nr, while check_element()/__check_element() verify the poison pattern, deriving the element size from pool->pool_data depending on whether the pool frees through mempool_kfree or mempool_free_slab. |
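These checks apply to pools built with the ordinary mempool API; as a reminder, a hedged sketch of that API follows (the element count, the request structure and the kmem cache are illustrative, not taken from this file):

    #include <linux/mempool.h>
    #include <linux/slab.h>

    struct my_req { int id; };                  /* illustrative element type */
    static struct kmem_cache *req_cache;        /* assumed created elsewhere */

    /* Reserve at least 16 elements so allocation succeeds under memory pressure. */
    static mempool_t *create_req_pool(void)
    {
            return mempool_create(16, mempool_alloc_slab, mempool_free_slab,
                                  req_cache);
    }

    static void use_req_pool(mempool_t *pool)
    {
            struct my_req *req = mempool_alloc(pool, GFP_KERNEL);

            if (!req)
                    return;
            /* ... use req ... */
            mempool_free(req, pool);
    }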
|
| dmapool.c | Per-device allocator of small coherent-DMA blocks (struct dma_pool). The matches cover pools_show() (per-pool name, nr_active, nr_blocks and size via sysfs), the debug helpers pool_check_block()/pool_block_err(), page management (pool_find_page(), pool_init_page(), pool_initialise_page(), pool_alloc_page()), the block free list (pool_block_pop()/pool_block_push()), the public dma_pool_alloc()/dma_pool_free()/dma_pool_destroy() entry points, and the managed dmam_pool_create()/dmam_pool_destroy()/dmam_pool_release() wrappers. |
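A hedged sketch of the public API whose internals are listed above; the device, pool name, block size and alignment are illustrative:

    #include <linux/dmapool.h>

    /* Illustrative: a pool of 64-byte, 16-byte-aligned DMA descriptors. */
    static int desc_pool_demo(struct device *dev)
    {
            struct dma_pool *pool;
            dma_addr_t dma;
            void *desc;

            pool = dma_pool_create("my-descs", dev, 64, 16, 0);
            if (!pool)
                    return -ENOMEM;

            desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
            if (!desc) {
                    dma_pool_destroy(pool);
                    return -ENOMEM;
            }

            /* ... program 'dma' into the device, touch 'desc' from the CPU ... */

            dma_pool_free(pool, desc, dma);
            dma_pool_destroy(pool);
            return 0;
    }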
| zswap.c | zswap's compressed swap cache. Each entry keeps a struct zswap_pool pointer; zswap_pool_create() kzalloc()s the pool, creates the backing zsmalloc pool with zs_create_pool(), copies the compressor name with strscpy(), allocates the per-CPU acomp_ctx array with alloc_percpu() and initialises each context's mutex. |
|
| zsmalloc.c | The zsmalloc allocator itself. A zspage carries a back-pointer to its zs_pool; the matches show the forward declarations and the no-op stubs (for builds without zspage migration) of kick_deferred_free(), init_deferred_free() and SetZsPageMovable(), plus create_cache(), which creates the per-pool "zs_handle-<name>" kmem cache for object handles. |
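zsmalloc hands out opaque handles rather than pointers. A rough lifecycle sketch, with the caveat that the failure convention of zs_malloc() has changed across kernel versions, so treat the error check as an assumption:

    #include <linux/err.h>
    #include <linux/zsmalloc.h>

    static int zs_demo(void)
    {
            struct zs_pool *pool = zs_create_pool("demo");
            unsigned long handle;

            if (!pool)
                    return -ENOMEM;

            /* Room for a ~300-byte compressed object; the handle is opaque. */
            handle = zs_malloc(pool, 300, GFP_KERNEL);
            if (IS_ERR_VALUE(handle)) {     /* newer kernels encode errors in the handle;
                                               older ones returned 0 on failure */
                    zs_destroy_pool(pool);
                    return -ENOMEM;
            }

            zs_free(pool, handle);
            zs_destroy_pool(pool);
            return 0;
    }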
|
| /linux/net/xdp/ |
| xsk_buff_pool.c | AF_XDP buffer pool core. xp_add_xsk() and xp_del_xsk() add and remove a socket on the pool's xsk_tx_list under xsk_tx_list_lock (RCU list add, irqsave spinlock), and xp_destroy() bails out on a NULL pool, then kvfree()s the pool's tx_descs and the pool itself. |
|
| /linux/drivers/net/ethernet/ti/ |
| k3-cppi-desc-pool.c | TI K3 CPPI descriptor pool. k3_cppi_desc_pool_destroy() warns if descriptors are still outstanding (gen_pool_size() vs. gen_pool_avail()), frees the coherent descriptor memory with dma_free_coherent(), kfree()s the per-descriptor info array and finally destroys the backing gen_pool, which also frees the pool name. |
|
| /linux/net/ceph/ |
| msgpool.c | Ceph message pool (a mempool of preallocated ceph_msg structures). msgpool_alloc() builds a message with ceph_msg_new2() from the pool's type, front_len and max_data_items and points msg->pool back at the pool; msgpool_free() clears msg->pool before releasing the message; ceph_msgpool_init() records the pool parameters. |
|
| /linux/lib/ |
| objpool.c | Scalable per-CPU object pool (lib/objpool). objpool_init_percpu_slot() lays the preallocated objects out after the slot's entry ring, sets slot->mask = capacity - 1 and advances by pool->obj_size per object while counting pool->nr_objs; objpool_init_percpu_slots() divides nr_objs across the possible CPUs (handing the remainder to the first CPUs) and sizes each slot with struct_size() plus obj_size * nodes. |
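The consumer side is a pop/push pair. A hedged sketch assuming a pool already set up elsewhere with objpool_init(); the objpool_pop()/objpool_push() signatures are quoted from memory and should be checked against include/linux/objpool.h:

    #include <linux/objpool.h>

    /* Illustrative fast path: take a preallocated object, hand it back later. */
    static void objpool_roundtrip(struct objpool_head *pool)
    {
            void *obj = objpool_pop(pool);  /* NULL when the pool is exhausted */

            if (!obj)
                    return;
            /* ... use obj as a per-invocation scratch record ... */
            objpool_push(obj, pool);        /* return it for reuse */
    }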
|
| genalloc.c | General-purpose allocator for chunks of special memory. gen_pool_create() allocates the struct gen_pool on the requested node, initialises the spinlock and chunk list, records min_alloc_order and defaults the algorithm to gen_pool_first_fit with no callback data or name. |
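A hedged sketch of the usual pattern of carving allocations out of a fixed region such as on-chip SRAM; the region, its size and the 32-byte granularity are illustrative:

    #include <linux/genalloc.h>
    #include <linux/sizes.h>

    /* Illustrative: manage a 64 KiB SRAM region in 32-byte granules. */
    static int sram_demo(unsigned long sram_virt, phys_addr_t sram_phys)
    {
            struct gen_pool *pool;
            unsigned long addr;
            int ret;

            pool = gen_pool_create(5, -1);          /* 2^5 = 32-byte minimum, any node */
            if (!pool)
                    return -ENOMEM;

            ret = gen_pool_add_virt(pool, sram_virt, sram_phys, SZ_64K, -1);
            if (ret)
                    goto out;

            addr = gen_pool_alloc(pool, 256);       /* returns 0 on failure */
            if (!addr) {
                    ret = -ENOMEM;
                    goto out;
            }
            /* ... use the 256-byte carve-out at 'addr' ... */
            gen_pool_free(pool, addr, 256);
    out:
            gen_pool_destroy(pool);
            return ret;
    }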
|
| /linux/include/net/ |
| xdp_sock_drv.h | Driver-facing AF_XDP (XSK) API. Declares the zero-copy TX helpers xsk_tx_completed(), xsk_tx_peek_desc(), xsk_tx_peek_release_desc_batch() and xsk_tx_release(), the need-wakeup set/clear/query helpers, and inline wrappers such as xsk_pool_get_headroom(), xsk_pool_get_chunk_size(), xsk_pool_get_rx_frame_size(), xsk_pool_set_rxq_info(), xsk_pool_fill_cb(), xsk_pool_dma_map()/xsk_pool_dma_unmap(), xsk_buff_alloc()/xsk_buff_alloc_batch()/xsk_buff_can_alloc(), xsk_buff_raw_get_dma()/xsk_buff_raw_get_data()/xsk_buff_raw_get_ctx(), the metadata accessors and xsk_buff_raw_dma_sync_for_device(); each helper has a no-op stub for builds without AF_XDP support. |
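A hedged sketch of the Rx-side pattern these helpers enable in a zero-copy driver; the hardware-descriptor step is a placeholder, and only the xsk_buff_* calls correspond to declarations in this header:

    #include <net/xdp_sock_drv.h>

    /* Illustrative: refill one Rx slot from the XSK buffer pool. */
    static int refill_one_rx_buf(struct xsk_buff_pool *pool)
    {
            struct xdp_buff *xdp;
            dma_addr_t dma;

            xdp = xsk_buff_alloc(pool);     /* NULL if the fill ring is empty */
            if (!xdp)
                    return -ENOMEM;

            dma = xsk_buff_xdp_get_dma(xdp);
            /* ... write 'dma' into the hardware Rx descriptor (device specific);
             *     on error paths the buffer would go back via xsk_buff_free(xdp) ... */
            return 0;
    }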
| xsk_buff_pool.h | Declares struct xsk_buff_pool, the pool back-pointer kept in each buffer, and the pool management API: xp_assign_dev()/xp_assign_dev_shared(), xp_alloc_tx_descs(), xp_destroy(), xp_get_pool()/xp_put_pool(), xp_clear_dev() and xp_add_xsk()/xp_del_xsk(). |
|
| /linux/kernel/cgroup/ |
| dmem.c | The dmem (device memory) cgroup controller. Each cgroup tracks per-region pool state: free_cg_pool() unlinks and frees a pool, set_resource_min()/set_resource_low()/set_resource_max() and the get_resource_*() helpers wrap the pool's page_counter, and the remaining matches cover the offline/free css callbacks, pool lookup (find_cg_pool_locked(), pool_parent(), get_cg_pool_locked()/get_cg_pool_unlocked()), protection calculation, eviction checks, charging (dmem_cgroup_try_charge()/dmem_cgroup_uncharge()), region unregistration and the limit show/write handlers. |
| /linux/include/net/page_pool/ |
| helpers.h | Inline helpers for page pool consumers. The header comment notes that pages from a page pool avoid 'struct page' cache-line dirtying when recycled, and that the page-splitting (frag) API saves memory provided the driver sets pool->p.max_len and pool->p.offset correctly and calls page_pool_put_page() appropriately. Declares page_pool_get_stats() plus wrappers such as page_pool_dev_alloc_pages(), page_pool_dev_alloc_frag(), page_pool_alloc_netmem()/page_pool_dev_alloc_netmem(), page_pool_alloc()/page_pool_dev_alloc(), page_pool_alloc_va()/page_pool_dev_alloc_va(), page_pool_get_dma_dir(), the put/recycle family (page_pool_put_page(), page_pool_put_full_page(), page_pool_recycle_direct(), page_pool_free_va(), …), the DMA-sync-for-CPU helpers and small utilities like page_pool_nid_changed(). |
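A hedged sketch of the basic create/allocate/recycle cycle built from these wrappers; the pool parameters and the Rx-descriptor step are illustrative, not prescriptive:

    #include <linux/dma-mapping.h>
    #include <linux/err.h>
    #include <net/page_pool/types.h>
    #include <net/page_pool/helpers.h>

    /* Illustrative: a small pool for one Rx queue, with DMA mapping done by the pool. */
    static int pp_demo(struct device *dev)
    {
            struct page_pool_params pp = {
                    .flags          = PP_FLAG_DMA_MAP,
                    .order          = 0,
                    .pool_size      = 256,
                    .dev            = dev,
                    .dma_dir        = DMA_FROM_DEVICE,
            };
            struct page_pool *pool;
            struct page *page;

            pool = page_pool_create(&pp);
            if (IS_ERR(pool))
                    return PTR_ERR(pool);

            page = page_pool_dev_alloc_pages(pool); /* GFP_ATOMIC variant */
            if (!page) {
                    page_pool_destroy(pool);
                    return -ENOMEM;
            }
            /* ... point an Rx descriptor at the page, receive into it ... */

            page_pool_put_full_page(pool, page, false);     /* recycle; not in NAPI context */
            page_pool_destroy(pool);
            return 0;
    }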
| /linux/drivers/gpu/drm/amd/display/dc/resource/dce60/ |
| dce60_resource.c | AMD display resource pool for DCE 6.0. dce60_resource_destruct() walks pool->base.pipe_count and destroys each pipe's OPP (dce110_opp_destroy()), transform (dce60_transform_destroy()), IPP (dce_ipp_destroy()) and memory input (kfree() on TO_DCE_MEM_INPUT()). |
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/lib/ |
| crypto.c | mlx5 DEK (data encryption key) pools. mlx5_crypto_dek_bulk_create() creates a bulk of DEKs for the pool's key purpose via mlx5_crypto_create_dek_bulk() on pool->mdev, and mlx5_crypto_dek_pool_add_bulk() links the new bulk onto the pool's partial list and bumps avail_deks/num_deks; MLX5_CRYPTO_DEK_POOL_CALC_FREED() wraps the per-pool freed-key accounting. |
|
| /linux/kernel/ |
| workqueue.c | The workqueue implementation. Every worker records the worker_pool it is attached to; the matches show the for_each_bh_worker_pool() and for_each_cpu_worker_pool() iterators over the per-CPU BH and standard worker pools (NR_STD_WORKER_POOLS of each) and the show_one_worker_pool() debug dump helper. |
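These pools sit behind the ordinary workqueue API; a hedged reminder of the consumer side (the handler and names are illustrative, nothing here is specific to the matched lines):

    #include <linux/workqueue.h>

    static void my_work_fn(struct work_struct *work)
    {
            /* Runs in process context on a kworker drawn from a worker_pool. */
    }

    static DECLARE_WORK(my_work, my_work_fn);

    static void kick_work(void)
    {
            schedule_work(&my_work);        /* queued on the per-CPU standard pools */
    }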
|
| /linux/drivers/gpu/drm/ttm/tests/ |
| ttm_pool_test.c | KUnit tests for the TTM page pool. ttm_pool_pre_populated() kunit_kzalloc()s a pool, calls ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false), warms it by allocating a ttm_tt with ttm_pool_alloc() and immediately freeing it with ttm_pool_free(), then returns the pre-populated pool; ttm_pool_alloc_basic() exercises the plain allocation path. |
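Condensed, the flow the test exercises is roughly the following; this is a paraphrase of the calls visible above, and the meaning of the two boolean ttm_pool_init() arguments is taken on faith from the test rather than documented here:

    /* Sketch of the KUnit flow shown above, not a standalone driver example. */
    struct ttm_pool *pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);

    KUNIT_ASSERT_NOT_NULL(test, pool);
    ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);

    err = ttm_pool_alloc(pool, tt, &simple_ctx);    /* populate the ttm_tt from the pool */
    KUNIT_ASSERT_EQ(test, err, 0);

    ttm_pool_free(pool, tt);                        /* pages return to the pool for reuse */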
|
| /linux/include/linux/ |
| genalloc.h | Public genalloc API. The allocation-algorithm callback takes the pool and its data pointer; gen_pool_virt_to_phys() translates pool addresses, gen_pool_add_virt() and gen_pool_add() are inline wrappers around gen_pool_add_owner() (gen_pool_add() passes -1 as the physical address), and gen_pool_alloc_owner()/gen_pool_alloc_algo() wrap gen_pool_alloc_algo_owner() with the pool's configured algorithm and data. |
|
| /linux/include/trace/events/ |
| page_pool.h | page_pool tracepoints. One event class takes (pool, inflight, hold, release) and additionally records pool->destroy_cnt; another takes (pool, netmem, release) and records the pool pointer alongside the released netmem. |
|
| /linux/drivers/gpu/drm/amd/display/dc/resource/dcn301/ |
| dcn301_resource.c | AMD display resource pool for DCN 3.01. TO_DCN301_RES_POOL() is a container_of() accessor, and dcn301_destruct() walks the stream encoders, freeing each encoder's VPG (DCN30_VPG_FROM_VPG()) and AFMT (DCN30_AFMT_FROM_AFMT()) sub-objects before releasing the encoders themselves. |
|
| /linux/drivers/staging/octeon/ |
| ethernet-mem.c | Octeon staging Ethernet FPA pool helpers. cvm_oct_fill_hw_skbuff() hands sk_buff data buffers to a hardware FPA pool with cvmx_fpa_free() and cvm_oct_free_hw_skbuff() reclaims them with cvmx_fpa_alloc(); cvm_oct_fill_hw_memory()/cvm_oct_free_hw_memory() do the same with raw memory blocks. |
|
| /linux/drivers/staging/media/ipu3/ |
| ipu3-css-pool.c | IPU3 CSS parameter pool. imgu_css_pool_init() marks every entry invalid, allocates its DMA mapping with imgu_dmamap_alloc() and sets pool->last to IPU3_CSS_POOL_SIZE (cleaning up with imgu_css_pool_cleanup() on failure); imgu_css_pool_cleanup() frees each entry's mapping; imgu_css_pool_get() advances to the next entry, (pool->last + 1) % IPU3_CSS_POOL_SIZE. |
|