
Searched full:pool (Results 1 – 25 of 1375) sorted by relevance


/linux/net/xdp/
xsk_buff_pool.c
11 void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) in xp_add_xsk() argument
18 spin_lock_irqsave(&pool->xsk_tx_list_lock, flags); in xp_add_xsk()
19 list_add_rcu(&xs->tx_list, &pool->xsk_tx_list); in xp_add_xsk()
20 spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags); in xp_add_xsk()
23 void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) in xp_del_xsk() argument
30 spin_lock_irqsave(&pool->xsk_tx_list_lock, flags); in xp_del_xsk()
32 spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags); in xp_del_xsk()
35 void xp_destroy(struct xsk_buff_pool *pool) in xp_destroy() argument
37 if (!pool) in xp_destroy()
40 kvfree(pool in xp_destroy()
45 xp_alloc_tx_descs(struct xsk_buff_pool * pool,struct xdp_sock * xs) xp_alloc_tx_descs() argument
59 struct xsk_buff_pool *pool; xp_create_and_assign_umem() local
118 xp_set_rxq_info(struct xsk_buff_pool * pool,struct xdp_rxq_info * rxq) xp_set_rxq_info() argument
127 xp_fill_cb(struct xsk_buff_pool * pool,struct xsk_cb_desc * desc) xp_fill_cb() argument
139 xp_disable_drv_zc(struct xsk_buff_pool * pool) xp_disable_drv_zc() argument
162 xp_assign_dev(struct xsk_buff_pool * pool,struct net_device * netdev,u16 queue_id,u16 flags) xp_assign_dev() argument
247 xp_assign_dev_shared(struct xsk_buff_pool * pool,struct xdp_sock * umem_xs,struct net_device * dev,u16 queue_id) xp_assign_dev_shared() argument
264 xp_clear_dev(struct xsk_buff_pool * pool) xp_clear_dev() argument
277 struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool, xp_release_deferred() local
298 xp_get_pool(struct xsk_buff_pool * pool) xp_get_pool() argument
303 xp_put_pool(struct xsk_buff_pool * pool) xp_put_pool() argument
317 xp_find_dma_map(struct xsk_buff_pool * pool) xp_find_dma_map() argument
377 xp_dma_unmap(struct xsk_buff_pool * pool,unsigned long attrs) xp_dma_unmap() argument
413 xp_init_dma_info(struct xsk_buff_pool * pool,struct xsk_dma_map * dma_map) xp_init_dma_info() argument
439 xp_dma_map(struct xsk_buff_pool * pool,struct device * dev,unsigned long attrs,struct page ** pages,u32 nr_pages) xp_dma_map() argument
484 xp_addr_crosses_non_contig_pg(struct xsk_buff_pool * pool,u64 addr) xp_addr_crosses_non_contig_pg() argument
490 xp_check_unaligned(struct xsk_buff_pool * pool,u64 * addr) xp_check_unaligned() argument
500 xp_check_aligned(struct xsk_buff_pool * pool,u64 * addr) xp_check_aligned() argument
506 xp_get_xskb(struct xsk_buff_pool * pool,u64 addr) xp_get_xskb() argument
522 __xp_alloc(struct xsk_buff_pool * pool) __xp_alloc() argument
553 xp_alloc(struct xsk_buff_pool * pool) xp_alloc() argument
579 xp_alloc_new_from_fq(struct xsk_buff_pool * pool,struct xdp_buff ** xdp,u32 max) xp_alloc_new_from_fq() argument
615 xp_alloc_reused(struct xsk_buff_pool * pool,struct xdp_buff ** xdp,u32 nb_entries) xp_alloc_reused() argument
635 xp_alloc_slow(struct xsk_buff_pool * pool,struct xdp_buff ** xdp,u32 max) xp_alloc_slow() argument
653 xp_alloc_batch(struct xsk_buff_pool * pool,struct xdp_buff ** xdp,u32 max) xp_alloc_batch() argument
677 xp_can_alloc(struct xsk_buff_pool * pool,u32 count) xp_can_alloc() argument
703 xp_raw_get_data(struct xsk_buff_pool * pool,u64 addr) xp_raw_get_data() argument
710 xp_raw_get_dma(struct xsk_buff_pool * pool,u64 addr) xp_raw_get_dma() argument
[all …]
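
The prototypes above outline the pool-side allocation API used by AF_XDP zero-copy drivers. Below is a minimal, hypothetical sketch (not taken from any driver) of how an RX refill path could batch-allocate from such a pool; my_rx_refill, bufs and budget are made-up names, and only functions listed in the excerpt are called.

#include <net/xsk_buff_pool.h>

/* Hypothetical RX refill path: 'bufs' and 'budget' are placeholders for a
 * driver's own ring state. */
static u32 my_rx_refill(struct xsk_buff_pool *pool,
			struct xdp_buff **bufs, u32 budget)
{
	u32 n;

	if (!xp_can_alloc(pool, budget))	/* enough fill-queue entries? */
		return 0;

	/* Grab up to 'budget' buffers in one call; returns how many. */
	n = xp_alloc_batch(pool, bufs, budget);

	/* ... program each bufs[i] into the hardware RX ring ... */
	return n;
}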
/linux/net/core/
page_pool.c
43 #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++) argument
45 #define recycle_stat_inc(pool, __stat) \ argument
47 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
51 #define recycle_stat_add(pool, __stat, val) \ argument
53 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
72 * page_pool_get_stats() - fetch page pool stats
73 * @pool: pool from which page was allocated
82 bool page_pool_get_stats(const struct page_pool *pool, in page_pool_get_stats() argument
91 stats->alloc_stats.fast += pool->alloc_stats.fast; in page_pool_get_stats()
92 stats->alloc_stats.slow += pool->alloc_stats.slow; in page_pool_get_stats()
[all …]
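
The stats counters above are exposed through page_pool_get_stats(), whose prototype appears in the helpers.h hit further down. A minimal sketch of reading them, assuming an existing pool pointer and CONFIG_PAGE_POOL_STATS enabled; my_dump_pp_stats and my_pool are placeholder names.

#include <net/page_pool/helpers.h>	/* page_pool_get_stats(), stats structs */

static void my_dump_pp_stats(const struct page_pool *my_pool)
{
	struct page_pool_stats stats = {};

	if (!page_pool_get_stats(my_pool, &stats))
		return;

	/* Same fields incremented by alloc_stat_inc() above. */
	pr_info("fast=%llu slow=%llu\n",
		stats.alloc_stats.fast, stats.alloc_stats.slow);
}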
page_pool_user.c
17 /* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
23 * linked to a netdev at creation time. Following page pool "visibility"
30 * to error, or (c) the entire namespace which owned this pool disappeared
34 typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
40 struct page_pool *pool; in netdev_nl_page_pool_get_do() local
45 pool = xa_load(&page_pools, id); in netdev_nl_page_pool_get_do()
46 if (!pool || hlist_unhashed(&pool->user.list) || in netdev_nl_page_pool_get_do()
47 !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) { in netdev_nl_page_pool_get_do()
58 err = fill(rsp, pool, info); in netdev_nl_page_pool_get_do()
86 struct page_pool *pool; in netdev_nl_page_pool_get_dump() local
[all …]
/linux/drivers/net/ethernet/ti/
k3-cppi-desc-pool.c
2 /* TI K3 CPPI5 descriptors pool API
15 #include "k3-cppi-desc-pool.h"
28 void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool) in k3_cppi_desc_pool_destroy() argument
30 if (!pool) in k3_cppi_desc_pool_destroy()
33 WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool), in k3_cppi_desc_pool_destroy()
35 gen_pool_size(pool->gen_pool), in k3_cppi_desc_pool_destroy()
36 gen_pool_avail(pool->gen_pool)); in k3_cppi_desc_pool_destroy()
37 if (pool->cpumem) in k3_cppi_desc_pool_destroy()
38 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem, in k3_cppi_desc_pool_destroy()
39 pool->dma_addr); in k3_cppi_desc_pool_destroy()
[all …]
/linux/mm/
mempool.c
5 * memory buffer pool support. Such pools are mostly used
24 static void poison_error(mempool_t *pool, void *element, size_t size, in poison_error() argument
27 const int nr = pool->curr_nr; in poison_error()
33 pr_err("Mempool %p size %zu\n", pool, size); in poison_error()
41 static void __check_element(mempool_t *pool, void *element, size_t size) in __check_element() argument
50 poison_error(pool, element, size, i); in __check_element()
57 static void check_element(mempool_t *pool, void *element) in check_element() argument
64 if (pool->free == mempool_kfree) { in check_element()
65 __check_element(pool, element, (size_t)pool in check_element()
86 poison_element(mempool_t * pool,void * element) poison_element() argument
107 check_element(mempool_t * pool,void * element) check_element() argument
110 poison_element(mempool_t * pool,void * element) poison_element() argument
115 kasan_poison_element(mempool_t * pool,void * element) kasan_poison_element() argument
125 kasan_unpoison_element(mempool_t * pool,void * element) kasan_unpoison_element() argument
137 add_element(mempool_t * pool,void * element) add_element() argument
145 remove_element(mempool_t * pool) remove_element() argument
166 mempool_exit(mempool_t * pool) mempool_exit() argument
185 mempool_destroy(mempool_t * pool) mempool_destroy() argument
195 mempool_init_node(mempool_t * pool,int min_nr,mempool_alloc_t * alloc_fn,mempool_free_t * free_fn,void * pool_data,gfp_t gfp_mask,int node_id) mempool_init_node() argument
243 mempool_init(mempool_t * pool,int min_nr,mempool_alloc_t * alloc_fn,mempool_free_t * free_fn,void * pool_data) mempool_init() argument
280 mempool_t *pool; mempool_create_node() local
314 mempool_resize(mempool_t * pool,int new_min_nr) mempool_resize() argument
390 mempool_alloc(mempool_t * pool,gfp_t gfp_mask) mempool_alloc() argument
472 mempool_alloc_preallocated(mempool_t * pool) mempool_alloc_preallocated() argument
504 mempool_free(void * element,mempool_t * pool) mempool_free() argument
[all …]
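
The mempool_init()/mempool_alloc()/mempool_free() prototypes listed above are the whole user-facing API. A short sketch of the typical pattern, backing the pool with a kmem_cache via the standard mempool_alloc_slab/mempool_free_slab helpers; struct my_req and the other names are placeholders.

#include <linux/mempool.h>
#include <linux/slab.h>

struct my_req { int id; };

static struct kmem_cache *my_req_cache;
static mempool_t my_req_pool;

static int my_pool_setup(void)
{
	my_req_cache = kmem_cache_create("my_req", sizeof(struct my_req),
					 0, 0, NULL);
	if (!my_req_cache)
		return -ENOMEM;

	/* Keep at least 16 elements in reserve for use under memory pressure. */
	return mempool_init(&my_req_pool, 16, mempool_alloc_slab,
			    mempool_free_slab, my_req_cache);
}

static void my_pool_use(void)
{
	/* With a sleeping gfp mask this waits for a reserved element rather
	 * than failing, as long as elements are eventually freed back. */
	struct my_req *req = mempool_alloc(&my_req_pool, GFP_KERNEL);

	/* ... use req ... */
	mempool_free(req, &my_req_pool);
}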
dmapool.c
3 * DMA Pool allocator
14 * The current design of this allocator is fairly simple. The pool is
48 struct dma_pool { /* the pool */
74 struct dma_pool *pool; in pools_show() local
80 list_for_each_entry(pool, &dev->dma_pools, pools) { in pools_show()
81 /* per-pool info, no real statistics yet */ in pools_show()
83 pool->name, pool->nr_active, in pools_show()
84 pool->nr_blocks, pool->size, in pools_show()
85 pool->nr_pages); in pools_show()
95 static void pool_check_block(struct dma_pool *pool, struct dma_block *block, in pool_check_block() argument
[all …]
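
dmapool.c implements the dma_pool API for small, coherent DMA buffers. A sketch of its usual life cycle, assuming a valid struct device; the pool name, sizes, and my_desc_ring_init are illustrative only.

#include <linux/dmapool.h>

/* Sketch: a pool of 64-byte, 64-byte-aligned descriptors for device 'dev'. */
static int my_desc_ring_init(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *desc;

	pool = dma_pool_create("my_descs", dev, 64, 64, 0);
	if (!pool)
		return -ENOMEM;

	desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);	/* CPU addr + bus addr */
	if (!desc) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand 'dma' to the hardware, fill 'desc' from the CPU ... */

	dma_pool_free(pool, desc, dma);
	dma_pool_destroy(pool);
	return 0;
}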
zbud.c
62 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
66 * 63 freelists per pool.
78 * struct zbud_pool - stores metadata for each zbud pool
79 * @lock: protects all pool fields and first|last_chunk fields of any
80 * zbud page in the pool
86 * @pages_nr: number of zbud pages in the pool.
88 * This structure is allocated at pool creation time and maintains metadata
89 * pertaining to a particular zbud pool.
107 * @buddy: links the zbud page into the unbuddied/buddied lists in the pool
153 * Pool loc
202 struct zbud_pool *pool; zbud_create_pool() local
222 zbud_destroy_pool(struct zbud_pool * pool) zbud_destroy_pool() argument
246 zbud_alloc(struct zbud_pool * pool,size_t size,gfp_t gfp,unsigned long * handle) zbud_alloc() argument
311 zbud_free(struct zbud_pool * pool,unsigned long handle) zbud_free() argument
353 zbud_map(struct zbud_pool * pool,unsigned long handle) zbud_map() argument
363 zbud_unmap(struct zbud_pool * pool,unsigned long handle) zbud_unmap() argument
374 zbud_get_pool_size(struct zbud_pool * pool) zbud_get_pool_size() argument
388 zbud_zpool_destroy(void * pool) zbud_zpool_destroy() argument
393 zbud_zpool_malloc(void * pool,size_t size,gfp_t gfp,unsigned long * handle) zbud_zpool_malloc() argument
398 zbud_zpool_free(void * pool,unsigned long handle) zbud_zpool_free() argument
403 zbud_zpool_map(void * pool,unsigned long handle,enum zpool_mapmode mm) zbud_zpool_map() argument
408 zbud_zpool_unmap(void * pool,unsigned long handle) zbud_zpool_unmap() argument
413 zbud_zpool_total_size(void * pool) zbud_zpool_total_size() argument
[all …]
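
Purely illustrative sketch of the alloc/map/unmap/free calls whose prototypes are listed above; in practice zbud is driven through the zpool layer rather than called directly, and the pool, src and len arguments here are assumed to exist.

#include <linux/string.h>	/* memcpy() */

static int my_zbud_store(struct zbud_pool *pool, const void *src, size_t len)
{
	unsigned long handle;
	void *dst;
	int ret;

	ret = zbud_alloc(pool, len, GFP_KERNEL, &handle);
	if (ret)
		return ret;

	dst = zbud_map(pool, handle);	/* kernel mapping of the allocation */
	memcpy(dst, src, len);
	zbud_unmap(pool, handle);

	/* ... later, when the compressed object is evicted or invalidated ... */
	zbud_free(pool, handle);
	return 0;
}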
z3fold.c
47 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
52 * be 63, or 62, respectively, freelists per pool.
86 unsigned long pool; /* back link */ member
95 * pool
100 * @pool: pointer to the containing pool
114 struct z3fold_pool *pool; member
126 * struct z3fold_pool - stores metadata for each z3fold pool
127 * @name: pool name
128 * @lock: protects pool unbuddied lists
129 * @stale_lock: protects pool stale page list
[all …]
/linux/sound/core/seq/
seq_memory.c
22 static inline int snd_seq_pool_available(struct snd_seq_pool *pool) in snd_seq_pool_available() argument
24 return pool->total_elements - atomic_read(&pool->counter); in snd_seq_pool_available()
27 static inline int snd_seq_output_ok(struct snd_seq_pool *pool) in snd_seq_output_ok() argument
29 return snd_seq_pool_available(pool) >= pool->room; in snd_seq_output_ok()
225 static inline void free_cell(struct snd_seq_pool *pool, in free_cell() argument
228 cell->next = pool->free; in free_cell()
229 pool->free = cell; in free_cell()
230 atomic_dec(&pool->counter); in free_cell()
235 struct snd_seq_pool *pool; in snd_seq_cell_free() local
239 pool = cell->pool; in snd_seq_cell_free()
[all …]
/linux/lib/
genalloc.c
16 * available. If new memory is added to the pool a lock has to be
146 * gen_pool_create - create a new special memory pool
148 * @nid: node id of the node the pool structure should be allocated on, or -1
150 * Create a new special memory pool that can be used to manage special purpose
155 struct gen_pool *pool; in gen_pool_create() local
157 pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid); in gen_pool_create()
158 if (pool != NULL) { in gen_pool_create()
159 spin_lock_init(&pool->lock); in gen_pool_create()
160 INIT_LIST_HEAD(&pool->chunks); in gen_pool_create()
161 pool->min_alloc_order = min_alloc_order; in gen_pool_create()
[all …]
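
gen_pool_create(), excerpted above, is the entry point of the genalloc API: create a pool with a minimum allocation order, add one or more chunks of special-purpose memory, then carve allocations from it. A minimal sketch; sram_virt, sram_phys and sram_size stand in for a real mapping.

#include <linux/genalloc.h>

static int my_sram_init(unsigned long sram_virt, phys_addr_t sram_phys,
			size_t sram_size)
{
	struct gen_pool *pool;
	unsigned long buf;

	pool = gen_pool_create(5, -1);	/* 2^5 = 32-byte granules, any node */
	if (!pool)
		return -ENOMEM;

	if (gen_pool_add_virt(pool, sram_virt, sram_phys, sram_size, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	buf = gen_pool_alloc(pool, 256);	/* 256 bytes from the pool */
	if (buf) {
		/* gen_pool_virt_to_phys() gives the matching bus address. */
		gen_pool_free(pool, buf, 256);
	}

	gen_pool_destroy(pool);
	return 0;
}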
objpool.c
19 objpool_init_percpu_slot(struct objpool_head *pool, in objpool_init_percpu_slot() argument
24 void *obj = (void *)&slot->entries[pool->capacity]; in objpool_init_percpu_slot()
28 slot->mask = pool->capacity - 1; in objpool_init_percpu_slot()
37 obj = obj + pool->obj_size; in objpool_init_percpu_slot()
40 pool->nr_objs++; in objpool_init_percpu_slot()
48 objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs, in objpool_init_percpu_slots() argument
63 nodes = nr_objs / pool->nr_possible_cpus; in objpool_init_percpu_slots()
64 if (cpu_count < (nr_objs % pool->nr_possible_cpus)) in objpool_init_percpu_slots()
68 size = struct_size(slot, entries, pool->capacity) + in objpool_init_percpu_slots()
69 pool->obj_size * nodes; in objpool_init_percpu_slots()
[all …]
/linux/net/ceph/
msgpool.c
14 struct ceph_msgpool *pool = arg; in msgpool_alloc() local
17 msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items, in msgpool_alloc()
20 dout("msgpool_alloc %s failed\n", pool->name); in msgpool_alloc()
22 dout("msgpool_alloc %s %p\n", pool->name, msg); in msgpool_alloc()
23 msg->pool = pool; in msgpool_alloc()
30 struct ceph_msgpool *pool = arg; in msgpool_free() local
33 dout("msgpool_release %s %p\n", pool->name, msg); in msgpool_free()
34 msg->pool = NULL; in msgpool_free()
38 int ceph_msgpool_init(struct ceph_msgpool *pool, int type, in ceph_msgpool_init() argument
43 pool->type = type; in ceph_msgpool_init()
[all …]
/linux/drivers/gpu/drm/panthor/
panthor_heap.c
78 * struct panthor_heap_pool - Pool of heap contexts
80 * The pool is attached to a panthor_file and can't be shared across processes.
89 /** @vm: VM this pool is bound to. */
110 static int panthor_get_heap_ctx_offset(struct panthor_heap_pool *pool, int id) in panthor_get_heap_ctx_offset() argument
112 return panthor_heap_ctx_stride(pool->ptdev) * id; in panthor_get_heap_ctx_offset()
115 static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id) in panthor_get_heap_ctx() argument
117 return pool->gpu_contexts->kmap + in panthor_get_heap_ctx()
118 panthor_get_heap_ctx_offset(pool, id); in panthor_get_heap_ctx()
221 panthor_heap_destroy_locked(struct panthor_heap_pool *pool, u32 handle) in panthor_heap_destroy_locked() argument
225 heap = xa_erase(&pool->xa, handle); in panthor_heap_destroy_locked()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/
crypto.c
13 * (for example, TLS) after last revalidation in a pool or a bulk.
19 #define MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) MLX5_CRYPTO_DEK_CALC_FREED(pool) argument
33 int num_deks; /* the total number of keys in this pool */
34 int avail_deks; /* the number of available keys in this pool */
35 int in_use_deks; /* the number of being used keys in this pool */
288 mlx5_crypto_dek_bulk_create(struct mlx5_crypto_dek_pool *pool) in mlx5_crypto_dek_bulk_create() argument
290 struct mlx5_crypto_dek_priv *dek_priv = pool->mdev->mlx5e_res.dek_priv; in mlx5_crypto_dek_bulk_create()
291 struct mlx5_core_dev *mdev = pool->mdev; in mlx5_crypto_dek_bulk_create()
313 err = mlx5_crypto_create_dek_bulk(mdev, pool->key_purpose, in mlx5_crypto_dek_bulk_create()
334 mlx5_crypto_dek_pool_add_bulk(struct mlx5_crypto_dek_pool *pool) in mlx5_crypto_dek_pool_add_bulk() argument
[all …]
/linux/tools/testing/selftests/drivers/net/mlxsw/
sharedbuffer_configuration.py
16 objects, pool, tcbind and portpool. Provide an interface to get random
18 1. Pool:
22 - random pool number
30 for pool in pools:
31 self._pools.append(pool)
47 def _get_th(self, pool): argument
50 if pool["thtype"] == "dynamic":
58 for pool in self._pools:
59 if pool["type"] == "ingress":
60 ing_pools.append(pool)
[all …]
/linux/drivers/staging/octeon/
ethernet-mem.c
17 * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
18 * @pool: Pool to allocate an skbuff for
19 * @size: Size of the buffer needed for the pool
24 static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements) in cvm_oct_fill_hw_skbuff() argument
35 cvmx_fpa_free(skb->data, pool, size / 128); in cvm_oct_fill_hw_skbuff()
42 * cvm_oct_free_hw_skbuff- free hardware pool skbuffs
43 * @pool: Pool to allocate an skbuff for
44 * @size: Size of the buffer needed for the pool
47 static void cvm_oct_free_hw_skbuff(int pool, int size, int elements) in cvm_oct_free_hw_skbuff() argument
52 memory = cvmx_fpa_alloc(pool); in cvm_oct_free_hw_skbuff()
[all …]
/linux/drivers/gpu/drm/ttm/tests/
ttm_pool_test.c
79 struct ttm_pool *pool; in ttm_pool_pre_populated() local
86 pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); in ttm_pool_pre_populated()
87 KUNIT_ASSERT_NOT_NULL(test, pool); in ttm_pool_pre_populated()
89 ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false); in ttm_pool_pre_populated()
91 err = ttm_pool_alloc(pool, tt, &simple_ctx); in ttm_pool_pre_populated()
94 ttm_pool_free(pool, tt); in ttm_pool_pre_populated()
97 return pool; in ttm_pool_pre_populated()
140 struct ttm_pool *pool; in ttm_pool_alloc_basic() local
150 pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); in ttm_pool_alloc_basic()
151 KUNIT_ASSERT_NOT_NULL(test, pool); in ttm_pool_alloc_basic()
[all …]
/linux/include/linux/
genalloc.h
16 * available. If new memory is added to the pool a lock has to be
46 * @pool: the pool being allocated from
52 void *data, struct gen_pool *pool,
56 * General purpose special memory pool descriptor.
60 struct list_head chunks; /* list of chunks in this pool */
70 * General purpose special memory pool chunk descriptor.
73 struct list_head next_chunk; /* next chunk in pool */
97 extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
101 static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr, in gen_pool_add_virt() argument
104 return gen_pool_add_owner(pool, addr, phys, size, nid, NULL); in gen_pool_add_virt()
[all …]
/linux/drivers/net/ethernet/mellanox/mlxsw/
spectrum_cnt.c
24 spinlock_t counter_pool_lock; /* Protects counter pool allocations */
54 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; in mlxsw_sp_counter_sub_pools_init() local
62 for (i = 0; i < pool->sub_pools_count; i++) { in mlxsw_sp_counter_sub_pools_init()
63 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_init()
89 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_init()
99 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; in mlxsw_sp_counter_sub_pools_fini() local
104 for (i = 0; i < pool->sub_pools_count; i++) { in mlxsw_sp_counter_sub_pools_fini()
105 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_fini()
115 const struct mlxsw_sp_counter_pool *pool = priv; in mlxsw_sp_counter_pool_occ_get() local
117 return atomic_read(&pool->active_entries_count); in mlxsw_sp_counter_pool_occ_get()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/
irq_affinity.c
8 static void cpu_put(struct mlx5_irq_pool *pool, int cpu) in cpu_put() argument
10 pool->irqs_per_cpu[cpu]--; in cpu_put()
13 static void cpu_get(struct mlx5_irq_pool *pool, int cpu) in cpu_get() argument
15 pool->irqs_per_cpu[cpu]++; in cpu_get()
19 static int cpu_get_least_loaded(struct mlx5_irq_pool *pool, in cpu_get_least_loaded() argument
27 if (!pool->irqs_per_cpu[cpu]) { in cpu_get_least_loaded()
33 if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu]) in cpu_get_least_loaded()
38 mlx5_core_err(pool->dev, "NO online CPUs in req_mask (%*pbl)\n", in cpu_get_least_loaded()
42 pool->irqs_per_cpu[best_cpu]++; in cpu_get_least_loaded()
48 irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc) in irq_pool_request_irq() argument
[all …]
/linux/include/net/page_pool/
helpers.h
24 * allocated from page pool. There is no cache line dirtying for 'struct page'
25 * when a page is recycled back to the page pool.
29 * page allocated from page pool. Page splitting enables memory saving and thus
48 * the same page when a page is split. The API user must setup pool->p.max_len
49 * and pool->p.offset correctly and ensure that page_pool_put_page() is called
67 bool page_pool_get_stats(const struct page_pool *pool,
88 * @pool: pool from which to allocate
92 static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) in page_pool_dev_alloc_pages() argument
96 return page_pool_alloc_pages(pool, gfp); in page_pool_dev_alloc_pages()
101 * @pool: pool from which to allocate
[all …]
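
The helper comments above describe the allocate-and-recycle cycle a driver runs per packet. A hedged sketch of that cycle, assuming the pool was created elsewhere (typically with page_pool_create() at RX-ring setup) and using page_pool_put_full_page() for the return path; my_pp_cycle is a placeholder name.

#include <net/page_pool/helpers.h>

static void my_pp_cycle(struct page_pool *pool)
{
	struct page *page;

	page = page_pool_dev_alloc_pages(pool);	/* atomic-context allocation */
	if (!page)
		return;

	/* ... attach the page to an RX descriptor, build an skb, etc. ... */

	/* Return the page to the pool instead of the page allocator;
	 * 'true' allows the lockless per-CPU cache (NAPI/softirq context). */
	page_pool_put_full_page(pool, page, true);
}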
/linux/arch/arm64/kvm/hyp/nvhe/
page_alloc.c
16 * Example buddy-tree for a 4-pages physically contiguous pool:
27 * Example of requests on this pool:
28 * __find_buddy_nocheck(pool, page 0, order 0) => page 1
29 * __find_buddy_nocheck(pool, page 0, order 1) => page 2
30 * __find_buddy_nocheck(pool, page 1, order 0) => page 0
31 * __find_buddy_nocheck(pool, page 2, order 0) => page 3
33 static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool, in __find_buddy_nocheck() argument
42 * Don't return a page outside the pool range -- it belongs to in __find_buddy_nocheck()
45 if (addr < pool->range_start || addr >= pool->range_end) in __find_buddy_nocheck()
52 static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool, in __find_buddy_avail() argument
[all …]
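
The four example lookups in that comment follow the standard buddy identity, buddy_addr = page_addr ^ (PAGE_SIZE << order). The helper below is a generic sketch of that identity expressed on page indices, not the hyp allocator's actual code (which additionally range-checks against the pool and compares buddy orders):

/* page 0, order 0 -> 0 ^ (1 << 0) = page 1
 * page 0, order 1 -> 0 ^ (1 << 1) = page 2
 * page 1, order 0 -> 1 ^ (1 << 0) = page 0
 * page 2, order 0 -> 2 ^ (1 << 0) = page 3
 */
static unsigned long my_buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);
}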
/linux/arch/mips/include/asm/octeon/
cvmx-fpa.h
31 * Interface to the hardware Free Pool Allocator.
79 * Structure describing the current state of a FPA pool.
88 /* The number of elements in the pool at creation */
101 * Return the name of the pool
103 * @pool: Pool to get the name of
106 static inline const char *cvmx_fpa_get_name(uint64_t pool) in cvmx_fpa_get_name() argument
108 return cvmx_fpa_pool_info[pool].name; in cvmx_fpa_get_name()
112 * Return the base of the pool
114 * @pool: Pool to get the base of
117 static inline void *cvmx_fpa_get_base(uint64_t pool) in cvmx_fpa_get_base() argument
[all …]
/linux/drivers/gpu/drm/i915/gt/
intel_gt_buffer_pool.c
14 bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz) in bucket_for_size() argument
24 if (n >= ARRAY_SIZE(pool->cache_list)) in bucket_for_size()
25 n = ARRAY_SIZE(pool->cache_list) - 1; in bucket_for_size()
27 return &pool->cache_list[n]; in bucket_for_size()
37 static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep) in pool_free_older_than() argument
44 for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) { in pool_free_older_than()
45 struct list_head *list = &pool->cache_list[n]; in pool_free_older_than()
50 if (spin_trylock_irq(&pool->lock)) { in pool_free_older_than()
73 spin_unlock_irq(&pool->lock); in pool_free_older_than()
89 struct intel_gt_buffer_pool *pool = in pool_free_work() local
[all …]
/linux/drivers/firmware/qcom/
qcom_tzmem.c
150 static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool, in qcom_tzmem_pool_add_memory() argument
174 ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr, in qcom_tzmem_pool_add_memory()
182 scoped_guard(spinlock_irqsave, &pool->lock) in qcom_tzmem_pool_add_memory()
183 list_add_tail(&area->list, &pool->areas); in qcom_tzmem_pool_add_memory()
190 * qcom_tzmem_pool_new() - Create a new TZ memory pool.
191 * @config: Pool configuration.
193 * Create a new pool of memory suitable for sharing with the TrustZone.
197 * Return: New memory pool address or ERR_PTR() on error.
221 struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool), in qcom_tzmem_pool_new()
223 if (!pool) in qcom_tzmem_pool_new()
[all …]
