
Searched +full:pool +full:- +full:long (Results 1 – 25 of 680) sorted by relevance


/linux/lib/
genalloc.c
1 // SPDX-License-Identifier: GPL-2.0-only
5 * kmalloc/kfree interface. Uses for this includes on-device special
16 * available. If new memory is added to the pool a lock has to be
20 * The basic atomic operation of this allocator is cmpxchg on long.
21 * On architectures that don't have NMI-safe cmpxchg implementation,
26 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
42 return chunk->end_addr - chunk->start_addr + 1; in chunk_size()
46 set_bits_ll(unsigned long *addr, unsigned long mask_to_set) in set_bits_ll()
48 unsigned long val = READ_ONCE(*addr); in set_bits_ll()
52 return -EBUSY; in set_bits_ll()
[all …]
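For orientation, here is a minimal sketch of the gen_pool API that genalloc.c implements. The SRAM address, size, and helper names below are invented for illustration; the calls themselves are the stock genalloc interface:

#include <linux/genalloc.h>
#include <linux/sizes.h>

#define MY_SRAM_VADDR 0xfe000000UL      /* assumed, ioremapped elsewhere */
#define MY_SRAM_SIZE  SZ_64K            /* assumed */

static struct gen_pool *sram_pool;

static int my_sram_pool_init(void)
{
        int ret;

        sram_pool = gen_pool_create(5, -1);   /* 32-byte granules, any node */
        if (!sram_pool)
                return -ENOMEM;

        ret = gen_pool_add(sram_pool, MY_SRAM_VADDR, MY_SRAM_SIZE, -1);
        if (ret)
                gen_pool_destroy(sram_pool);
        return ret;
}

static void my_sram_demo(void)
{
        unsigned long chunk = gen_pool_alloc(sram_pool, 256);

        if (!chunk)
                return;                       /* pool exhausted */
        /* ... use the 256-byte on-device chunk ... */
        gen_pool_free(sram_pool, chunk, 256);
}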
test_objpool.c
1 // SPDX-License-Identifier: GPL-2.0
4 * Test module for lockless object pool
30 unsigned long nhits;
31 unsigned long nmiss;
56 unsigned long hrtimer; /* ms */
61 /* per-cpu worker */
63 struct objpool_head *pool; /* pool head */ member
85 static void *ot_kzalloc(struct ot_test *test, long size) in ot_kzalloc()
90 atomic_long_add(size, &test->data.kmalloc.alloc); in ot_kzalloc()
94 static void ot_kfree(struct ot_test *test, void *ptr, long size) in ot_kfree()
[all …]
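The lockless object pool this module exercises has a small push/pop API. A hedged sketch of typical use follows; the callback arguments to objpool_init() are simplified here, so check include/linux/objpool.h for the exact signatures:

#include <linux/objpool.h>
#include <linux/gfp.h>

struct my_node {
        unsigned long payload;                /* illustrative object body */
};

static struct objpool_head my_pool;

static int my_pool_setup(void)
{
        /* 128 preallocated objects, no per-object init/fini callbacks. */
        return objpool_init(&my_pool, 128, sizeof(struct my_node),
                            GFP_KERNEL, NULL, NULL, NULL);
}

static void my_pool_hot_path(void)
{
        struct my_node *node = objpool_pop(&my_pool); /* lockless */

        if (!node)
                return;                       /* empty: a "miss" in the stats above */
        node->payload++;
        objpool_push(node, &my_pool);         /* return it to the ring */
}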
/linux/include/linux/
genalloc.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
5 * kmalloc/kfree interface. Uses for this includes on-device special
16 * available. If new memory is added to the pool a lock has to be
20 * The basic atomic operation of this allocator is cmpxchg on long.
21 * On architectures that don't have NMI-safe cmpxchg implementation,
46 * @pool: the pool being allocated from
48 typedef unsigned long (*genpool_algo_t)(unsigned long *map,
49 unsigned long size,
50 unsigned long start,
52 void *data, struct gen_pool *pool,
[all …]
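The genpool_algo_t hook declared here lets a caller swap out the pool's search strategy. For instance, the stock gen_pool_first_fit_align algorithm can be installed to force aligned allocations; a sketch, assuming the pool already exists:

#include <linux/genalloc.h>

static void my_pool_force_alignment(struct gen_pool *pool)
{
        static struct genpool_data_align align_data = { .align = 64 };

        /* Every subsequent gen_pool_alloc() returns 64-byte-aligned chunks. */
        gen_pool_set_algo(pool, gen_pool_first_fit_align, &align_data);
}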
swiotlb.h
1 /* SPDX-License-Identifier: GPL-2.0 */
6 #include <linux/dma-direction.h>
38 unsigned long swiotlb_size_or_default(void);
40 int (*remap)(void *tlb, unsigned long nslabs));
42 int (*remap)(void *tlb, unsigned long nslabs));
48 * struct io_tlb_pool - IO TLB memory pool descriptor
49 * @start: The start address of the swiotlb memory pool. Used to do a quick
52 * @end: The end address of the swiotlb memory pool. Used to do a quick
55 * @vaddr: The vaddr of the swiotlb memory pool. The swiotlb memory pool
62 * @nareas: Number of areas in the pool.
[all …]
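Drivers never touch struct io_tlb_pool directly; bounce buffering is engaged behind the generic DMA mapping calls when a buffer lies outside the device's reach. A minimal sketch, with the device and buffer assumed:

#include <linux/dma-mapping.h>

static int my_dev_tx(struct device *dev, void *buf, size_t len)
{
        /* May transparently copy buf into a swiotlb pool slot. */
        dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, dma))
                return -ENOMEM;
        /* ... run the transfer ... */
        /* Copies back (for RX directions) and releases the bounce slot. */
        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
        return 0;
}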
zsmalloc.h
10 * Released under the terms of 3-clause BSD License
27 void zs_destroy_pool(struct zs_pool *pool);
29 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags,
31 void zs_free(struct zs_pool *pool, unsigned long obj);
33 size_t zs_huge_class_size(struct zs_pool *pool);
35 unsigned long zs_get_total_pages(struct zs_pool *pool);
36 unsigned long zs_compac
[all...]
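zsmalloc returns opaque handles instead of pointers so it can repack objects under memory pressure. A hedged sketch of the handle lifecycle follows; note that zs_malloc() has grown extra parameters across kernel versions (this tree's header shows more arguments than the classic three used below):

#include <linux/zsmalloc.h>
#include <linux/printk.h>

static void zs_demo(void)
{
        struct zs_pool *pool = zs_create_pool("demo");
        unsigned long handle;

        if (!pool)
                return;

        /* Classic 3-argument form; newer kernels add e.g. a node id. */
        handle = zs_malloc(pool, 137, GFP_KERNEL);
        if (handle)
                zs_free(pool, handle);        /* access goes via the map helpers */

        pr_info("pool spans %lu pages\n", zs_get_total_pages(pool));
        zs_destroy_pool(pool);
}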
mempool.h
1 /* SPDX-License-Identifier: GPL-2.0 */
3 * memory buffer pool support
30 static inline bool mempool_initialized(mempool_t *pool) in mempool_initialized() argument
32 return pool->elements != NULL; in mempool_initialized()
35 static inline bool mempool_is_saturated(mempool_t *pool) in mempool_is_saturated() argument
37 return READ_ONCE(pool->curr_nr) >= pool->min_nr; in mempool_is_saturated()
40 void mempool_exit(mempool_t *pool);
41 int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
45 int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
63 extern int mempool_resize(mempool_t *pool, int new_min_nr);
[all …]
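The point of a mempool is guaranteed forward progress: min_nr elements are reserved up front, so I/O paths cannot deadlock waiting on the allocator. A minimal sketch; the request cache is assumed to be created elsewhere:

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *req_cache;          /* assumed: kmem_cache_create() */
static mempool_t *req_pool;

static int req_pool_init(void)
{
        req_pool = mempool_create_slab_pool(16, req_cache); /* 16 reserved */
        return req_pool ? 0 : -ENOMEM;
}

static void handle_io(void)
{
        /* Sleeps under pressure rather than failing, drawing on reserves. */
        void *req = mempool_alloc(req_pool, GFP_NOIO);

        /* ... build and submit the request ... */
        mempool_free(req, req_pool);          /* refills the reserve first */
}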
/linux/mm/
zsmalloc.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
12 * Released under the terms of 3-clause BSD License
21 * pool->lock
22 * class->lock
23 * zspage->lock
53 #define ZS_HANDLE_SIZE (sizeof(unsigned long))
57 * a single (unsigned long) handle value.
76 #define _PFN_BITS (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
82 * header keeps handle which is 4byte-aligned address so we
90 #define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
[all …]
mempool.c
1 // SPDX-License-Identifier: GPL-2.0
5 * memory buffer pool support. Such pools are mostly used
6 * for guaranteed, deadlock-free memory allocations during
24 static void poison_error(mempool_t *pool, void *element, size_t size, in poison_error() argument
27 const int nr = pool->curr_nr; in poison_error()
28 const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0); in poison_error()
33 pr_err("Mempool %p size %zu\n", pool, size); in poison_error()
41 static void __check_element(mempool_t *pool, void *element, size_t size) in __check_element() argument
47 u8 exp = (i < size - 1) ? POISON_FREE : POISON_END; in __check_element()
50 poison_error(pool, element, size, i); in __check_element()
[all …]
zswap.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * zswap.c - zswap driver file
7 * RAM-based memory pool. This can result in a significant I/O reduction on
31 #include <linux/page-flags.h>
57 /* Pool limit was hit (see zswap_max_pool_percent) */
59 /* Pages written back when pool limit was reached */
61 /* Store failed due to a reclaim failure after pool limit was reached */
76 /* Pool limit was hit, we need to calm down */
110 /* The maximum percentage of memory that the compressed pool can occupy */
115 static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
[all …]
/linux/arch/sparc/kernel/
iommu-common.c
1 // SPDX-License-Identifier: GPL-2.0
10 #include <linux/iommu-helper.h>
11 #include <linux/dma-mapping.h>
13 #include <asm/iommu-common.h>
15 static unsigned long iommu_large_alloc = 15;
21 return ((iommu->flags & IOMMU_NEED_FLUSH) != 0); in need_flush()
26 iommu->flags |= IOMMU_NEED_FLUSH; in set_flush()
31 iommu->flags &= ~IOMMU_NEED_FLUSH; in clear_flush()
49 * the top 1/4 of the table will be set aside for pool allocations
53 unsigned long num_entries, in iommu_tbl_pool_init()
[all …]
/linux/kernel/dma/
swiotlb.c
1 // SPDX-License-Identifier: GPL-2.0-only
9 * Copyright (C) 2000, 2003 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
12 * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
14 * unnecessary i-cache flushing.
27 #include <linux/dma-direct.h>
28 #include <linux/dma-ma
306 add_mem_pool(struct io_tlb_mem * mem,struct io_tlb_pool * pool) add_mem_pool() argument
687 struct io_tlb_pool *pool; swiotlb_alloc_pool() local
738 struct io_tlb_pool *pool; swiotlb_dyn_alloc() local
756 struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu); swiotlb_dyn_free() local
780 struct io_tlb_pool *pool; __swiotlb_find_pool() local
803 swiotlb_del_pool(struct device * dev,struct io_tlb_pool * pool) swiotlb_del_pool() argument
1016 swiotlb_search_pool_area(struct device * dev,struct io_tlb_pool * pool,int area_index,phys_addr_t orig_addr,size_t alloc_size,unsigned int alloc_align_mask) swiotlb_search_pool_area() argument
1146 struct io_tlb_pool *pool; swiotlb_search_area() local
1186 struct io_tlb_pool *pool; swiotlb_find_slots() local
1261 struct io_tlb_pool *pool; swiotlb_find_slots() local
1306 mem_pool_used(struct io_tlb_pool * pool) mem_pool_used() argument
1328 struct io_tlb_pool *pool; mem_used() local
1375 struct io_tlb_pool *pool; swiotlb_tbl_map_single() local
1511 swiotlb_del_transient(struct device * dev,phys_addr_t tlb_addr,struct io_tlb_pool * pool) swiotlb_del_transient() argument
1525 swiotlb_del_transient(struct device * dev,phys_addr_t tlb_addr,struct io_tlb_pool * pool) swiotlb_del_transient() argument
1537 __swiotlb_tbl_unmap_single(struct device * dev,phys_addr_t tlb_addr,size_t mapping_size,enum dma_data_direction dir,unsigned long attrs,struct io_tlb_pool * pool) __swiotlb_tbl_unmap_single() argument
1554 __swiotlb_sync_single_for_device(struct device * dev,phys_addr_t tlb_addr,size_t size,enum dma_data_direction dir,struct io_tlb_pool * pool) __swiotlb_sync_single_for_device() argument
1564 __swiotlb_sync_single_for_cpu(struct device * dev,phys_addr_t tlb_addr,size_t size,enum dma_data_direction dir,struct io_tlb_pool * pool) __swiotlb_sync_single_for_cpu() argument
1754 struct io_tlb_pool *pool; swiotlb_alloc() local
1781 struct io_tlb_pool *pool; swiotlb_free() local
1812 struct io_tlb_pool *pool; rmem_swiotlb_device_init() local
[all...]
pool.c
1 // SPDX-License-Identifier: GPL-2.0
8 #include <linux/dma-map-ops.h>
9 #include <linux/dma-direct.h>
17 static unsigned long pool_size_dma;
19 static unsigned long pool_size_dma32;
21 static unsigned long pool_size_kernel;
26 /* Dynamic background expansion when the atomic pool is near capacity */
58 unsigned long size; in cma_in_zone()
71 end = cma_get_base(cma) + size - 1; in cma_in_zone()
79 static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size, in atomic_pool_expand() argument
[all …]
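These atomic pools exist so that dma_alloc_coherent() can be served without sleeping: a caller passes GFP_ATOMIC and the allocation is carved from the pre-populated pool matching the device's zone. A sketch:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static void *grab_coherent_in_atomic(struct device *dev, dma_addr_t *dma)
{
        /* No remap/reclaim is possible here, so an atomic pool backs this. */
        return dma_alloc_coherent(dev, SZ_4K, dma, GFP_ATOMIC);
}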
/linux/include/trace/events/
page_pool.h
1 /* SPDX-License-Identifier: GPL-2.0 */
16 TP_PROTO(const struct page_pool *pool,
19 TP_ARGS(pool, inflight, hold, release),
22 __field(const struct page_pool *, pool)
30 __entry->pool = pool;
31 __entry->inflight = inflight;
32 __entry->hold = hold;
33 __entry->release = release;
34 __entry->cnt = pool->destroy_cnt;
38 __entry->pool, __entry->inflight, __entry->hold,
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/
pool.c
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
9 switch (resource->pool->type) { in hws_pool_free_one_resource()
11 mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id); in hws_pool_free_one_resource()
14 mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id); in hws_pool_free_one_resource()
23 static void hws_pool_resource_free(struct mlx5hws_pool *pool) in hws_pool_resource_free() argument
25 hws_pool_free_one_resource(pool->resource); in hws_pool_resource_free()
26 pool->resource = NULL; in hws_pool_resource_free()
28 if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) { in hws_pool_resource_free()
29 hws_pool_free_one_resource(pool->mirror_resource); in hws_pool_resource_free()
30 pool->mirror_resource = NULL; in hws_pool_resource_free()
[all …]
/linux/include/net/page_pool/
helpers.h
1 /* SPDX-License-Identifier: GPL-2.0
24 * allocated from page pool. There is no cache line dirtying for 'struct page'
25 * when a page is recycled back to the page pool.
29 * page allocated from page pool. Page splitting enables memory saving and thus
32 * 'struct page' and atomic operation for page->pp_ref_count.
34 * The API keeps track of in-flight pages, in order to let API users know when
37 * attach the page_pool object to a page_pool-aware object like skbs marked with
42 * recycle the page, or in case of page->_refcoun
92 page_pool_dev_alloc_pages(struct page_pool * pool) page_pool_dev_alloc_pages() argument
109 page_pool_dev_alloc_frag(struct page_pool * pool,unsigned int * offset,unsigned int size) page_pool_dev_alloc_frag() argument
118 page_pool_alloc_netmem(struct page_pool * pool,unsigned int * offset,unsigned int * size,gfp_t gfp) page_pool_alloc_netmem() argument
147 page_pool_dev_alloc_netmem(struct page_pool * pool,unsigned int * offset,unsigned int * size) page_pool_dev_alloc_netmem() argument
156 page_pool_dev_alloc_netmems(struct page_pool * pool) page_pool_dev_alloc_netmems() argument
163 page_pool_alloc(struct page_pool * pool,unsigned int * offset,unsigned int * size,gfp_t gfp) page_pool_alloc() argument
182 page_pool_dev_alloc(struct page_pool * pool,unsigned int * offset,unsigned int * size) page_pool_dev_alloc() argument
191 page_pool_alloc_va(struct page_pool * pool,unsigned int * size,gfp_t gfp) page_pool_alloc_va() argument
216 page_pool_dev_alloc_va(struct page_pool * pool,unsigned int * size) page_pool_dev_alloc_va() argument
232 page_pool_get_dma_dir(const struct page_pool * pool) page_pool_get_dma_dir() argument
331 page_pool_put_netmem(struct page_pool * pool,netmem_ref netmem,unsigned int dma_sync_size,bool allow_direct) page_pool_put_netmem() argument
360 page_pool_put_page(struct page_pool * pool,struct page * page,unsigned int dma_sync_size,bool allow_direct) page_pool_put_page() argument
369 page_pool_put_full_netmem(struct page_pool * pool,netmem_ref netmem,bool allow_direct) page_pool_put_full_netmem() argument
385 page_pool_put_full_page(struct page_pool * pool,struct page * page,bool allow_direct) page_pool_put_full_page() argument
399 page_pool_recycle_direct(struct page_pool * pool,struct page * page) page_pool_recycle_direct() argument
405 page_pool_recycle_direct_netmem(struct page_pool * pool,netmem_ref netmem) page_pool_recycle_direct_netmem() argument
422 page_pool_free_va(struct page_pool * pool,void * va,bool allow_direct) page_pool_free_va() argument
450 __page_pool_dma_sync_for_cpu(const struct page_pool * pool,const dma_addr_t dma_addr,u32 offset,u32 dma_sync_size) __page_pool_dma_sync_for_cpu() argument
471 page_pool_dma_sync_for_cpu(const struct page_pool * pool,const struct page * page,u32 offset,u32 dma_sync_size) page_pool_dma_sync_for_cpu() argument
480 page_pool_dma_sync_netmem_for_cpu(const struct page_pool * pool,const netmem_ref netmem,u32 offset,u32 dma_sync_size) page_pool_dma_sync_netmem_for_cpu() argument
492 page_pool_put(struct page_pool * pool) page_pool_put() argument
497 page_pool_nid_changed(struct page_pool * pool,int new_nid) page_pool_nid_changed() argument
503 page_pool_is_unreadable(struct page_pool * pool) page_pool_is_unreadable() argument
[all...]
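Typical driver use of these helpers is an alloc on RX refill and a recycle on drop. A sketch with invented function names; the pool is assumed to belong to the current NAPI context:

#include <net/page_pool/helpers.h>

static struct page *rx_refill_one(struct page_pool *pool)
{
        /* Allocates with GFP_ATOMIC semantics; NULL under memory pressure. */
        return page_pool_dev_alloc_pages(pool);
}

static void rx_drop_one(struct page_pool *pool, struct page *page)
{
        /* Safe only from the pool's NAPI context: takes the lockless path. */
        page_pool_recycle_direct(pool, page);
}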
types.h
1 /* SPDX-License-Identifier: GPL-2.0 */
6 #include <linux/dma-direction.h>
17 * DMA-synced-for-device according to
20 * Please note DMA-sync-for-CPU is still
38 #define PP_DMA_INDEX_LIMIT XA_LIMIT(1, BIT(PP_DMA_INDEX_BITS) - 1)
44 * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
48 * Keeping room for more objects, is due to XDP_DROP use-case. As
62 * struct page_pool_params - page pool parameters
67 * @dev: device, for DMA pre-mapping purposes
73 * @netdev: netdev this pool will serve (leave as NULL if none or multiple)
[all …]
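Creating a pool means filling in page_pool_params and calling page_pool_create(); the field values below are illustrative:

#include <net/page_pool/types.h>
#include <linux/numa.h>

static struct page_pool *my_rx_pool_create(struct device *dev)
{
        struct page_pool_params pp = {
                .order     = 0,               /* single pages */
                .pool_size = 256,             /* sized to the RX ring */
                .nid       = NUMA_NO_NODE,
                .dev       = dev,             /* enables DMA pre-mapping */
                .dma_dir   = DMA_FROM_DEVICE,
                .flags     = PP_FLAG_DMA_MAP, /* the pool maps pages for us */
        };

        return page_pool_create(&pp);         /* ERR_PTR() on failure */
}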
/linux/drivers/net/ethernet/ti/
davinci_cpdma.c
1 // SPDX-License-Identifier: GPL-2.0
14 #include <linux/dma-mapping.h>
100 struct cpdma_desc_pool *pool; member
163 #define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
165 #define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
166 #define chan_linear(chan) __chan_linear((chan)->chan_num)
173 #define dma_reg_read(ctlr, ofs) readl((ctlr)->dmaregs + (ofs))
174 #define chan_read(chan, fld) readl((chan)->fld)
175 #define desc_read(desc, fld) readl(&(desc)->fld)
176 #define dma_reg_write(ctlr, ofs, v) writel(v, (ctlr)->dmaregs + (ofs))
[all …]
k3-cppi-desc-pool.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* TI K3 CPPI5 descriptors pool API
4 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
10 #include <linux/dma-mapping.h>
15 #include "k3-cppi-desc-pool.h"
28 void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool) in k3_cppi_desc_pool_destroy() argument
30 if (!pool) in k3_cppi_desc_pool_destroy()
33 WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool), in k3_cppi_desc_pool_destroy()
35 gen_pool_size(pool->gen_pool), in k3_cppi_desc_pool_destroy()
36 gen_pool_avail(pool->gen_pool)); in k3_cppi_desc_pool_destroy()
[all …]
/linux/kernel/
workqueue.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * kernel/workqueue.c - generic async execution with shared worker pool
10 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
19 * executed in process context. The worker pool is shared and
22 * pools for workqueues which are not bound to any specific CPU - the
25 * Please read Documentation/core-api/workqueue.rst for details.
65 * A bound pool is either associated or disassociated with its CPU.
72 * be executing on any CPU. The pool behaves as an unbound one.
79 * BH pool is per-CPU and always DISASSOCIATED.
81 POOL_BH = 1 << 0, /* is a BH pool */
[all …]
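Because the worker pools are shared, a driver only declares work items and queues them; it never manages threads itself. A minimal sketch:

#include <linux/workqueue.h>
#include <linux/interrupt.h>

static void my_work_fn(struct work_struct *work)
{
        /* Runs on a pool worker in process context; may sleep. */
}
static DECLARE_WORK(my_work, my_work_fn);

static irqreturn_t my_irq(int irq, void *dev_id)
{
        schedule_work(&my_work);              /* hand off to the shared pool */
        return IRQ_HANDLED;
}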
/linux/drivers/net/ethernet/ibm/
ibmveth.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
17 #include <linux/dma-mapping.h>
42 static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
81 #define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
104 return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off); in ibmveth_rxq_flags()
115 return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle; in ibmveth_rxq_pending_buffer()
135 return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length); in ibmveth_rxq_frame_length()
150 /* setup the initial settings for a buffer pool */
151 static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, in ibmveth_init_buffer_pool() argument
155 pool->size = pool_size; in ibmveth_init_buffer_pool()
[all …]
/linux/drivers/net/ethernet/chelsio/libcxgb/
libcxgb_ppm.c
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
53 /* Direct Data Placement -
54 * Directly place the iSCSI Data-In or Data-Out PDU's payload into
55 * pre-posted final destination host-memory buffers based on the
56 * Initiator Task Tag (ITT) in Data-In or Target Task Tag (TTT)
57 * in Data-Out PDUs. The host memory address is programmed into
63 /* Direct-Data Placement page size adjustment
65 int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz) in cxgbi_ppm_find_page_index()
67 struct cxgbi_tag_format *tformat = &ppm->tformat; in cxgbi_ppm_find_page_index()
[all …]
/linux/drivers/net/ethernet/freescale/fman/
fman_muram.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
3 * Copyright 2008 - 2015 Freescale Semiconductor Inc.
13 struct gen_pool *pool; member
18 static unsigned long fman_muram_vbase_to_offset(struct muram_info *muram, in fman_muram_vbase_to_offset()
19 unsigned long vaddr) in fman_muram_vbase_to_offset()
21 return vaddr - (unsigned long)muram->vbase; in fman_muram_vbase_to_offset()
26 * @base: Pointer to base of memory mapped FM-MURAM.
27 * @size: Size of the FM-MURAM partition.
31 * This pointer must be passed as to all other FM-MURAM function calls.
35 * Return: pointer to FM-MURAM object, or NULL for Failure.
[all …]
/linux/drivers/tee/
tee_shm_pool.c
1 // SPDX-License-Identifier: GPL-2.0-only
6 #include <linux/dma-buf.h>
12 static int pool_op_gen_alloc(struct tee_shm_pool *pool, struct tee_shm *shm, in pool_op_gen_alloc() argument
15 unsigned long va; in pool_op_gen_alloc()
16 struct gen_pool *genpool = pool->private_data; in pool_op_gen_alloc()
17 size_t a = max_t(size_t, align, BIT(genpool->min_alloc_order)); in pool_op_gen_alloc()
23 return -ENOMEM; in pool_op_gen_alloc()
26 shm->kaddr = (void *)va; in pool_op_gen_alloc()
27 shm->paddr = gen_pool_virt_to_phys(genpool, va); in pool_op_gen_alloc()
28 shm->size = s; in pool_op_gen_alloc()
[all …]
/linux/include/net/
xdp_sock_drv.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Interface for implementing AF_XDP zero-copy support in drivers.
23 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
24 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
25 u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
26 void xsk_tx_release(struct xsk_buff_pool *pool);
29 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
30 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
35 xsk_pool_get_headroom(struct xsk_buff_pool * pool) xsk_pool_get_headroom() argument
40 xsk_pool_get_chunk_size(struct xsk_buff_pool * pool) xsk_pool_get_chunk_size() argument
45 xsk_pool_get_rx_frame_size(struct xsk_buff_pool * pool) xsk_pool_get_rx_frame_size() argument
50 xsk_pool_set_rxq_info(struct xsk_buff_pool * pool,struct xdp_rxq_info * rxq) xsk_pool_set_rxq_info() argument
56 xsk_pool_fill_cb(struct xsk_buff_pool * pool,struct xsk_cb_desc * desc) xsk_pool_fill_cb() argument
62 xsk_pool_dma_unmap(struct xsk_buff_pool * pool,unsigned long attrs) xsk_pool_dma_unmap() argument
68 xsk_pool_dma_map(struct xsk_buff_pool * pool,struct device * dev,unsigned long attrs) xsk_pool_dma_map() argument
90 xsk_buff_alloc(struct xsk_buff_pool * pool) xsk_buff_alloc() argument
101 xsk_buff_alloc_batch(struct xsk_buff_pool * pool,struct xdp_buff ** xdp,u32 max) xsk_buff_alloc_batch() argument
106 xsk_buff_can_alloc(struct xsk_buff_pool * pool,u32 count) xsk_buff_can_alloc() argument
188 xsk_buff_raw_get_dma(struct xsk_buff_pool * pool,u64 addr) xsk_buff_raw_get_dma() argument
194 xsk_buff_raw_get_data(struct xsk_buff_pool * pool,u64 addr) xsk_buff_raw_get_data() argument
211 xsk_buff_raw_get_ctx(const struct xsk_buff_pool * pool,u64 addr) xsk_buff_raw_get_ctx() argument
229 __xsk_buff_get_metadata(const struct xsk_buff_pool * pool,void * data) __xsk_buff_get_metadata() argument
244 xsk_buff_get_metadata(struct xsk_buff_pool * pool,u64 addr) xsk_buff_get_metadata() argument
256 xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool * pool,dma_addr_t dma,size_t size) xsk_buff_raw_dma_sync_for_device() argument
265 xsk_tx_completed(struct xsk_buff_pool * pool,u32 nb_entries) xsk_tx_completed() argument
269 xsk_tx_peek_desc(struct xsk_buff_pool * pool,struct xdp_desc * desc) xsk_tx_peek_desc() argument
275 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool * pool,u32 max) xsk_tx_peek_release_desc_batch() argument
280 xsk_tx_release(struct xsk_buff_pool * pool) xsk_tx_release() argument
290 xsk_set_rx_need_wakeup(struct xsk_buff_pool * pool) xsk_set_rx_need_wakeup() argument
294 xsk_set_tx_need_wakeup(struct xsk_buff_pool * pool) xsk_set_tx_need_wakeup() argument
298 xsk_clear_rx_need_wakeup(struct xsk_buff_pool * pool) xsk_clear_rx_need_wakeup() argument
302 xsk_clear_tx_need_wakeup(struct xsk_buff_pool * pool) xsk_clear_tx_need_wakeup() argument
306 xsk_uses_need_wakeup(struct xsk_buff_pool * pool) xsk_uses_need_wakeup() argument
311 xsk_pool_get_headroom(struct xsk_buff_pool * pool) xsk_pool_get_headroom() argument
316 xsk_pool_get_chunk_size(struct xsk_buff_pool * pool) xsk_pool_get_chunk_size() argument
321 xsk_pool_get_rx_frame_size(struct xsk_buff_pool * pool) xsk_pool_get_rx_frame_size() argument
326 xsk_pool_set_rxq_info(struct xsk_buff_pool * pool,struct xdp_rxq_info * rxq) xsk_pool_set_rxq_info() argument
331 xsk_pool_fill_cb(struct xsk_buff_pool * pool,struct xsk_cb_desc * desc) xsk_pool_fill_cb() argument
336 xsk_pool_dma_unmap(struct xsk_buff_pool * pool,unsigned long attrs) xsk_pool_dma_unmap() argument
341 xsk_pool_dma_map(struct xsk_buff_pool * pool,struct device * dev,unsigned long attrs) xsk_pool_dma_map() argument
357 xsk_buff_alloc(struct xsk_buff_pool * pool) xsk_buff_alloc() argument
367 xsk_buff_alloc_batch(struct xsk_buff_pool * pool,struct xdp_buff ** xdp,u32 max) xsk_buff_alloc_batch() argument
372 xsk_buff_can_alloc(struct xsk_buff_pool * pool,u32 count) xsk_buff_can_alloc() argument
405 xsk_buff_raw_get_dma(struct xsk_buff_pool * pool,u64 addr) xsk_buff_raw_get_dma() argument
411 xsk_buff_raw_get_data(struct xsk_buff_pool * pool,u64 addr) xsk_buff_raw_get_data() argument
417 xsk_buff_raw_get_ctx(const struct xsk_buff_pool * pool,u64 addr) xsk_buff_raw_get_ctx() argument
428 __xsk_buff_get_metadata(const struct xsk_buff_pool * pool,void * data) __xsk_buff_get_metadata() argument
434 xsk_buff_get_metadata(struct xsk_buff_pool * pool,u64 addr) xsk_buff_get_metadata() argument
443 xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool * pool,dma_addr_t dma,size_t size) xsk_buff_raw_dma_sync_for_device() argument
[all...]
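On the driver side, zero-copy RX pulls pre-mapped buffers out of the xsk_buff_pool. A hedged sketch of one refill step; error handling is trimmed and descriptor programming is device specific:

#include <net/xdp_sock_drv.h>

static int zc_refill_one(struct xsk_buff_pool *pool, dma_addr_t *dma)
{
        struct xdp_buff *xdp = xsk_buff_alloc(pool);  /* from the fill ring */

        if (!xdp)
                return -ENOMEM;               /* userspace posted no buffers */
        *dma = xsk_buff_xdp_get_dma(xdp);     /* mapped by xsk_pool_dma_map() */
        /* ... write the RX descriptor, keep xdp until completion ... */
        return 0;
}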
/linux/drivers/misc/
sram-exec.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * SRAM protect-exec region helper functions
5 * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
25 unsigned long base = (unsigned long)part->base; in sram_check_protect_exec()
26 unsigned long end = base + block->size; in sram_check_protect_exec()
29 dev_err(sram->dev, in sram_check_protect_exec()
30 "SRAM pool marked with 'protect-exec' is not page aligned and will not be created.\n"); in sram_check_protect_exec()
31 return -ENOMEM; in sram_check_protect_exec()
40 list_add_tail(&part->list, &exec_pool_list); in sram_add_protect_exec()
47 * sram_exec_copy - copy data to a protected executable region of sram
[all …]
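Using a protect-exec partition follows the usual gen_pool pattern plus one extra call: the region stays read-only and executable except while sram_exec_copy() writes into it. A hedged sketch (signature as in drivers/misc/sram-exec.c; the helper name is invented):

#include <linux/genalloc.h>
#include <linux/sram.h>

static void *load_into_sram(struct gen_pool *exec_pool, void *code, size_t size)
{
        void *dst = (void *)gen_pool_alloc(exec_pool, size);

        if (!dst)
                return NULL;
        /* Copies the code and returns the address to call (arch-fixed-up). */
        return sram_exec_copy(exec_pool, dst, code, size);
}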
