/linux/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer_configuration.py
     28  def __init__(self, pools):    argument
     30  for pool in pools:
    187  pools = PoolList()
    190  pools.append(Pool(pooldict))
    191  return pools
    194  def do_check_pools(dlname, pools, vp):    argument
    195  for pool in pools:
    219  def check_pools(dlname, pools):    argument
    221  record_vp = RecordValuePicker(pools)
    224  do_check_pools(dlname, pools, RandomValuePicker(pools))
    [all …]

/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
     25  if (!xsk->pools) {    in mlx5e_xsk_get_pools()
     26  xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,    in mlx5e_xsk_get_pools()
     27  sizeof(*xsk->pools), GFP_KERNEL);    in mlx5e_xsk_get_pools()
     28  if (unlikely(!xsk->pools))    in mlx5e_xsk_get_pools()
     41  kfree(xsk->pools);    in mlx5e_xsk_put_pools()
     42  xsk->pools = NULL;    in mlx5e_xsk_put_pools()
     54  xsk->pools[ix] = pool;    in mlx5e_xsk_add_pool()
     60  xsk->pools[ix] = NULL;    in mlx5e_xsk_remove_pool()

/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h
     12  if (!xsk || !xsk->pools)    in mlx5e_xsk_get_pool()
     18  return xsk->pools[ix];    in mlx5e_xsk_get_pool()

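The two files above implement a small per-channel pool registry: the array is
kcalloc'ed lazily on first use, slots are assigned and cleared as AF_XDP
sockets come and go, and the header-side lookup bounds-checks before indexing.
A minimal sketch of the same pattern; the names xsk_state, MAX_CHANNELS, and
the refcnt field are assumptions, not the driver's actual layout:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    #define MAX_CHANNELS 128

    struct xsk_buff_pool;

    struct xsk_state {
            struct xsk_buff_pool **pools;   /* NULL until the first pool is added */
            u16 refcnt;                     /* one reference per installed pool */
    };

    static int xsk_get_pools(struct xsk_state *xsk)
    {
            if (!xsk->pools) {
                    xsk->pools = kcalloc(MAX_CHANNELS, sizeof(*xsk->pools),
                                         GFP_KERNEL);
                    if (unlikely(!xsk->pools))
                            return -ENOMEM;
            }
            xsk->refcnt++;
            return 0;
    }

    static void xsk_put_pools(struct xsk_state *xsk)
    {
            if (!--xsk->refcnt) {   /* last pool gone: drop the array too */
                    kfree(xsk->pools);
                    xsk->pools = NULL;
            }
    }

The add/remove helpers in pool.c then reduce to assigning or clearing
xsk->pools[ix] for the channel in question.
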
/linux/arch/sparc/kernel/iommu-common.c
     82  spin_lock_init(&(iommu->pools[i].lock));    in iommu_tbl_pool_init()
     83  iommu->pools[i].start = start;    in iommu_tbl_pool_init()
     84  iommu->pools[i].hint = start;    in iommu_tbl_pool_init()
     86  iommu->pools[i].end = start - 1;    in iommu_tbl_pool_init()
    131  pool = &(iommu->pools[pool_nr]);    in iommu_tbl_range_alloc()
    161  pool = &(iommu->pools[0]);    in iommu_tbl_range_alloc()
    193  pool = &(iommu->pools[pool_nr]);    in iommu_tbl_range_alloc()
    237  p = &tbl->pools[pool_nr];    in get_pool()

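iommu-common.c splits one DMA window into IOMMU_NR_POOLS independently locked
pools, each with its own start/end range and allocation hint, so concurrent
mappings spread across locks instead of serializing on one. A sketch of that
carve-up under assumed demo_* names (the real code also keeps a separate large
pool and selects pools differently):

    #include <linux/smp.h>
    #include <linux/spinlock.h>

    #define NR_POOLS 4

    struct demo_pool {
            spinlock_t lock;
            unsigned long start, end, hint;
    };

    struct demo_tbl {
            struct demo_pool pools[NR_POOLS];
    };

    static void demo_tbl_init(struct demo_tbl *tbl, unsigned long entries)
    {
            unsigned long sz = entries / NR_POOLS, start = 0;
            int i;

            for (i = 0; i < NR_POOLS; i++) {
                    spin_lock_init(&tbl->pools[i].lock);
                    tbl->pools[i].start = start;
                    tbl->pools[i].hint = start;
                    start += sz;
                    tbl->pools[i].end = start - 1;
            }
    }

    /* pick a pool, e.g. by CPU, so allocators spread across the locks */
    static struct demo_pool *demo_get_pool(struct demo_tbl *tbl)
    {
            return &tbl->pools[raw_smp_processor_id() % NR_POOLS];
    }
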
/linux/tools/net/ynl/samples/page-pool.c
     60  struct netdev_page_pool_get_list *pools;    in main() local
     76  pools = netdev_page_pool_get_dump(ys);    in main()
     77  if (!pools)    in main()
     80  ynl_dump_foreach(pools, pp) {    in main()
     87  netdev_page_pool_get_list_free(pools);    in main()
    124  printf("page pools: %u (zombies: %u)\n",    in main()

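page-pool.c is a YNL sample: it opens a socket to the generic-netlink "netdev"
family, dumps all page pools, and walks the reply list with
ynl_dump_foreach(). A stripped-down skeleton of that flow; the reply fields
printed here (id, ifindex) are assumptions about the generated struct, and
error handling is minimal:

    #include <stdio.h>

    #include <ynl.h>
    #include "netdev-user.h"

    int main(void)
    {
            struct netdev_page_pool_get_list *pools;
            struct ynl_error yerr;
            struct ynl_sock *ys;

            ys = ynl_sock_create(&ynl_netdev_family, &yerr);
            if (!ys)
                    return 1;

            pools = netdev_page_pool_get_dump(ys);
            if (!pools) {
                    ynl_sock_destroy(ys);
                    return 1;
            }

            ynl_dump_foreach(pools, pp)     /* pp visits each page-pool reply */
                    printf("pool %llu on ifindex %u\n",
                           (unsigned long long)pp->id, pp->ifindex);

            netdev_page_pool_get_list_free(pools);
            ynl_sock_destroy(ys);
            return 0;
    }
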
/linux/mm/dmapool.c
     60  struct list_head pools;    member
     80  list_for_each_entry(pool, &dev->dma_pools, pools) {    in pools_show()
     92  static DEVICE_ATTR_RO(pools);
    267  INIT_LIST_HEAD(&retval->pools);    in dma_pool_create()
    280  list_add(&retval->pools, &dev->dma_pools);    in dma_pool_create()
    288  list_del(&retval->pools);    in dma_pool_create()
    370  list_del(&pool->pools);    in dma_pool_destroy()

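mm/dmapool.c is the allocator behind the dma_pool API: each struct dma_pool is
linked onto its device's dma_pools list through the `pools` member shown
above, which is also what the read-only `pools` sysfs attribute walks.
Typical driver-side usage of that API, with error paths trimmed:

    #include <linux/dmapool.h>

    #define DESC_SIZE       64
    #define DESC_ALIGN      16

    static int demo_setup(struct device *dev)
    {
            struct dma_pool *pool;
            dma_addr_t dma;
            void *desc;

            /* fixed-size blocks carved out of coherent DMA memory */
            pool = dma_pool_create("demo-desc", dev, DESC_SIZE, DESC_ALIGN, 0);
            if (!pool)
                    return -ENOMEM;

            desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
            if (!desc) {
                    dma_pool_destroy(pool);
                    return -ENOMEM;
            }

            /* ... hand 'dma' to the hardware, touch 'desc' from the CPU ... */

            dma_pool_free(pool, desc, dma);
            dma_pool_destroy(pool); /* also unlinks it from dev->dma_pools */
            return 0;
    }
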
/linux/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
    348  struct cxgbi_ppm_pool __percpu *pools;    in ppm_alloc_cpu_pool() local
    350  unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;    in ppm_alloc_cpu_pool()
    367  alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;    in ppm_alloc_cpu_pool()
    368  pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));    in ppm_alloc_cpu_pool()
    370  if (!pools)    in ppm_alloc_cpu_pool()
    374  struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);    in ppm_alloc_cpu_pool()
    384  return pools;    in ppm_alloc_cpu_pool()

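ppm_alloc_cpu_pool() sizes one pool structure plus a trailing bitmap and
allocates a copy per CPU with __alloc_percpu(), capping the bitmap so the
whole object fits in a percpu unit. A generic sketch of that pattern; the
struct layout and sizing are illustrative, not the cxgbi ones:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/spinlock.h>

    struct demo_pool {
            spinlock_t lock;
            unsigned int next;
            unsigned long bmap[];           /* flexible bitmap tail */
    };

    static struct demo_pool __percpu *demo_alloc_cpu_pool(unsigned int bmap_words)
    {
            size_t sz = sizeof(struct demo_pool) +
                        bmap_words * sizeof(unsigned long);
            struct demo_pool __percpu *pools;
            int cpu;

            /* one demo_pool (plus bitmap) per possible CPU */
            pools = __alloc_percpu(sz, __alignof__(struct demo_pool));
            if (!pools)
                    return NULL;

            for_each_possible_cpu(cpu) {
                    struct demo_pool *p = per_cpu_ptr(pools, cpu);

                    spin_lock_init(&p->lock);
                    p->next = 0;
            }
            return pools;           /* caller frees with free_percpu() */
    }
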
/linux/drivers/soc/ti/knav_qmss.h
    203  struct list_head pools;    member
    304  struct list_head pools;    member
    363  list_for_each_entry(pool, &kdev->pools, list)

/linux/drivers/soc/ti/knav_qmss_queue.c
    818  node = &region->pools;    in knav_pool_create()
    819  list_for_each_entry(iter, &region->pools, region_inst) {    in knav_pool_create()
    833  list_add_tail(&pool->list, &kdev->pools);    in knav_pool_create()
   1035  list_add(&pool->region_inst, &region->pools);    in knav_queue_setup_region()
   1120  INIT_LIST_HEAD(&region->pools);    in knav_queue_setup_regions()
   1362  list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)    in knav_queue_free_regions()
   1809  INIT_LIST_HEAD(&kdev->pools);    in knav_queue_probe()

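A knav pool lives on two lists at once: the device-wide kdev->pools via
pool->list, and its backing region's region->pools via pool->region_inst,
which is why the header shows two list_head `pools` members. A reduced sketch
of that double membership, with assumed demo_* names:

    #include <linux/list.h>

    struct demo_pool {
            struct list_head list;          /* links onto kdev->pools */
            struct list_head region_inst;   /* links onto region->pools */
    };

    struct demo_region {
            struct list_head pools;
    };

    struct demo_kdev {
            struct list_head pools;
    };

    static void demo_link(struct demo_kdev *kdev, struct demo_region *region,
                          struct demo_pool *pool)
    {
            /* the same object is reachable from both owners */
            list_add_tail(&pool->list, &kdev->pools);
            list_add(&pool->region_inst, &region->pools);
    }
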
/linux/Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt
      6  processors(PDSP), linking RAM, descriptor pools and infrastructure
     49  - queue-pools : child node classifying the queue ranges into pools.
     50  Queue ranges are grouped into 3 type of pools:
    151  queue-pools {

/linux/Documentation/core-api/workqueue.rst
     60  * Use per-CPU unified worker pools shared by all wq to provide
     85  worker-pools.
     89  which manages worker-pools and processes the queued work items.
     91  There are two worker-pools, one for normal work items and the other
     93  worker-pools to serve work items queued on unbound workqueues - the
     94  number of these backing pools is dynamic.
    140  For unbound workqueues, the number of backing pools is dynamic.
    143  backing worker pools matching the attributes. The responsibility of
    188  worker-pools which host workers which are not bound to any
    191  worker-pools try to start execution of work items as soon as
    [all …]

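In API terms, the distinction the excerpt draws is selected by the
workqueue's flags: a default workqueue feeds the per-CPU worker pools, while
WQ_UNBOUND routes work to one of the dynamically created backing pools. A
minimal example:

    #include <linux/workqueue.h>

    static void demo_fn(struct work_struct *work) { }
    static DECLARE_WORK(demo_work, demo_fn);

    static struct workqueue_struct *demo_wq;

    static int demo_init(void)
    {
            /* served by the dynamically managed unbound worker pools */
            demo_wq = alloc_workqueue("demo", WQ_UNBOUND, 0);
            if (!demo_wq)
                    return -ENOMEM;

            queue_work(demo_wq, &demo_work);  /* runs on some unbound worker */
            return 0;
    }
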
/linux/Documentation/core-api/mm-api.rst
     88  Memory pools
     94  DMA pools

/linux/Documentation/networking/page_pool.rst
     46  Information about page pools on the system can be accessed via the netdev
     51  The number of pools created **must** match the number of hardware queues
    106  with fragmented page pools.

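The "one pool per hardware queue" rule above translates, on the driver side,
into one page_pool_create() call per Rx ring. A minimal sketch; the parameter
values are illustrative and only a subset of struct page_pool_params is
filled in:

    #include <linux/dma-direction.h>
    #include <linux/numa.h>
    #include <net/page_pool/helpers.h>

    static struct page_pool *demo_create_rx_pool(struct device *dev,
                                                 unsigned int ring_size)
    {
            struct page_pool_params pp = {
                    .flags     = PP_FLAG_DMA_MAP,  /* pool maps pages for us */
                    .order     = 0,                /* single pages */
                    .pool_size = ring_size,        /* ~one page per descriptor */
                    .nid       = NUMA_NO_NODE,
                    .dev       = dev,
                    .dma_dir   = DMA_FROM_DEVICE,
            };

            return page_pool_create(&pp);   /* ERR_PTR() on failure */
    }
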
/linux/drivers/net/ethernet/wangxun/libwx/wx_hw.c
    602  static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,    in wx_set_rar() argument
    618  wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);    in wx_set_rar()
    620  wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);    in wx_set_rar()
    795  wx->mac_table[i].pools,    in wx_sync_mac_table()
    809  wx->mac_table[0].pools = 1ULL;    in wx_mac_set_default_filter()
    812  wx->mac_table[0].pools,    in wx_mac_set_default_filter()
    828  wx->mac_table[i].pools = 0;    in wx_flush_sw_mac_table()
    844  if (wx->mac_table[i].pools != (1ULL << pool)) {    in wx_add_mac_filter()
    846  wx->mac_table[i].pools |= (1ULL << pool);    in wx_add_mac_filter()
    858  wx->mac_table[i].pools |= (1ULL << pool);    in wx_add_mac_filter()
    [all …]

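In this driver `pools` is a 64-bit mask, one bit per virtual pool a MAC filter
entry serves; when a filter is synced, the mask is split across the two 32-bit
WX_PSR_MAC_SWC_VM registers. The bit handling, reduced to its core with
assumed demo_* names:

    #include <linux/types.h>

    struct demo_filter {
            u64 pools;      /* bit N set: filter active for pool N */
    };

    static void demo_add_to_pool(struct demo_filter *f, unsigned int pool)
    {
            f->pools |= 1ULL << pool;
    }

    static void demo_split(const struct demo_filter *f, u32 *lo, u32 *hi)
    {
            *lo = f->pools & 0xFFFFFFFF;    /* written to ..._VM_L */
            *hi = f->pools >> 32;           /* written to ..._VM_H */
    }
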
/linux/arch/sparc/include/asm/iommu-common.h
     26  struct iommu_pool pools[IOMMU_NR_POOLS];    member

/linux/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ethernet-driver.rst
     26  - buffer pools
     69  DPBPs represent hardware buffer pools. Packet I/O is performed in the context
    124  The role of hardware buffer pools is storage of ingress frame data. Each network

/linux/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst
     25  The MC uses DPAA2 hardware resources such as queues, buffer pools, and
     56  | -buffer pools -DPMCP |
    360  - DPBPs for network buffer pools

/linux/Documentation/arch/arm/keystone/knav-qmss.rst
     12  processors(PDSP), linking RAM, descriptor pools and infrastructure
     25  allocate descriptor pools, map the descriptors, push/pop to queues etc. For

/linux/kernel/dma/swiotlb.c
     91  .pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
    310  list_add_rcu(&pool->node, &mem->pools);    in add_mem_pool()
    783  list_for_each_entry_rcu(pool, &mem->pools, node) {    in __swiotlb_find_pool()
   1151  list_for_each_entry_rcu(pool, &mem->pools, node) {    in swiotlb_search_area()
   1332  list_for_each_entry_rcu(pool, &mem->pools, node)    in mem_used()
   1841  INIT_LIST_HEAD_RCU(&mem->pools);    in rmem_swiotlb_device_init()

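swiotlb chains its pools on an RCU-protected list: the hot lookup paths
(__swiotlb_find_pool(), swiotlb_search_area()) walk mem->pools locklessly
under RCU, while add_mem_pool() publishes new pools with list_add_rcu(). A
self-contained sketch of that reader/writer split, with assumed demo_* names:

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct demo_pool {
            struct list_head node;
            unsigned long start, end;
    };

    struct demo_mem {
            struct list_head pools;         /* INIT_LIST_HEAD_RCU() at init */
            spinlock_t lock;                /* serializes writers only */
    };

    /* lockless reader: safe against concurrent demo_add_pool() */
    static struct demo_pool *demo_find_pool(struct demo_mem *mem,
                                            unsigned long addr)
    {
            struct demo_pool *pool, *found = NULL;

            rcu_read_lock();
            list_for_each_entry_rcu(pool, &mem->pools, node) {
                    if (addr >= pool->start && addr < pool->end) {
                            found = pool;
                            break;
                    }
            }
            rcu_read_unlock();
            return found;
    }

    static void demo_add_pool(struct demo_mem *mem, struct demo_pool *pool)
    {
            spin_lock(&mem->lock);
            list_add_rcu(&pool->node, &mem->pools);
            spin_unlock(&mem->lock);
    }
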
/linux/kernel/dma/Kconfig
     99  pools as needed. To reduce run-time kernel memory requirements, you
    118  This enables support for restricted DMA pools which provide a level of

/linux/arch/powerpc/kernel/iommu.c
    253  pool = &(tbl->pools[pool_nr]);    in iommu_range_alloc()
    281  pool = &(tbl->pools[0]);    in iommu_range_alloc()
    303  pool = &tbl->pools[pool_nr];    in iommu_range_alloc()
    430  p = &tbl->pools[pool_nr];    in get_pool()
    756  p = &tbl->pools[i];    in iommu_init_table()

/linux/arch/arm/boot/dts/ti/keystone/keystone-k2g-netcp.dtsi
     36  queue-pools {

/linux/drivers/soc/fsl/qbman/qman_priv.h
    177  u32 pools;    member

/linux/drivers/soc/fsl/qbman/qman_portal.c
    249  pcfg->pools = qm_get_pools_sdqcr();    in qman_portal_probe()

/linux/include/linux/swiotlb.h
    118  struct list_head pools;    member