/linux/tools/testing/selftests/drivers/net/mlxsw/

sharedbuffer_configuration.py
     28  def __init__(self, pools):   [argument]
     30  for pool in pools:
     72  # The threshold type of pools 4, 8, 9 and 10 cannot be changed
    110  # The threshold type of pools 4, 8, 9 and 10 cannot be changed
    187  pools = PoolList()
    190  pools.append(Pool(pooldict))
    191  return pools
    194  def do_check_pools(dlname, pools, vp):   [argument]
    195  for pool in pools:
    219  def check_pools(dlname, pools):   [argument]
         [all …]

qos_pfc.sh
     68  # iPOOL1+ePOOL5 are overflow pools.
     69  # iPOOL2+ePOOL6 are PFC pools.
    125  # pools
    145  # Control traffic pools. Just reduce the size. Keep them dynamic so that
    150  # Overflow pools.
    154  # PFC pools. As per the writ, the size of egress PFC pool should be

/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/

pool.c
     25  if (!xsk->pools) {   [in mlx5e_xsk_get_pools()]
     26  xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,   [in mlx5e_xsk_get_pools()]
     27  sizeof(*xsk->pools), GFP_KERNEL);   [in mlx5e_xsk_get_pools()]
     28  if (unlikely(!xsk->pools))   [in mlx5e_xsk_get_pools()]
     41  kfree(xsk->pools);   [in mlx5e_xsk_put_pools()]
     42  xsk->pools = NULL;   [in mlx5e_xsk_put_pools()]
     54  xsk->pools[ix] = pool;   [in mlx5e_xsk_add_pool()]
     60  xsk->pools[ix] = NULL;   [in mlx5e_xsk_remove_pool()]

pool.h
     12  if (!xsk || !xsk->pools)   [in mlx5e_xsk_get_pool()]
     18  return xsk->pools[ix];   [in mlx5e_xsk_get_pool()]

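The mlx5e hits above show one XSK buffer pool pointer kept per channel in a lazily allocated array. A stand-alone sketch of that shape follows; the names (example_xsk, EXAMPLE_MAX_CHANNELS) are hypothetical, and only the allocate-on-first-use / free-and-NULL pattern mirrors pool.c:

    /* Hypothetical names; only the lazy pool-array pattern mirrors pool.c. */
    #include <linux/slab.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define EXAMPLE_MAX_CHANNELS 128

    struct xsk_buff_pool;                   /* opaque here */

    struct example_xsk {
            struct xsk_buff_pool **pools;   /* one slot per channel, NULL if unused */
    };

    static int example_xsk_get_pools(struct example_xsk *xsk)
    {
            if (!xsk->pools) {
                    xsk->pools = kcalloc(EXAMPLE_MAX_CHANNELS,
                                         sizeof(*xsk->pools), GFP_KERNEL);
                    if (!xsk->pools)
                            return -ENOMEM;
            }
            return 0;
    }

    static void example_xsk_put_pools(struct example_xsk *xsk)
    {
            kfree(xsk->pools);
            xsk->pools = NULL;
    }

    static void example_xsk_add_pool(struct example_xsk *xsk, u16 ix,
                                     struct xsk_buff_pool *pool)
    {
            xsk->pools[ix] = pool;
    }

    static void example_xsk_remove_pool(struct example_xsk *xsk, u16 ix)
    {
            xsk->pools[ix] = NULL;
    }
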
/linux/drivers/usb/core/

buffer.c
     52  * hcd_buffer_create - initialize buffer pools
     53  * @hcd: the bus whose buffer pools are to be initialized
     58  * memory allocators. It initializes some pools of dma-coherent memory that
     61  * Call hcd_buffer_destroy() to clean up after using those pools.
     90  * hcd_buffer_destroy - deallocate buffer pools
     91  * @hcd: the bus whose buffer pools are to be destroyed
     95  * This frees the buffer pools created by hcd_buffer_create().

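The dma-coherent pools that hcd_buffer_create() sets up back the coherent transfer buffers drivers request through usb_alloc_coherent(). A minimal, illustrative sketch; the 64-byte size and the surrounding function are placeholders:

    /* Illustrative only: a coherent buffer served from the HCD's pools. */
    #include <linux/usb.h>
    #include <linux/gfp.h>

    static int example_alloc_transfer_buffer(struct usb_device *udev)
    {
            dma_addr_t dma;
            void *buf;

            buf = usb_alloc_coherent(udev, 64, GFP_KERNEL, &dma);
            if (!buf)
                    return -ENOMEM;

            /* ... submit a URB using 'dma' with URB_NO_TRANSFER_DMA_MAP ... */

            usb_free_coherent(udev, 64, buf, dma);
            return 0;
    }
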
/linux/kernel/cgroup/

dmem.c
     36  * @pools: List of pools linked to this region.
     39  struct list_head pools;   [member]
     49  * No new pools should be added to the region afterwards.
     57  struct list_head pools;   [member]
    163  list_for_each_entry_rcu(pool, &dmemcs->pools, css_node)   [in dmemcs_offline()]
    174  list_for_each_entry_safe(pool, next, &dmemcs->pools, css_node) {   [in dmemcs_free()]
    194  INIT_LIST_HEAD(&dmemcs->pools);   [in dmemcs_alloc()]
    203  list_for_each_entry_rcu(pool, &dmemcs->pools, css_node, spin_is_locked(&dmemcg_lock))   [in find_cg_pool_locked()]
    235  list_for_each_entry_rcu(pool, &dmemcg_iter->pools, css_node) {   [in dmem_cgroup_calculate_protection()]
    345  list_add_tail_rcu(&pool->css_node, &dmemcs->pools);   [in alloc_pool_single()]
         [all …]

/linux/arch/sparc/kernel/

iommu-common.c
     82  spin_lock_init(&(iommu->pools[i].lock));   [in iommu_tbl_pool_init()]
     83  iommu->pools[i].start = start;   [in iommu_tbl_pool_init()]
     84  iommu->pools[i].hint = start;   [in iommu_tbl_pool_init()]
     86  iommu->pools[i].end = start - 1;   [in iommu_tbl_pool_init()]
    131  pool = &(iommu->pools[pool_nr]);   [in iommu_tbl_range_alloc()]
    161  pool = &(iommu->pools[0]);   [in iommu_tbl_range_alloc()]
    193  pool = &(iommu->pools[pool_nr]);   [in iommu_tbl_range_alloc()]
    237  p = &tbl->pools[pool_nr];   [in get_pool()]

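iommu_tbl_pool_init() above carves a single allocation table into several pools, each with its own lock, start/end range, and rotating hint, so allocators on different CPUs contend less. A stand-alone sketch of that carve-up with hypothetical types (example_tbl, example_pool):

    /* Hypothetical types; only the "split one arena into N locked pools" idea
     * mirrors iommu_tbl_pool_init() above. */
    #include <linux/spinlock.h>

    #define EXAMPLE_NR_POOLS 4

    struct example_pool {
            spinlock_t      lock;
            unsigned long   start, end, hint;
    };

    struct example_tbl {
            struct example_pool pools[EXAMPLE_NR_POOLS];
    };

    static void example_tbl_pool_init(struct example_tbl *tbl,
                                      unsigned long num_entries)
    {
            unsigned long per_pool = num_entries / EXAMPLE_NR_POOLS;
            unsigned long start = 0;
            int i;

            for (i = 0; i < EXAMPLE_NR_POOLS; i++) {
                    spin_lock_init(&tbl->pools[i].lock);
                    tbl->pools[i].start = start;
                    tbl->pools[i].hint = start;
                    start += per_pool;              /* offset of the next pool */
                    tbl->pools[i].end = start - 1;
            }
    }
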
/linux/drivers/net/ethernet/ti/icssg/

icssg_config.h
     29  /* Defines for forwarding path buffer pools:
     31  * - 8 total pools per slice
     37  /* Defines for local injection path buffer pools:
     39  * - 16 total pools per slice
     40  * - 8 pools per port per slice and each slice handles both ports
     41  * - only 4 out of 8 pools used per port (as only 4 real QoS levels in ICSSG)
     42  * - switch mode: 8 total pools used
     43  * - mac mode: 4 total pools used

icssg_config.c
    305  /* Configure buffer pools for forwarding buffers   [in prueth_fw_offload_buffer_setup()]
    307  * - 8 total pools per slice   [in prueth_fw_offload_buffer_setup()]
    315  /* Configure buffer pools for Local Injection buffers   [in prueth_fw_offload_buffer_setup()]
    317  * - 16 total pools per slice   [in prueth_fw_offload_buffer_setup()]
    392  /* Configure buffer pools for forwarding buffers   [in prueth_emac_buffer_setup()]
    393  * - in mac mode - no forwarding so initialize all pools to 0   [in prueth_emac_buffer_setup()]
    394  * - 8 total pools per slice   [in prueth_emac_buffer_setup()]
    401  /* Configure buffer pools for Local Injection buffers   [in prueth_emac_buffer_setup()]
    403  * - 16 total pools per slice   [in prueth_emac_buffer_setup()]

/linux/drivers/soc/ti/

knav_qmss.h
    191  * @pools: list of descriptor pools in the region
    203  struct list_head pools;   [member]
    207  * struct knav_pool: qmss pools
    304  struct list_head pools;   [member]
    363  list_for_each_entry(pool, &kdev->pools, list)

/linux/include/linux/

swiotlb.h
     91  * @nslabs: Total number of IO TLB slabs in all pools.
     95  * @can_grow: %true if more pools can be allocated dynamically.
     98  * @pools: List of IO TLB memory pool descriptors (if dynamic).
    105  * @transient_nslabs: The total number of slots in all transient pools that
    118  struct list_head pools;   [member]
    157  * dev->dma_uses_io_tlb here and mem->pools in __swiotlb_find_pool().   [in swiotlb_find_pool()]

/linux/mm/

dmapool.c
     61  struct list_head pools;
     81  list_for_each_entry(pool, &dev->dma_pools, pools) {   [in pools_show()]
     93  static DEVICE_ATTR_RO(pools);
    212  * Given one of these pools, dma_pool_alloc()
    270  INIT_LIST_HEAD(&retval->pools);   [in dma_pool_create()]
    283  list_add(&retval->pools, &dev->dma_pools);   [in dma_pool_create()]
    291  list_del(&retval->pools);   [in dma_pool_create()]
    373  list_del(&pool->pools);   [in dma_pool_destroy()]
     60  struct list_head pools;   [member]

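dmapool.c implements the dma_pool API whose per-device list bookkeeping shows up in the hits above. A minimal sketch of typical driver-side usage; the pool name, the 64-byte size/alignment, and the surrounding function are placeholders:

    /* Placeholder driver code exercising the dma_pool API. */
    #include <linux/dmapool.h>
    #include <linux/device.h>
    #include <linux/gfp.h>

    static int example_use_dma_pool(struct device *dev)
    {
            struct dma_pool *pool;
            dma_addr_t dma;
            void *vaddr;

            /* Pool of 64-byte, 64-byte-aligned coherent buffers for this device. */
            pool = dma_pool_create("example-pool", dev, 64, 64, 0);
            if (!pool)
                    return -ENOMEM;

            vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
            if (!vaddr) {
                    dma_pool_destroy(pool);
                    return -ENOMEM;
            }

            /* ... program 'dma' into the hardware, touch 'vaddr' from the CPU ... */

            dma_pool_free(pool, vaddr, dma);
            dma_pool_destroy(pool);
            return 0;
    }

A boundary argument of 0 means allocations only have to honour the alignment; a non-zero power-of-two boundary additionally keeps each block from crossing that address boundary.
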
/linux/Documentation/devicetree/bindings/soc/ti/

keystone-navigator-qmss.txt
      6  processors(PDSP), linking RAM, descriptor pools and infrastructure
     49  - queue-pools : child node classifying the queue ranges into pools.
     50    Queue ranges are grouped into 3 type of pools:
    151  queue-pools {

/linux/drivers/net/ethernet/chelsio/libcxgb/

libcxgb_ppm.c
    348  struct cxgbi_ppm_pool __percpu *pools;   [in ppm_alloc_cpu_pool(), local]
    350  unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;   [in ppm_alloc_cpu_pool()]
    367  alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;   [in ppm_alloc_cpu_pool()]
    368  pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));   [in ppm_alloc_cpu_pool()]
    370  if (!pools)   [in ppm_alloc_cpu_pool()]
    374  struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);   [in ppm_alloc_cpu_pool()]
    384  return pools;   [in ppm_alloc_cpu_pool()]

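ppm_alloc_cpu_pool() above sizes one per-CPU object plus a trailing index bitmap by hand and allocates the whole thing with __alloc_percpu(). A reduced sketch of the same pattern; the structure and field names are hypothetical:

    /* Hypothetical struct; the "per-CPU object with a trailing bitmap" pattern
     * mirrors ppm_alloc_cpu_pool() above. */
    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    struct example_cpu_pool {
            unsigned int    base;
            unsigned int    next;
            unsigned long   bmap[];         /* per-CPU index bitmap follows */
    };

    static struct example_cpu_pool __percpu *
    example_alloc_cpu_pool(unsigned int bmap_words)
    {
            struct example_cpu_pool __percpu *pools;
            struct example_cpu_pool *ppool;
            size_t alloc_sz;
            int cpu;

            alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap_words;
            pools = __alloc_percpu(alloc_sz, __alignof__(struct example_cpu_pool));
            if (!pools)
                    return NULL;

            for_each_possible_cpu(cpu) {
                    ppool = per_cpu_ptr(pools, cpu);
                    ppool->base = 0;
                    ppool->next = 0;
            }

            return pools;
    }
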
/linux/lib/

stackdepot.c
     68  /* Number of pools in stack_pools. */
    213  pr_info("allocating space for %u stack pools via memblock\n",   [in stack_depot_early_init()]
    218  pr_err("stack pools allocation failed, disabling\n");   [in stack_depot_early_init()]
    273  pr_info("allocating space for %u stack pools via kvcalloc\n",   [in stack_depot_init()]
    277  pr_err("stack pools allocation failed, disabling\n");   [in stack_depot_init()]
    291  * Initializes new stack pool, and updates the list of pools.
    323  * pools.   [in depot_init_pool()]
    346  * pools is reached, do not use the preallocated memory.   [in depot_keep_new_pool()]
    851  seq_printf(seq, "pools: %d\n", data_race(pools_num));   [in stats_show()]

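The pools being counted and allocated above hold deduplicated stack traces. A small sketch of the consumer-side API, assuming a context where GFP_KERNEL allocations are allowed:

    /* Illustrative consumer of the stack depot; not code from stackdepot.c. */
    #include <linux/stackdepot.h>
    #include <linux/stacktrace.h>
    #include <linux/kernel.h>
    #include <linux/gfp.h>

    static depot_stack_handle_t example_record_stack(void)
    {
            unsigned long entries[16];
            unsigned int nr;

            /* Capture the current call chain, then deduplicate it in the depot. */
            nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
            return stack_depot_save(entries, nr, GFP_KERNEL);
    }

    static void example_print_stack(depot_stack_handle_t handle)
    {
            unsigned long *entries;
            unsigned int nr;

            nr = stack_depot_fetch(handle, &entries);
            stack_trace_print(entries, nr, 0);
    }
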
/linux/include/soc/fsl/

bman.h
     47  * hardware pools as a dma_addr_t
     77  /* Portal and Buffer Pools */
     81  #define BM_POOL_MAX 64 /* max # of buffer pools */

/linux/drivers/net/wireless/ath/

dfs_pri_detector.c
     73  * DOC: Singleton Pulse and Sequence Pools
     75  * Instances of pri_sequence and pulse_elem are kept in singleton pools to
     79  * Memory is freed after all references to the pools are released.
    100  /* free singleton pools with no references left */   [in pool_deregister_ref()]
    357  /* free pulse queue and sequences list and give objects back to pools */

/linux/drivers/net/ethernet/freescale/dpaa2/

dpaa2-xsk.c
    162  pools_params->pools[curr_bp].priority_mask |= (1 << j);   [in dpaa2_xsk_set_bp_per_qdbin()]
    163  if (!pools_params->pools[curr_bp].priority_mask)   [in dpaa2_xsk_set_bp_per_qdbin()]
    166  pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid;   [in dpaa2_xsk_set_bp_per_qdbin()]
    167  pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size;   [in dpaa2_xsk_set_bp_per_qdbin()]
    168  pools_params->pools[curr_bp++].backup_pool = 0;   [in dpaa2_xsk_set_bp_per_qdbin()]

dpsw.h
    196  * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
    198  * @pools: Array of buffer pools parameters; The number of valid entries
    200  * @pools.dpbp_id: DPBP object ID
    201  * @pools.buffer_size: Buffer size
    202  * @pools.backup_pool: Backup pool
    210  } pools[DPSW_MAX_DPBP];   [member]

dpni.h
     24  * DPNI_MAX_DPBP - Maximum number of buffer pools per DPNI
     99  * struct dpni_pools_cfg - Structure representing buffer pools configuration
    103  * @pools: Array of buffer pools parameters; The number of valid entries
    105  * @pools.dpbp_id: DPBP object ID
    106  * @pools.priority: Priority mask that indicates TC's used with this buffer.
    108  * @pools.buffer_size: Buffer size
    109  * @pools.backup_pool: Backup pool
    119  } pools[DPNI_MAX_DPBP];   [member]

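dpaa2-xsk.c above fills one dpni_pools_cfg entry per buffer pool before handing the whole array to the management complex. A trimmed sketch for a single pool; the IDs and sizes are placeholders, and dpni_set_pools() is assumed to be the usual MC command wrapper the DPAA2 Ethernet driver uses:

    /* Sketch of filling a single dpni_pools_cfg entry; values are placeholders. */
    #include "dpni.h"

    static int example_set_one_pool(struct fsl_mc_io *mc_io, u16 token,
                                    int dpbp_id, u16 buf_size)
    {
            struct dpni_pools_cfg pools_params = { 0 };

            pools_params.num_dpbp = 1;
            pools_params.pools[0].dpbp_id = dpbp_id;
            pools_params.pools[0].priority_mask = 0xff;     /* all traffic classes */
            pools_params.pools[0].buffer_size = buf_size;
            pools_params.pools[0].backup_pool = 0;

            /* Assumed to be the driver's usual MC command wrapper. */
            return dpni_set_pools(mc_io, 0, token, &pools_params);
    }
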
/linux/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/

ethernet-driver.rst
     26  - buffer pools
     40  object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators
     69  DPBPs represent hardware buffer pools. Packet I/O is performed in the context
    124  The role of hardware buffer pools is storage of ingress frame data. Each network

/linux/drivers/net/ethernet/netronome/nfp/

nfp_abi.h
     60  * @ingress_pools_count: number of ingress pools
     61  * @egress_pools_count: number of egress pools

/linux/fs/smb/common/smbdirect/

smbdirect_socket.h
     47  * Memory pools for preallocating
     70  * Memory pools for preallocating

/linux/Documentation/networking/

page_pool.rst
     46  Information about page pools on the system can be accessed via the netdev
     51  The number of pools created **must** match the number of hardware queues
    106  with fragmented page pools.

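Following the rule quoted above (one pool per hardware queue), a driver typically creates its page pools at ring-setup time. A minimal sketch, assuming a recent kernel where the helpers live in <net/page_pool/helpers.h>; ring_size and the DMA-mapping flag are placeholders for what a real driver would choose:

    /* Placeholder driver code: one page pool per RX queue. */
    #include <net/page_pool/helpers.h>
    #include <linux/dma-mapping.h>

    static struct page_pool *example_create_rx_page_pool(struct device *dev,
                                                          unsigned int ring_size)
    {
            struct page_pool_params pp_params = {
                    .flags          = PP_FLAG_DMA_MAP,      /* pool handles DMA mapping */
                    .order          = 0,
                    .pool_size      = ring_size,
                    .nid            = NUMA_NO_NODE,
                    .dev            = dev,
                    .dma_dir        = DMA_FROM_DEVICE,
                    .max_len        = PAGE_SIZE,
                    .offset         = 0,
            };

            return page_pool_create(&pp_params);    /* ERR_PTR() on failure */
    }

Pages are then drawn with page_pool_alloc_pages() (or page_pool_dev_alloc_pages()) in the RX path, returned with page_pool_put_full_page(), and the pool itself is torn down with page_pool_destroy().
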
/linux/drivers/net/ethernet/ibm/

ibmvnic.h
     39  /* A VNIC adapter has set of Rx and Tx pools (aka queues). Each Rx/Tx pool
     60  * Each VNIC can have upto 16 Rx, 16 Tx and 16 TSO pools. The TSO pools
     63  * The Rx and Tx pools can have upto 4096 buffers. The max size of these
     70  * Tx/TSO pools but there is a limit of 255 mappings per vnic in the
     76  * plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC.