
Searched refs:xsk_pool (Results 1 – 25 of 40) sorted by relevance
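The hits below share one shape: the driver keeps a struct xsk_buff_pool *xsk_pool pointer on each TX/RX ring (see the "member" hits in otx2_txrx.h, stmmac.h and virtio_net.c), reads it in the NAPI poll path, often via READ_ONCE() as in ice_txrx.c, and takes the zero-copy path only when a pool is bound to that queue. Below is a minimal, driver-agnostic sketch of that pattern; my_ring, my_clean_rx_zc() and my_clean_rx() are hypothetical names, not code from any driver listed here.

#include <linux/compiler.h>     /* READ_ONCE() */
#include <net/xdp_sock_drv.h>   /* struct xsk_buff_pool and the xsk_* driver helpers */

struct my_ring {
	struct xsk_buff_pool *xsk_pool;	/* non-NULL once an AF_XDP socket binds this queue */
};

int my_clean_rx_zc(struct my_ring *rx_ring, struct xsk_buff_pool *pool, int budget);
int my_clean_rx(struct my_ring *rx_ring, int budget);

static int my_napi_poll_rx(struct my_ring *rx_ring, int budget)
{
	/* Snapshot the pool pointer so it cannot change under us during this poll. */
	struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);

	return xsk_pool ? my_clean_rx_zc(rx_ring, xsk_pool, budget)	/* zero-copy path */
			: my_clean_rx(rx_ring, budget);			/* page-based path */
}

The pool can be attached or detached at runtime, which is why the poll-path READ_ONCE() is paired with a WRITE_ONCE() at ring-configure time in several of the drivers below (igb_main.c, line 4390, for example).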


/linux/drivers/net/ethernet/intel/ice/
ice_xsk.c
293 struct xsk_buff_pool *xsk_pool, u16 count) in __ice_alloc_rx_bufs_zc() argument
305 nb_buffs_extra = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, in __ice_alloc_rx_bufs_zc()
318 nb_buffs = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, count); in __ice_alloc_rx_bufs_zc()
343 struct xsk_buff_pool *xsk_pool, u16 count) in ice_alloc_rx_bufs_zc() argument
352 if (!__ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, rx_thresh)) in ice_alloc_rx_bufs_zc()
354 return __ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, leftover); in ice_alloc_rx_bufs_zc()
363 struct xsk_buff_pool *xsk_pool) in ice_clean_xdp_irq_zc() argument
414 xsk_tx_completed(xsk_pool, xsk_frames); in ice_clean_xdp_irq_zc()
434 struct xsk_buff_pool *xsk_pool) in ice_xmit_xdp_tx_zc() argument
448 free_space += ice_clean_xdp_irq_zc(xdp_ring, xsk_pool); in ice_xmit_xdp_tx_zc()
[all …]
ice_txrx.c
208 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in ice_clean_tx_ring()
542 if (rx_ring->xsk_pool) { in ice_clean_rx_ring()
597 if (rx_ring->xsk_pool) { in ice_free_rx_ring()
1281 struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool); in ice_napi_poll() local
1284 if (xsk_pool) in ice_napi_poll()
1285 wd = ice_xmit_zc(tx_ring, xsk_pool); in ice_napi_poll()
1311 struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool); in ice_napi_poll() local
1318 cleaned = rx_ring->xsk_pool ? in ice_napi_poll()
1319 ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) : in ice_napi_poll()
ice_base.c
669 err = ice_realloc_rx_xdp_bufs(ring, ring->xsk_pool); in ice_vsi_cfg_rxq()
673 if (ring->xsk_pool) { in ice_vsi_cfg_rxq()
675 xsk_pool_get_rx_frag_step(ring->xsk_pool); in ice_vsi_cfg_rxq()
687 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in ice_vsi_cfg_rxq()
716 if (ring->xsk_pool) { in ice_vsi_cfg_rxq()
719 if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) { in ice_vsi_cfg_rxq()
727 ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs); in ice_vsi_cfg_rxq()
/linux/drivers/net/ethernet/intel/igb/
igb_xsk.c
73 if (rx_ring->xsk_pool) in igb_txrx_ring_enable()
74 igb_alloc_rx_buffers_zc(rx_ring, rx_ring->xsk_pool, in igb_txrx_ring_enable()
201 struct xsk_buff_pool *xsk_pool, u16 count) in igb_alloc_rx_buffers_zc() argument
213 nb_buffs_extra = igb_fill_rx_descs(xsk_pool, xdp, rx_desc, in igb_alloc_rx_buffers_zc()
225 nb_buffs = igb_fill_rx_descs(xsk_pool, xdp, rx_desc, count); in igb_alloc_rx_buffers_zc()
295 struct xdp_buff *xdp, struct xsk_buff_pool *xsk_pool, in igb_run_xdp_zc() argument
310 if (xsk_uses_need_wakeup(xsk_pool) && in igb_run_xdp_zc()
342 struct xsk_buff_pool *xsk_pool, const int budget) in igb_clean_rx_irq_zc() argument
392 xdp_res = igb_run_xdp_zc(adapter, rx_ring, xdp, xsk_pool, in igb_clean_rx_irq_zc()
451 failure |= !igb_alloc_rx_buffers_zc(rx_ring, xsk_pool, in igb_clean_rx_irq_zc()
[all …]
igb_main.c
481 if (!rx_ring->xsk_pool) { in igb_dump()
1998 if (ring->xsk_pool) in igb_configure()
1999 igb_alloc_rx_buffers_zc(ring, ring->xsk_pool, in igb_configure()
4390 WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring)); in igb_configure_tx_ring()
4750 if (ring->xsk_pool) in igb_setup_srrctl()
4751 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); in igb_setup_srrctl()
4790 WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring)); in igb_configure_rx_ring()
4791 if (ring->xsk_pool) { in igb_configure_rx_ring()
4795 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in igb_configure_rx_ring()
4827 if (ring->xsk_pool) in igb_configure_rx_ring()
[all …]
/linux/drivers/net/ethernet/intel/i40e/
i40e_xsk.c
211 if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) in i40e_run_xdp_zc()
253 nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs); in i40e_alloc_rx_buffers_zc()
497 if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { in i40e_clean_rx_irq_zc()
499 xsk_set_rx_need_wakeup(rx_ring->xsk_pool); in i40e_clean_rx_irq_zc()
501 xsk_clear_rx_need_wakeup(rx_ring->xsk_pool); in i40e_clean_rx_irq_zc()
515 dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr); in i40e_xmit_pkt()
516 xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len); in i40e_xmit_pkt()
537 dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr); in i40e_xmit_pkt_batch()
538 xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len); in i40e_xmit_pkt_batch()
581 struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs; in i40e_xmit_zc()
[all …]
/linux/drivers/net/ethernet/freescale/dpaa2/
dpaa2-xsk.c
49 ch->xsk_pool->umem->headroom); in dpaa2_xsk_run_xdp()
183 if (!ch->xsk_pool) in dpaa2_xsk_disable_pool()
200 ch->xsk_pool = NULL; in dpaa2_xsk_disable_pool()
265 ch->xsk_pool = pool; in dpaa2_xsk_enable_pool()
354 addr = xsk_buff_raw_get_dma(ch->xsk_pool, xdp_desc->addr); in dpaa2_xsk_tx_build_fd()
355 xsk_buff_raw_dma_sync_for_device(ch->xsk_pool, addr, xdp_desc->len); in dpaa2_xsk_tx_build_fd()
393 struct xdp_desc *xdp_descs = ch->xsk_pool->tx_descs; in dpaa2_xsk_tx()
410 batch = xsk_tx_peek_release_desc_batch(ch->xsk_pool, budget); in dpaa2_xsk_tx()
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
tx.c
60 if (xp_tx_metadata_enabled(sq->xsk_pool)) in mlx5e_xsk_tx_post_err()
68 struct xsk_buff_pool *pool = sq->xsk_pool; in mlx5e_xsk_tx()
116 if (xp_tx_metadata_enabled(sq->xsk_pool)) { in mlx5e_xsk_tx()
/linux/drivers/net/ethernet/ti/icssg/
icssg_common.c
100 struct xsk_buff_pool *pool = tx_chn->xsk_pool; in emac_xsk_xmit_zc()
164 xsk_tx_release(tx_chn->xsk_pool); in emac_xsk_xmit_zc()
291 if (tx_chn->xsk_pool) { in emac_tx_complete_packets()
293 xsk_tx_completed(tx_chn->xsk_pool, xsk_frames_done); in emac_tx_complete_packets()
295 if (xsk_uses_need_wakeup(tx_chn->xsk_pool)) in emac_tx_complete_packets()
296 xsk_set_tx_need_wakeup(tx_chn->xsk_pool); in emac_tx_complete_packets()
858 buf_len = xsk_pool_get_rx_frame_size(rx_chn->xsk_pool); in prueth_dma_rx_push_mapped_zc()
875 xdp = xsk_buff_alloc(rx_chn->xsk_pool); in prueth_rx_alloc_zc()
1001 if (xsk_uses_need_wakeup(rx_chn->xsk_pool)) { in emac_rx_packet_zc()
1006 xsk_set_rx_need_wakeup(rx_chn->xsk_pool); in emac_rx_packet_zc()
[all …]
icssg_prueth.c
609 if (rx_chn->xsk_pool) { in prueth_create_xdp_rxqs()
613 xsk_pool_set_rxq_info(rx_chn->xsk_pool, rxq); in prueth_create_xdp_rxqs()
889 rx_chn->xsk_pool = NULL; in prueth_set_xsk_pool()
890 tx_chn->xsk_pool = NULL; in prueth_set_xsk_pool()
892 rx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id); in prueth_set_xsk_pool()
893 tx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id); in prueth_set_xsk_pool()
1609 if (!tx_chn->xsk_pool) { in prueth_xsk_wakeup()
1614 if (!rx_chn->xsk_pool) { in prueth_xsk_wakeup()
/linux/drivers/net/ethernet/engleder/
tsnep_main.c
769 dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr); in tsnep_xdp_tx_map_zc()
770 xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len); in tsnep_xdp_tx_map_zc()
794 struct xdp_desc *descs = tx->xsk_pool->tx_descs; in tsnep_xdp_xmit_zc()
805 batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available); in tsnep_xdp_xmit_zc()
893 if (tx->xsk_pool) { in tsnep_tx_poll()
895 xsk_tx_completed(tx->xsk_pool, xsk_frames); in tsnep_tx_poll()
896 if (xsk_uses_need_wakeup(tx->xsk_pool)) in tsnep_tx_poll()
897 xsk_set_tx_need_wakeup(tx->xsk_pool); in tsnep_tx_poll()
959 if (!rx->xsk_pool && entry->page) in tsnep_rx_ring_cleanup()
962 if (rx->xsk_pool && entry->xdp) in tsnep_rx_ring_cleanup()
[all …]
/linux/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c
452 static void otx2_zc_submit_pkts(struct otx2_nic *pfvf, struct xsk_buff_pool *xsk_pool, in otx2_zc_submit_pkts() argument
456 xsk_tx_completed(xsk_pool, *xsk_frames); in otx2_zc_submit_pkts()
458 if (xsk_uses_need_wakeup(xsk_pool)) in otx2_zc_submit_pkts()
459 xsk_set_tx_need_wakeup(xsk_pool); in otx2_zc_submit_pkts()
461 otx2_zc_napi_handler(pfvf, xsk_pool, qidx, budget); in otx2_zc_submit_pkts()
481 if (sq->xsk_pool) in otx2_tx_napi_handler()
482 otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames, in otx2_tx_napi_handler()
540 if (sq->xsk_pool) in otx2_tx_napi_handler()
541 otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames, qidx, budget); in otx2_tx_napi_handler()
627 if (pool->xsk_pool) in otx2_napi_handler()
[all …]
otx2_xsk.c
23 xdp = xsk_buff_alloc(pool->xsk_pool); in otx2_xsk_pool_alloc_buf()
151 sq->xsk_pool = NULL; in otx2_xsk_pool_disable()
197 sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx); in otx2_attach_xsk_buff()
otx2_txrx.h
110 struct xsk_buff_pool *xsk_pool; member
134 struct xsk_buff_pool *xsk_pool; member
otx2_common.c
554 if (pool->xsk_pool) in __otx2_alloc_rbuf()
1069 if (pool->xsk_pool) { in otx2_cq_init()
1073 xsk_pool_set_rxq_info(pool->xsk_pool, &cq->xdp_rxq); in otx2_cq_init()
1300 } else if (pool->xsk_pool) { in otx2_free_bufs()
1363 pool->xsk_pool = NULL; in otx2_aura_pool_free()
1464 struct xsk_buff_pool *xsk_pool; in otx2_pool_aq_init() local
1527 xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, pool_id); in otx2_pool_aq_init()
1528 if (xsk_pool) { in otx2_pool_aq_init()
1529 pool->xsk_pool = xsk_pool; in otx2_pool_aq_init()
1597 if (pool->xsk_pool) { in otx2_sq_aura_pool_init()
[all …]
/linux/drivers/net/ethernet/netronome/nfp/
nfp_net_xsk.c
22 headroom = xsk_pool_get_headroom(rx_ring->r_vec->xsk_pool); in nfp_net_xsk_rx_bufs_stash()
60 struct xsk_buff_pool *pool = r_vec->xsk_pool; in nfp_net_xsk_rx_ring_fill_freelist()
nfp_net_debugfs.c
46 if (!r_vec->xsk_pool) { in nfp_rx_q_show()
/linux/drivers/net/ethernet/netronome/nfp/nfd3/
rings.c
25 if (tx_ring->r_vec->xsk_pool) { in nfp_nfd3_xsk_tx_bufs_free()
29 xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1); in nfp_nfd3_xsk_tx_bufs_free()
/linux/drivers/net/ethernet/freescale/
fec_main.c
1719 if (txq->xsk_pool) { in fec_enet_tx_queue()
1720 struct xsk_buff_pool *pool = txq->xsk_pool; in fec_enet_tx_queue()
1774 new_xdp = xsk_buff_alloc(rxq->xsk_pool); in fec_enet_update_cbd_zc()
2209 u32 headroom = txq->xsk_pool->headroom; in fec_enet_xsk_tx_xmit()
2234 xsk_buff_raw_dma_sync_for_device(txq->xsk_pool, dma, len); in fec_enet_xsk_tx_xmit()
2442 if (rxq->xsk_pool && xsk_uses_need_wakeup(rxq->xsk_pool)) { in fec_enet_rx_queue_xsk()
2444 xsk_set_rx_need_wakeup(rxq->xsk_pool); in fec_enet_rx_queue_xsk()
2446 xsk_clear_rx_need_wakeup(rxq->xsk_pool); in fec_enet_rx_queue_xsk()
2463 if (rxq->xsk_pool) in fec_enet_rx()
3827 allocator = rxq->xsk_pool ? NULL : rxq->page_pool; in fec_xdp_rxq_info_reg()
[all …]
/linux/drivers/net/ethernet/stmicro/stmmac/
stmmac_main.c
301 if (rx_q->xsk_pool) { in stmmac_disable_all_queues()
438 if (rx_q->xsk_pool && rx_q->buf_alloc_num) in stmmac_set_queue_rx_buf_size()
439 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_set_queue_rx_buf_size()
1851 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); in stmmac_alloc_rx_buffers_zc()
1897 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_rx_desc_rings()
1899 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1906 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); in __init_dma_rx_desc_rings()
1916 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1966 if (rx_q->xsk_pool) in init_dma_rx_desc_rings()
1972 rx_q->xsk_pool = NULL; in init_dma_rx_desc_rings()
[all …]
stmmac.h
77 struct xsk_buff_pool *xsk_pool; member
122 struct xsk_buff_pool *xsk_pool; member
/linux/drivers/net/ethernet/intel/igc/
igc_ptp.c
783 struct xsk_buff_pool *xsk_pool; in igc_ptp_tx_reg_to_stamp() local
785 xsk_pool = adapter->tx_ring[tstamp->xsk_queue_index]->xsk_pool; in igc_ptp_tx_reg_to_stamp()
786 if (xsk_pool && xp_tx_metadata_enabled(xsk_pool)) { in igc_ptp_tx_reg_to_stamp()
igc_main.c
251 if (tx_ring->xsk_pool && xsk_frames) in igc_clean_tx_ring()
252 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igc_clean_tx_ring()
471 if (ring->xsk_pool) in igc_clean_rx_ring()
643 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_rx_ring()
644 if (ring->xsk_pool) { in igc_configure_rx_ring()
648 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in igc_configure_rx_ring()
677 if (ring->xsk_pool) in igc_configure_rx_ring()
678 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); in igc_configure_rx_ring()
743 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_tx_ring()
2339 bi->xdp = xsk_buff_alloc(ring->xsk_pool); in igc_alloc_rx_buffers_zc()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_main.c
532 u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0; in mlx5e_create_rq_umr_mkey()
561 WARN_ON(rq->xsk_pool); in mlx5e_init_frags_partition()
624 if (rq->xsk_pool) in mlx5e_init_wqe_alloc_info()
642 if (rq->xsk_pool) in mlx5e_init_wqe_alloc_info()
1002 xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq); in mlx5e_alloc_rq()
1540 struct xsk_buff_pool *xsk_pool, in mlx5e_alloc_xdpsq() argument
1556 sq->xsk_pool = xsk_pool; in mlx5e_alloc_xdpsq()
1558 sq->stats = sq->xsk_pool ? in mlx5e_alloc_xdpsq()
2108 struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool, in mlx5e_open_xdpsq() argument
2114 err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect); in mlx5e_open_xdpsq()
[all …]
/linux/drivers/net/
virtio_net.c
314 struct xsk_buff_pool *xsk_pool; member
368 struct xsk_buff_pool *xsk_pool; member
945 if (rq->xsk_pool) { in virtnet_rq_unmap_free_buf()
1063 bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool); in buf_to_xdp()
1424 xsk_tx_completed(sq->xsk_pool, nb_pkts - i); in virtnet_xsk_xmit_batch()
1452 xsk_tx_completed(sq->xsk_pool, stats.xsk); in virtnet_xsk_xmit()
1516 xsk_tx_completed(sq->xsk_pool, num); in virtnet_xsk_completed()
2760 if (rq->xsk_pool) { in try_fill_recv()
2761 err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp); in try_fill_recv()
2921 if (rq->xsk_pool) in virtnet_receive()
[all …]
