Lines matching +full:data +full:-mapping in the Broadcom bnxt XDP datapath (drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c). Hits are grouped by function below; code elided between matches is marked with "...".

/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 */
/* bnxt_xmit_bd(): fill one or more TX buffer descriptors for an XDP frame */
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
				   struct bnxt_tx_ring_info *txr,
				   dma_addr_t mapping, u32 len,
				   struct xdp_buff *xdp)
{
	...
		num_frags = sinfo->nr_frags;
	...
	/* fill up the first buffer */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
	tx_buf->nr_frags = num_frags;
	...
		tx_buf->page = virt_to_head_page(xdp->data);
	...
	txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
	...
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	/* then one BD per frag of a multi-buffer frame */
	...
		skb_frag_t *frag = &sinfo->frags[i];
		...
		WRITE_ONCE(txr->tx_prod, prod);
		...
		frag_tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
		frag_tx_buf->page = skb_frag_page(frag);

		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
		...
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
		...
		txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
	...
	/* close the chain and publish the new producer index */
	txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
						 TX_BD_FLAGS_PACKET_END);
	...
	WRITE_ONCE(txr->tx_prod, prod);
	...
}
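
The tx_prod handling above relies on power-of-two ring sizing: the 16-bit producer counter increases monotonically (and wraps at 64K) while accessors in the RING_TX()/TX_IDX() style mask it down to a slot index. A minimal stand-alone sketch of that indexing idea; RING_SIZE, RING_MASK and RING_IDX are invented names, not the driver's macros:

/* Power-of-two ring indexing sketch: the counter wraps at 16 bits,
 * the mask keeps the slot index in range without any division. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256U                /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)
#define RING_IDX(p) ((p) & RING_MASK) /* counter -> slot */

int main(void)
{
	uint16_t prod = 0xFFFE; /* just below the 16-bit wrap point */

	for (int i = 0; i < 4; i++) {
		printf("counter=%5u -> slot=%3u\n", prod, RING_IDX(prod));
		prod++; /* the counter itself is never reset */
	}
	return 0;
}

Keeping the raw counter (rather than a pre-masked index) lets free-space math like prod - cons work across the wrap, which is the same reason the driver masks only at array-access time.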
/* __bnxt_xmit_xdp(): queue an XDP_TX frame, remembering the RX producer */
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			    dma_addr_t mapping, u32 len, u16 rx_prod,
			    struct xdp_buff *xdp)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
	tx_buf->rx_prod = rx_prod;
	tx_buf->action = XDP_TX;
}
/* __bnxt_xmit_xdp_redirect(): queue a redirected frame; it is unmapped
 * and returned to its owner at TX completion time. */
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
				     struct bnxt_tx_ring_info *txr,
				     dma_addr_t mapping, u32 len,
				     struct xdp_frame *xdpf)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
	tx_buf->action = XDP_REDIRECT;
	tx_buf->xdpf = xdpf;
	dma_unmap_addr_set(tx_buf, mapping, mapping);
}
/* bnxt_tx_int_xdp(): reclaim descriptors completed by the hardware */
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 tx_hw_cons = txr->tx_hw_cons;
	...
	u16 tx_cons = txr->tx_cons;
	...
		tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];

		if (tx_buf->action == XDP_REDIRECT) {
			struct pci_dev *pdev = bp->pdev;

			/* redirected frames were mapped with dma_map_single() */
			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 ...);
			xdp_return_frame(tx_buf->xdpf);
			tx_buf->action = 0;
			tx_buf->xdpf = NULL;
		} else if (tx_buf->action == XDP_TX) {
			tx_buf->action = 0;
			...
			/* recycle frag pages straight back into the page pool */
			frags = tx_buf->nr_frags;
			...
				tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
				page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
			...
		}
	...
	bnapi->events &= ~BNXT_TX_CMP_EVENT;
	WRITE_ONCE(txr->tx_cons, tx_cons);
	...
	/* publish the RX producer that was saved when the frame was
	 * queued for XDP_TX, now that its buffers have been recycled */
	tx_buf = &txr->tx_buf_ring[RING_TX(bp, last_tx_cons)];
	bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
	...
}
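
The completion path above publishes the new consumer index with WRITE_ONCE() so the transmit path can compute free ring space without a lock. A hedged user-space analog of that single-producer/single-consumer handshake, with C11 atomics standing in for the kernel's READ_ONCE()/WRITE_ONCE() plus barriers; the ring size and all names here are illustrative assumptions:

/* SPSC index handshake sketch: one writer per index, release/acquire
 * ordering ties the index update to the descriptor cleanup before it. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256U

struct ring {
	_Atomic uint16_t prod; /* written by the xmit path only */
	_Atomic uint16_t cons; /* written by the completion path only */
};

/* Producer side: free slots, computed from the published cons. */
static unsigned int ring_avail(const struct ring *r)
{
	uint16_t prod = atomic_load_explicit(&r->prod, memory_order_relaxed);
	uint16_t cons = atomic_load_explicit(&r->cons, memory_order_acquire);

	return RING_SIZE - (uint16_t)(prod - cons);
}

/* Completion side: retire n descriptors, then publish the new index;
 * release ordering makes all cleanup above visible first. */
static void ring_retire(struct ring *r, uint16_t n)
{
	uint16_t cons = atomic_load_explicit(&r->cons, memory_order_relaxed);

	atomic_store_explicit(&r->cons, (uint16_t)(cons + n),
			      memory_order_release);
}

int main(void)
{
	struct ring r = { 0 };

	atomic_store_explicit(&r.prod, 5, memory_order_relaxed);
	printf("avail before retire: %u\n", ring_avail(&r)); /* 251 */
	ring_retire(&r, 5);
	printf("avail after retire:  %u\n", ring_avail(&r)); /* 256 */
	return 0;
}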
/* bnxt_xdp_attached(): report whether an XDP program is attached to the ring */
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	...
/* bnxt_xdp_buff_init(): wrap a received buffer in an xdp_buff */
	dma_addr_t mapping;
	...
	pdev = bp->pdev;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;

	mapping = rx_buf->mapping - bp->rx_dma_offset;
	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);

	xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
	xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true);
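
xdp_prepare_buff() points the xdp_buff at data_ptr - offset, so the first bp->rx_offset bytes sit in front of the frame as headroom that the BPF program may claim. The stand-alone sketch below, with an invented struct and sizes, walks through that layout and shows why the driver later recomputes the offset when a program has moved xdp->data:

/* Layout sketch for an XDP-style buffer with headroom. The struct and
 * constants are illustrative assumptions, not the kernel's xdp_buff. */
#include <stdio.h>

struct fake_xdp {
	unsigned char *data_hard_start; /* start of the whole buffer */
	unsigned char *data;            /* start of packet payload */
	unsigned char *data_end;        /* one past the payload */
};

int main(void)
{
	static unsigned char page[4096];
	unsigned int offset = 256; /* headroom, like bp->rx_offset */
	unsigned int len = 1500;   /* received frame length */
	struct fake_xdp xdp = {
		.data_hard_start = page,
		.data = page + offset,
		.data_end = page + offset + len,
	};

	/* A program may pull data backward into the headroom, e.g. to
	 * push an encap header; the driver then recomputes
	 * offset = data - data_hard_start, as bnxt_rx_xdp() does. */
	xdp.data -= 8;
	printf("new offset = %td, new len = %td\n",
	       xdp.data - xdp.data_hard_start, xdp.data_end - xdp.data);
	return 0;
}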
/* bnxt_xdp_buff_frags_free(): return all frag pages to the page pool */
	for (i = 0; i < shinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&shinfo->frags[i]);

		page_pool_recycle_direct(rxr->page_pool, page);
	}
	shinfo->nr_frags = 0;
/* bnxt_rx_xdp() returns:
 *   true  - packet consumed by XDP and new buffer is allocated.
 *   false - packet should be passed to the stack.
 */
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	...
	dma_addr_t mapping;
	...
	pdev = bp->pdev;
	offset = bp->rx_offset;

	txr = rxr->bnapi->tx_ring[0];
	...
	orig_data = xdp->data;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	...
	/* TX BDs may still be in flight; hold off the RX producer update */
	if (tx_avail != bp->tx_ring_size)
	...
	/* the program may have adjusted the headroom; recompute */
	*len = xdp->data_end - xdp->data;
	if (orig_data != xdp->data) {
		offset = xdp->data - xdp->data_hard_start;
		*data_ptr = xdp->data_hard_start + offset;
	}

	switch (act) {
	...
	case XDP_TX:
		rx_buf = &rxr->rx_buf_ring[cons];
		mapping = rx_buf->mapping - bp->rx_dma_offset;
		...
			tx_needed += sinfo->nr_frags;
		...
		if (tx_avail < tx_needed) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			...
		}

		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
					   bp->rx_dir);
		...
		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
				NEXT_RX(rxr->rx_prod), xdp);
		...
	case XDP_REDIRECT:
		/* abort and reuse the buffer if a replacement cannot be allocated */
		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			...
		}

		if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			page_pool_recycle_direct(rxr->page_pool, page);
			...
		}
		...
	default:
		bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
		...
		trace_xdp_exception(bp->dev, xdp_prog, act);
	...
	}
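
The act values dispatched above come from the attached BPF program run by bpf_prog_run_xdp(). A minimal sketch of such a program; the file name, UDP port and "no IP options" simplification are assumptions for illustration, and a real program would also rewrite MAC/IP/UDP headers before XDP_TX:

/* Minimal XDP program sketch (built with clang -O2 -g -target bpf -c):
 * bounce UDP echo traffic back out the NIC with XDP_TX, which lands in
 * the driver's __bnxt_xmit_xdp() path; pass everything else up. */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_reflect(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct ethhdr *eth = data;
	struct iphdr *iph = (void *)(eth + 1);   /* assumes no VLAN */
	struct udphdr *udp = (void *)(iph + 1);  /* assumes no IP options */

	/* one bounds check covering the deepest access, for the verifier */
	if ((void *)(udp + 1) > data_end)
		return XDP_PASS;
	if (eth->h_proto != bpf_htons(ETH_P_IP))
		return XDP_PASS;
	if (iph->protocol != IPPROTO_UDP)
		return XDP_PASS;

	if (udp->dest == bpf_htons(7)) /* echo port, arbitrary example */
		return XDP_TX;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";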
/* bnxt_xdp_xmit(): the driver's ndo_xdp_xmit hook for redirected frames */
	struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
	struct pci_dev *pdev = bp->pdev;
	...
	dma_addr_t mapping;
	...
	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
	    !bp->tx_nr_rings_xdp ||
	    !xdp_prog)
		return -EINVAL;

	/* spread redirected traffic over the XDP TX rings by CPU */
	ring = smp_processor_id() % bp->tx_nr_rings_xdp;
	txr = &bp->tx_ring[ring];

	if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
		return -EINVAL;
	...
		spin_lock(&txr->xdp_tx_lock);
	...
		mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
					 DMA_TO_DEVICE);
		...
		if (dma_mapping_error(&pdev->dev, mapping))
			break;

		__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
	...
		/* Sync BD data before updating doorbell */
		wmb();
		bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	...
		spin_unlock(&txr->xdp_tx_lock);
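
Frames reach bnxt_xdp_xmit() through the kernel's redirect machinery, most commonly from a program that returns bpf_redirect_map() on a DEVMAP. A hedged sketch of that redirecting side; the map name, single-slot layout and use of slot 0 are illustrative assumptions:

/* DEVMAP redirect sketch: send every frame to the ifindex stored at
 * slot 0. On success the target driver's ndo_xdp_xmit() (here,
 * bnxt_xdp_xmit()) receives the frame; on a lookup miss the lower
 * bits of the flags argument select the fallback action, XDP_PASS. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32); /* target ifindex, filled in from user space */
} tx_port SEC(".maps");

SEC("xdp")
int redirect_all(struct xdp_md *ctx)
{
	return bpf_redirect_map(&tx_port, 0, XDP_PASS);
}

char _license[] SEC("license") = "GPL";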
/* bnxt_xdp_set(): attach or detach a program and re-plan the rings */
	struct net_device *dev = bp->dev;
	...
	if (prog && !prog->aux->xdp_has_frags &&
	    bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
		netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
		return -EOPNOTSUPP;
	}
	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
		...
		return -EOPNOTSUPP;
	}
	/* one dedicated XDP TX ring per RX ring */
	if (prog)
		tx_xdp = bp->rx_nr_rings;

	tc = bp->num_tc;
	...
	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      true, tc, tx_xdp);
	...
	old = xchg(&bp->xdp_prog, prog);
	...
		/* detaching: aggregation rings and LRO become legal again */
		bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features |= NETIF_F_LRO;
	...
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
	bp->cp_nr_rings = max_t(int, tx_cp, bp->rx_nr_rings);
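
bnxt_xdp_set() runs when user space attaches a program to the device. With libbpf the attach side looks roughly like the sketch below; the object and program names match the illustrative sketches above, and bpf_xdp_attach() needs libbpf >= 0.8:

/* User-space attach sketch: load an object, find the program, attach
 * it in native driver mode. Error handling is deliberately minimal. */
#include <stdio.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

int main(int argc, char **argv)
{
	struct bpf_program *prog;
	struct bpf_object *obj;
	int ifindex, err;

	if (argc < 2)
		return 1;
	ifindex = if_nametoindex(argv[1]);
	if (!ifindex)
		return 1;

	obj = bpf_object__open_file("xdp_reflect.o", NULL);
	if (!obj || bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "xdp_reflect");
	if (!prog)
		return 1;

	/* XDP_FLAGS_DRV_MODE requests native mode, i.e. the driver's
	 * XDP_SETUP_PROG path (bnxt_xdp_set() above), not generic XDP. */
	err = bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			     XDP_FLAGS_DRV_MODE, NULL);
	if (err)
		fprintf(stderr, "XDP attach failed: %d\n", err);
	return err ? 1 : 0;
}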
/* bnxt_xdp(): ndo_bpf entry point, dispatching on the requested command */
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rc = bnxt_xdp_set(bp, xdp->prog);
		break;
	default:
		rc = -EINVAL;
		break;
	}
/* bnxt_xdp_build_skb(): finish an skb built around a multi-buffer XDP frame */
	if (bp->dev->features & NETIF_F_RXCSUM) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = RX_CMP_ENCAP(rxcmp1);
	}
	...
	xdp_update_skb_shared_info(skb, num_frags,
				   sinfo->xdp_frags_size,
				   BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
				   xdp_buff_is_frag_pfmemalloc(xdp));