// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Broadcom.

#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/gro.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>
#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/dst_metadata.h>
#include <net/netdev_queues.h>

#include "bnge.h"
#include "bnge_hwrm.h"
#include "bnge_hwrm_lib.h"
#include "bnge_netdev.h"
#include "bnge_rmem.h"
#include "bnge_txrx.h"

irqreturn_t bnge_msix(int irq, void *dev_instance)
{
	struct bnge_napi *bnapi = dev_instance;
	struct bnge_nq_ring_info *nqr;
	struct bnge_net *bn;
	u32 cons;

	bn = bnapi->bn;
	nqr = &bnapi->nq_ring;
	cons = RING_CMP(bn, nqr->nq_raw_cons);

	prefetch(&nqr->desc_ring[CP_RING(cons)][CP_IDX(cons)]);
	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

static struct rx_agg_cmp *bnge_get_tpa_agg(struct bnge_net *bn,
					   struct bnge_rx_ring_info *rxr,
					   u16 agg_id, u16 curr)
{
	struct bnge_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

static struct rx_agg_cmp *bnge_get_agg(struct bnge_net *bn,
				       struct bnge_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(bn, ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

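/* Return the aggregation buffers of a completed or aborted packet to the
 * RX aggregation ring so they can be reposted.  @idx is the completion
 * ring index of the first aggregation entry, or the TPA aggregation ID
 * when @tpa is set, in which case the entries are replayed from
 * tpa_info->agg_arr[].
 */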
static void bnge_reuse_rx_agg_bufs(struct bnge_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnge_napi *bnapi = cpr->bnapi;
	struct bnge_net *bn = bnapi->bn;
	struct bnge_rx_ring_info *rxr;
	u16 prod, sw_prod;
	u32 i;

	rxr = bnapi->rx_ring;
	sw_prod = rxr->rx_sw_agg_prod;
	prod = rxr->rx_agg_prod;

	for (i = 0; i < agg_bufs; i++) {
		struct bnge_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_agg_cmp *agg;
		struct rx_bd *prod_bd;
		netmem_ref netmem;
		u16 cons;

		if (tpa)
			agg = bnge_get_tpa_agg(bn, rxr, idx, start + i);
		else
			agg = bnge_get_agg(bn, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_buf_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_buf_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->netmem to 0 first.
		 */
		netmem = cons_rx_buf->netmem;
		cons_rx_buf->netmem = 0;
		prod_rx_buf->netmem = netmem;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)]
						[RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static int bnge_agg_bufs_valid(struct bnge_net *bn,
			       struct bnge_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	struct rx_agg_cmp *agg;
	u16 last;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(bn, *raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(bn, agg, *raw_cons);
}

static int bnge_discard_rx(struct bnge_net *bn, struct bnge_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	u32 tmp_raw_cons = *raw_cons;
	struct rx_cmp *rxcmp = cmp;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		return 0;
	}

	if (agg_bufs) {
		if (!bnge_agg_bufs_valid(bn, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static u32 __bnge_rx_agg_netmems(struct bnge_net *bn,
				 struct bnge_cp_ring_info *cpr,
				 u16 idx, u32 agg_bufs, bool tpa,
				 struct sk_buff *skb)
{
	struct bnge_napi *bnapi = cpr->bnapi;
	struct skb_shared_info *shinfo;
	struct bnge_rx_ring_info *rxr;
	u32 i, total_frag_len = 0;
	u16 prod;

	rxr = bnapi->rx_ring;
	prod = rxr->rx_agg_prod;
	shinfo = skb_shinfo(skb);

	for (i = 0; i < agg_bufs; i++) {
		struct bnge_sw_rx_agg_bd *cons_rx_buf;
		struct rx_agg_cmp *agg;
		u16 cons, frag_len;
		netmem_ref netmem;

		if (tpa)
			agg = bnge_get_tpa_agg(bn, rxr, idx, i);
		else
			agg = bnge_get_agg(bn, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_buf_ring[cons];
		skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
				       cons_rx_buf->offset,
				       frag_len, BNGE_RX_PAGE_SIZE);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnge_alloc_rx_netmem() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		netmem = cons_rx_buf->netmem;
		cons_rx_buf->netmem = 0;

		if (bnge_alloc_rx_netmem(bn, rxr, prod, GFP_ATOMIC) != 0) {
			skb->len -= frag_len;
			skb->data_len -= frag_len;
			skb->truesize -= BNGE_RX_PAGE_SIZE;

			--shinfo->nr_frags;
			cons_rx_buf->netmem = netmem;

			/* Update prod since possibly some netmems have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnge_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return 0;
		}

		page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
						  BNGE_RX_PAGE_SIZE);

		total_frag_len += frag_len;
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return total_frag_len;
}

static struct sk_buff *bnge_rx_agg_netmems_skb(struct bnge_net *bn,
					       struct bnge_cp_ring_info *cpr,
					       struct sk_buff *skb, u16 idx,
					       u32 agg_bufs, bool tpa)
{
	u32 total_frag_len;

	total_frag_len = __bnge_rx_agg_netmems(bn, cpr, idx, agg_bufs,
					       tpa, skb);
	if (!total_frag_len) {
		skb_mark_for_recycle(skb);
		dev_kfree_skb(skb);
		return NULL;
	}

	return skb;
}

static void bnge_sched_reset_rxr(struct bnge_net *bn,
				 struct bnge_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;

		/* TODO: Initiate reset task */
	}
	rxr->rx_next_cons = 0xffff;
}

static void bnge_sched_reset_txr(struct bnge_net *bn,
				 struct bnge_tx_ring_info *txr,
				 u16 curr)
{
	struct bnge_napi *bnapi = txr->bnapi;

	if (bnapi->tx_fault)
		return;

	netdev_err(bn->netdev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
		   txr->txq_index, txr->tx_hw_cons,
		   txr->tx_cons, txr->tx_prod, curr);
	WARN_ON_ONCE(1);
	bnapi->tx_fault = 1;
	/* TODO: Initiate reset task */
}

static u16 bnge_tpa_alloc_agg_idx(struct bnge_rx_ring_info *rxr, u16 agg_id)
{
	struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_MASK;

	if (test_bit(idx, map->agg_idx_bmap)) {
		idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA);
		if (idx >= MAX_TPA)
			return INVALID_HW_RING_ID;
	}
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnge_free_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
{
	struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnge_lookup_agg_idx(struct bnge_rx_ring_info *rxr, u16 agg_id)
{
	struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

static void bnge_tpa_metadata(struct bnge_tpa_info *tpa_info,
			      struct rx_tpa_start_cmp *tpa_start,
			      struct rx_tpa_start_cmp_ext *tpa_start1)
{
	tpa_info->cfa_code_valid = 1;
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	tpa_info->vlan_valid = 0;
	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
		tpa_info->vlan_valid = 1;
		tpa_info->metadata =
			le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	}
}

static void bnge_tpa_metadata_v2(struct bnge_tpa_info *tpa_info,
				 struct rx_tpa_start_cmp *tpa_start,
				 struct rx_tpa_start_cmp_ext *tpa_start1)
{
	tpa_info->vlan_valid = 0;
	if (TPA_START_VLAN_VALID(tpa_start)) {
		u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
		u32 vlan_proto = ETH_P_8021Q;

		tpa_info->vlan_valid = 1;
		if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
			vlan_proto = ETH_P_8021AD;
		tpa_info->metadata = vlan_proto << 16 |
				     TPA_START_METADATA0_TCI(tpa_start1);
	}
}

static void bnge_tpa_start(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
			   u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnge_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnge_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	agg_id = TPA_START_AGG_ID(tpa_start);
	agg_id = bnge_tpa_alloc_agg_idx(rxr, agg_id);
	if (unlikely(agg_id == INVALID_HW_RING_ID)) {
		netdev_warn(bn->netdev, "Unable to allocate agg ID for ring %d, agg 0x%lx\n",
			    rxr->bnapi->index, TPA_START_AGG_ID(tpa_start));
		bnge_sched_reset_rxr(bn, rxr);
		return;
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bn->netdev, "TPA cons %x, expected cons %x, error code %lx\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnge_sched_reset_rxr(bn, rxr);
		return;
	}
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
		RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		if (TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		else
			tpa_info->gso_type = SKB_GSO_TCPV4;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		netif_warn(bn, rx_err, bn->netdev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
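	/* hdr_info carries the inner/outer header offsets that
	 * bnge_gro_func() uses when the aggregation completes.
	 */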
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
		bnge_tpa_metadata(tpa_info, tpa_start, tpa_start1);
	else
		bnge_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = RING_RX(bn, NEXT_RX(cons));
	rxr->rx_next_cons = RING_RX(bn, NEXT_RX(cons));
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnge_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnge_abort_tpa(struct bnge_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnge_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

static void bnge_tpa_agg(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
			 struct rx_agg_cmp *rx_agg)
{
	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
	struct bnge_tpa_info *tpa_info;

	agg_id = bnge_lookup_agg_idx(rxr, agg_id);
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(tpa_info->agg_count >= MAX_SKB_FRAGS)) {
		netdev_warn(bn->netdev,
			    "TPA completion count %d exceeds limit for ring %d\n",
			    tpa_info->agg_count, rxr->bnapi->index);

		bnge_sched_reset_rxr(bn, rxr);
		return;
	}

	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
}

void bnge_reuse_rx_data(struct bnge_rx_ring_info *rxr, u16 cons, void *data)
{
	struct bnge_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnge_net *bn = rxr->bnapi->bn;
	struct rx_bd *cons_bd, *prod_bd;
	u16 prod = rxr->rx_prod;

	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(bn, cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static void bnge_deliver_skb(struct bnge_net *bn, struct bnge_napi *bnapi,
			     struct sk_buff *skb)
{
	skb_mark_for_recycle(skb);
	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
}

static struct sk_buff *bnge_copy_skb(struct bnge_napi *bnapi, u8 *data,
				     unsigned int len, dma_addr_t mapping)
{
	struct bnge_net *bn = bnapi->bn;
	struct bnge_dev *bd = bn->bd;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(bd->dev, mapping, len, bn->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(bd->dev, mapping, len, bn->rx_dir);

	skb_put(skb, len);

	return skb;
}

#ifdef CONFIG_INET
static void bnge_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}

static struct sk_buff *bnge_gro_func(struct bnge_tpa_info *tpa_info,
				     int payload_off, int tcp_ts,
				     struct sk_buff *skb)
{
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNGE_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNGE_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNGE_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		    sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnge_gro_tunnel(skb, proto);
	}

	return skb;
}

static struct sk_buff *bnge_gro_skb(struct bnge_net *bn,
				    struct bnge_tpa_info *tpa_info,
				    struct rx_tpa_end_cmp *tpa_end,
				    struct rx_tpa_end_cmp_ext *tpa_end1,
				    struct sk_buff *skb)
{
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = TPA_END_PAYLOAD_OFF(tpa_end1);
	skb = bnge_gro_func(tpa_info, payload_off,
			    TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);

	return skb;
}
#endif

static struct sk_buff *bnge_tpa_end(struct bnge_net *bn,
				    struct bnge_cp_ring_info *cpr,
				    u32 *raw_cons,
				    struct rx_tpa_end_cmp *tpa_end,
				    struct rx_tpa_end_cmp_ext *tpa_end1,
				    u8 *event)
{
	struct bnge_napi *bnapi = cpr->bnapi;
	struct net_device *dev = bn->netdev;
	struct bnge_tpa_info *tpa_info;
	struct bnge_rx_ring_info *rxr;
	u8 *data_ptr, agg_bufs;
	struct sk_buff *skb;
	u16 idx = 0, agg_id;
	dma_addr_t mapping;
	unsigned int len;
	void *data;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnge_discard_rx(bn, cpr, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	rxr = bnapi->rx_ring;
	agg_id = TPA_END_AGG_ID(tpa_end);
	agg_id = bnge_lookup_agg_idx(rxr, agg_id);
	agg_bufs = TPA_END_AGG_BUFS(tpa_end1);
	tpa_info = &rxr->rx_tpa[agg_id];
	if (unlikely(agg_bufs != tpa_info->agg_count)) {
		netdev_warn(bn->netdev, "TPA end agg_buf %d != expected agg_bufs %d\n",
			    agg_bufs, tpa_info->agg_count);
		agg_bufs = tpa_info->agg_count;
	}
	tpa_info->agg_count = 0;
	*event |= BNGE_AGG_EVENT;
	bnge_free_agg_idx(rxr, agg_id);
	idx = agg_id;
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnge_abort_tpa(cpr, idx, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bn->netdev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bn->rx_copybreak) {
		skb = bnge_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnge_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
	} else {
		dma_addr_t new_mapping;
		u8 *new_data;

		new_data = __bnge_alloc_rx_frag(bn, &new_mapping, rxr,
						GFP_ATOMIC);
		if (!new_data) {
			bnge_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bn->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = napi_build_skb(data, bn->rx_buf_size);
		dma_sync_single_for_cpu(bn->bd->dev, mapping,
					bn->rx_buf_use_size, bn->rx_dir);

		if (!skb) {
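			/* skb could not be built around the old head buffer;
			 * free it and unwind the aggregation.
			 */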
			page_pool_free_va(rxr->head_pool, data, true);
			bnge_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
		skb_mark_for_recycle(skb);
		skb_reserve(skb, bn->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnge_rx_agg_netmems_skb(bn, cpr, skb, idx, agg_bufs,
					      true);
		/* Page reuse already handled by bnge_rx_agg_netmems_skb(). */
		if (!skb)
			return NULL;
	}

	skb->protocol = eth_type_trans(skb, dev);

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if (tpa_info->vlan_valid &&
	    (dev->features & BNGE_HW_FEATURE_VLAN_ALL_RX)) {
		__be16 vlan_proto = htons(tpa_info->metadata >>
					  RX_CMP_FLAGS2_METADATA_TPID_SFT);
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;

		if (eth_type_vlan(vlan_proto)) {
			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
		} else {
			dev_kfree_skb(skb);
			return NULL;
		}
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

#ifdef CONFIG_INET
	if (bn->priv_flags & BNGE_NET_EN_GRO)
		skb = bnge_gro_skb(bn, tpa_info, tpa_end, tpa_end1, skb);
#endif

	return skb;
}

static enum pkt_hash_types bnge_rss_ext_op(struct bnge_net *bn,
					   struct rx_cmp *rxcmp)
{
	u8 ext_op = RX_CMP_V3_HASH_TYPE(bn->bd, rxcmp);

	switch (ext_op) {
	case EXT_OP_INNER_4:
	case EXT_OP_OUTER_4:
	case EXT_OP_INNFL_3:
	case EXT_OP_OUTFL_3:
		return PKT_HASH_TYPE_L4;
	default:
		return PKT_HASH_TYPE_L3;
	}
}

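/* Pull the VLAN TCI and TPID out of the RX completion and attach them to
 * the skb.  Frees the skb and returns NULL if the TPID is not a
 * recognized VLAN ethertype.
 */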
static struct sk_buff *bnge_rx_vlan(struct sk_buff *skb, u8 cmp_type,
				    struct rx_cmp *rxcmp,
				    struct rx_cmp_ext *rxcmp1)
{
	__be16 vlan_proto;
	u16 vtag;

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		__le32 flags2 = rxcmp1->rx_cmp_flags2;
		u32 meta_data;

		if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
			return skb;

		meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
		vlan_proto =
			htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
		if (eth_type_vlan(vlan_proto))
			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
		else
			goto vlan_err;
	} else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
		if (RX_CMP_VLAN_VALID(rxcmp)) {
			u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);

			if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
				vlan_proto = htons(ETH_P_8021Q);
			else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
				vlan_proto = htons(ETH_P_8021AD);
			else
				goto vlan_err;
			vtag = RX_CMP_METADATA0_TCI(rxcmp1);
			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
		}
	}
	return skb;

vlan_err:
	skb_mark_for_recycle(skb);
	dev_kfree_skb(skb);
	return NULL;
}

static struct sk_buff *bnge_rx_skb(struct bnge_net *bn,
				   struct bnge_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int len)
{
	struct bnge_dev *bd = bn->bd;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnge_alloc_rx_data(bn, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnge_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	dma_sync_single_for_cpu(bd->dev, dma_addr, len, bn->rx_dir);
	skb = napi_build_skb(data, bn->rx_buf_size);
	if (!skb) {
		page_pool_free_va(rxr->head_pool, data, true);
		return NULL;
	}

	skb_mark_for_recycle(skb);
	skb_reserve(skb, bn->rx_offset);
	skb_put(skb, len);
	return skb;
}

/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnge_rx_pkt(struct bnge_net *bn, struct bnge_cp_ring_info *cpr,
		       u32 *raw_cons, u8 *event)
{
	struct bnge_napi *bnapi = cpr->bnapi;
	struct net_device *dev = bn->netdev;
	struct bnge_rx_ring_info *rxr;
	u32 tmp_raw_cons, flags, misc;
	struct bnge_sw_rx_bd *rx_buf;
	struct rx_cmp_ext *rxcmp1;
	u16 cons, prod, cp_cons;
	u8 *data_ptr, cmp_type;
	struct rx_cmp *rxcmp;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	unsigned int len;
	u8 agg_bufs;
	void *data;
	int rc = 0;

	rxr = bnapi->rx_ring;

	tmp_raw_cons = *raw_cons;
	cp_cons = RING_CMP(bn, tmp_raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
		bnge_tpa_agg(bn, rxr, (struct rx_agg_cmp *)rxcmp);
		goto next_rx_no_prod_no_len;
	}

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(bn, tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
		&cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(bn, rxcmp1, tmp_raw_cons))
		return -EBUSY;

	/* The valid test of the entry must be done first before
	 * reading any further.
	 */
	dma_rmb();
	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
	    cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
		bnge_tpa_start(bn, rxr, cmp_type,
			       (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		*event |= BNGE_RX_EVENT;
		goto next_rx_no_prod_no_len;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnge_tpa_end(bn, cpr, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
		if (IS_ERR(skb))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			bnge_deliver_skb(bn, bnapi, skb);
			rc = 1;
		}
		*event |= BNGE_RX_EVENT;
		goto next_rx_no_prod_no_len;
	}

	cons = rxcmp->rx_cmp_opaque;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnge_discard_rx(bn, cpr, &tmp_raw_cons, rxcmp);

		/* 0xffff is forced error, don't print it */
		if (rxr->rx_next_cons != 0xffff)
			netdev_warn(bn->netdev, "RX cons %x != expected cons %x\n",
				    cons, rxr->rx_next_cons);
		bnge_sched_reset_rxr(bn, rxr);
		if (rc1)
			return rc1;
		goto next_rx_no_prod_no_len;
	}
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	data_ptr = rx_buf->data_ptr;
	prefetch(data_ptr);

	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnge_agg_bufs_valid(bn, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(bn, cp_cons);
		*event |= BNGE_AGG_EVENT;
	}
	*event |= BNGE_RX_EVENT;

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnge_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnge_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
					       false);
		rc = -EIO;
		goto next_rx_no_len;
	}

	flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
	len = flags >> RX_CMP_LEN_SHIFT;
	dma_addr = rx_buf->mapping;

	if (len <= bn->rx_copybreak) {
		skb = bnge_copy_skb(bnapi, data_ptr, len, dma_addr);
		bnge_reuse_rx_data(rxr, cons, data);
	} else {
		skb = bnge_rx_skb(bn, rxr, cons, data, data_ptr, dma_addr, len);
	}

	if (!skb) {
		if (agg_bufs)
			bnge_reuse_rx_agg_bufs(cpr, cp_cons, 0,
					       agg_bufs, false);
		goto oom_next_rx;
	}

	if (agg_bufs) {
		skb = bnge_rx_agg_netmems_skb(bn, cpr, skb, cp_cons,
					      agg_bufs, false);
		if (!skb)
			goto oom_next_rx;
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		enum pkt_hash_types type;

		if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
			type = bnge_rss_ext_op(bn, rxcmp);
		} else {
			u32 itypes = RX_CMP_ITYPES(rxcmp);

			if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
			    itypes == RX_CMP_FLAGS_ITYPE_UDP)
				type = PKT_HASH_TYPE_L4;
			else
				type = PKT_HASH_TYPE_L3;
		}
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	skb->protocol = eth_type_trans(skb, dev);

	if (skb->dev->features & BNGE_HW_FEATURE_VLAN_ALL_RX) {
		skb = bnge_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
		if (!skb)
			goto next_rx;
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	}

	bnge_deliver_skb(bn, bnapi, skb);
	rc = 1;

next_rx:
	/* Update Stats */
next_rx_no_len:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = RING_RX(bn, NEXT_RX(cons));

next_rx_no_prod_no_len:
	*raw_cons = tmp_raw_cons;
	return rc;

oom_next_rx:
	rc = -ENOMEM;
	goto next_rx;
}

/* In netpoll mode, if we are using a combined completion ring, we need to
 * discard the rx packets and recycle the buffers.
 */
static int bnge_force_rx_discard(struct bnge_net *bn,
				 struct bnge_cp_ring_info *cpr,
				 u32 *raw_cons, u8 *event)
{
	u32 tmp_raw_cons = *raw_cons;
	struct rx_cmp_ext *rxcmp1;
	struct rx_cmp *rxcmp;
	u16 cp_cons;
	u8 cmp_type;
	int rc;

	cp_cons = RING_CMP(bn, tmp_raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(bn, tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
		&cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(bn, rxcmp1, tmp_raw_cons))
		return -EBUSY;

	/* The valid test of the entry must be done first before
	 * reading any further.
	 */
	dma_rmb();
	cmp_type = RX_CMP_TYPE(rxcmp);
	if (cmp_type == CMP_TYPE_RX_L2_CMP ||
	    cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp_ext *tpa_end1;

		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
	}
	rc = bnge_rx_pkt(bn, cpr, raw_cons, event);
	return rc;
}

static void __bnge_tx_int(struct bnge_net *bn, struct bnge_tx_ring_info *txr,
			  int budget)
{
	u16 hw_cons = txr->tx_hw_cons;
	struct bnge_dev *bd = bn->bd;
	unsigned int tx_bytes = 0;
	unsigned int tx_pkts = 0;
	struct netdev_queue *txq;
	u16 cons = txr->tx_cons;
	skb_frag_t *frag;

	txq = netdev_get_tx_queue(bn->netdev, txr->txq_index);

	while (SW_TX_RING(bn, cons) != hw_cons) {
		struct bnge_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, cons)];
		skb = tx_buf->skb;
		if (unlikely(!skb)) {
			bnge_sched_reset_txr(bn, txr, cons);
			return;
		}

		cons = NEXT_TX(cons);
		tx_pkts++;
		tx_bytes += skb->len;
		tx_buf->skb = NULL;

		dma_unmap_single(bd->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), DMA_TO_DEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			frag = &skb_shinfo(skb)->frags[j];
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, cons)];
			netmem_dma_unmap_page_attrs(bd->dev,
						    dma_unmap_addr(tx_buf,
								   mapping),
						    skb_frag_size(frag),
						    DMA_TO_DEVICE, 0);
		}

		cons = NEXT_TX(cons);

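		/* Head and all fragments are unmapped; hand the skb back
		 * (napi_consume_skb() batches frees in NAPI context).
		 */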
Marreddy napi_consume_skb(skb, budget); 1088bd5ad9c0SBhargava Marreddy } 1089bd5ad9c0SBhargava Marreddy 1090bd5ad9c0SBhargava Marreddy WRITE_ONCE(txr->tx_cons, cons); 1091bd5ad9c0SBhargava Marreddy 1092bd5ad9c0SBhargava Marreddy __netif_txq_completed_wake(txq, tx_pkts, tx_bytes, 1093bd5ad9c0SBhargava Marreddy bnge_tx_avail(bn, txr), bn->tx_wake_thresh, 1094bd5ad9c0SBhargava Marreddy (READ_ONCE(txr->dev_state) == 1095bd5ad9c0SBhargava Marreddy BNGE_DEV_STATE_CLOSING)); 1096bd5ad9c0SBhargava Marreddy } 1097bd5ad9c0SBhargava Marreddy 1098bd5ad9c0SBhargava Marreddy static void bnge_tx_int(struct bnge_net *bn, struct bnge_napi *bnapi, 1099bd5ad9c0SBhargava Marreddy int budget) 1100bd5ad9c0SBhargava Marreddy { 1101bd5ad9c0SBhargava Marreddy struct bnge_tx_ring_info *txr; 1102bd5ad9c0SBhargava Marreddy int i; 1103bd5ad9c0SBhargava Marreddy 1104bd5ad9c0SBhargava Marreddy bnge_for_each_napi_tx(i, bnapi, txr) { 1105bd5ad9c0SBhargava Marreddy if (txr->tx_hw_cons != SW_TX_RING(bn, txr->tx_cons)) 1106bd5ad9c0SBhargava Marreddy __bnge_tx_int(bn, txr, budget); 1107bd5ad9c0SBhargava Marreddy } 1108bd5ad9c0SBhargava Marreddy 1109bd5ad9c0SBhargava Marreddy bnapi->events &= ~BNGE_TX_CMP_EVENT; 1110bd5ad9c0SBhargava Marreddy } 1111bd5ad9c0SBhargava Marreddy 11124d6a6005SBhargava Marreddy static void __bnge_poll_work_done(struct bnge_net *bn, struct bnge_napi *bnapi, 11134d6a6005SBhargava Marreddy int budget) 11144d6a6005SBhargava Marreddy { 11154d6a6005SBhargava Marreddy struct bnge_rx_ring_info *rxr = bnapi->rx_ring; 11164d6a6005SBhargava Marreddy 1117bd5ad9c0SBhargava Marreddy if ((bnapi->events & BNGE_TX_CMP_EVENT) && !bnapi->tx_fault) 1118bd5ad9c0SBhargava Marreddy bnge_tx_int(bn, bnapi, budget); 1119bd5ad9c0SBhargava Marreddy 11204d6a6005SBhargava Marreddy if ((bnapi->events & BNGE_RX_EVENT)) { 11214d6a6005SBhargava Marreddy bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod); 11224d6a6005SBhargava Marreddy bnapi->events &= ~BNGE_RX_EVENT; 11234d6a6005SBhargava Marreddy } 1124c858ac87SBhargava Marreddy 1125c858ac87SBhargava Marreddy if (bnapi->events & BNGE_AGG_EVENT) { 1126c858ac87SBhargava Marreddy bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod); 1127c858ac87SBhargava Marreddy bnapi->events &= ~BNGE_AGG_EVENT; 1128c858ac87SBhargava Marreddy } 11294d6a6005SBhargava Marreddy } 11304d6a6005SBhargava Marreddy 113123cfc4e8SBhargava Marreddy static void 113223cfc4e8SBhargava Marreddy bnge_hwrm_update_token(struct bnge_dev *bd, u16 seq_id, 113323cfc4e8SBhargava Marreddy enum bnge_hwrm_wait_state state) 113423cfc4e8SBhargava Marreddy { 113523cfc4e8SBhargava Marreddy struct bnge_hwrm_wait_token *token; 113623cfc4e8SBhargava Marreddy 113723cfc4e8SBhargava Marreddy rcu_read_lock(); 113823cfc4e8SBhargava Marreddy hlist_for_each_entry_rcu(token, &bd->hwrm_pending_list, node) { 113923cfc4e8SBhargava Marreddy if (token->seq_id == seq_id) { 114023cfc4e8SBhargava Marreddy WRITE_ONCE(token->state, state); 114123cfc4e8SBhargava Marreddy rcu_read_unlock(); 114223cfc4e8SBhargava Marreddy return; 114323cfc4e8SBhargava Marreddy } 114423cfc4e8SBhargava Marreddy } 114523cfc4e8SBhargava Marreddy rcu_read_unlock(); 114623cfc4e8SBhargava Marreddy dev_err(bd->dev, "Invalid hwrm seq id %d\n", seq_id); 114723cfc4e8SBhargava Marreddy } 114823cfc4e8SBhargava Marreddy 114923cfc4e8SBhargava Marreddy static int bnge_hwrm_handler(struct bnge_dev *bd, struct tx_cmp *txcmp) 115023cfc4e8SBhargava Marreddy { 115123cfc4e8SBhargava Marreddy struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 115223cfc4e8SBhargava Marreddy u16 cmpl_type 
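        /* A HWRM_DONE completion carries the sequence_id of the firmware
         * request that has finished; bnge_hwrm_update_token() looks that id
         * up in bd->hwrm_pending_list under RCU and marks the matching
         * waiter BNGE_HWRM_COMPLETE so the sleeping requester can proceed.
         */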
= TX_CMP_TYPE(txcmp), seq_id; 115323cfc4e8SBhargava Marreddy 115423cfc4e8SBhargava Marreddy switch (cmpl_type) { 115523cfc4e8SBhargava Marreddy case CMPL_BASE_TYPE_HWRM_DONE: 115623cfc4e8SBhargava Marreddy seq_id = le16_to_cpu(h_cmpl->sequence_id); 115723cfc4e8SBhargava Marreddy bnge_hwrm_update_token(bd, seq_id, BNGE_HWRM_COMPLETE); 115823cfc4e8SBhargava Marreddy break; 115923cfc4e8SBhargava Marreddy 116023cfc4e8SBhargava Marreddy case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 116123cfc4e8SBhargava Marreddy default: 116223cfc4e8SBhargava Marreddy break; 116323cfc4e8SBhargava Marreddy } 116423cfc4e8SBhargava Marreddy 116523cfc4e8SBhargava Marreddy return 0; 116623cfc4e8SBhargava Marreddy } 116723cfc4e8SBhargava Marreddy 11684d6a6005SBhargava Marreddy static int __bnge_poll_work(struct bnge_net *bn, struct bnge_cp_ring_info *cpr, 11694d6a6005SBhargava Marreddy int budget) 11704d6a6005SBhargava Marreddy { 11714d6a6005SBhargava Marreddy struct bnge_napi *bnapi = cpr->bnapi; 11724d6a6005SBhargava Marreddy u32 raw_cons = cpr->cp_raw_cons; 11734d6a6005SBhargava Marreddy struct tx_cmp *txcmp; 11744d6a6005SBhargava Marreddy int rx_pkts = 0; 11754d6a6005SBhargava Marreddy u8 event = 0; 11764d6a6005SBhargava Marreddy u32 cons; 11774d6a6005SBhargava Marreddy 11784d6a6005SBhargava Marreddy cpr->has_more_work = 0; 11794d6a6005SBhargava Marreddy cpr->had_work_done = 1; 11804d6a6005SBhargava Marreddy while (1) { 11814d6a6005SBhargava Marreddy u8 cmp_type; 11824d6a6005SBhargava Marreddy int rc; 11834d6a6005SBhargava Marreddy 11844d6a6005SBhargava Marreddy cons = RING_CMP(bn, raw_cons); 11854d6a6005SBhargava Marreddy txcmp = &cpr->desc_ring[CP_RING(cons)][CP_IDX(cons)]; 11864d6a6005SBhargava Marreddy 11874d6a6005SBhargava Marreddy if (!TX_CMP_VALID(bn, txcmp, raw_cons)) 11884d6a6005SBhargava Marreddy break; 11894d6a6005SBhargava Marreddy 11904d6a6005SBhargava Marreddy /* The valid test of the entry must be done first before 11914d6a6005SBhargava Marreddy * reading any further. 11924d6a6005SBhargava Marreddy */ 11934d6a6005SBhargava Marreddy dma_rmb(); 11944d6a6005SBhargava Marreddy cmp_type = TX_CMP_TYPE(txcmp); 11954d6a6005SBhargava Marreddy if (cmp_type == CMP_TYPE_TX_L2_CMP || 11964d6a6005SBhargava Marreddy cmp_type == CMP_TYPE_TX_L2_COAL_CMP) { 1197bd5ad9c0SBhargava Marreddy u32 opaque = txcmp->tx_cmp_opaque; 1198bd5ad9c0SBhargava Marreddy struct bnge_tx_ring_info *txr; 1199bd5ad9c0SBhargava Marreddy u16 tx_freed; 1200bd5ad9c0SBhargava Marreddy 1201bd5ad9c0SBhargava Marreddy txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)]; 1202bd5ad9c0SBhargava Marreddy event |= BNGE_TX_CMP_EVENT; 1203bd5ad9c0SBhargava Marreddy if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP) 1204bd5ad9c0SBhargava Marreddy txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp); 1205bd5ad9c0SBhargava Marreddy else 1206bd5ad9c0SBhargava Marreddy txr->tx_hw_cons = TX_OPAQUE_PROD(bn, opaque); 1207bd5ad9c0SBhargava Marreddy tx_freed = ((txr->tx_hw_cons - txr->tx_cons) & 1208bd5ad9c0SBhargava Marreddy bn->tx_ring_mask); 1209bd5ad9c0SBhargava Marreddy /* return full budget so NAPI will complete. 
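                         * The completed descriptors are reclaimed later by
                         * bnge_tx_int(), called from __bnge_poll_work_done()
                         * once this poll round ends and BNGE_TX_CMP_EVENT is
                         * seen in bnapi->events.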
                         */
1210bd5ad9c0SBhargava Marreddy                         if (unlikely(tx_freed >= bn->tx_wake_thresh)) {
1211bd5ad9c0SBhargava Marreddy                                 rx_pkts = budget;
1212bd5ad9c0SBhargava Marreddy                                 raw_cons = NEXT_RAW_CMP(raw_cons);
1213bd5ad9c0SBhargava Marreddy                                 if (budget)
1214bd5ad9c0SBhargava Marreddy                                         cpr->has_more_work = 1;
1215bd5ad9c0SBhargava Marreddy                                 break;
1216bd5ad9c0SBhargava Marreddy                         }
12174d6a6005SBhargava Marreddy                 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
12184d6a6005SBhargava Marreddy                            cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
12194d6a6005SBhargava Marreddy                         if (likely(budget))
12204d6a6005SBhargava Marreddy                                 rc = bnge_rx_pkt(bn, cpr, &raw_cons, &event);
12214d6a6005SBhargava Marreddy                         else
12224d6a6005SBhargava Marreddy                                 rc = bnge_force_rx_discard(bn, cpr, &raw_cons,
12234d6a6005SBhargava Marreddy                                                            &event);
12244d6a6005SBhargava Marreddy                         if (likely(rc >= 0))
12254d6a6005SBhargava Marreddy                                 rx_pkts += rc;
12264d6a6005SBhargava Marreddy                         /* Increment rx_pkts when rc is -ENOMEM to count towards
12274d6a6005SBhargava Marreddy                          * the NAPI budget. Otherwise, we may potentially loop
12284d6a6005SBhargava Marreddy                          * here forever if we consistently cannot allocate
12294d6a6005SBhargava Marreddy                          * buffers.
12304d6a6005SBhargava Marreddy                          */
12314d6a6005SBhargava Marreddy                         else if (rc == -ENOMEM && budget)
12324d6a6005SBhargava Marreddy                                 rx_pkts++;
12334d6a6005SBhargava Marreddy                         else if (rc == -EBUSY)  /* partial completion */
12344d6a6005SBhargava Marreddy                                 break;
123523cfc4e8SBhargava Marreddy                 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
123623cfc4e8SBhargava Marreddy                                     cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
123723cfc4e8SBhargava Marreddy                                     cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
123823cfc4e8SBhargava Marreddy                         bnge_hwrm_handler(bn->bd, txcmp);
12394d6a6005SBhargava Marreddy                 }
12404d6a6005SBhargava Marreddy                 raw_cons = NEXT_RAW_CMP(raw_cons);
12414d6a6005SBhargava Marreddy 
12424d6a6005SBhargava Marreddy                 if (rx_pkts && rx_pkts == budget) {
12434d6a6005SBhargava Marreddy                         cpr->has_more_work = 1;
12444d6a6005SBhargava Marreddy                         break;
12454d6a6005SBhargava Marreddy                 }
12464d6a6005SBhargava Marreddy         }
12474d6a6005SBhargava Marreddy 
12484d6a6005SBhargava Marreddy         cpr->cp_raw_cons = raw_cons;
12494d6a6005SBhargava Marreddy         bnapi->events |= event;
12504d6a6005SBhargava Marreddy         return rx_pkts;
12514d6a6005SBhargava Marreddy }
12524d6a6005SBhargava Marreddy 
12534d6a6005SBhargava Marreddy static void __bnge_poll_cqs_done(struct bnge_net *bn, struct bnge_napi *bnapi,
12544d6a6005SBhargava Marreddy                                  u64 dbr_type, int budget)
12554d6a6005SBhargava Marreddy {
12564d6a6005SBhargava Marreddy         struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
12574d6a6005SBhargava Marreddy         int i;
12584d6a6005SBhargava Marreddy 
12594d6a6005SBhargava Marreddy         for (i = 0; i < nqr->cp_ring_count; i++) {
12604d6a6005SBhargava Marreddy                 struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[i];
12614d6a6005SBhargava Marreddy                 struct bnge_db_info *db;
12624d6a6005SBhargava Marreddy 
12634d6a6005SBhargava Marreddy                 if (cpr->had_work_done) {
12644d6a6005SBhargava Marreddy                         u32 tgl = 0;
12654d6a6005SBhargava Marreddy 
12664d6a6005SBhargava Marreddy                         if (dbr_type == DBR_TYPE_CQ_ARMALL) {
12674d6a6005SBhargava Marreddy                                 cpr->had_nqe_notify = 0;
12684d6a6005SBhargava Marreddy                                 tgl = cpr->toggle;
12694d6a6005SBhargava Marreddy                         }
12704d6a6005SBhargava Marreddy                         db = &cpr->cp_db;
12714d6a6005SBhargava Marreddy                         bnge_writeq(bn->bd,
12724d6a6005SBhargava Marreddy                                     db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
12734d6a6005SBhargava Marreddy                                     DB_RING_IDX(db, cpr->cp_raw_cons),
12744d6a6005SBhargava Marreddy db->doorbell); 12754d6a6005SBhargava Marreddy cpr->had_work_done = 0; 12764d6a6005SBhargava Marreddy } 12774d6a6005SBhargava Marreddy } 12784d6a6005SBhargava Marreddy __bnge_poll_work_done(bn, bnapi, budget); 12794d6a6005SBhargava Marreddy } 12804d6a6005SBhargava Marreddy 12814d6a6005SBhargava Marreddy static int __bnge_poll_cqs(struct bnge_net *bn, struct bnge_napi *bnapi, 12824d6a6005SBhargava Marreddy int budget) 12834d6a6005SBhargava Marreddy { 12844d6a6005SBhargava Marreddy struct bnge_nq_ring_info *nqr = &bnapi->nq_ring; 12854d6a6005SBhargava Marreddy int i, work_done = 0; 12864d6a6005SBhargava Marreddy 12874d6a6005SBhargava Marreddy for (i = 0; i < nqr->cp_ring_count; i++) { 12884d6a6005SBhargava Marreddy struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[i]; 12894d6a6005SBhargava Marreddy 12904d6a6005SBhargava Marreddy if (cpr->had_nqe_notify) { 12914d6a6005SBhargava Marreddy work_done += __bnge_poll_work(bn, cpr, 12924d6a6005SBhargava Marreddy budget - work_done); 12934d6a6005SBhargava Marreddy nqr->has_more_work |= cpr->has_more_work; 12944d6a6005SBhargava Marreddy } 12954d6a6005SBhargava Marreddy } 12964d6a6005SBhargava Marreddy return work_done; 12974d6a6005SBhargava Marreddy } 12984d6a6005SBhargava Marreddy 12994d6a6005SBhargava Marreddy int bnge_napi_poll(struct napi_struct *napi, int budget) 13004d6a6005SBhargava Marreddy { 13014d6a6005SBhargava Marreddy struct bnge_napi *bnapi = container_of(napi, struct bnge_napi, napi); 13024d6a6005SBhargava Marreddy struct bnge_nq_ring_info *nqr = &bnapi->nq_ring; 13034d6a6005SBhargava Marreddy u32 raw_cons = nqr->nq_raw_cons; 13044d6a6005SBhargava Marreddy struct bnge_net *bn = bnapi->bn; 13054d6a6005SBhargava Marreddy struct bnge_dev *bd = bn->bd; 13064d6a6005SBhargava Marreddy struct nqe_cn *nqcmp; 13074d6a6005SBhargava Marreddy int work_done = 0; 13084d6a6005SBhargava Marreddy u32 cons; 13094d6a6005SBhargava Marreddy 13104d6a6005SBhargava Marreddy if (nqr->has_more_work) { 13114d6a6005SBhargava Marreddy nqr->has_more_work = 0; 13124d6a6005SBhargava Marreddy work_done = __bnge_poll_cqs(bn, bnapi, budget); 13134d6a6005SBhargava Marreddy } 13144d6a6005SBhargava Marreddy 13154d6a6005SBhargava Marreddy while (1) { 13164d6a6005SBhargava Marreddy u16 type; 13174d6a6005SBhargava Marreddy 13184d6a6005SBhargava Marreddy cons = RING_CMP(bn, raw_cons); 13194d6a6005SBhargava Marreddy nqcmp = &nqr->desc_ring[CP_RING(cons)][CP_IDX(cons)]; 13204d6a6005SBhargava Marreddy 13214d6a6005SBhargava Marreddy if (!NQ_CMP_VALID(bn, nqcmp, raw_cons)) { 13224d6a6005SBhargava Marreddy if (nqr->has_more_work) 13234d6a6005SBhargava Marreddy break; 13244d6a6005SBhargava Marreddy 13254d6a6005SBhargava Marreddy __bnge_poll_cqs_done(bn, bnapi, DBR_TYPE_CQ_ARMALL, 13264d6a6005SBhargava Marreddy budget); 13274d6a6005SBhargava Marreddy nqr->nq_raw_cons = raw_cons; 13284d6a6005SBhargava Marreddy if (napi_complete_done(napi, work_done)) 13294d6a6005SBhargava Marreddy BNGE_DB_NQ_ARM(bd, &nqr->nq_db, 13304d6a6005SBhargava Marreddy nqr->nq_raw_cons); 13314d6a6005SBhargava Marreddy goto poll_done; 13324d6a6005SBhargava Marreddy } 13334d6a6005SBhargava Marreddy 13344d6a6005SBhargava Marreddy /* The valid test of the entry must be done first before 13354d6a6005SBhargava Marreddy * reading any further. 
13364d6a6005SBhargava Marreddy */ 13374d6a6005SBhargava Marreddy dma_rmb(); 13384d6a6005SBhargava Marreddy 13394d6a6005SBhargava Marreddy type = le16_to_cpu(nqcmp->type); 13404d6a6005SBhargava Marreddy if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) { 13414d6a6005SBhargava Marreddy u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 13424d6a6005SBhargava Marreddy u32 cq_type = BNGE_NQ_HDL_TYPE(idx); 13434d6a6005SBhargava Marreddy struct bnge_cp_ring_info *cpr; 13444d6a6005SBhargava Marreddy 13454d6a6005SBhargava Marreddy /* No more budget for RX work */ 13464d6a6005SBhargava Marreddy if (budget && work_done >= budget && 13474d6a6005SBhargava Marreddy cq_type == BNGE_NQ_HDL_TYPE_RX) 13484d6a6005SBhargava Marreddy break; 13494d6a6005SBhargava Marreddy 13504d6a6005SBhargava Marreddy idx = BNGE_NQ_HDL_IDX(idx); 13514d6a6005SBhargava Marreddy cpr = &nqr->cp_ring_arr[idx]; 13524d6a6005SBhargava Marreddy cpr->had_nqe_notify = 1; 13534d6a6005SBhargava Marreddy cpr->toggle = NQE_CN_TOGGLE(type); 13544d6a6005SBhargava Marreddy work_done += __bnge_poll_work(bn, cpr, 13554d6a6005SBhargava Marreddy budget - work_done); 13564d6a6005SBhargava Marreddy nqr->has_more_work |= cpr->has_more_work; 135723cfc4e8SBhargava Marreddy } else { 135823cfc4e8SBhargava Marreddy bnge_hwrm_handler(bn->bd, (struct tx_cmp *)nqcmp); 13594d6a6005SBhargava Marreddy } 13604d6a6005SBhargava Marreddy raw_cons = NEXT_RAW_CMP(raw_cons); 13614d6a6005SBhargava Marreddy } 13624d6a6005SBhargava Marreddy 13634d6a6005SBhargava Marreddy __bnge_poll_cqs_done(bn, bnapi, DBR_TYPE_CQ, budget); 13644d6a6005SBhargava Marreddy if (raw_cons != nqr->nq_raw_cons) { 13654d6a6005SBhargava Marreddy nqr->nq_raw_cons = raw_cons; 13664d6a6005SBhargava Marreddy BNGE_DB_NQ(bd, &nqr->nq_db, raw_cons); 13674d6a6005SBhargava Marreddy } 13684d6a6005SBhargava Marreddy poll_done: 13694d6a6005SBhargava Marreddy return work_done; 13704d6a6005SBhargava Marreddy } 1371bd5ad9c0SBhargava Marreddy 1372bd5ad9c0SBhargava Marreddy static u16 bnge_xmit_get_cfa_action(struct sk_buff *skb) 1373bd5ad9c0SBhargava Marreddy { 1374bd5ad9c0SBhargava Marreddy struct metadata_dst *md_dst = skb_metadata_dst(skb); 1375bd5ad9c0SBhargava Marreddy 1376bd5ad9c0SBhargava Marreddy if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX) 1377bd5ad9c0SBhargava Marreddy return 0; 1378bd5ad9c0SBhargava Marreddy 1379bd5ad9c0SBhargava Marreddy return md_dst->u.port_info.port_id; 1380bd5ad9c0SBhargava Marreddy } 1381bd5ad9c0SBhargava Marreddy 1382bd5ad9c0SBhargava Marreddy static const u16 bnge_lhint_arr[] = { 1383bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_512_AND_SMALLER, 1384bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_512_TO_1023, 1385bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_1024_TO_2047, 1386bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_1024_TO_2047, 1387bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_2048_AND_LARGER, 1388bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_2048_AND_LARGER, 1389bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_2048_AND_LARGER, 1390bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_2048_AND_LARGER, 1391bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_2048_AND_LARGER, 1392bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_2048_AND_LARGER, 1393bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_2048_AND_LARGER, 1394bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_2048_AND_LARGER, 1395bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_2048_AND_LARGER, 1396bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_2048_AND_LARGER, 1397bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_2048_AND_LARGER, 1398bd5ad9c0SBhargava Marreddy 
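        /* Entries are indexed by (packet length >> 9): index 0 covers frames
         * shorter than 512 bytes, index 1 covers 512-1023 bytes, indexes 2-3
         * cover 1024-2047 bytes, and all larger lengths map to the
         * 2048-and-larger hint.  A 1500-byte frame, for example, gives
         * 1500 >> 9 == 2, i.e. TX_BD_FLAGS_LHINT_1024_TO_2047.
         */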
TX_BD_FLAGS_LHINT_2048_AND_LARGER, 1399bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_2048_AND_LARGER, 1400bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_2048_AND_LARGER, 1401bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_LHINT_2048_AND_LARGER, 1402bd5ad9c0SBhargava Marreddy }; 1403bd5ad9c0SBhargava Marreddy 1404bd5ad9c0SBhargava Marreddy static void bnge_txr_db_kick(struct bnge_net *bn, struct bnge_tx_ring_info *txr, 1405bd5ad9c0SBhargava Marreddy u16 prod) 1406bd5ad9c0SBhargava Marreddy { 1407bd5ad9c0SBhargava Marreddy /* Sync BD data before updating doorbell */ 1408bd5ad9c0SBhargava Marreddy wmb(); 1409bd5ad9c0SBhargava Marreddy bnge_db_write(bn->bd, &txr->tx_db, prod); 1410bd5ad9c0SBhargava Marreddy txr->kick_pending = 0; 1411bd5ad9c0SBhargava Marreddy } 1412bd5ad9c0SBhargava Marreddy 1413bd5ad9c0SBhargava Marreddy static u32 bnge_get_gso_hdr_len(struct sk_buff *skb) 1414bd5ad9c0SBhargava Marreddy { 1415bd5ad9c0SBhargava Marreddy bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4); 1416bd5ad9c0SBhargava Marreddy u32 hdr_len; 1417bd5ad9c0SBhargava Marreddy 1418bd5ad9c0SBhargava Marreddy if (skb->encapsulation) { 1419bd5ad9c0SBhargava Marreddy if (udp_gso) 1420bd5ad9c0SBhargava Marreddy hdr_len = skb_inner_transport_offset(skb) + 1421bd5ad9c0SBhargava Marreddy sizeof(struct udphdr); 1422bd5ad9c0SBhargava Marreddy else 1423bd5ad9c0SBhargava Marreddy hdr_len = skb_inner_tcp_all_headers(skb); 1424bd5ad9c0SBhargava Marreddy } else if (udp_gso) { 1425bd5ad9c0SBhargava Marreddy hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr); 1426bd5ad9c0SBhargava Marreddy } else { 1427bd5ad9c0SBhargava Marreddy hdr_len = skb_tcp_all_headers(skb); 1428bd5ad9c0SBhargava Marreddy } 1429bd5ad9c0SBhargava Marreddy 1430bd5ad9c0SBhargava Marreddy return hdr_len; 1431bd5ad9c0SBhargava Marreddy } 1432bd5ad9c0SBhargava Marreddy 1433bd5ad9c0SBhargava Marreddy netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev) 1434bd5ad9c0SBhargava Marreddy { 1435bd5ad9c0SBhargava Marreddy u32 len, free_size, vlan_tag_flags, cfa_action, flags; 1436bd5ad9c0SBhargava Marreddy struct bnge_net *bn = netdev_priv(dev); 1437bd5ad9c0SBhargava Marreddy struct bnge_tx_ring_info *txr; 1438bd5ad9c0SBhargava Marreddy struct bnge_dev *bd = bn->bd; 1439bd5ad9c0SBhargava Marreddy struct bnge_sw_tx_bd *tx_buf; 1440bd5ad9c0SBhargava Marreddy struct tx_bd *txbd, *txbd0; 1441bd5ad9c0SBhargava Marreddy struct netdev_queue *txq; 1442bd5ad9c0SBhargava Marreddy struct tx_bd_ext *txbd1; 1443bd5ad9c0SBhargava Marreddy u16 prod, last_frag; 1444bd5ad9c0SBhargava Marreddy unsigned int length; 1445bd5ad9c0SBhargava Marreddy dma_addr_t mapping; 1446bd5ad9c0SBhargava Marreddy __le32 lflags = 0; 1447bd5ad9c0SBhargava Marreddy skb_frag_t *frag; 1448bd5ad9c0SBhargava Marreddy int i; 1449bd5ad9c0SBhargava Marreddy 1450bd5ad9c0SBhargava Marreddy i = skb_get_queue_mapping(skb); 1451bd5ad9c0SBhargava Marreddy txq = netdev_get_tx_queue(dev, i); 1452bd5ad9c0SBhargava Marreddy txr = &bn->tx_ring[bn->tx_ring_map[i]]; 1453bd5ad9c0SBhargava Marreddy prod = txr->tx_prod; 1454bd5ad9c0SBhargava Marreddy 1455bd5ad9c0SBhargava Marreddy free_size = bnge_tx_avail(bn, txr); 1456bd5ad9c0SBhargava Marreddy if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { 1457bd5ad9c0SBhargava Marreddy /* We must have raced with NAPI cleanup */ 1458bd5ad9c0SBhargava Marreddy if (net_ratelimit() && txr->kick_pending) 1459bd5ad9c0SBhargava Marreddy netif_warn(bn, tx_err, dev, 1460bd5ad9c0SBhargava Marreddy "bnge: ring busy w/ flush pending!\n"); 1461bd5ad9c0SBhargava 
Marreddy if (!netif_txq_try_stop(txq, bnge_tx_avail(bn, txr), 1462bd5ad9c0SBhargava Marreddy bn->tx_wake_thresh)) 1463bd5ad9c0SBhargava Marreddy return NETDEV_TX_BUSY; 1464bd5ad9c0SBhargava Marreddy } 1465bd5ad9c0SBhargava Marreddy 1466bd5ad9c0SBhargava Marreddy last_frag = skb_shinfo(skb)->nr_frags; 1467bd5ad9c0SBhargava Marreddy 1468bd5ad9c0SBhargava Marreddy txbd = &txr->tx_desc_ring[TX_RING(bn, prod)][TX_IDX(prod)]; 1469bd5ad9c0SBhargava Marreddy 1470bd5ad9c0SBhargava Marreddy tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)]; 1471bd5ad9c0SBhargava Marreddy tx_buf->skb = skb; 1472bd5ad9c0SBhargava Marreddy tx_buf->nr_frags = last_frag; 1473bd5ad9c0SBhargava Marreddy 1474bd5ad9c0SBhargava Marreddy vlan_tag_flags = 0; 1475bd5ad9c0SBhargava Marreddy cfa_action = bnge_xmit_get_cfa_action(skb); 1476bd5ad9c0SBhargava Marreddy if (skb_vlan_tag_present(skb)) { 1477bd5ad9c0SBhargava Marreddy vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | 1478bd5ad9c0SBhargava Marreddy skb_vlan_tag_get(skb); 1479bd5ad9c0SBhargava Marreddy /* Currently supports 8021Q, 8021AD vlan offloads 1480bd5ad9c0SBhargava Marreddy * QINQ1, QINQ2, QINQ3 vlan headers are deprecated 1481bd5ad9c0SBhargava Marreddy */ 1482bd5ad9c0SBhargava Marreddy if (skb->vlan_proto == htons(ETH_P_8021Q)) 1483bd5ad9c0SBhargava Marreddy vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; 1484bd5ad9c0SBhargava Marreddy } 1485bd5ad9c0SBhargava Marreddy 1486bd5ad9c0SBhargava Marreddy if (unlikely(skb->no_fcs)) 1487bd5ad9c0SBhargava Marreddy lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC); 1488bd5ad9c0SBhargava Marreddy 1489bd5ad9c0SBhargava Marreddy if (eth_skb_pad(skb)) 1490bd5ad9c0SBhargava Marreddy goto tx_kick_pending; 1491bd5ad9c0SBhargava Marreddy 1492bd5ad9c0SBhargava Marreddy len = skb_headlen(skb); 1493bd5ad9c0SBhargava Marreddy 1494bd5ad9c0SBhargava Marreddy mapping = dma_map_single(bd->dev, skb->data, len, DMA_TO_DEVICE); 1495bd5ad9c0SBhargava Marreddy 1496bd5ad9c0SBhargava Marreddy if (unlikely(dma_mapping_error(bd->dev, mapping))) 1497bd5ad9c0SBhargava Marreddy goto tx_free; 1498bd5ad9c0SBhargava Marreddy 1499bd5ad9c0SBhargava Marreddy dma_unmap_addr_set(tx_buf, mapping, mapping); 1500bd5ad9c0SBhargava Marreddy flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 1501bd5ad9c0SBhargava Marreddy TX_BD_CNT(last_frag + 2); 1502bd5ad9c0SBhargava Marreddy 1503bd5ad9c0SBhargava Marreddy txbd->tx_bd_haddr = cpu_to_le64(mapping); 1504bd5ad9c0SBhargava Marreddy txbd->tx_bd_opaque = SET_TX_OPAQUE(bn, txr, prod, 2 + last_frag); 1505bd5ad9c0SBhargava Marreddy 1506bd5ad9c0SBhargava Marreddy prod = NEXT_TX(prod); 1507bd5ad9c0SBhargava Marreddy txbd1 = (struct tx_bd_ext *) 1508bd5ad9c0SBhargava Marreddy &txr->tx_desc_ring[TX_RING(bn, prod)][TX_IDX(prod)]; 1509bd5ad9c0SBhargava Marreddy 1510bd5ad9c0SBhargava Marreddy if (skb_is_gso(skb)) { 1511bd5ad9c0SBhargava Marreddy u32 hdr_len = bnge_get_gso_hdr_len(skb); 1512bd5ad9c0SBhargava Marreddy 1513bd5ad9c0SBhargava Marreddy lflags |= cpu_to_le32(TX_BD_FLAGS_LSO | TX_BD_FLAGS_T_IPID | 1514bd5ad9c0SBhargava Marreddy (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 1515bd5ad9c0SBhargava Marreddy length = skb_shinfo(skb)->gso_size; 1516bd5ad9c0SBhargava Marreddy txbd1->tx_bd_mss = cpu_to_le32(length); 1517bd5ad9c0SBhargava Marreddy length += hdr_len; 1518bd5ad9c0SBhargava Marreddy } else { 1519bd5ad9c0SBhargava Marreddy length = skb->len; 1520bd5ad9c0SBhargava Marreddy if (skb->ip_summed == CHECKSUM_PARTIAL) { 1521bd5ad9c0SBhargava Marreddy lflags |= cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 1522bd5ad9c0SBhargava Marreddy 
txbd1->tx_bd_mss = 0; 1523bd5ad9c0SBhargava Marreddy } 1524bd5ad9c0SBhargava Marreddy } 1525bd5ad9c0SBhargava Marreddy 1526bd5ad9c0SBhargava Marreddy flags |= bnge_lhint_arr[length >> 9]; 1527bd5ad9c0SBhargava Marreddy 1528bd5ad9c0SBhargava Marreddy txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 1529bd5ad9c0SBhargava Marreddy txbd1->tx_bd_hsize_lflags = lflags; 1530bd5ad9c0SBhargava Marreddy txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 1531bd5ad9c0SBhargava Marreddy txbd1->tx_bd_cfa_action = 1532bd5ad9c0SBhargava Marreddy cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 1533bd5ad9c0SBhargava Marreddy txbd0 = txbd; 1534bd5ad9c0SBhargava Marreddy for (i = 0; i < last_frag; i++) { 1535bd5ad9c0SBhargava Marreddy frag = &skb_shinfo(skb)->frags[i]; 1536bd5ad9c0SBhargava Marreddy 1537bd5ad9c0SBhargava Marreddy prod = NEXT_TX(prod); 1538bd5ad9c0SBhargava Marreddy txbd = &txr->tx_desc_ring[TX_RING(bn, prod)][TX_IDX(prod)]; 1539bd5ad9c0SBhargava Marreddy 1540bd5ad9c0SBhargava Marreddy len = skb_frag_size(frag); 1541bd5ad9c0SBhargava Marreddy mapping = skb_frag_dma_map(bd->dev, frag, 0, len, 1542bd5ad9c0SBhargava Marreddy DMA_TO_DEVICE); 1543bd5ad9c0SBhargava Marreddy 1544bd5ad9c0SBhargava Marreddy if (unlikely(dma_mapping_error(bd->dev, mapping))) 1545bd5ad9c0SBhargava Marreddy goto tx_dma_error; 1546bd5ad9c0SBhargava Marreddy 1547bd5ad9c0SBhargava Marreddy tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)]; 1548bd5ad9c0SBhargava Marreddy netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf, 1549bd5ad9c0SBhargava Marreddy mapping, mapping); 1550bd5ad9c0SBhargava Marreddy 1551bd5ad9c0SBhargava Marreddy txbd->tx_bd_haddr = cpu_to_le64(mapping); 1552bd5ad9c0SBhargava Marreddy 1553bd5ad9c0SBhargava Marreddy flags = len << TX_BD_LEN_SHIFT; 1554bd5ad9c0SBhargava Marreddy txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 1555bd5ad9c0SBhargava Marreddy } 1556bd5ad9c0SBhargava Marreddy 1557bd5ad9c0SBhargava Marreddy flags &= ~TX_BD_LEN; 1558bd5ad9c0SBhargava Marreddy txbd->tx_bd_len_flags_type = 1559bd5ad9c0SBhargava Marreddy cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags | 1560bd5ad9c0SBhargava Marreddy TX_BD_FLAGS_PACKET_END); 1561bd5ad9c0SBhargava Marreddy 1562bd5ad9c0SBhargava Marreddy netdev_tx_sent_queue(txq, skb->len); 1563bd5ad9c0SBhargava Marreddy 1564bd5ad9c0SBhargava Marreddy prod = NEXT_TX(prod); 1565bd5ad9c0SBhargava Marreddy WRITE_ONCE(txr->tx_prod, prod); 1566bd5ad9c0SBhargava Marreddy 1567bd5ad9c0SBhargava Marreddy if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 1568bd5ad9c0SBhargava Marreddy bnge_txr_db_kick(bn, txr, prod); 1569bd5ad9c0SBhargava Marreddy } else { 1570bd5ad9c0SBhargava Marreddy if (free_size >= bn->tx_wake_thresh) 1571bd5ad9c0SBhargava Marreddy txbd0->tx_bd_len_flags_type |= 1572bd5ad9c0SBhargava Marreddy cpu_to_le32(TX_BD_FLAGS_NO_CMPL); 1573bd5ad9c0SBhargava Marreddy txr->kick_pending = 1; 1574bd5ad9c0SBhargava Marreddy } 1575bd5ad9c0SBhargava Marreddy 1576bd5ad9c0SBhargava Marreddy if (unlikely(bnge_tx_avail(bn, txr) <= MAX_SKB_FRAGS + 1)) { 1577bd5ad9c0SBhargava Marreddy if (netdev_xmit_more()) { 1578bd5ad9c0SBhargava Marreddy txbd0->tx_bd_len_flags_type &= 1579bd5ad9c0SBhargava Marreddy cpu_to_le32(~TX_BD_FLAGS_NO_CMPL); 1580bd5ad9c0SBhargava Marreddy bnge_txr_db_kick(bn, txr, prod); 1581bd5ad9c0SBhargava Marreddy } 1582bd5ad9c0SBhargava Marreddy 1583bd5ad9c0SBhargava Marreddy netif_txq_try_stop(txq, bnge_tx_avail(bn, txr), 1584bd5ad9c0SBhargava Marreddy bn->tx_wake_thresh); 1585bd5ad9c0SBhargava Marreddy } 1586bd5ad9c0SBhargava Marreddy return 
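        /* Doorbell policy: when the stack indicates more packets are coming
         * (netdev_xmit_more()) and the queue is not stopped, the doorbell
         * write is deferred and kick_pending is set; the ring is kicked by a
         * later packet, by the low-space path above, or by the error path
         * below.  TX_BD_FLAGS_NO_CMPL is set on the first BD while plenty of
         * descriptors remain free, to avoid requesting a per-packet
         * completion for this skb.
         */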
NETDEV_TX_OK; 1587bd5ad9c0SBhargava Marreddy 1588bd5ad9c0SBhargava Marreddy tx_dma_error: 1589bd5ad9c0SBhargava Marreddy last_frag = i; 1590bd5ad9c0SBhargava Marreddy 1591bd5ad9c0SBhargava Marreddy /* start back at beginning and unmap skb */ 1592bd5ad9c0SBhargava Marreddy prod = txr->tx_prod; 1593bd5ad9c0SBhargava Marreddy tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)]; 1594bd5ad9c0SBhargava Marreddy dma_unmap_single(bd->dev, dma_unmap_addr(tx_buf, mapping), 1595bd5ad9c0SBhargava Marreddy skb_headlen(skb), DMA_TO_DEVICE); 1596bd5ad9c0SBhargava Marreddy prod = NEXT_TX(prod); 1597bd5ad9c0SBhargava Marreddy 1598bd5ad9c0SBhargava Marreddy /* unmap remaining mapped pages */ 1599bd5ad9c0SBhargava Marreddy for (i = 0; i < last_frag; i++) { 1600bd5ad9c0SBhargava Marreddy prod = NEXT_TX(prod); 1601bd5ad9c0SBhargava Marreddy tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)]; 1602bd5ad9c0SBhargava Marreddy frag = &skb_shinfo(skb)->frags[i]; 1603bd5ad9c0SBhargava Marreddy netmem_dma_unmap_page_attrs(bd->dev, 1604bd5ad9c0SBhargava Marreddy dma_unmap_addr(tx_buf, mapping), 1605bd5ad9c0SBhargava Marreddy skb_frag_size(frag), 1606bd5ad9c0SBhargava Marreddy DMA_TO_DEVICE, 0); 1607bd5ad9c0SBhargava Marreddy } 1608bd5ad9c0SBhargava Marreddy 1609bd5ad9c0SBhargava Marreddy tx_free: 1610bd5ad9c0SBhargava Marreddy dev_kfree_skb_any(skb); 1611bd5ad9c0SBhargava Marreddy 1612bd5ad9c0SBhargava Marreddy tx_kick_pending: 1613bd5ad9c0SBhargava Marreddy if (txr->kick_pending) 1614bd5ad9c0SBhargava Marreddy bnge_txr_db_kick(bn, txr, txr->tx_prod); 1615bd5ad9c0SBhargava Marreddy txr->tx_buf_ring[SW_TX_RING(bn, txr->tx_prod)].skb = NULL; 1616bd5ad9c0SBhargava Marreddy dev_core_stats_tx_dropped_inc(dev); 1617bd5ad9c0SBhargava Marreddy return NETDEV_TX_OK; 1618bd5ad9c0SBhargava Marreddy } 16195deaeae1SBhargava Marreddy 16205deaeae1SBhargava Marreddy netdev_features_t bnge_features_check(struct sk_buff *skb, 16215deaeae1SBhargava Marreddy struct net_device *dev, 16225deaeae1SBhargava Marreddy netdev_features_t features) 16235deaeae1SBhargava Marreddy { 16245deaeae1SBhargava Marreddy u32 len; 16255deaeae1SBhargava Marreddy 16265deaeae1SBhargava Marreddy features = vlan_features_check(skb, features); 16275deaeae1SBhargava Marreddy #if (MAX_SKB_FRAGS > TX_MAX_FRAGS) 16285deaeae1SBhargava Marreddy if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) 16295deaeae1SBhargava Marreddy features &= ~NETIF_F_SG; 16305deaeae1SBhargava Marreddy #endif 16315deaeae1SBhargava Marreddy 16325deaeae1SBhargava Marreddy if (skb_is_gso(skb)) 16335deaeae1SBhargava Marreddy len = bnge_get_gso_hdr_len(skb) + skb_shinfo(skb)->gso_size; 16345deaeae1SBhargava Marreddy else 16355deaeae1SBhargava Marreddy len = skb->len; 16365deaeae1SBhargava Marreddy 16375deaeae1SBhargava Marreddy len >>= 9; 16385deaeae1SBhargava Marreddy if (unlikely(len >= ARRAY_SIZE(bnge_lhint_arr))) 16395deaeae1SBhargava Marreddy features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 16405deaeae1SBhargava Marreddy 16415deaeae1SBhargava Marreddy return features; 16425deaeae1SBhargava Marreddy } 1643
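/* bnge_features_check() mirrors the length-hint lookup in bnge_start_xmit():
 * the estimated per-packet length (GSO header length plus gso_size for GSO
 * skbs, skb->len otherwise) is shifted right by 9, and if the result would
 * index past the end of bnge_lhint_arr[], checksum and GSO offloads are
 * dropped for that skb so the stack falls back to software segmentation and
 * checksumming and the hardware length-hint lookup stays in bounds.
 */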