Lines Matching +full:data +full:- +full:mapping

1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
31 dma_addr_t mapping; in qede_alloc_rx_buffer() local
32 struct page *data; in qede_alloc_rx_buffer() local
34 /* In case lazy-allocation is allowed, postpone allocation until the in qede_alloc_rx_buffer()
38 if (allow_lazy && likely(rxq->filled_buffers > 12)) { in qede_alloc_rx_buffer()
39 rxq->filled_buffers--; in qede_alloc_rx_buffer()
43 data = alloc_pages(GFP_ATOMIC, 0); in qede_alloc_rx_buffer()
44 if (unlikely(!data)) in qede_alloc_rx_buffer()
45 return -ENOMEM; in qede_alloc_rx_buffer()
48 * for multiple RX buffer segment size mapping. in qede_alloc_rx_buffer()
50 mapping = dma_map_page(rxq->dev, data, 0, in qede_alloc_rx_buffer()
51 PAGE_SIZE, rxq->data_direction); in qede_alloc_rx_buffer()
52 if (unlikely(dma_mapping_error(rxq->dev, mapping))) { in qede_alloc_rx_buffer()
53 __free_page(data); in qede_alloc_rx_buffer()
54 return -ENOMEM; in qede_alloc_rx_buffer()
57 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; in qede_alloc_rx_buffer()
58 sw_rx_data->page_offset = 0; in qede_alloc_rx_buffer()
59 sw_rx_data->data = data; in qede_alloc_rx_buffer()
60 sw_rx_data->mapping = mapping; in qede_alloc_rx_buffer()
63 rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); in qede_alloc_rx_buffer()
65 rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); in qede_alloc_rx_buffer()
66 rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) + in qede_alloc_rx_buffer()
67 rxq->rx_headroom); in qede_alloc_rx_buffer()
69 rxq->sw_rx_prod++; in qede_alloc_rx_buffer()
70 rxq->filled_buffers++; in qede_alloc_rx_buffer()
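
The allocation path above follows the usual streaming-DMA pattern for RX page buffers: allocate a page, map it with dma_map_page(), and only publish the buffer to the ring once dma_mapping_error() confirms the mapping took. A minimal sketch of that pattern, not the driver's code (struct my_rx_buf and my_alloc_rx_page are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct my_rx_buf {                      /* hypothetical book-keeping entry */
        struct page *page;
        dma_addr_t mapping;
};

/* Allocate one RX page and map it for device access; 0 on success. */
static int my_alloc_rx_page(struct device *dev, struct my_rx_buf *buf,
                            enum dma_data_direction dir)
{
        struct page *page = alloc_page(GFP_ATOMIC);
        dma_addr_t map;

        if (unlikely(!page))
                return -ENOMEM;

        map = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
        if (unlikely(dma_mapping_error(dev, map))) {
                __free_page(page);      /* never hand an unmapped page to HW */
                return -ENOMEM;
        }

        buf->page = page;
        buf->mapping = map;             /* written into the HW descriptor later */
        return 0;
}
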
75 /* Unmap the data and free skb */
78 u16 idx = txq->sw_tx_cons; in qede_free_tx_pkt()
79 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_tx_pkt()
84 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; in qede_free_tx_pkt()
89 "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n", in qede_free_tx_pkt()
90 idx, txq->sw_tx_cons, txq->sw_tx_prod); in qede_free_tx_pkt()
91 return -1; in qede_free_tx_pkt()
94 *len = skb->len; in qede_free_tx_pkt()
96 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
100 nbds = first_bd->data.nbds; in qede_free_tx_pkt()
104 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
108 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), in qede_free_tx_pkt()
111 /* Unmap the data of the skb frags */ in qede_free_tx_pkt()
112 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { in qede_free_tx_pkt()
114 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
115 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), in qede_free_tx_pkt()
120 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
124 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_tx_pkt()
125 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_tx_pkt()
130 /* Unmap the data and free skb when mapping failed during start_xmit */
135 u16 idx = txq->sw_tx_prod; in qede_free_failed_tx_pkt()
136 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_failed_tx_pkt()
141 qed_chain_set_prod(&txq->tx_pbl, in qede_free_failed_tx_pkt()
142 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); in qede_free_failed_tx_pkt()
144 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
148 qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
150 nbd--; in qede_free_failed_tx_pkt()
153 dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd), in qede_free_failed_tx_pkt()
156 /* Unmap the data of the skb frags */ in qede_free_failed_tx_pkt()
159 qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
160 if (tx_data_bd->nbytes) in qede_free_failed_tx_pkt()
161 dma_unmap_page(txq->dev, in qede_free_failed_tx_pkt()
167 qed_chain_set_prod(&txq->tx_pbl, in qede_free_failed_tx_pkt()
168 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); in qede_free_failed_tx_pkt()
172 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_failed_tx_pkt()
173 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_failed_tx_pkt()
181 if (skb->ip_summed != CHECKSUM_PARTIAL) in qede_xmit_type()
186 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) in qede_xmit_type()
189 if (skb->encapsulation) { in qede_xmit_type()
192 unsigned short gso_type = skb_shinfo(skb)->gso_type; in qede_xmit_type()
226 l4_proto = ipv6_hdr(skb)->nexthdr; in qede_set_params_for_ipv6_ext()
228 l4_proto = ip_hdr(skb)->protocol; in qede_set_params_for_ipv6_ext()
234 third_bd->data.bitfields |= in qede_set_params_for_ipv6_ext()
239 second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1); in qede_set_params_for_ipv6_ext()
240 second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); in qede_set_params_for_ipv6_ext()
246 dma_addr_t mapping; in map_frag_to_bd() local
248 /* Map skb non-linear frag data for DMA */ in map_frag_to_bd()
249 mapping = skb_frag_dma_map(txq->dev, frag, 0, in map_frag_to_bd()
251 if (unlikely(dma_mapping_error(txq->dev, mapping))) in map_frag_to_bd()
252 return -ENOMEM; in map_frag_to_bd()
254 /* Setup the data pointer of the frag data */ in map_frag_to_bd()
255 BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag)); in map_frag_to_bd()
272 int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1; in qede_pkt_req_lin()
281 allowed_frags--; in qede_pkt_req_lin()
284 return (skb_shinfo(skb)->nr_frags > allowed_frags); in qede_pkt_req_lin()
290 /* wmb makes sure that the BDs data is updated before updating the in qede_update_tx_producer()
291 * producer, otherwise FW may read old data from the BDs. in qede_update_tx_producer()
295 writel(txq->tx_db.raw, txq->doorbell_addr); in qede_update_tx_producer()
298 * CPU may write to the same doorbell address and data may be lost in qede_update_tx_producer()
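
The two comments above spell out a common producer/doorbell ordering rule: all descriptor stores must be globally visible before the doorbell write, and a second barrier flushes a write-combined doorbell BAR that another CPU might also be writing. A hedged sketch of that ordering, not qede's exact code (my_ring_doorbell is hypothetical):

#include <linux/io.h>
#include <linux/types.h>

/* Publish new descriptors and ring the device doorbell.
 * db_val is the raw doorbell word already prepared by the caller.
 */
static void my_ring_doorbell(void __iomem *db_addr, u32 db_val)
{
        /* Order all prior descriptor stores before the doorbell write,
         * otherwise the device could fetch stale descriptor contents.
         */
        wmb();

        writel(db_val, db_addr);

        /* On a write-combined BAR another CPU may merge writes into the
         * same doorbell; a second barrier flushes the WC buffer promptly.
         */
        wmb();
}
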
311 if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >= in qede_xdp_xmit()
312 txq->num_tx_buffers)) { in qede_xdp_xmit()
313 txq->stopped_cnt++; in qede_xdp_xmit()
314 return -ENOMEM; in qede_xdp_xmit()
317 bd = qed_chain_produce(&txq->tx_pbl); in qede_xdp_xmit()
318 bd->data.nbds = 1; in qede_xdp_xmit()
319 bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT); in qede_xdp_xmit()
324 bd->data.bitfields = cpu_to_le16(val); in qede_xdp_xmit()
329 xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod; in qede_xdp_xmit()
330 xdp->mapping = dma; in qede_xdp_xmit()
331 xdp->page = page; in qede_xdp_xmit()
332 xdp->xdpf = xdpf; in qede_xdp_xmit()
334 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; in qede_xdp_xmit()
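
Two wraparound styles appear in this listing: the RX software ring masks a free-running producer with NUM_RX_BDS_MAX, while the TX rings keep sw_tx_prod in range with a modulo against num_tx_buffers. The standalone snippet below (RING_SIZE is hypothetical) shows that for a power-of-two ring the two forms select the same slots, which is why the cheaper mask is usable whenever the ring size is a fixed power of two:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8U                    /* hypothetical; must be a power of two for the mask form */
#define RING_MASK (RING_SIZE - 1U)

int main(void)
{
        uint16_t prod = 0;              /* free-running producer counter */
        unsigned int i;

        for (i = 0; i < 10; i++) {
                unsigned int masked = prod & RING_MASK;   /* power-of-two rings only */
                unsigned int mod = prod % RING_SIZE;      /* any ring size */

                printf("prod=%u masked=%u mod=%u\n",
                       (unsigned int)prod, masked, mod);
                prod++;                 /* u16 wraps naturally at 65536 */
        }
        return 0;
}
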
343 struct device *dmadev = &edev->pdev->dev; in qede_xdp_transmit()
346 dma_addr_t mapping; in qede_xdp_transmit() local
351 return -EINVAL; in qede_xdp_transmit()
354 return -ENETDOWN; in qede_xdp_transmit()
356 i = smp_processor_id() % edev->total_xdp_queues; in qede_xdp_transmit()
357 xdp_tx = edev->fp_array[i].xdp_tx; in qede_xdp_transmit()
359 spin_lock(&xdp_tx->xdp_tx_lock); in qede_xdp_transmit()
364 mapping = dma_map_single(dmadev, xdpf->data, xdpf->len, in qede_xdp_transmit()
366 if (unlikely(dma_mapping_error(dmadev, mapping))) in qede_xdp_transmit()
369 if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len, in qede_xdp_transmit()
376 xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl); in qede_xdp_transmit()
378 xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); in qede_xdp_transmit()
382 spin_unlock(&xdp_tx->xdp_tx_lock); in qede_xdp_transmit()
393 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_txq_has_work()
394 if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1) in qede_txq_has_work()
397 return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); in qede_txq_has_work()
402 struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp; in qede_xdp_tx_int()
403 struct device *dev = &edev->pdev->dev; in qede_xdp_tx_int()
407 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_xdp_tx_int()
410 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { in qede_xdp_tx_int()
411 xdp_info = xdp_arr + txq->sw_tx_cons; in qede_xdp_tx_int()
412 xdpf = xdp_info->xdpf; in qede_xdp_tx_int()
415 dma_unmap_single(dev, xdp_info->mapping, xdpf->len, in qede_xdp_tx_int()
419 xdp_info->xdpf = NULL; in qede_xdp_tx_int()
421 dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE, in qede_xdp_tx_int()
423 __free_page(xdp_info->page); in qede_xdp_tx_int()
426 qed_chain_consume(&txq->tx_pbl); in qede_xdp_tx_int()
427 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; in qede_xdp_tx_int()
428 txq->xmit_pkts++; in qede_xdp_tx_int()
439 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id); in qede_tx_int()
441 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_tx_int()
444 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { in qede_tx_int()
451 qed_chain_get_cons_idx(&txq->tx_pbl)); in qede_tx_int()
457 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; in qede_tx_int()
458 txq->xmit_pkts++; in qede_tx_int()
480 * stops the queue->sees fresh tx_bd_cons->releases the queue-> in qede_tx_int()
481 * sends some packets consuming the whole queue again-> in qede_tx_int()
488 (edev->state == QEDE_STATE_OPEN) && in qede_tx_int()
489 (qed_chain_get_elem_left(&txq->tx_pbl) in qede_tx_int()
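
The comment above describes the race between a transmitter that has just stopped a full queue and the completion path that frees descriptors at the same time. The usual cure, sketched generically below (struct my_txq and its fields are hypothetical, and the threshold is illustrative), is to re-check ring occupancy under a full memory barrier after stopping the queue and wake it immediately if a racing completion already made room; the tx_int path above performs the matching checks (queue stopped, device open, enough elements left).

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>

struct my_txq {                         /* hypothetical TX queue state */
        struct netdev_queue *ndev_txq;
        u16 prod;                       /* advanced by the xmit path */
        u16 cons;                       /* advanced by the completion path */
        u16 size;
};

static u16 my_txq_free(const struct my_txq *txq)
{
        return txq->size - (u16)(txq->prod - txq->cons);
}

/* Stop the queue when nearly full, then re-check after a full barrier: a
 * completion racing with the stop may already have freed descriptors, in
 * which case the queue is woken right away instead of stalling.
 */
static void my_maybe_stop_tx(struct my_txq *txq)
{
        if (my_txq_free(txq) >= MAX_SKB_FRAGS + 1)
                return;

        netif_tx_stop_queue(txq->ndev_txq);

        smp_mb();                       /* pairs with a barrier in the completion path */

        if (my_txq_free(txq) >= MAX_SKB_FRAGS + 1 &&
            netif_tx_queue_stopped(txq->ndev_txq))
                netif_tx_wake_queue(txq->ndev_txq);
}
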
509 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); in qede_has_rx_work()
510 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); in qede_has_rx_work()
517 qed_chain_consume(&rxq->rx_bd_ring); in qede_rx_bd_ring_consume()
518 rxq->sw_rx_cons++; in qede_rx_bd_ring_consume()
527 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); in qede_reuse_page()
531 curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; in qede_reuse_page()
534 new_mapping = curr_prod->mapping + curr_prod->page_offset; in qede_reuse_page()
536 rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping)); in qede_reuse_page()
537 rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) + in qede_reuse_page()
538 rxq->rx_headroom); in qede_reuse_page()
540 rxq->sw_rx_prod++; in qede_reuse_page()
541 curr_cons->data = NULL; in qede_reuse_page()
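
qede_reuse_page() and qede_alloc_rx_buffer() both program the RX BD with the DMA address split into little-endian hi/lo 32-bit halves, folding the headroom into the low word. The standalone snippet below shows that split; note that adding the headroom only to the low half is safe only when the addition cannot carry into the upper word (page-aligned buffer plus a sub-page headroom), which is my reading of the RX path rather than something the listing states:

#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit bus address into the hi/lo 32-bit halves a descriptor
 * expects; the driver additionally applies cpu_to_le32() to each half.
 */
static void split_addr(uint64_t addr, uint32_t headroom,
                       uint32_t *hi, uint32_t *lo)
{
        *hi = (uint32_t)(addr >> 32);
        *lo = (uint32_t)(addr & 0xffffffffu) + headroom;
}

int main(void)
{
        uint32_t hi, lo;

        split_addr(0x1234abcd000ULL, 64, &hi, &lo);     /* hypothetical values */
        printf("hi=0x%08x lo=0x%08x\n", hi, lo);
        return 0;
}
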
551 for (; count > 0; count--) { in qede_recycle_rx_bd_ring()
552 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; in qede_recycle_rx_bd_ring()
562 curr_cons->page_offset += rxq->rx_buf_seg_size; in qede_realloc_rx_buffer()
564 if (curr_cons->page_offset == PAGE_SIZE) { in qede_realloc_rx_buffer()
569 curr_cons->page_offset -= rxq->rx_buf_seg_size; in qede_realloc_rx_buffer()
571 return -ENOMEM; in qede_realloc_rx_buffer()
574 dma_unmap_page(rxq->dev, curr_cons->mapping, in qede_realloc_rx_buffer()
575 PAGE_SIZE, rxq->data_direction); in qede_realloc_rx_buffer()
581 page_ref_inc(curr_cons->data); in qede_realloc_rx_buffer()
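
qede_realloc_rx_buffer() advances page_offset by one rx_buf_seg_size per consumed buffer, takes an extra page reference while the page still has unused segments, and only unmaps and replaces the page once it is exhausted. A hedged sketch of that segment-reuse pattern (struct my_rx_buf here is hypothetical, not the driver's sw_rx_data):

#include <linux/mm.h>

struct my_rx_buf {                      /* hypothetical book-keeping entry */
        struct page *page;
        unsigned int offset;            /* start of the next unused segment */
};

/* Advance to the next segment of a partially used RX page, taking an extra
 * reference while both the stack and the ring still use the page. Returns
 * false once the page is exhausted, at which point the caller must post a
 * fresh buffer and unmap this one.
 */
static bool my_reuse_rx_segment(struct my_rx_buf *buf, unsigned int seg_size)
{
        buf->offset += seg_size;

        if (buf->offset == PAGE_SIZE)
                return false;           /* page used up - replace it */

        page_ref_inc(buf->page);        /* ring keeps using the same page */
        return true;
}
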
590 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); in qede_update_rx_prod()
591 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); in qede_update_rx_prod()
598 /* Make sure that the BD and SGE data is updated before updating the in qede_update_rx_prod()
604 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), in qede_update_rx_prod()
629 skb->ip_summed = CHECKSUM_UNNECESSARY; in qede_set_skb_csum()
632 skb->csum_level = 1; in qede_set_skb_csum()
633 skb->encapsulation = 1; in qede_set_skb_csum()
645 napi_gro_receive(&fp->napi, skb); in qede_skb_receive()
652 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); in qede_set_gro_params()
656 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in qede_set_gro_params()
658 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in qede_set_gro_params()
660 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - in qede_set_gro_params()
661 cqe->header_len; in qede_set_gro_params()
668 struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & in qede_fill_frag_skb()
670 struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index]; in qede_fill_frag_skb()
671 struct sk_buff *skb = tpa_info->skb; in qede_fill_frag_skb()
673 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) in qede_fill_frag_skb()
677 skb_fill_page_desc(skb, tpa_info->frag_id++, in qede_fill_frag_skb()
678 current_bd->data, in qede_fill_frag_skb()
679 current_bd->page_offset + rxq->rx_headroom, in qede_fill_frag_skb()
686 page_ref_inc(current_bd->data); in qede_fill_frag_skb()
692 skb->data_len += len_on_bd; in qede_fill_frag_skb()
693 skb->truesize += rxq->rx_buf_seg_size; in qede_fill_frag_skb()
694 skb->len += len_on_bd; in qede_fill_frag_skb()
699 tpa_info->state = QEDE_AGG_STATE_ERROR; in qede_fill_frag_skb()
702 return -ENOMEM; in qede_fill_frag_skb()
746 buf = page_address(bd->data) + bd->page_offset; in qede_build_skb()
747 skb = build_skb(buf, rxq->rx_buf_seg_size); in qede_build_skb()
767 bd->page_offset += rxq->rx_buf_seg_size; in qede_tpa_rx_build_skb()
769 if (bd->page_offset == PAGE_SIZE) { in qede_tpa_rx_build_skb()
773 bd->page_offset -= rxq->rx_buf_seg_size; in qede_tpa_rx_build_skb()
774 page_ref_inc(bd->data); in qede_tpa_rx_build_skb()
779 page_ref_inc(bd->data); in qede_tpa_rx_build_skb()
797 * data and benefit in reusing the page segment instead of in qede_rx_build_skb()
798 * un-mapping it. in qede_rx_build_skb()
800 if ((len + pad <= edev->rx_copybreak)) { in qede_rx_build_skb()
801 unsigned int offset = bd->page_offset + pad; in qede_rx_build_skb()
803 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); in qede_rx_build_skb()
808 skb_put_data(skb, page_address(bd->data) + offset, len); in qede_rx_build_skb()
820 page_ref_inc(bd->data); in qede_rx_build_skb()
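
The rx_copybreak comment above captures the usual trade-off: for short frames it is cheaper to copy the payload into a small freshly allocated skb and recycle the still-mapped RX page than to unmap the page and hand it to the stack. A rough outline of that decision, not the driver's exact code (my_rx_build_skb and its arguments are hypothetical; offset is the padding before the frame within a seg_size buffer):

#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Copybreak decision for a received frame: small frames are copied so the
 * DMA-mapped RX page can be recycled in place; anything larger is wrapped
 * around the page itself with build_skb(). Hedged sketch only.
 */
static struct sk_buff *my_rx_build_skb(struct net_device *ndev,
                                       struct page *page, unsigned int offset,
                                       unsigned int len, unsigned int seg_size,
                                       unsigned int copybreak)
{
        struct sk_buff *skb;

        if (len + offset <= copybreak) {
                skb = netdev_alloc_skb(ndev, len);
                if (unlikely(!skb))
                        return NULL;
                skb_put_data(skb, page_address(page) + offset, len);
                /* the page stays mapped and is simply re-posted to the ring */
                return skb;
        }

        /* large frame: the caller has unmapped the page and owns a reference */
        skb = build_skb(page_address(page), seg_size);
        if (unlikely(!skb))
                return NULL;
        skb_reserve(skb, offset);
        skb_put(skb, len);
        return skb;
}
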
835 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_start()
839 sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; in qede_tpa_start()
840 pad = cqe->placement_offset + rxq->rx_headroom; in qede_tpa_start()
842 tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons, in qede_tpa_start()
843 le16_to_cpu(cqe->len_on_first_bd), in qede_tpa_start()
845 tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset; in qede_tpa_start()
846 tpa_info->buffer.mapping = sw_rx_data_cons->mapping; in qede_tpa_start()
848 if (unlikely(!tpa_info->skb)) { in qede_tpa_start()
852 * this might be used by FW still, it will be re-used in qede_tpa_start()
855 tpa_info->tpa_start_fail = true; in qede_tpa_start()
857 tpa_info->state = QEDE_AGG_STATE_ERROR; in qede_tpa_start()
861 tpa_info->frag_id = 0; in qede_tpa_start()
862 tpa_info->state = QEDE_AGG_STATE_START; in qede_tpa_start()
864 if ((le16_to_cpu(cqe->pars_flags.flags) >> in qede_tpa_start()
867 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); in qede_tpa_start()
869 tpa_info->vlan_tag = 0; in qede_tpa_start()
871 qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); in qede_tpa_start()
874 qede_set_gro_params(edev, tpa_info->skb, cqe); in qede_tpa_start()
877 if (likely(cqe->bw_ext_bd_len_list[0])) in qede_tpa_start()
878 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_start()
879 le16_to_cpu(cqe->bw_ext_bd_len_list[0])); in qede_tpa_start()
881 if (unlikely(cqe->bw_ext_bd_len_list[1])) { in qede_tpa_start()
883 …"Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n"); in qede_tpa_start()
884 tpa_info->state = QEDE_AGG_STATE_ERROR; in qede_tpa_start()
897 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), in qede_gro_ip_csum()
898 iph->saddr, iph->daddr, 0); in qede_gro_ip_csum()
911 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), in qede_gro_ipv6_csum()
912 &iph->saddr, &iph->daddr, 0); in qede_gro_ipv6_csum()
927 if (unlikely(!skb->data_len)) { in qede_gro_receive()
928 skb_shinfo(skb)->gso_type = 0; in qede_gro_receive()
929 skb_shinfo(skb)->gso_size = 0; in qede_gro_receive()
934 if (skb_shinfo(skb)->gso_size) { in qede_gro_receive()
937 switch (skb->protocol) { in qede_gro_receive()
947 ntohs(skb->protocol)); in qede_gro_receive()
953 skb_record_rx_queue(skb, fp->rxq->rxq_id); in qede_gro_receive()
954 qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag); in qede_gro_receive()
963 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_cont()
964 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_cont()
965 le16_to_cpu(cqe->len_list[i])); in qede_tpa_cont()
969 "Strange - TPA cont with more than a single len_list entry\n"); in qede_tpa_cont()
976 struct qede_rx_queue *rxq = fp->rxq; in qede_tpa_end()
981 tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_end()
982 skb = tpa_info->skb; in qede_tpa_end()
984 if (tpa_info->buffer.page_offset == PAGE_SIZE) in qede_tpa_end()
985 dma_unmap_page(rxq->dev, tpa_info->buffer.mapping, in qede_tpa_end()
986 PAGE_SIZE, rxq->data_direction); in qede_tpa_end()
988 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_end()
989 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_end()
990 le16_to_cpu(cqe->len_list[i])); in qede_tpa_end()
993 "Strange - TPA emd with more than a single len_list entry\n"); in qede_tpa_end()
995 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) in qede_tpa_end()
999 if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1)) in qede_tpa_end()
1001 "Strange - TPA had %02x BDs, but SKB has only %d frags\n", in qede_tpa_end()
1002 cqe->num_of_bds, tpa_info->frag_id); in qede_tpa_end()
1003 if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len))) in qede_tpa_end()
1005 "Strange - total packet len [cqe] is %4x but SKB has len %04x\n", in qede_tpa_end()
1006 le16_to_cpu(cqe->total_packet_len), skb->len); in qede_tpa_end()
1009 skb->protocol = eth_type_trans(skb, edev->ndev); in qede_tpa_end()
1010 skb->ip_summed = CHECKSUM_UNNECESSARY; in qede_tpa_end()
1012 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count in qede_tpa_end()
1013 * to skb_shinfo(skb)->gso_segs in qede_tpa_end()
1015 NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs); in qede_tpa_end()
1017 qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag); in qede_tpa_end()
1019 tpa_info->state = QEDE_AGG_STATE_NONE; in qede_tpa_end()
1023 tpa_info->state = QEDE_AGG_STATE_NONE; in qede_tpa_end()
1025 if (tpa_info->tpa_start_fail) { in qede_tpa_end()
1026 qede_reuse_page(rxq, &tpa_info->buffer); in qede_tpa_end()
1027 tpa_info->tpa_start_fail = false; in qede_tpa_end()
1030 dev_kfree_skb_any(tpa_info->skb); in qede_tpa_end()
1031 tpa_info->skb = NULL; in qede_tpa_end()
1067 u8 tun_pars_flg = cqe->tunnel_pars_flags.flags; in qede_pkt_is_ip_fragmented()
1090 xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq); in qede_rx_xdp()
1091 xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset, in qede_rx_xdp()
1097 *data_offset = xdp.data - xdp.data_hard_start; in qede_rx_xdp()
1098 *len = xdp.data_end - xdp.data; in qede_rx_xdp()
1104 rxq->xdp_no_pass++; in qede_rx_xdp()
1112 trace_xdp_exception(edev->ndev, prog, act); in qede_rx_xdp()
1119 if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping, in qede_rx_xdp()
1120 *data_offset, *len, bd->data, in qede_rx_xdp()
1122 dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, in qede_rx_xdp()
1123 rxq->data_direction); in qede_rx_xdp()
1124 __free_page(bd->data); in qede_rx_xdp()
1126 trace_xdp_exception(edev->ndev, prog, act); in qede_rx_xdp()
1128 dma_sync_single_for_device(rxq->dev, in qede_rx_xdp()
1129 bd->mapping + *data_offset, in qede_rx_xdp()
1130 *len, rxq->data_direction); in qede_rx_xdp()
1131 fp->xdp_xmit |= QEDE_XDP_TX; in qede_rx_xdp()
1142 trace_xdp_exception(edev->ndev, prog, act); in qede_rx_xdp()
1146 dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, in qede_rx_xdp()
1147 rxq->data_direction); in qede_rx_xdp()
1149 if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog))) in qede_rx_xdp()
1152 fp->xdp_xmit |= QEDE_XDP_REDIRECT; in qede_rx_xdp()
1157 bpf_warn_invalid_xdp_action(edev->ndev, prog, act); in qede_rx_xdp()
1160 trace_xdp_exception(edev->ndev, prog, act); in qede_rx_xdp()
1163 qede_recycle_rx_bd_ring(rxq, cqe->bd_num); in qede_rx_xdp()
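
qede_rx_xdp() is one instance of the generic XDP RX hook: run the attached program on the frame and branch on the verdict, transmitting, redirecting, or recycling the buffer and counting exceptions as appropriate. A skeletal version of that dispatch is sketched below; my_xmit() and my_recycle() are hypothetical stand-ins for the driver's own helpers, and the real qede code also picks up the data offset/length that the program may have adjusted:

#include <linux/filter.h>
#include <linux/netdevice.h>
#include <net/xdp.h>

/* Run an XDP program on one RX frame; returns true if the frame should
 * continue into the normal skb path. Hedged sketch, not the driver's code.
 */
static bool my_run_xdp(struct net_device *ndev, struct bpf_prog *prog,
                       struct xdp_buff *xdp)
{
        u32 act = bpf_prog_run_xdp(prog, xdp);

        switch (act) {
        case XDP_PASS:
                return true;                    /* build an skb as usual */
        case XDP_TX:
                /* my_xmit(xdp); */             /* send the frame back out */
                return false;
        case XDP_REDIRECT:
                /* xdp_do_redirect(ndev, xdp, prog); */
                return false;
        default:
                bpf_warn_invalid_xdp_action(ndev, prog, act);
                fallthrough;
        case XDP_ABORTED:
                /* trace_xdp_exception(ndev, prog, act); */
                fallthrough;
        case XDP_DROP:
                /* my_recycle(xdp); */          /* return the buffer to the ring */
                return false;
        }
}
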
1175 u16 pkt_len = le16_to_cpu(cqe->pkt_len); in qede_rx_build_jumbo()
1180 pkt_len -= first_bd_len; in qede_rx_build_jumbo()
1183 for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) { in qede_rx_build_jumbo()
1184 u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : in qede_rx_build_jumbo()
1189 "Still got %d BDs for mapping jumbo, but length became 0\n", in qede_rx_build_jumbo()
1201 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; in qede_rx_build_jumbo()
1202 bd = &rxq->sw_rx_ring[bd_cons_idx]; in qede_rx_build_jumbo()
1205 dma_unmap_page(rxq->dev, bd->mapping, in qede_rx_build_jumbo()
1208 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, bd->data, in qede_rx_build_jumbo()
1209 rxq->rx_headroom, cur_size, PAGE_SIZE); in qede_rx_build_jumbo()
1211 pkt_len -= cur_size; in qede_rx_build_jumbo()
1231 qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start); in qede_rx_process_tpa_cqe()
1234 qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); in qede_rx_process_tpa_cqe()
1237 return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); in qede_rx_process_tpa_cqe()
1247 struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog); in qede_rx_process_cqe()
1258 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); in qede_rx_process_cqe()
1259 cqe_type = cqe->fast_path_regular.type; in qede_rx_process_cqe()
1266 edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe); in qede_rx_process_cqe()
1274 /* Get the data from the SW ring; Consume it only after it's evident in qede_rx_process_cqe()
1277 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; in qede_rx_process_cqe()
1278 bd = &rxq->sw_rx_ring[bd_cons_idx]; in qede_rx_process_cqe()
1280 fp_cqe = &cqe->fast_path_regular; in qede_rx_process_cqe()
1281 len = le16_to_cpu(fp_cqe->len_on_first_bd); in qede_rx_process_cqe()
1282 pad = fp_cqe->placement_offset + rxq->rx_headroom; in qede_rx_process_cqe()
1291 flags = cqe->fast_path_regular.pars_flags.flags; in qede_rx_process_cqe()
1297 rxq->rx_ip_frags++; in qede_rx_process_cqe()
1299 rxq->rx_hw_errors++; in qede_rx_process_cqe()
1307 rxq->rx_alloc_errors++; in qede_rx_process_cqe()
1308 qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); in qede_rx_process_cqe()
1315 if (fp_cqe->bd_num > 1) { in qede_rx_process_cqe()
1326 /* The SKB contains all the data. Now prepare meta-magic */ in qede_rx_process_cqe()
1327 skb->protocol = eth_type_trans(skb, edev->ndev); in qede_rx_process_cqe()
1328 qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash); in qede_rx_process_cqe()
1330 skb_record_rx_queue(skb, rxq->rxq_id); in qede_rx_process_cqe()
1333 /* SKB is prepared - pass it to stack */ in qede_rx_process_cqe()
1334 qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag)); in qede_rx_process_cqe()
1341 struct qede_rx_queue *rxq = fp->rxq; in qede_rx_int()
1342 struct qede_dev *edev = fp->edev; in qede_rx_int()
1346 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); in qede_rx_int()
1347 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); in qede_rx_int()
1350 * / BD in the while-loop before reading hw_comp_cons. If the CQE is in qede_rx_int()
1359 qed_chain_recycle_consumed(&rxq->rx_comp_ring); in qede_rx_int()
1360 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); in qede_rx_int()
1364 rxq->rcv_pkts += rcv_pkts; in qede_rx_int()
1367 while (rxq->num_rx_buffers - rxq->filled_buffers) in qede_rx_int()
1379 qed_sb_update_sb_idx(fp->sb_info); in qede_poll_is_more_work()
1393 if (likely(fp->type & QEDE_FASTPATH_RX)) in qede_poll_is_more_work()
1394 if (qede_has_rx_work(fp->rxq)) in qede_poll_is_more_work()
1397 if (fp->type & QEDE_FASTPATH_XDP) in qede_poll_is_more_work()
1398 if (qede_txq_has_work(fp->xdp_tx)) in qede_poll_is_more_work()
1401 if (likely(fp->type & QEDE_FASTPATH_TX)) { in qede_poll_is_more_work()
1404 for_each_cos_in_txq(fp->edev, cos) { in qede_poll_is_more_work()
1405 if (qede_txq_has_work(&fp->txq[cos])) in qede_poll_is_more_work()
1420 struct qede_dev *edev = fp->edev; in qede_poll()
1424 fp->xdp_xmit = 0; in qede_poll()
1426 if (likely(fp->type & QEDE_FASTPATH_TX)) { in qede_poll()
1429 for_each_cos_in_txq(fp->edev, cos) { in qede_poll()
1430 if (qede_txq_has_work(&fp->txq[cos])) in qede_poll()
1431 qede_tx_int(edev, &fp->txq[cos]); in qede_poll()
1435 if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx)) in qede_poll()
1436 qede_xdp_tx_int(edev, fp->xdp_tx); in qede_poll()
1438 rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && in qede_poll()
1439 qede_has_rx_work(fp->rxq)) ? in qede_poll()
1442 if (fp->xdp_xmit & QEDE_XDP_REDIRECT) in qede_poll()
1451 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); in qede_poll()
1457 if (fp->xdp_xmit & QEDE_XDP_TX) { in qede_poll()
1458 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); in qede_poll()
1460 fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); in qede_poll()
1461 qede_update_tx_producer(fp->xdp_tx); in qede_poll()
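
qede_poll() follows the standard NAPI contract visible above: clean TX/XDP completions, count RX work against the budget, and only when under budget, and after re-checking for late work, complete NAPI and re-arm the status block interrupt via qed_sb_ack(). A generic sketch of that shape (the placeholder comments and rx_work_pending stand in for driver-specific steps such as qede_poll_is_more_work()):

#include <linux/netdevice.h>

/* Standard NAPI poll shape: process completions, receive up to `budget`
 * packets, and complete NAPI only when no further work is pending.
 * Hedged sketch, not qede's code.
 */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;
        bool rx_work_pending = false;

        /* ... clean TX completions, then receive up to `budget` packets,
         *     updating work_done and rx_work_pending ...
         */

        if (work_done < budget) {
                if (!rx_work_pending && napi_complete_done(napi, work_done)) {
                        /* re-enable the device interrupt here */
                } else {
                        work_done = budget;     /* stay in polling mode */
                }
        }

        return work_done;
}
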
1471 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); in qede_msix_fp_int()
1473 napi_schedule_irqoff(&fp->napi); in qede_msix_fp_int()
1489 dma_addr_t mapping; in qede_start_xmit() local
1496 /* Get tx-queue context and netdev index */ in qede_start_xmit()
1498 WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc); in qede_start_xmit()
1502 WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); in qede_start_xmit()
1509 txq->tx_mem_alloc_err++; in qede_start_xmit()
1518 idx = txq->sw_tx_prod; in qede_start_xmit()
1519 txq->sw_tx_ring.skbs[idx].skb = skb; in qede_start_xmit()
1521 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1523 first_bd->data.bd_flags.bitfields = in qede_start_xmit()
1526 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) in qede_start_xmit()
1529 /* Map skb linear data for DMA and set in the first BD */ in qede_start_xmit()
1530 mapping = dma_map_single(txq->dev, skb->data, in qede_start_xmit()
1532 if (unlikely(dma_mapping_error(txq->dev, mapping))) { in qede_start_xmit()
1533 DP_NOTICE(edev, "SKB mapping failed\n"); in qede_start_xmit()
1539 BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); in qede_start_xmit()
1546 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1551 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1555 /* We need to fill in additional data in second_bd... */ in qede_start_xmit()
1560 first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb)); in qede_start_xmit()
1561 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1567 /* We don't re-calculate IP checksum as it is already done by in qede_start_xmit()
1570 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1574 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1580 /* Legacy FW had flipped behavior in regard to this bit - in qede_start_xmit()
1584 if (unlikely(txq->is_legacy)) in qede_start_xmit()
1596 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1598 third_bd->data.lso_mss = in qede_start_xmit()
1599 cpu_to_le16(skb_shinfo(skb)->gso_size); in qede_start_xmit()
1602 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1608 first_bd->data.bd_flags.bitfields |= 1 << tmp; in qede_start_xmit()
1612 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1617 /* @@@TBD - if will not be removed need to check */ in qede_start_xmit()
1618 third_bd->data.bitfields |= in qede_start_xmit()
1622 * data on same BD. If we need to split, use the second bd... in qede_start_xmit()
1627 first_bd->nbytes, first_bd->addr.hi, in qede_start_xmit()
1628 first_bd->addr.lo); in qede_start_xmit()
1630 mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi), in qede_start_xmit()
1631 le32_to_cpu(first_bd->addr.lo)) + in qede_start_xmit()
1634 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping, in qede_start_xmit()
1635 le16_to_cpu(first_bd->nbytes) - in qede_start_xmit()
1639 * individual mapping in qede_start_xmit()
1641 txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; in qede_start_xmit()
1643 first_bd->nbytes = cpu_to_le16(hlen); in qede_start_xmit()
1649 if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) { in qede_start_xmit()
1650 DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len); in qede_start_xmit()
1656 val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << in qede_start_xmit()
1660 first_bd->data.bitfields = cpu_to_le16(val); in qede_start_xmit()
1664 while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) { in qede_start_xmit()
1666 &skb_shinfo(skb)->frags[frag_idx], in qede_start_xmit()
1683 for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) { in qede_start_xmit()
1685 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1690 &skb_shinfo(skb)->frags[frag_idx], in qede_start_xmit()
1700 first_bd->data.nbds = nbd; in qede_start_xmit()
1702 netdev_tx_sent_queue(netdev_txq, skb->len); in qede_start_xmit()
1706 /* Advance packet producer only before sending the packet since mapping in qede_start_xmit()
1709 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; in qede_start_xmit()
1712 txq->tx_db.data.bd_prod = in qede_start_xmit()
1713 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); in qede_start_xmit()
1718 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) in qede_start_xmit()
1724 txq->stopped_cnt++; in qede_start_xmit()
1729 * fp->bd_tx_cons in qede_start_xmit()
1733 if ((qed_chain_get_elem_left(&txq->tx_pbl) >= in qede_start_xmit()
1735 (edev->state == QEDE_STATE_OPEN)) { in qede_start_xmit()
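
qede_start_xmit() maps the skb's linear area with dma_map_single() and each page frag with skb_frag_dma_map(), recording the address/length pair in a BD so the completion and error paths can unmap them again. A condensed sketch of just the mapping step, leaving out the BD handling (struct my_tx_buf and my_map_skb are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

struct my_tx_buf {                      /* hypothetical per-segment record */
        dma_addr_t addr;
        unsigned int len;
};

/* Map the skb head and every frag for TX; returns the number of mappings
 * made, or -ENOMEM. On error the caller must unwind the mappings already
 * recorded in bufs[], as the failed-xmit path above does.
 */
static int my_map_skb(struct device *dev, struct sk_buff *skb,
                      struct my_tx_buf *bufs)
{
        unsigned int i, n = 0;

        bufs[n].addr = dma_map_single(dev, skb->data, skb_headlen(skb),
                                      DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, bufs[n].addr)))
                return -ENOMEM;
        bufs[n].len = skb_headlen(skb);
        n++;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, n++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                bufs[n].addr = skb_frag_dma_map(dev, frag, 0,
                                                skb_frag_size(frag),
                                                DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, bufs[n].addr)))
                        return -ENOMEM; /* caller unwinds bufs[0..n-1] */
                bufs[n].len = skb_frag_size(frag);
        }

        return n;
}
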
1751 total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc; in qede_select_queue()
1764 if (skb->encapsulation) { in qede_features_check()
1769 l4_proto = ip_hdr(skb)->protocol; in qede_features_check()
1772 l4_proto = ipv6_hdr(skb)->nexthdr; in qede_features_check()
1787 vxln_port = edev->vxlan_dst_port; in qede_features_check()
1788 gnv_port = edev->geneve_dst_port; in qede_features_check()
1790 if ((skb_inner_mac_header(skb) - in qede_features_check()
1792 (ntohs(udp_hdr(skb)->dest) != vxln_port && in qede_features_check()
1793 ntohs(udp_hdr(skb)->dest) != gnv_port)) in qede_features_check()