/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Advanced Micro Devices, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Contact Information :
 * Rajesh Kumar <rajesh1.kumar@amd.com>
 * Shreyank Amartya <Shreyank.Amartya@amd.com>
 *
 */

#include <sys/cdefs.h>
#include "xgbe.h"
#include "xgbe-common.h"

/*
 * IFLIB interfaces
 */
static int axgbe_isc_txd_encap(void *, if_pkt_info_t);
static void axgbe_isc_txd_flush(void *, uint16_t, qidx_t);
static int axgbe_isc_txd_credits_update(void *, uint16_t, bool);
static void axgbe_isc_rxd_refill(void *, if_rxd_update_t);
static void axgbe_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
static int axgbe_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
static int axgbe_isc_rxd_pkt_get(void *, if_rxd_info_t);

struct if_txrx axgbe_txrx = {
	.ift_txd_encap = axgbe_isc_txd_encap,
	.ift_txd_flush = axgbe_isc_txd_flush,
	.ift_txd_credits_update = axgbe_isc_txd_credits_update,
	.ift_rxd_available = axgbe_isc_rxd_available,
	.ift_rxd_pkt_get = axgbe_isc_rxd_pkt_get,
	.ift_rxd_refill = axgbe_isc_rxd_refill,
	.ift_rxd_flush = axgbe_isc_rxd_flush,
	.ift_legacy_intr = NULL
};

static void
xgbe_print_pkt_info(struct xgbe_prv_data *pdata, if_pkt_info_t pi)
{

	axgbe_printf(1, "------Packet Info Start------\n");
	axgbe_printf(1, "pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
	    pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
	axgbe_printf(1, "pi new_pidx: %d csum_flags: %x mflags: %x vtag: %d\n",
	    pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_mflags, pi->ipi_vtag);
	axgbe_printf(1, "pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
	    pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
	axgbe_printf(1, "pi tcp_hlen: %d tcp_hflags: %x tcp_seq: %d tso_segsz %d\n",
	    pi->ipi_tcp_hlen, pi->ipi_tcp_hflags, pi->ipi_tcp_seq, pi->ipi_tso_segsz);
}

static bool
axgbe_ctx_desc_setup(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
    if_pkt_info_t pi)
{
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	bool inc_cur = false;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdesc = rdata->rdesc;

	axgbe_printf(1, "ipi_tso_segsz %d cur_mss %d idx %d\n",
	    pi->ipi_tso_segsz, ring->tx.cur_mss, ring->cur);

	axgbe_printf(1, "ipi_vtag 0x%x cur_vlan_ctag 0x%x\n",
	    pi->ipi_vtag, ring->tx.cur_vlan_ctag);

	if ((pi->ipi_csum_flags & CSUM_TSO) &&
	    (pi->ipi_tso_segsz != ring->tx.cur_mss)) {
		/*
		 * Set TSO maximum segment size
		 * Mark as context descriptor
		 * Indicate this descriptor contains MSS
		 */
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
		    MSS, pi->ipi_tso_segsz);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, TCMSSV, 1);
		ring->tx.cur_mss = pi->ipi_tso_segsz;
		inc_cur = true;
	}

	if (pi->ipi_vtag && (pi->ipi_vtag != ring->tx.cur_vlan_ctag)) {
		/*
		 * Mark it as context descriptor
		 * Set the VLAN tag
		 * Indicate this descriptor contains the VLAN tag
		 */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
		    VT, pi->ipi_vtag);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, VLTV, 1);
		ring->tx.cur_vlan_ctag = pi->ipi_vtag;
		inc_cur = true;
	}

	return (inc_cur);
}

static uint16_t
axgbe_calculate_tx_parms(struct xgbe_prv_data *pdata, if_pkt_info_t pi,
    struct xgbe_packet_data *packet)
{
	uint32_t tcp_payload_len = 0, bytes = 0;
	uint16_t max_len, hlen, payload_len, pkts = 0;

	packet->tx_packets = packet->tx_bytes = 0;

	hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
	if (pi->ipi_csum_flags & CSUM_TSO) {

		tcp_payload_len = pi->ipi_len - hlen;
		axgbe_printf(1, "%s: ipi_len %x elen %d iplen %d tcplen %d\n",
		    __func__, pi->ipi_len, pi->ipi_ehdrlen, pi->ipi_ip_hlen,
		    pi->ipi_tcp_hlen);

		max_len = if_getmtu(pdata->netdev) + ETH_HLEN;
		if (pi->ipi_vtag)
			max_len += VLAN_HLEN;

		while (tcp_payload_len) {

			payload_len = max_len - hlen;
			payload_len = min(payload_len, tcp_payload_len);
			tcp_payload_len -= payload_len;
			pkts++;
			bytes += (hlen + payload_len);
			axgbe_printf(1, "%s: max_len %d payload_len %d "
			    "tcp_len %d\n", __func__, max_len, payload_len,
			    tcp_payload_len);
		}
	} else {
		pkts = 1;
		bytes = pi->ipi_len;
	}

	packet->tx_packets = pkts;
	packet->tx_bytes = bytes;

	axgbe_printf(1, "%s: packets %d bytes %d hlen %d\n", __func__,
	    packet->tx_packets, packet->tx_bytes, hlen);

	return (hlen);
}

static int
axgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	unsigned int cur, start, tx_set_ic;
	uint16_t offset, hlen, datalen, tcp_payload_len = 0;
	int cur_seg = 0;

	xgbe_print_pkt_info(pdata, pi);

	channel = pdata->channel[pi->ipi_qsidx];
	ring = channel->tx_ring;
	packet = &ring->packet_data;
	cur = start = ring->cur;

	axgbe_printf(1, "--> %s: txq %d cur %d dirty %d\n",
	    __func__, pi->ipi_qsidx, ring->cur, ring->dirty);

	MPASS(pi->ipi_len != 0);
	if (__predict_false(pi->ipi_len == 0)) {
		axgbe_error("empty packet received from stack\n");
		return (0);
	}

	MPASS(ring->cur == pi->ipi_pidx);
	if (__predict_false(ring->cur != pi->ipi_pidx)) {
		axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__,
		    ring->cur, pi->ipi_pidx);
	}

	/* Determine if an interrupt should be generated for this Tx:
	 *   Interrupt:
	 *     - Tx frame count exceeds the frame count setting
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set exceeds the frame count setting
	 *   No interrupt:
	 *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set does not exceed the frame count setting
	 */
	memset(packet, 0, sizeof(*packet));
	hlen = axgbe_calculate_tx_parms(pdata, pi, packet);
	axgbe_printf(1, "%s: ipi_len %d tx_pkts %d tx_bytes %d hlen %d\n",
	    __func__, pi->ipi_len, packet->tx_packets, packet->tx_bytes, hlen);

	ring->coalesce_count += packet->tx_packets;
	if (!pdata->tx_frames)
		tx_set_ic = 0;
	else if (packet->tx_packets > pdata->tx_frames)
		tx_set_ic = 1;
	else if ((ring->coalesce_count % pdata->tx_frames) < (packet->tx_packets))
		tx_set_ic = 1;
	else
		tx_set_ic = 0;

	/* Add Context descriptor if needed (for TSO, VLAN cases) */
	if (axgbe_ctx_desc_setup(pdata, ring, pi))
		cur++;

	rdata = XGBE_GET_DESC_DATA(ring, cur);
	rdesc = rdata->rdesc;

	axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
	    "ipi_len 0x%x\n", __func__, cur,
	    lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
	    upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
	    (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);

	/* Update buffer address (for TSO this is the header) */
	rdesc->desc0 = cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr));

	/* Update the buffer length */
	if (hlen == 0)
		hlen = pi->ipi_segs[cur_seg].ds_len;
	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, hlen);

	/* VLAN tag insertion check */
	if (pi->ipi_vtag) {
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
		    TX_NORMAL_DESC2_VLAN_INSERT);
	}

	/* Mark it as First Descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

	/* Mark it as a NORMAL descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

	/*
	 * Set the OWN bit if this is not the first descriptor. For first
	 * descriptor, OWN bit will be set at last so that hardware will
	 * process the descriptors only after the OWN bit for the first
	 * descriptor is set
	 */
	if (cur != start)
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (pi->ipi_csum_flags & CSUM_TSO) {
		/* Enable TSO */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);

		tcp_payload_len = pi->ipi_len - hlen;

		/* Set TCP payload length */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
		    tcp_payload_len);

		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
		    pi->ipi_tcp_hlen/4);

		axgbe_printf(1, "tcp_payload %d tcp_hlen %d\n", tcp_payload_len,
		    pi->ipi_tcp_hlen/4);
	} else {
		/* Enable CRC and Pad Insertion */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

		/* Enable HW CSUM */
		if (pi->ipi_csum_flags)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);

		/* Set total length to be transmitted */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, pi->ipi_len);
	}

	cur++;

	for (cur_seg = 0 ; cur_seg < pi->ipi_nsegs ; cur_seg++) {

		if (cur_seg == 0) {
			offset = hlen;
			datalen = pi->ipi_segs[cur_seg].ds_len - hlen;
		} else {
			offset = 0;
			datalen = pi->ipi_segs[cur_seg].ds_len;
		}

		if (datalen) {
			rdata = XGBE_GET_DESC_DATA(ring, cur);
			rdesc = rdata->rdesc;

			/* Update buffer address */
			rdesc->desc0 =
			    cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));
			rdesc->desc1 =
			    cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));

			/* Update the buffer length */
			XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, datalen);

			/* Set OWN bit */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

			/* Mark it as NORMAL descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

			/* Enable HW CSUM */
			if (pi->ipi_csum_flags)
				XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);

			axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
			    "ipi_len 0x%x\n", __func__, cur,
			    lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
			    upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
			    (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);

			cur++;
		}
	}

	/* Set LAST bit for the last descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

	/* Set IC bit based on Tx coalescing settings */
	if (tx_set_ic)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);

	wmb();

	/* Set OWN bit for the first descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, start);
	rdesc = rdata->rdesc;
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	ring->cur = pi->ipi_new_pidx = (cur & (ring->rdesc_count - 1));

	axgbe_printf(1, "<-- %s: end cur %d dirty %d\n", __func__, ring->cur,
	    ring->dirty);

	return (0);
}

static void
axgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_channel *channel = pdata->channel[txqid];
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata = XGBE_GET_DESC_DATA(ring, pidx);

	axgbe_printf(1, "--> %s: flush txq %d pidx %d cur %d dirty %d\n",
	    __func__, txqid, pidx, ring->cur, ring->dirty);

	/* Ring Doorbell */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
	    lower_32_bits(rdata->rdata_paddr));
}

static int
axgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_hw_if *hw_if = &sc->pdata.hw_if;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_channel *channel = pdata->channel[txqid];
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	int processed = 0;

	axgbe_printf(1, "%s: txq %d clear %d cur %d dirty %d\n",
	    __func__, txqid, clear, ring->cur, ring->dirty);

	if (__predict_false(ring->cur == ring->dirty)) {
		axgbe_printf(1, "<-- %s: cur(%d) equals dirty(%d)\n",
		    __func__, ring->cur, ring->dirty);
		return (0);
	}

	/* Check whether the first dirty descriptor is Tx complete */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
	if (!hw_if->tx_complete(rdata->rdesc)) {
		axgbe_printf(1, "<-- %s: (dirty %d)\n", __func__, ring->dirty);
		return (0);
	}

	/*
	 * If clear is false just let the caller know that there
	 * are descriptors to reclaim
	 */
	if (!clear) {
		axgbe_printf(1, "<-- %s: (!clear)\n", __func__);
		return (1);
	}

	do {
		hw_if->tx_desc_reset(rdata);
		processed++;
		ring->dirty = (ring->dirty + 1) & (ring->rdesc_count - 1);

		/*
		 * tx_complete will return true for unused descriptors also.
		 * so, check tx_complete only until used descriptors.
		 */
		if (ring->cur == ring->dirty)
			break;

		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
	} while (hw_if->tx_complete(rdata->rdesc));

	axgbe_printf(1, "<-- %s: processed %d cur %d dirty %d\n", __func__,
	    processed, ring->cur, ring->dirty);

	return (processed);
}

static void
axgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_channel *channel = pdata->channel[iru->iru_qsidx];
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	unsigned int rx_usecs = pdata->rx_usecs;
	unsigned int rx_frames = pdata->rx_frames;
	unsigned int inte;
	uint8_t count = iru->iru_count;
	int i, j;
	bool config_intr = false;

	axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d count %d ring cur %d "
	    "dirty %d\n", __func__, iru->iru_qsidx, iru->iru_flidx,
	    iru->iru_pidx, count, ring->cur, ring->dirty);

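	/*
	 * Post 'count' newly allocated buffers to the Rx ring, starting at
	 * iru_pidx and wrapping at the end of the ring.
	 */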
	for (i = iru->iru_pidx, j = 0 ; j < count ; i++, j++) {

		if (i == sc->scctx->isc_nrxd[0])
			i = 0;

		rdata = XGBE_GET_DESC_DATA(ring, i);
		rdesc = rdata->rdesc;

		if (__predict_false(XGMAC_GET_BITS_LE(rdesc->desc3,
		    RX_NORMAL_DESC3, OWN))) {
			axgbe_error("%s: refill clash, cur %d dirty %d index %d "
			    "pidx %d\n", __func__, ring->cur, ring->dirty, j, i);
		}

		if (pdata->sph_enable) {
			if (iru->iru_flidx == 0) {

				/* Fill header/buffer1 address */
				rdesc->desc0 =
				    cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
				rdesc->desc1 =
				    cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));
			} else {

				/* Fill data/buffer2 address */
				rdesc->desc2 =
				    cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
				rdesc->desc3 =
				    cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));

				config_intr = true;
			}
		} else {
			/* Fill header/buffer1 address */
			rdesc->desc0 = rdesc->desc2 =
			    cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
			rdesc->desc1 = rdesc->desc3 =
			    cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));

			config_intr = true;
		}

		if (config_intr) {

			if (!rx_usecs && !rx_frames) {
				/* No coalescing, interrupt for every descriptor */
				inte = 1;
			} else {
				/* Set interrupt based on Rx frame coalescing setting */
				if (rx_frames && !((ring->dirty + 1) % rx_frames))
					inte = 1;
				else
					inte = 0;
			}

			XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);

			XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

			wmb();

			ring->dirty = ((ring->dirty + 1) & (ring->rdesc_count - 1));

			config_intr = false;
		}
	}

	axgbe_printf(1, "<-- %s: rxq: %d cur: %d dirty: %d\n", __func__,
	    channel->queue_index, ring->cur, ring->dirty);
}

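/*
 * Notify the hardware of newly refilled Rx descriptors by advancing the
 * channel's descriptor tail pointer.  With split header enabled, the tail
 * is written only when the second (data) free list is flushed.
 */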
static void
axgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx, qidx_t pidx)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_channel *channel = pdata->channel[qsidx];
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d cur %d dirty %d\n",
	    __func__, qsidx, flidx, pidx, ring->cur, ring->dirty);

	rdata = XGBE_GET_DESC_DATA(ring, pidx);

	/*
	 * update RX descriptor tail pointer in hardware to indicate
	 * that new buffers are present in the allocated memory region
	 */
	if (!pdata->sph_enable || flidx == 1) {
		XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
		    lower_32_bits(rdata->rdata_paddr));
	}
}

static int
axgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t idx, qidx_t budget)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_channel *channel = pdata->channel[qsidx];
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	unsigned int cur;
	int count = 0;
	uint8_t incomplete = 1, context_next = 0, running = 0;

	axgbe_printf(1, "--> %s: rxq %d idx %d budget %d cur %d dirty %d\n",
	    __func__, qsidx, idx, budget, ring->cur, ring->dirty);

	if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state))) {
		axgbe_printf(0, "%s: Polling when XGBE_DOWN\n", __func__);
		return (count);
	}

	cur = ring->cur;
	for (count = 0; count <= budget; ) {

		rdata = XGBE_GET_DESC_DATA(ring, cur);
		rdesc = rdata->rdesc;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
			break;

		running = 1;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
			incomplete = 0;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
			context_next = 1;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT))
			context_next = 0;

		cur = (cur + 1) & (ring->rdesc_count - 1);

		if (incomplete || context_next)
			continue;

		/* Increment pkt count & reset variables for next full packet */
		count++;
		incomplete = 1;
		context_next = 0;
		running = 0;
	}

	axgbe_printf(1, "--> %s: rxq %d cur %d incomp %d con_next %d running %d "
	    "count %d\n", __func__, qsidx, cur, incomplete, context_next,
	    running, count);

	return (count);
}

static unsigned int
xgbe_rx_buf1_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata,
    struct xgbe_packet_data *packet)
{
	unsigned int ret = 0;

	if (pdata->sph_enable) {
		/* Always zero if not the first descriptor */
		if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) {
			axgbe_printf(1, "%s: Not First\n", __func__);
			return (0);
		}
	}

	/* First descriptor with split header, return header length */
	if (rdata->rx.hdr_len) {
		axgbe_printf(1, "%s: hdr_len %d\n", __func__, rdata->rx.hdr_len);
		return (rdata->rx.hdr_len);
	}

	/* First descriptor but not the last descriptor and no split header,
	 * so the full buffer was used, 256 represents the hardcoded value of
	 * a max header split defined in the hardware
	 */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
		axgbe_printf(1, "%s: Not last %d\n", __func__,
		    pdata->rx_buf_size);
		if (pdata->sph_enable) {
			return (256);
		} else {
			return (pdata->rx_buf_size);
		}
	}

	/* First descriptor and last descriptor and no split header, so
	 * calculate how much of the buffer was used, we can return the
	 * segment length or the remaining bytes of the packet
	 */
	axgbe_printf(1, "%s: pkt_len %d buf_size %d\n", __func__, rdata->rx.len,
	    pdata->rx_buf_size);

	if (pdata->sph_enable) {
		ret = min_t(unsigned int, 256, rdata->rx.len);
	} else {
		ret = rdata->rx.len;
	}

	return (ret);
}

static unsigned int
xgbe_rx_buf2_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata,
    struct xgbe_packet_data *packet, unsigned int len)
{

	/* Always the full buffer if not the last descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
		axgbe_printf(1, "%s: Not last %d\n", __func__, pdata->rx_buf_size);
		return (pdata->rx_buf_size);
	}

	/* Last descriptor so calculate how much of the buffer was used
	 * for the last bit of data
	 */
	return ((rdata->rx.len != 0)? (rdata->rx.len - len) : 0);
}

static inline void
axgbe_add_frag(struct xgbe_prv_data *pdata, if_rxd_info_t ri, int idx, int len,
    int pos, int flid)
{
	axgbe_printf(2, "idx %d len %d pos %d flid %d\n", idx, len, pos, flid);
	ri->iri_frags[pos].irf_flid = flid;
	ri->iri_frags[pos].irf_idx = idx;
	ri->iri_frags[pos].irf_len = len;
}

static int
axgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel = pdata->channel[ri->iri_qsidx];
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_packet_data *packet = &ring->packet_data;
	struct xgbe_ring_data *rdata;
	unsigned int last, context_next, context;
	unsigned int buf1_len, buf2_len, len = 0, prev_cur;
	int i = 0;

	axgbe_printf(2, "%s: rxq %d cidx %d cur %d dirty %d\n", __func__,
	    ri->iri_qsidx, ri->iri_cidx, ring->cur, ring->dirty);

	memset(packet, 0, sizeof(struct xgbe_packet_data));

	while (1) {

read_again:
		if (hw_if->dev_read(channel)) {
			axgbe_printf(2, "<-- %s: OWN bit seen on %d\n",
			    __func__, ring->cur);
			break;
		}

		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		prev_cur = ring->cur;
		ring->cur = (ring->cur + 1) & (ring->rdesc_count - 1);

		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    LAST);

		context_next = XGMAC_GET_BITS(packet->attributes,
		    RX_PACKET_ATTRIBUTES, CONTEXT_NEXT);

		context = XGMAC_GET_BITS(packet->attributes,
		    RX_PACKET_ATTRIBUTES, CONTEXT);

		if (!context) {
			/* Get the data length in the descriptor buffers */
			buf1_len = xgbe_rx_buf1_len(pdata, rdata, packet);
			len += buf1_len;
			if (pdata->sph_enable) {
				buf2_len = xgbe_rx_buf2_len(pdata, rdata, packet, len);
				len += buf2_len;
			}
		} else
			buf1_len = buf2_len = 0;

		if (packet->errors)
			axgbe_printf(1, "%s: last %d context %d con_next %d buf1 %d "
			    "buf2 %d len %d frags %d error %d\n", __func__, last, context,
			    context_next, buf1_len, buf2_len, len, i, packet->errors);

		axgbe_add_frag(pdata, ri, prev_cur, buf1_len, i, 0);
		i++;
		if (pdata->sph_enable) {
			axgbe_add_frag(pdata, ri, prev_cur, buf2_len, i, 1);
			i++;
		}

		if (!last || context_next)
			goto read_again;

		break;
	}

	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE)) {
		ri->iri_csum_flags |= CSUM_IP_CHECKED;
		ri->iri_csum_flags |= CSUM_IP_VALID;
		axgbe_printf(2, "%s: csum flags 0x%x\n", __func__, ri->iri_csum_flags);
	}

	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, VLAN_CTAG)) {
		ri->iri_flags |= M_VLANTAG;
		ri->iri_vtag = packet->vlan_ctag;
		axgbe_printf(2, "%s: iri_flags 0x%x vtag 0x%x\n", __func__,
		    ri->iri_flags, ri->iri_vtag);
	}

	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, RSS_HASH)) {
		ri->iri_flowid = packet->rss_hash;
		ri->iri_rsstype = packet->rss_hash_type;
		axgbe_printf(2, "%s: hash 0x%x/0x%x rsstype 0x%x/0x%x\n",
		    __func__, packet->rss_hash, ri->iri_flowid,
		    packet->rss_hash_type, ri->iri_rsstype);
	}

	if (__predict_false(len == 0))
		axgbe_printf(1, "%s: Discarding Zero len packet\n", __func__);

	if (__predict_false(packet->errors))
		axgbe_printf(1, "<-- %s: rxq: %d len: %d frags: %d cidx %d cur: %d "
		    "dirty: %d error 0x%x\n", __func__, ri->iri_qsidx, len, i,
		    ri->iri_cidx, ring->cur, ring->dirty, packet->errors);

	axgbe_printf(1, "%s: Packet len %d frags %d\n", __func__, len, i);

	ri->iri_len = len;
	ri->iri_nfrags = i;

	return (0);
}