
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
9 * 1. Redistributions of source code must retain the above copyright
35 #include "xgbe-common.h"
63 axgbe_printf(1, "------Packet Info Start------\n");
64 axgbe_printf(1, "pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
65 pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
66 axgbe_printf(1, "pi new_pidx: %d csum_flags: %x mflags: %x vtag: %d\n",
67 pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_mflags, pi->ipi_vtag);
68 axgbe_printf(1, "pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
69 pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
70 axgbe_printf(1, "pi tcp_hlen: %d tcp_hflags: %x tcp_seq: %d tso_segsz %d\n",
71 pi->ipi_tcp_hlen, pi->ipi_tcp_hflags, pi->ipi_tcp_seq, pi->ipi_tso_segsz);
82 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
83 rdesc = rdata->rdesc;
85 axgbe_printf(1, "ipi_tso_segsz %d cur_mss %d idx %d\n",
86 pi->ipi_tso_segsz, ring->tx.cur_mss, ring->cur);
88 axgbe_printf(1, "ipi_vtag 0x%x cur_vlan_ctag 0x%x\n",
89 pi->ipi_vtag, ring->tx.cur_vlan_ctag);
91 if ((pi->ipi_csum_flags & CSUM_TSO) &&
92 (pi->ipi_tso_segsz != ring->tx.cur_mss)) {
98 XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
99 MSS, pi->ipi_tso_segsz);
100 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
101 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, TCMSSV, 1);
102 ring->tx.cur_mss = pi->ipi_tso_segsz;
106 if (pi->ipi_vtag && (pi->ipi_vtag != ring->tx.cur_vlan_ctag)) {
112 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
113 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
114 VT, pi->ipi_vtag);
115 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, VLTV, 1);
116 ring->tx.cur_vlan_ctag = pi->ipi_vtag;
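
The two blocks above follow a change-detection pattern: a Tx context descriptor is emitted only when the packet's TSO MSS or VLAN tag differs from what the ring last programmed, and the new value is then cached. A minimal sketch of that test, using a hypothetical ring-state struct in place of the driver's ring->tx fields:

#include <stdbool.h>
#include <stdint.h>

/* Hedged sketch, not driver code: cur_mss/cur_vlan_ctag mirror the values
 * most recently written to a context descriptor for this ring. */
struct tx_ring_state {
	uint32_t cur_mss;
	uint16_t cur_vlan_ctag;
};

static inline bool
needs_context_desc(const struct tx_ring_state *tx, bool tso, uint32_t mss,
    uint16_t vtag)
{
	return ((tso && mss != tx->cur_mss) ||
	    (vtag != 0 && vtag != tx->cur_vlan_ctag));
}
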
130 packet->tx_packets = packet->tx_bytes = 0;
132 hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
133 if (pi->ipi_csum_flags & CSUM_TSO) {
135 tcp_payload_len = pi->ipi_len - hlen;
136 axgbe_printf(1, "%s: ipi_len %x elen %d iplen %d tcplen %d\n",
137 __func__, pi->ipi_len, pi->ipi_ehdrlen, pi->ipi_ip_hlen,
138 pi->ipi_tcp_hlen);
140 max_len = if_getmtu(pdata->netdev) + ETH_HLEN;
141 if (pi->ipi_vtag)
146 payload_len = max_len - hlen;
148 tcp_payload_len -= payload_len;
151 axgbe_printf(1, "%s: max_len %d payload_len %d "
152 "tcp_len %d\n", __func__, max_len, payload_len,
156 pkts = 1;
157 bytes = pi->ipi_len;
160 packet->tx_packets = pkts;
161 packet->tx_bytes = bytes;
163 axgbe_printf(1, "%s: packets %d bytes %d hlen %d\n", __func__,
164 packet->tx_packets, packet->tx_bytes, hlen);
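
The loop whose fragments appear at lines 146-152 seems to walk the TCP payload in per-packet chunks, counting wire packets and charging the protocol headers once per packet. A closed-form sketch of the same accounting, under that assumption; howmany() is FreeBSD's ceiling-division macro from sys/param.h, the other names are illustrative:

#include <sys/param.h>	/* howmany(): ceiling division */

/* Hedged sketch: closed-form equivalent of the TSO accounting above.
 * total_len is the single-header frame length (pi->ipi_len), hlen the
 * Ethernet+IP+TCP header length, payload_per_pkt the payload each wire
 * packet can carry (max_len - hlen). */
static inline void
tso_wire_stats(unsigned int total_len, unsigned int hlen,
    unsigned int payload_per_pkt, unsigned int *pkts, unsigned int *bytes)
{
	unsigned int tcp_payload = total_len - hlen;

	*pkts = howmany(tcp_payload, payload_per_pkt);
	*bytes = tcp_payload + *pkts * hlen;	/* headers repeated per packet */
}
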
173 struct xgbe_prv_data *pdata = &sc->pdata;
185 channel = pdata->channel[pi->ipi_qsidx];
186 ring = channel->tx_ring;
187 packet = &ring->packet_data;
188 cur = start = ring->cur;
190 axgbe_printf(1, "--> %s: txq %d cur %d dirty %d\n",
191 __func__, pi->ipi_qsidx, ring->cur, ring->dirty);
193 MPASS(pi->ipi_len != 0);
194 if (__predict_false(pi->ipi_len == 0)) {
199 MPASS(ring->cur == pi->ipi_pidx);
200 if (__predict_false(ring->cur != pi->ipi_pidx)) {
201 axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__,
202 ring->cur, pi->ipi_pidx);
207 * - Tx frame count exceeds the frame count setting
208 * - Addition of Tx frame count to the frame count since the
209 *   last interrupt was set exceeds the frame count setting
211 * - No frame count setting specified (ethtool -C ethX tx-frames 0)
212 * - Addition of Tx frame count to the frame count since the
213 *   last interrupt was set does not exceed the frame count setting
217 axgbe_printf(1, "%s: ipi_len %d tx_pkts %d tx_bytes %d hlen %d\n",
218 __func__, pi->ipi_len, packet->tx_packets, packet->tx_bytes, hlen);
220 ring->coalesce_count += packet->tx_packets;
221 if (!pdata->tx_frames)
223 else if (packet->tx_packets > pdata->tx_frames)
224 tx_set_ic = 1;
225 else if ((ring->coalesce_count % pdata->tx_frames) < (packet->tx_packets))
226 tx_set_ic = 1;
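
Read together, the coalescing test above sets the IC bit when the burst alone exceeds tx_frames, or when the running coalesce_count (already incremented by this burst) has just crossed a multiple of tx_frames. A standalone sketch with a worked example in the comment, assuming the elided !tx_frames branch simply leaves the flag clear:

#include <stdbool.h>

/* Hedged sketch: with tx_frames = 32 and coalesce_count bumped from 30 to
 * 34 by a 4-packet burst, 34 % 32 == 2 < 4, so a 32-frame boundary was just
 * crossed and a completion interrupt should be requested. */
static inline bool
tx_wants_interrupt(unsigned int coalesce_count, unsigned int tx_packets,
    unsigned int tx_frames)
{
	if (tx_frames == 0)
		return (false);		/* assumed: no frame threshold set */
	if (tx_packets > tx_frames)
		return (true);
	return ((coalesce_count % tx_frames) < tx_packets);
}
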
235 rdesc = rdata->rdesc;
237 axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
239 lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
240 upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
241 (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);
244 rdesc->desc0 = cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr));
245 rdesc->desc1 = cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr));
249 hlen = pi->ipi_segs[cur_seg].ds_len;
250 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, hlen);
253 if (pi->ipi_vtag) {
254 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
259 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
262 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
271 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
273 if (pi->ipi_csum_flags & CSUM_TSO) {
275 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
277 tcp_payload_len = pi->ipi_len - hlen;
280 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
283 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
284 pi->ipi_tcp_hlen/4);
286 axgbe_printf(1, "tcp_payload %d tcp_hlen %d\n", tcp_payload_len,
287 pi->ipi_tcp_hlen/4);
290 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
293 if (pi->ipi_csum_flags)
294 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
297 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, pi->ipi_len);
302 for (cur_seg = 0 ; cur_seg < pi->ipi_nsegs ; cur_seg++) {
306 datalen = pi->ipi_segs[cur_seg].ds_len - hlen;
309 datalen = pi->ipi_segs[cur_seg].ds_len;
314 rdesc = rdata->rdesc;
318 rdesc->desc0 =
319 cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));
320 rdesc->desc1 =
321 cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));
324 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, datalen);
327 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
330 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
333 if (pi->ipi_csum_flags)
334 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
336 axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
338 lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
339 upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
340 (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);
347 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
351 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
357 rdesc = rdata->rdesc;
358 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
360 ring->cur = pi->ipi_new_pidx = (cur & (ring->rdesc_count - 1));
362 axgbe_printf(1, "<-- %s: end cur %d dirty %d\n", __func__, ring->cur,
363 ring->dirty);
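
Throughout these routines (lines 360, 424, 523, 598, 724) the cur and dirty indices wrap with a bitmask rather than a modulo, which relies on the descriptor count being a power of two. A minimal sketch of the wrap:

/* Sketch: advance a ring index on a power-of-two ring.  With
 * rdesc_count = 512, (511 + 1) & (512 - 1) == 0, so the index wraps to the
 * start without a division; the trick is invalid for non-power-of-two
 * counts. */
static inline unsigned int
ring_advance(unsigned int idx, unsigned int rdesc_count)
{
	return ((idx + 1) & (rdesc_count - 1));
}
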
372 struct xgbe_prv_data *pdata = &sc->pdata;
373 struct xgbe_channel *channel = pdata->channel[txqid];
374 struct xgbe_ring *ring = channel->tx_ring;
377 axgbe_printf(1, "--> %s: flush txq %d pidx %d cur %d dirty %d\n",
378 __func__, txqid, pidx, ring->cur, ring->dirty);
382 lower_32_bits(rdata->rdata_paddr));
389 struct xgbe_hw_if *hw_if = &sc->pdata.hw_if;
390 struct xgbe_prv_data *pdata = &sc->pdata;
391 struct xgbe_channel *channel = pdata->channel[txqid];
392 struct xgbe_ring *ring = channel->tx_ring;
396 axgbe_printf(1, "%s: txq %d clear %d cur %d dirty %d\n",
397 __func__, txqid, clear, ring->cur, ring->dirty);
399 if (__predict_false(ring->cur == ring->dirty)) {
400 axgbe_printf(1, "<-- %s: cur(%d) equals dirty(%d)\n",
401 __func__, ring->cur, ring->dirty);
406 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
407 if (!hw_if->tx_complete(rdata->rdesc)) {
408 axgbe_printf(1, "<-- %s: (dirty %d)\n", __func__, ring->dirty);
417 axgbe_printf(1, "<-- %s: (!clear)\n", __func__);
418 return (1);
422 hw_if->tx_desc_reset(rdata);
424 ring->dirty = (ring->dirty + 1) & (ring->rdesc_count - 1);
430 if (ring->cur == ring->dirty)
433 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
434 } while (hw_if->tx_complete(rdata->rdesc));
436 axgbe_printf(1, "<-- %s: processed %d cur %d dirty %d\n", __func__,
437 processed, ring->cur, ring->dirty);
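
The routine above is the driver's iflib txd_credits_update callback: with clear false it only reports whether any descriptor has completed, and with clear true it walks from dirty toward cur, resetting each completed descriptor and returning the count. A condensed sketch of that shape; the ring struct and the descriptor_done()/descriptor_reset() helpers are hypothetical stand-ins for hw_if->tx_complete() and hw_if->tx_desc_reset():

#include <stdbool.h>

struct tx_ring_sketch {
	unsigned int cur;		/* next slot software will fill */
	unsigned int dirty;		/* oldest slot not yet reclaimed */
	unsigned int rdesc_count;	/* power of two */
};

static bool descriptor_done(struct tx_ring_sketch *ring, unsigned int idx);
static void descriptor_reset(struct tx_ring_sketch *ring, unsigned int idx);

static int
txd_credits_update_sketch(struct tx_ring_sketch *ring, bool clear)
{
	int processed = 0;

	while (ring->dirty != ring->cur &&
	    descriptor_done(ring, ring->dirty)) {
		if (!clear)
			return (1);	/* query only: work is pending */
		descriptor_reset(ring, ring->dirty);
		ring->dirty = (ring->dirty + 1) & (ring->rdesc_count - 1);
		processed++;
	}
	return (processed);
}
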
446 struct xgbe_prv_data *pdata = &sc->pdata;
447 struct xgbe_channel *channel = pdata->channel[iru->iru_qsidx];
448 struct xgbe_ring *ring = channel->rx_ring;
451 unsigned int rx_usecs = pdata->rx_usecs;
452 unsigned int rx_frames = pdata->rx_frames;
454 uint8_t count = iru->iru_count;
458 axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d count %d ring cur %d "
459 "dirty %d\n", __func__, iru->iru_qsidx, iru->iru_flidx,
460 iru->iru_pidx, count, ring->cur, ring->dirty);
462 for (i = iru->iru_pidx, j = 0 ; j < count ; i++, j++) {
464 if (i == sc->scctx->isc_nrxd[0])
468 rdesc = rdata->rdesc;
470 if (__predict_false(XGMAC_GET_BITS_LE(rdesc->desc3,
472 axgbe_error("%s: refill clash, cur %d dirty %d index %d "
473 "pidx %d\n", __func__, ring->cur, ring->dirty, j, i);
476 if (pdata->sph_enable) {
477 if (iru->iru_flidx == 0) {
480 rdesc->desc0 =
481 cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
482 rdesc->desc1 =
483 cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));
487 rdesc->desc2 =
488 cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
489 rdesc->desc3 =
490 cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));
496 rdesc->desc0 = rdesc->desc2 =
497 cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
498 rdesc->desc1 = rdesc->desc3 =
499 cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));
508 inte = 1;
511 if (rx_frames && !((ring->dirty + 1) % rx_frames))
512 inte = 1;
517 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
519 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
523 ring->dirty = ((ring->dirty + 1) & (ring->rdesc_count - 1));
529 axgbe_printf(1, "<-- %s: rxq: %d cur: %d dirty: %d\n", __func__,
530 channel->queue_index, ring->cur, ring->dirty);
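
The refill path also moderates Rx interrupts per descriptor: the fragments above suggest INTE is set on every buffer when neither rx_usecs nor rx_frames is configured, and otherwise only on every rx_frames-th buffer, leaving the Rx timer to cover the rest. A sketch of that decision under those assumptions (the condition guarding the first inte = 1 is elided in this listing):

/* Hedged sketch: pass next_dirty = ring->dirty + 1 for the slot being
 * refilled.  Assumes the elided branch is the "no coalescing configured"
 * case. */
static inline int
rx_wants_interrupt(unsigned int next_dirty, unsigned int rx_usecs,
    unsigned int rx_frames)
{
	if (rx_usecs == 0 && rx_frames == 0)
		return (1);	/* no coalescing: interrupt per buffer */
	if (rx_frames != 0 && (next_dirty % rx_frames) == 0)
		return (1);	/* every rx_frames-th descriptor */
	return (0);
}
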
537 struct xgbe_prv_data *pdata = &sc->pdata;
538 struct xgbe_channel *channel = pdata->channel[qsidx];
539 struct xgbe_ring *ring = channel->rx_ring;
542 axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d cur %d dirty %d\n",
543 __func__, qsidx, flidx, pidx, ring->cur, ring->dirty);
551 if (!pdata->sph_enable || flidx == 1) {
553 lower_32_bits(rdata->rdata_paddr));
561 struct xgbe_prv_data *pdata = &sc->pdata;
562 struct xgbe_channel *channel = pdata->channel[qsidx];
563 struct xgbe_ring *ring = channel->rx_ring;
568 uint8_t incomplete = 1, context_next = 0, running = 0;
570 axgbe_printf(1, "--> %s: rxq %d idx %d budget %d cur %d dirty %d\n",
571 __func__, qsidx, idx, budget, ring->cur, ring->dirty);
573 if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state))) {
578 cur = ring->cur;
582 rdesc = rdata->rdesc;
584 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
587 running = 1;
589 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
592 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
593 context_next = 1;
595 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT))
598 cur = (cur + 1) & (ring->rdesc_count - 1);
605 incomplete = 1;
610 axgbe_printf(1, "--> %s: rxq %d cur %d incomp %d con_next %d running %d "
611 "count %d\n", __func__, qsidx, cur, incomplete, context_next,
623 if (pdata->sph_enable) {
625 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) {
626 axgbe_printf(1, "%s: Not First\n", __func__);
632 if (rdata->rx.hdr_len) {
633 axgbe_printf(1, "%s: hdr_len %d\n", __func__, rdata->rx.hdr_len);
634 return (rdata->rx.hdr_len);
641 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
642 axgbe_printf(1, "%s: Not last %d\n", __func__,
643 pdata->rx_buf_size);
644 if (pdata->sph_enable) {
647 return (pdata->rx_buf_size);
655 axgbe_printf(1, "%s: pkt_len %d buf_size %d\n", __func__, rdata->rx.len,
656 pdata->rx_buf_size);
658 if (pdata->sph_enable) {
659 ret = min_t(unsigned int, 256, rdata->rx.len);
661 ret = rdata->rx.len;
673 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
674 axgbe_printf(1, "%s: Not last %d\n", __func__, pdata->rx_buf_size);
675 return (pdata->rx_buf_size);
681 return ((rdata->rx.len != 0)? (rdata->rx.len - len) : 0);
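
The return above is the remainder rule for multi-descriptor frames: every descriptor that is not the last contributes a full Rx buffer, and the last one contributes the total frame length minus whatever the caller has already accounted for. A small sketch with a worked example: a 3000-byte frame over 2048-byte buffers yields 2048 and then 3000 - 2048 = 952 (any header-split bytes would also be part of the consumed count):

#include <stdbool.h>

/* Hedged sketch of the length rule; names are illustrative. */
static inline unsigned int
rx_frag_len(bool last, unsigned int total_len, unsigned int consumed,
    unsigned int buf_size)
{
	if (!last)
		return (buf_size);
	return (total_len != 0 ? total_len - consumed : 0);
}
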
688 axgbe_printf(2, "idx %d len %d pos %d flid %d\n", idx, len, pos, flid);
689 ri->iri_frags[pos].irf_flid = flid;
690 ri->iri_frags[pos].irf_idx = idx;
691 ri->iri_frags[pos].irf_len = len;
698 struct xgbe_prv_data *pdata = &sc->pdata;
699 struct xgbe_hw_if *hw_if = &pdata->hw_if;
700 struct xgbe_channel *channel = pdata->channel[ri->iri_qsidx];
701 struct xgbe_ring *ring = channel->rx_ring;
702 struct xgbe_packet_data *packet = &ring->packet_data;
708 axgbe_printf(2, "%s: rxq %d cidx %d cur %d dirty %d\n", __func__,
709 ri->iri_qsidx, ri->iri_cidx, ring->cur, ring->dirty);
713 while (1) {
716 if (hw_if->dev_read(channel)) {
717 axgbe_printf(2, "<-- %s: OWN bit seen on %d\n",
718 __func__, ring->cur);
722 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
723 prev_cur = ring->cur;
724 ring->cur = (ring->cur + 1) & (ring->rdesc_count - 1);
726 last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
729 context_next = XGMAC_GET_BITS(packet->attributes,
732 context = XGMAC_GET_BITS(packet->attributes,
739 if (pdata->sph_enable) {
746 if (packet->errors)
747 axgbe_printf(1, "%s: last %d context %d con_next %d buf1 %d "
748 "buf2 %d len %d frags %d error %d\n", __func__, last, context,
749 context_next, buf1_len, buf2_len, len, i, packet->errors);
753 if (pdata->sph_enable) {
754 axgbe_add_frag(pdata, ri, prev_cur, buf2_len, i, 1);
764 if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE)) {
765 ri->iri_csum_flags |= CSUM_IP_CHECKED;
766 ri->iri_csum_flags |= CSUM_IP_VALID;
767 axgbe_printf(2, "%s: csum flags 0x%x\n", __func__, ri->iri_csum_flags);
770 if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, VLAN_CTAG)) {
771 ri->iri_flags |= M_VLANTAG;
772 ri->iri_vtag = packet->vlan_ctag;
774 ri->iri_flags, ri->iri_vtag);
778 if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, RSS_HASH)) {
779 ri->iri_flowid = packet->rss_hash;
780 ri->iri_rsstype = packet->rss_hash_type;
782 __func__, packet->rss_hash, ri->iri_flowid,
783 packet->rss_hash_type, ri->iri_rsstype);
787 axgbe_printf(1, "%s: Discarding Zero len packet\n", __func__);
789 if (__predict_false(packet->errors))
790 axgbe_printf(1, "<-- %s: rxq: %d len: %d frags: %d cidx %d cur: %d "
791 "dirty: %d error 0x%x\n", __func__, ri->iri_qsidx, len, i,
792 ri->iri_cidx, ring->cur, ring->dirty, packet->errors);
794 axgbe_printf(1, "%s: Packet len %d frags %d\n", __func__, len, i);
796 ri->iri_len = len;
797 ri->iri_nfrags = i;