/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Advanced Micro Devices, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Contact Information :
 * Rajesh Kumar <rajesh1.kumar@amd.com>
 * Shreyank Amartya <Shreyank.Amartya@amd.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "xgbe.h"
#include "xgbe-common.h"

/*
 * IFLIB interfaces
 */
static int axgbe_isc_txd_encap(void *, if_pkt_info_t);
static void axgbe_isc_txd_flush(void *, uint16_t, qidx_t);
static int axgbe_isc_txd_credits_update(void *, uint16_t, bool);
static void axgbe_isc_rxd_refill(void *, if_rxd_update_t);
static void axgbe_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
static int axgbe_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
static int axgbe_isc_rxd_pkt_get(void *, if_rxd_info_t);

struct if_txrx axgbe_txrx = {
	.ift_txd_encap = axgbe_isc_txd_encap,
	.ift_txd_flush = axgbe_isc_txd_flush,
	.ift_txd_credits_update = axgbe_isc_txd_credits_update,
	.ift_rxd_available = axgbe_isc_rxd_available,
	.ift_rxd_pkt_get = axgbe_isc_rxd_pkt_get,
	.ift_rxd_refill = axgbe_isc_rxd_refill,
	.ift_rxd_flush = axgbe_isc_rxd_flush,
	.ift_legacy_intr = NULL
};

static void
xgbe_print_pkt_info(struct xgbe_prv_data *pdata, if_pkt_info_t pi)
{

	axgbe_printf(1, "------Packet Info Start------\n");
	axgbe_printf(1, "pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
	    pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
	axgbe_printf(1, "pi new_pidx: %d csum_flags: %x mflags: %x vtag: %d\n",
	    pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_mflags, pi->ipi_vtag);
	axgbe_printf(1, "pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
	    pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
	axgbe_printf(1, "pi tcp_hlen: %d tcp_hflags: %x tcp_seq: %d tso_segsz %d\n",
	    pi->ipi_tcp_hlen, pi->ipi_tcp_hflags, pi->ipi_tcp_seq, pi->ipi_tso_segsz);
}
static bool
axgbe_ctx_desc_setup(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
    if_pkt_info_t pi)
{
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	bool inc_cur = false;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdesc = rdata->rdesc;

	axgbe_printf(1, "ipi_tso_segsz %d cur_mss %d idx %d\n",
	    pi->ipi_tso_segsz, ring->tx.cur_mss, ring->cur);

	axgbe_printf(1, "ipi_vtag 0x%x cur_vlan_ctag 0x%x\n",
	    pi->ipi_vtag, ring->tx.cur_vlan_ctag);

	if ((pi->ipi_csum_flags & CSUM_TSO) &&
	    (pi->ipi_tso_segsz != ring->tx.cur_mss)) {
		/*
		 * Set TSO maximum segment size
		 * Mark as context descriptor
		 * Indicate this descriptor contains MSS
		 */
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
		    MSS, pi->ipi_tso_segsz);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, TCMSSV, 1);
		ring->tx.cur_mss = pi->ipi_tso_segsz;
		inc_cur = true;
	}

	if (pi->ipi_vtag && (pi->ipi_vtag != ring->tx.cur_vlan_ctag)) {
		/*
		 * Mark it as context descriptor
		 * Set the VLAN tag
		 * Indicate this descriptor contains the VLAN tag
		 */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
		    VT, pi->ipi_vtag);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, VLTV, 1);
		ring->tx.cur_vlan_ctag = pi->ipi_vtag;
		inc_cur = true;
	}

	return (inc_cur);
}

static uint16_t
axgbe_calculate_tx_parms(struct xgbe_prv_data *pdata, if_pkt_info_t pi,
    struct xgbe_packet_data *packet)
{
	uint32_t tcp_payload_len = 0, bytes = 0;
	uint16_t max_len, hlen, payload_len, pkts = 0;

	packet->tx_packets = packet->tx_bytes = 0;

	hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
	if (pi->ipi_csum_flags & CSUM_TSO) {

		tcp_payload_len = pi->ipi_len - hlen;
		axgbe_printf(1, "%s: ipi_len %x elen %d iplen %d tcplen %d\n",
		    __func__, pi->ipi_len, pi->ipi_ehdrlen, pi->ipi_ip_hlen,
		    pi->ipi_tcp_hlen);

		max_len = if_getmtu(pdata->netdev) + ETH_HLEN;
		if (pi->ipi_vtag)
			max_len += VLAN_HLEN;

		while (tcp_payload_len) {

			payload_len = max_len - hlen;
			payload_len = min(payload_len, tcp_payload_len);
			tcp_payload_len -= payload_len;
			pkts++;
			bytes += (hlen + payload_len);
			axgbe_printf(1, "%s: max_len %d payload_len %d "
			    "tcp_len %d\n", __func__, max_len, payload_len,
			    tcp_payload_len);
		}
	} else {
		pkts = 1;
		bytes = pi->ipi_len;
	}

	packet->tx_packets = pkts;
	packet->tx_bytes = bytes;

	axgbe_printf(1, "%s: packets %d bytes %d hlen %d\n", __func__,
	    packet->tx_packets, packet->tx_bytes, hlen);

	return (hlen);
}
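/*
 * Worked example of the estimate above (illustrative values, not taken from
 * a real trace): for a TSO request with ipi_ehdrlen 14, ipi_ip_hlen 20 and
 * ipi_tcp_hlen 20, hlen is 54.  With an MTU of 1500 and no VLAN tag,
 * max_len is 1514, so each estimated wire frame carries up to 1460 bytes of
 * TCP payload.  An ipi_len of 9054 (9000 payload bytes) is therefore
 * accounted as tx_packets = 7 and tx_bytes = 7 * 54 + 9000 = 9378.  These
 * counts only feed the Tx interrupt coalescing decision in
 * axgbe_isc_txd_encap(); the hardware performs the actual segmentation.
 */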
static int
axgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	unsigned int cur, start, tx_set_ic;
	uint16_t offset, hlen, datalen, tcp_payload_len = 0;
	int cur_seg = 0;

	xgbe_print_pkt_info(pdata, pi);

	channel = pdata->channel[pi->ipi_qsidx];
	ring = channel->tx_ring;
	packet = &ring->packet_data;
	cur = start = ring->cur;

	axgbe_printf(1, "--> %s: txq %d cur %d dirty %d\n",
	    __func__, pi->ipi_qsidx, ring->cur, ring->dirty);

	MPASS(pi->ipi_len != 0);
	if (__predict_false(pi->ipi_len == 0)) {
		axgbe_error("empty packet received from stack\n");
		return (0);
	}

	MPASS(ring->cur == pi->ipi_pidx);
	if (__predict_false(ring->cur != pi->ipi_pidx)) {
		axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__,
		    ring->cur, pi->ipi_pidx);
	}

	/*
	 * Determine if an interrupt should be generated for this Tx:
	 * Interrupt:
	 *   - Tx frame count exceeds the frame count setting
	 *   - Addition of Tx frame count to the frame count since the
	 *     last interrupt was set exceeds the frame count setting
	 * No interrupt:
	 *   - No frame count setting specified (ethtool -C ethX tx-frames 0)
	 *   - Addition of Tx frame count to the frame count since the
	 *     last interrupt was set does not exceed the frame count setting
	 */
	memset(packet, 0, sizeof(*packet));
	hlen = axgbe_calculate_tx_parms(pdata, pi, packet);
	axgbe_printf(1, "%s: ipi_len %d tx_pkts %d tx_bytes %d hlen %d\n",
	    __func__, pi->ipi_len, packet->tx_packets, packet->tx_bytes, hlen);

	ring->coalesce_count += packet->tx_packets;
	if (!pdata->tx_frames)
		tx_set_ic = 0;
	else if (packet->tx_packets > pdata->tx_frames)
		tx_set_ic = 1;
	else if ((ring->coalesce_count % pdata->tx_frames) < packet->tx_packets)
		tx_set_ic = 1;
	else
		tx_set_ic = 0;

	/* Add a context descriptor if needed (TSO, VLAN cases) */
	if (axgbe_ctx_desc_setup(pdata, ring, pi))
		cur++;

	rdata = XGBE_GET_DESC_DATA(ring, cur);
	rdesc = rdata->rdesc;

	axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
	    "ipi_len 0x%x\n", __func__, cur,
	    lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
	    upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
	    (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);

	/* Update the buffer address (for TSO this is the header) */
	rdesc->desc0 = cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr));

	/* Update the buffer length */
	if (hlen == 0)
		hlen = pi->ipi_segs[cur_seg].ds_len;
	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, hlen);

	/* VLAN tag insertion check */
	if (pi->ipi_vtag) {
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
		    TX_NORMAL_DESC2_VLAN_INSERT);
	}

	/* Mark it as the First Descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

	/* Mark it as a NORMAL descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

	/*
	 * Set the OWN bit if this is not the first descriptor.  For the
	 * first descriptor, the OWN bit is set last so that the hardware
	 * starts processing the chain only after all of its descriptors
	 * have been set up.
	 */
	if (cur != start)
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (pi->ipi_csum_flags & CSUM_TSO) {
		/* Enable TSO */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);

		tcp_payload_len = pi->ipi_len - hlen;

		/* Set the TCP payload length */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
		    tcp_payload_len);

		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
		    pi->ipi_tcp_hlen / 4);

		axgbe_printf(1, "tcp_payload %d tcp_hlen %d\n", tcp_payload_len,
		    pi->ipi_tcp_hlen / 4);
	} else {
		/* Enable CRC and pad insertion */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

		/* Enable HW CSUM */
		if (pi->ipi_csum_flags)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);

		/* Set the total length to be transmitted */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, pi->ipi_len);
	}

	cur++;

	for (cur_seg = 0; cur_seg < pi->ipi_nsegs; cur_seg++) {

		if (cur_seg == 0) {
			offset = hlen;
			datalen = pi->ipi_segs[cur_seg].ds_len - hlen;
		} else {
			offset = 0;
			datalen = pi->ipi_segs[cur_seg].ds_len;
		}

		if (datalen) {
			rdata = XGBE_GET_DESC_DATA(ring, cur);
			rdesc = rdata->rdesc;

			/* Update the buffer address */
			rdesc->desc0 =
			    cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));
			rdesc->desc1 =
			    cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));

			/* Update the buffer length */
			XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, datalen);

			/* Set the OWN bit */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

			/* Mark it as a NORMAL descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

			/* Enable HW CSUM */
			if (pi->ipi_csum_flags)
				XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);

			axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
			    "ipi_len 0x%x\n", __func__, cur,
			    lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
			    upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
			    (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);

			cur++;
		}
	}

	/* Set the LAST bit on the last descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

	/* Set the IC bit based on the Tx coalescing settings */
	if (tx_set_ic)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);

	wmb();

	/* Set the OWN bit on the first descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, start);
	rdesc = rdata->rdesc;
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	ring->cur = pi->ipi_new_pidx = (cur & (ring->rdesc_count - 1));

	axgbe_printf(1, "<-- %s: end cur %d dirty %d\n", __func__, ring->cur,
	    ring->dirty);

	return (0);
}
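/*
 * Illustrative descriptor chain built by axgbe_isc_txd_encap() for a
 * two-segment TSO packet whose MSS differs from ring->tx.cur_mss (assuming
 * the header is fully contained in segment 0):
 *
 *   start + 0: context descriptor (CTXT=1, TCMSSV=1, new MSS) - OWN set last
 *   start + 1: first normal descriptor (FD=1, TSE=1, covers the header bytes)
 *   start + 2: remainder of segment 0 (OWN=1)
 *   start + 3: segment 1 (OWN=1, LD=1, IC when tx_set_ic)
 *
 * Ownership of the descriptor at the original ring->cur is handed to the
 * DMA engine only after the wmb(), so hardware never starts on a partially
 * built chain.
 */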
static void
axgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_channel *channel = pdata->channel[txqid];
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata = XGBE_GET_DESC_DATA(ring, pidx);

	axgbe_printf(1, "--> %s: flush txq %d pidx %d cur %d dirty %d\n",
	    __func__, txqid, pidx, ring->cur, ring->dirty);

	/* Ring the doorbell */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
	    lower_32_bits(rdata->rdata_paddr));
}

static int
axgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_hw_if *hw_if = &sc->pdata.hw_if;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_channel *channel = pdata->channel[txqid];
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	int processed = 0;

	axgbe_printf(1, "%s: txq %d clear %d cur %d dirty %d\n",
	    __func__, txqid, clear, ring->cur, ring->dirty);

	if (__predict_false(ring->cur == ring->dirty)) {
		axgbe_printf(1, "<-- %s: cur(%d) equals dirty(%d)\n",
		    __func__, ring->cur, ring->dirty);
		return (0);
	}

	/* Check whether the first dirty descriptor is Tx complete */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
	if (!hw_if->tx_complete(rdata->rdesc)) {
		axgbe_printf(1, "<-- %s: (dirty %d)\n", __func__, ring->dirty);
		return (0);
	}

	/*
	 * If clear is false, just let the caller know that there
	 * are descriptors to reclaim.
	 */
	if (!clear) {
		axgbe_printf(1, "<-- %s: (!clear)\n", __func__);
		return (1);
	}

	do {
		hw_if->tx_desc_reset(rdata);
		processed++;
		ring->dirty = (ring->dirty + 1) & (ring->rdesc_count - 1);

		/*
		 * tx_complete() also returns true for descriptors that were
		 * never used, so only check descriptors up to ring->cur.
		 */
		if (ring->cur == ring->dirty)
			break;

		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
	} while (hw_if->tx_complete(rdata->rdesc));

	axgbe_printf(1, "<-- %s: processed %d cur %d dirty %d\n", __func__,
	    processed, ring->cur, ring->dirty);

	return (processed);
}
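/*
 * Note on the ring indices used above: ring->cur is the producer index
 * advanced by axgbe_isc_txd_encap() and ring->dirty is the consumer index
 * advanced here as completed descriptors are reclaimed.  Both wrap with
 * "& (ring->rdesc_count - 1)", which only works when rdesc_count is a
 * power of two; for example, with rdesc_count = 512 an index of 511
 * advances to 0.
 */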
static void
axgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_channel *channel = pdata->channel[iru->iru_qsidx];
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	unsigned int rx_usecs = pdata->rx_usecs;
	unsigned int rx_frames = pdata->rx_frames;
	unsigned int inte;
	uint8_t count = iru->iru_count;
	int i, j;
	bool config_intr = false;

	axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d count %d ring cur %d "
	    "dirty %d\n", __func__, iru->iru_qsidx, iru->iru_flidx,
	    iru->iru_pidx, count, ring->cur, ring->dirty);

	for (i = iru->iru_pidx, j = 0; j < count; i++, j++) {

		if (i == sc->scctx->isc_nrxd[0])
			i = 0;

		rdata = XGBE_GET_DESC_DATA(ring, i);
		rdesc = rdata->rdesc;

		if (__predict_false(XGMAC_GET_BITS_LE(rdesc->desc3,
		    RX_NORMAL_DESC3, OWN))) {
			axgbe_error("%s: refill clash, cur %d dirty %d index %d "
			    "pidx %d\n", __func__, ring->cur, ring->dirty, j, i);
		}

		if (pdata->sph_enable) {
			if (iru->iru_flidx == 0) {

				/* Fill the header/buffer1 address */
				rdesc->desc0 =
				    cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
				rdesc->desc1 =
				    cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));
			} else {

				/* Fill the data/buffer2 address */
				rdesc->desc2 =
				    cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
				rdesc->desc3 =
				    cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));

				config_intr = true;
			}
		} else {
			/* Fill the header/buffer1 address */
			rdesc->desc0 = rdesc->desc2 =
			    cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
			rdesc->desc1 = rdesc->desc3 =
			    cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));

			config_intr = true;
		}

		if (config_intr) {

			if (!rx_usecs && !rx_frames) {
				/* No coalescing, interrupt for every descriptor */
				inte = 1;
			} else {
				/* Set the interrupt based on the Rx frame coalescing setting */
				if (rx_frames && !((ring->dirty + 1) % rx_frames))
					inte = 1;
				else
					inte = 0;
			}

			XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);

			XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

			wmb();

			ring->dirty = ((ring->dirty + 1) & (ring->rdesc_count - 1));

			config_intr = false;
		}
	}

	axgbe_printf(1, "<-- %s: rxq: %d cur: %d dirty: %d\n", __func__,
	    channel->queue_index, ring->cur, ring->dirty);
}

static void
axgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx, qidx_t pidx)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_channel *channel = pdata->channel[qsidx];
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d cur %d dirty %d\n",
	    __func__, qsidx, flidx, pidx, ring->cur, ring->dirty);

	rdata = XGBE_GET_DESC_DATA(ring, pidx);

	/*
	 * Update the Rx descriptor tail pointer in hardware to indicate
	 * that new buffers are present in the allocated memory region.
	 */
	if (!pdata->sph_enable || flidx == 1) {
		XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
		    lower_32_bits(rdata->rdata_paddr));
	}
}
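/*
 * Refill/flush interplay when split header (pdata->sph_enable) is on:
 * free list 0 supplies header buffers (written to desc0/desc1) and free
 * list 1 supplies data buffers (written to desc2/desc3).  A descriptor is
 * handed back to hardware (OWN set) only once its data buffer has been
 * filled, and the tail-pointer doorbell in axgbe_isc_rxd_flush() is rung
 * only for flidx 1.  With split header off, the same buffer address is
 * programmed into both halves and every refill call completes the
 * descriptor.
 */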
static int
axgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t idx, qidx_t budget)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_channel *channel = pdata->channel[qsidx];
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	unsigned int cur;
	int count = 0;
	uint8_t incomplete = 1, context_next = 0, running = 0;

	axgbe_printf(1, "--> %s: rxq %d idx %d budget %d cur %d dirty %d\n",
	    __func__, qsidx, idx, budget, ring->cur, ring->dirty);

	if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state))) {
		axgbe_printf(0, "%s: Polling when XGBE_DOWN\n", __func__);
		return (count);
	}

	cur = ring->cur;
	for (count = 0; count <= budget; ) {

		rdata = XGBE_GET_DESC_DATA(ring, cur);
		rdesc = rdata->rdesc;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
			break;

		running = 1;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
			incomplete = 0;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
			context_next = 1;

		if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT))
			context_next = 0;

		cur = (cur + 1) & (ring->rdesc_count - 1);

		if (incomplete || context_next)
			continue;

		/* Increment the packet count and reset state for the next full packet */
		count++;
		incomplete = 1;
		context_next = 0;
		running = 0;
	}

	axgbe_printf(1, "--> %s: rxq %d cur %d incomp %d con_next %d running %d "
	    "count %d\n", __func__, qsidx, cur, incomplete, context_next,
	    running, count);

	return (count);
}

static unsigned int
xgbe_rx_buf1_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata,
    struct xgbe_packet_data *packet)
{
	unsigned int ret = 0;

	if (pdata->sph_enable) {
		/* Always zero if not the first descriptor */
		if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) {
			axgbe_printf(1, "%s: Not First\n", __func__);
			return (0);
		}
	}

	/* First descriptor with split header, return the header length */
	if (rdata->rx.hdr_len) {
		axgbe_printf(1, "%s: hdr_len %d\n", __func__, rdata->rx.hdr_len);
		return (rdata->rx.hdr_len);
	}

	/*
	 * First descriptor but not the last descriptor and no split header,
	 * so the full buffer was used.  256 is the hardcoded maximum
	 * split-header size defined by the hardware.
	 */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
		axgbe_printf(1, "%s: Not last %d\n", __func__,
		    pdata->rx_buf_size);
		if (pdata->sph_enable) {
			return (256);
		} else {
			return (pdata->rx_buf_size);
		}
	}

	/*
	 * First descriptor and last descriptor and no split header, so
	 * calculate how much of the buffer was used: return either the
	 * segment length or the remaining bytes of the packet.
	 */
	axgbe_printf(1, "%s: pkt_len %d buf_size %d\n", __func__, rdata->rx.len,
	    pdata->rx_buf_size);

	if (pdata->sph_enable) {
		ret = min_t(unsigned int, 256, rdata->rx.len);
	} else {
		ret = rdata->rx.len;
	}

	return (ret);
}
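/*
 * Illustrative buffer1 length cases (assumed values): with split header
 * enabled and a received frame whose 54 header bytes were split into
 * buffer 1 by the hardware, the first descriptor carries rx.hdr_len = 54
 * and xgbe_rx_buf1_len() returns 54; the payload is then accounted to
 * buffer 2 by xgbe_rx_buf2_len().  When no split happened, at most 256
 * bytes are attributed to buffer 1, 256 being the hardware's maximum
 * split-header size noted above.
 */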
static unsigned int
xgbe_rx_buf2_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata,
    struct xgbe_packet_data *packet, unsigned int len)
{

	/* Always the full buffer if not the last descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
		axgbe_printf(1, "%s: Not last %d\n", __func__, pdata->rx_buf_size);
		return (pdata->rx_buf_size);
	}

	/*
	 * Last descriptor, so calculate how much of the buffer was used
	 * for the last bit of data.
	 */
	return ((rdata->rx.len != 0) ? (rdata->rx.len - len) : 0);
}

static inline void
axgbe_add_frag(struct xgbe_prv_data *pdata, if_rxd_info_t ri, int idx, int len,
    int pos, int flid)
{
	axgbe_printf(2, "idx %d len %d pos %d flid %d\n", idx, len, pos, flid);
	ri->iri_frags[pos].irf_flid = flid;
	ri->iri_frags[pos].irf_idx = idx;
	ri->iri_frags[pos].irf_len = len;
}
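/*
 * Each axgbe_add_frag() call records one descriptor's contribution to the
 * current packet in ri->iri_frags[]: irf_flid selects the free list the
 * buffer came from (0 = header, 1 = data when split header is enabled),
 * irf_idx is the Rx descriptor index identifying that buffer, and irf_len
 * is the number of valid bytes.  iflib later assembles these fragments
 * into an mbuf chain for the stack.
 */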
static int
axgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel = pdata->channel[ri->iri_qsidx];
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_packet_data *packet = &ring->packet_data;
	struct xgbe_ring_data *rdata;
	unsigned int last, context_next, context;
	unsigned int buf1_len, buf2_len, max_len, len = 0, prev_cur;
	int i = 0;

	axgbe_printf(2, "%s: rxq %d cidx %d cur %d dirty %d\n", __func__,
	    ri->iri_qsidx, ri->iri_cidx, ring->cur, ring->dirty);

	memset(packet, 0, sizeof(struct xgbe_packet_data));

	while (1) {

read_again:
		if (hw_if->dev_read(channel)) {
			axgbe_printf(2, "<-- %s: OWN bit seen on %d\n",
			    __func__, ring->cur);
			break;
		}

		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		prev_cur = ring->cur;
		ring->cur = (ring->cur + 1) & (ring->rdesc_count - 1);

		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    LAST);

		context_next = XGMAC_GET_BITS(packet->attributes,
		    RX_PACKET_ATTRIBUTES, CONTEXT_NEXT);

		context = XGMAC_GET_BITS(packet->attributes,
		    RX_PACKET_ATTRIBUTES, CONTEXT);

		if (!context) {
			/* Get the data length in the descriptor buffers */
			buf1_len = xgbe_rx_buf1_len(pdata, rdata, packet);
			len += buf1_len;
			if (pdata->sph_enable) {
				buf2_len = xgbe_rx_buf2_len(pdata, rdata, packet, len);
				len += buf2_len;
			}
		} else
			buf1_len = buf2_len = 0;

		if (packet->errors)
			axgbe_printf(1, "%s: last %d context %d con_next %d buf1 %d "
			    "buf2 %d len %d frags %d error %d\n", __func__, last, context,
			    context_next, buf1_len, buf2_len, len, i, packet->errors);

		axgbe_add_frag(pdata, ri, prev_cur, buf1_len, i, 0);
		i++;
		if (pdata->sph_enable) {
			axgbe_add_frag(pdata, ri, prev_cur, buf2_len, i, 1);
			i++;
		}

		if (!last || context_next)
			goto read_again;

		break;
	}

	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE)) {
		ri->iri_csum_flags |= CSUM_IP_CHECKED;
		ri->iri_csum_flags |= CSUM_IP_VALID;
		axgbe_printf(2, "%s: csum flags 0x%x\n", __func__, ri->iri_csum_flags);
	}

	max_len = if_getmtu(pdata->netdev) + ETH_HLEN;
	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, VLAN_CTAG)) {
		ri->iri_flags |= M_VLANTAG;
		ri->iri_vtag = packet->vlan_ctag;
		max_len += VLAN_HLEN;
		axgbe_printf(2, "%s: iri_flags 0x%x vtag 0x%x\n", __func__,
		    ri->iri_flags, ri->iri_vtag);
	}

	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, RSS_HASH)) {
		ri->iri_flowid = packet->rss_hash;
		ri->iri_rsstype = packet->rss_hash_type;
		axgbe_printf(2, "%s: hash 0x%x/0x%x rsstype 0x%x/0x%x\n",
		    __func__, packet->rss_hash, ri->iri_flowid,
		    packet->rss_hash_type, ri->iri_rsstype);
	}
	if (__predict_false(len == 0))
		axgbe_printf(1, "%s: Discarding Zero len packet\n", __func__);

	if (__predict_false(len > max_len))
		axgbe_error("%s: Big packet %d/%d\n", __func__, len, max_len);

	if (__predict_false(packet->errors))
		axgbe_printf(1, "<-- %s: rxq: %d len: %d frags: %d cidx %d cur: %d "
		    "dirty: %d error 0x%x\n", __func__, ri->iri_qsidx, len, i,
		    ri->iri_cidx, ring->cur, ring->dirty, packet->errors);

	axgbe_printf(1, "%s: Packet len %d frags %d\n", __func__, len, i);

	ri->iri_len = len;
	ri->iri_nfrags = i;

	return (0);
}