/*-
 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Theory of operation:
 *
 * Tx queue allocation and mapping
 *
 * One Tx queue with checksum offload enabled is allocated per Rx channel
 * (event queue).  In addition, two Tx queues (one without checksum offload
 * and one with IP checksum offload only) are allocated and bound to event
 * queue 0.  sfxge_txq_type is used as the Tx queue label.
 *
 * So the mapping from (event queue, label) to Tx queue index is:
 *	if event queue index is 0, TxQ-index = TxQ-label (in [0..SFXGE_TXQ_NTYPES))
 *	else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
 * See sfxge_get_txq_by_label() in sfxge_ev.c.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include "common/efx.h"

#include "sfxge.h"
#include "sfxge_tx.h"

/* Set the block level to ensure there is space to generate a
 * large number of descriptors for TSO.  With minimum MSS and
 * maximum mbuf length we might need more than a ring-ful of
 * descriptors, but this should not happen in practice except
 * due to deliberate attack.  In that case we will truncate
 * the output at a packet boundary.  Allow for a reasonable
 * minimum MSS of 512.
 */
#define SFXGE_TSO_MAX_DESC ((65535 / 512) * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1)
#define SFXGE_TXQ_BLOCK_LEVEL (SFXGE_NDESCS - SFXGE_TSO_MAX_DESC)
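
/*
 * Illustrative arithmetic for the bound above (an editorial sketch, not part
 * of the original driver comments): with the assumed minimum MSS of 512, a
 * maximal 65535-byte TSO payload splits into 65535 / 512 = 127 (integer
 * division) output segments, and each segment needs at most two descriptors
 * (one for the generated header, one for payload), giving 254.  Each extra
 * input DMA segment boundary can cost one more payload descriptor, hence the
 * "+ SFXGE_TX_MAPPING_MAX_SEG - 1" term.  The queue is treated as blocked
 * once fewer than SFXGE_TSO_MAX_DESC free entries remain, so a worst-case
 * TSO burst can always be posted.
 */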

/* Forward declarations. */
static inline void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
static void sfxge_tx_qunblock(struct sfxge_txq *txq);
static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
			      const bus_dma_segment_t *dma_seg, int n_dma_seg);

void
sfxge_tx_qcomplete(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_evq *evq;
	unsigned int completed;

	sc = txq->sc;
	evq = sc->evq[txq->evq_index];

	mtx_assert(&evq->lock, MA_OWNED);

	completed = txq->completed;
	while (completed != txq->pending) {
		struct sfxge_tx_mapping *stmp;
		unsigned int id;

		id = completed++ & (SFXGE_NDESCS - 1);

		stmp = &txq->stmp[id];
		if (stmp->flags & TX_BUF_UNMAP) {
			bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
			if (stmp->flags & TX_BUF_MBUF) {
				struct mbuf *m = stmp->u.mbuf;
				do
					m = m_free(m);
				while (m != NULL);
			} else {
				free(stmp->u.heap_buf, M_SFXGE);
			}
			stmp->flags = 0;
		}
	}
	txq->completed = completed;

	/* Check whether we need to unblock the queue. */
	mb();
	if (txq->blocked) {
		unsigned int level;

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL)
			sfxge_tx_qunblock(txq);
	}
}

#ifdef SFXGE_HAVE_MQ

/*
 * Reorder the put list and append it to the get list.
 */
static void
sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *get_next, **get_tailp;
	volatile uintptr_t *putp;
	uintptr_t put;
	unsigned int count;

	mtx_assert(&txq->lock, MA_OWNED);

	stdp = &txq->dpl;

	/* Acquire the put list. */
	putp = &stdp->std_put;
	put = atomic_readandclear_ptr(putp);
	mbuf = (void *)put;

	if (mbuf == NULL)
		return;

	/* Reverse the put list. */
	get_tailp = &mbuf->m_nextpkt;
	get_next = NULL;

	count = 0;
	do {
		struct mbuf *put_next;

		put_next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = get_next;
		get_next = mbuf;
		mbuf = put_next;

		count++;
	} while (mbuf != NULL);

	/* Append the reversed put list to the get list. */
	KASSERT(*get_tailp == NULL, ("*get_tailp != NULL"));
	*stdp->std_getp = get_next;
	stdp->std_getp = get_tailp;
	stdp->std_count += count;
}

#endif /* SFXGE_HAVE_MQ */

static void
sfxge_tx_qreap(struct sfxge_txq *txq)
{
	mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);

	txq->reaped = txq->completed;
}

static void
sfxge_tx_qlist_post(struct sfxge_txq *txq)
{
	unsigned int old_added;
	unsigned int level;
	int rc;

	mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);

	KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
	KASSERT(txq->n_pend_desc <= SFXGE_TSO_MAX_DESC,
	    ("txq->n_pend_desc too large"));
	KASSERT(!txq->blocked, ("txq->blocked"));

	old_added = txq->added;

	/* Post the fragment list. */
	rc = efx_tx_qpost(txq->common, txq->pend_desc, txq->n_pend_desc,
	    txq->reaped, &txq->added);
	KASSERT(rc == 0, ("efx_tx_qpost() failed"));

	/* If efx_tx_qpost() had to refragment, our information about
	 * buffers to free may be associated with the wrong
	 * descriptors.
	 */
	KASSERT(txq->added - old_added == txq->n_pend_desc,
	    ("efx_tx_qpost() refragmented descriptors"));

	level = txq->added - txq->reaped;
	KASSERT(level <= SFXGE_NDESCS, ("overfilled TX queue"));

	/* Clear the fragment list. */
	txq->n_pend_desc = 0;

	/* Have we reached the block level? */
	if (level < SFXGE_TXQ_BLOCK_LEVEL)
		return;

	/* Reap, and check again */
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL)
		return;

	txq->blocked = 1;

	/*
	 * Avoid a race with completion interrupt handling that could leave
	 * the queue blocked.
	 */
	mb();
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL) {
		mb();
		txq->blocked = 0;
	}
}

static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
{
	bus_dmamap_t *used_map;
	bus_dmamap_t map;
	bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG];
	unsigned int id;
	struct sfxge_tx_mapping *stmp;
	efx_buffer_t *desc;
	int n_dma_seg;
	int rc;
	int i;

	KASSERT(!txq->blocked, ("txq->blocked"));

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
		prefetch_read_many(mbuf->m_data);

	if (txq->init_state != SFXGE_TXQ_STARTED) {
		rc = EINTR;
		goto reject;
	}

	/* Load the packet for DMA. */
	id = txq->added & (SFXGE_NDESCS - 1);
	stmp = &txq->stmp[id];
	rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
	    mbuf, dma_seg, &n_dma_seg, 0);
	if (rc == EFBIG) {
		/* Try again. */
		struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT,
		    SFXGE_TX_MAPPING_MAX_SEG);
		if (new_mbuf == NULL)
			goto reject;
		++txq->collapses;
		mbuf = new_mbuf;
		rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag,
		    stmp->map, mbuf,
		    dma_seg, &n_dma_seg, 0);
	}
	if (rc != 0)
		goto reject;

	/* Make the packet visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE);

	used_map = &stmp->map;

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
		rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg);
		if (rc < 0)
			goto reject_mapped;
		stmp = &txq->stmp[rc];
	} else {
		/* Add the mapping to the fragment list, and set flags
		 * for the buffer.
		 */
		i = 0;
		for (;;) {
			desc = &txq->pend_desc[i];
			desc->eb_addr = dma_seg[i].ds_addr;
			desc->eb_size = dma_seg[i].ds_len;
			if (i == n_dma_seg - 1) {
				desc->eb_eop = 1;
				break;
			}
			desc->eb_eop = 0;
			i++;

			stmp->flags = 0;
			if (__predict_false(stmp ==
					    &txq->stmp[SFXGE_NDESCS - 1]))
				stmp = &txq->stmp[0];
			else
				stmp++;
		}
		txq->n_pend_desc = n_dma_seg;
	}

	/*
	 * If the mapping required more than one descriptor
	 * then we need to associate the DMA map with the last
	 * descriptor, not the first.
	 */
	if (used_map != &stmp->map) {
		map = stmp->map;
		stmp->map = *used_map;
		*used_map = map;
	}

	stmp->u.mbuf = mbuf;
	stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF;

	/* Post the fragment list. */
	sfxge_tx_qlist_post(txq);

	return 0;

reject_mapped:
	bus_dmamap_unload(txq->packet_dma_tag, *used_map);
reject:
	/* Drop the packet on the floor. */
	m_freem(mbuf);
	++txq->drops;

	return rc;
}

#ifdef SFXGE_HAVE_MQ

/*
 * Drain the deferred packet list into the transmit queue.
 */
362 */ 363 static void 364 sfxge_tx_qdpl_drain(struct sfxge_txq *txq) 365 { 366 struct sfxge_softc *sc; 367 struct sfxge_tx_dpl *stdp; 368 struct mbuf *mbuf, *next; 369 unsigned int count; 370 unsigned int pushed; 371 int rc; 372 373 mtx_assert(&txq->lock, MA_OWNED); 374 375 sc = txq->sc; 376 stdp = &txq->dpl; 377 pushed = txq->added; 378 379 prefetch_read_many(sc->enp); 380 prefetch_read_many(txq->common); 381 382 mbuf = stdp->std_get; 383 count = stdp->std_count; 384 385 while (count != 0) { 386 KASSERT(mbuf != NULL, ("mbuf == NULL")); 387 388 next = mbuf->m_nextpkt; 389 mbuf->m_nextpkt = NULL; 390 391 ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */ 392 393 if (next != NULL) 394 prefetch_read_many(next); 395 396 rc = sfxge_tx_queue_mbuf(txq, mbuf); 397 --count; 398 mbuf = next; 399 if (rc != 0) 400 continue; 401 402 if (txq->blocked) 403 break; 404 405 /* Push the fragments to the hardware in batches. */ 406 if (txq->added - pushed >= SFXGE_TX_BATCH) { 407 efx_tx_qpush(txq->common, txq->added); 408 pushed = txq->added; 409 } 410 } 411 412 if (count == 0) { 413 KASSERT(mbuf == NULL, ("mbuf != NULL")); 414 stdp->std_get = NULL; 415 stdp->std_count = 0; 416 stdp->std_getp = &stdp->std_get; 417 } else { 418 stdp->std_get = mbuf; 419 stdp->std_count = count; 420 } 421 422 if (txq->added != pushed) 423 efx_tx_qpush(txq->common, txq->added); 424 425 KASSERT(txq->blocked || stdp->std_count == 0, 426 ("queue unblocked but count is non-zero")); 427 } 428 429 #define SFXGE_TX_QDPL_PENDING(_txq) \ 430 ((_txq)->dpl.std_put != 0) 431 432 /* 433 * Service the deferred packet list. 434 * 435 * NOTE: drops the txq mutex! 436 */ 437 static inline void 438 sfxge_tx_qdpl_service(struct sfxge_txq *txq) 439 { 440 mtx_assert(&txq->lock, MA_OWNED); 441 442 do { 443 if (SFXGE_TX_QDPL_PENDING(txq)) 444 sfxge_tx_qdpl_swizzle(txq); 445 446 if (!txq->blocked) 447 sfxge_tx_qdpl_drain(txq); 448 449 mtx_unlock(&txq->lock); 450 } while (SFXGE_TX_QDPL_PENDING(txq) && 451 mtx_trylock(&txq->lock)); 452 } 453 454 /* 455 * Put a packet on the deferred packet list. 456 * 457 * If we are called with the txq lock held, we put the packet on the "get 458 * list", otherwise we atomically push it on the "put list". The swizzle 459 * function takes care of ordering. 460 * 461 * The length of the put list is bounded by SFXGE_TX_MAX_DEFFERED. We 462 * overload the csum_data field in the mbuf to keep track of this length 463 * because there is no cheap alternative to avoid races. 
464 */ 465 static inline int 466 sfxge_tx_qdpl_put(struct sfxge_txq *txq, struct mbuf *mbuf, int locked) 467 { 468 struct sfxge_tx_dpl *stdp; 469 470 stdp = &txq->dpl; 471 472 KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL")); 473 474 if (locked) { 475 mtx_assert(&txq->lock, MA_OWNED); 476 477 sfxge_tx_qdpl_swizzle(txq); 478 479 if (stdp->std_count >= SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT) 480 return (ENOBUFS); 481 482 *(stdp->std_getp) = mbuf; 483 stdp->std_getp = &mbuf->m_nextpkt; 484 stdp->std_count++; 485 } else { 486 volatile uintptr_t *putp; 487 uintptr_t old; 488 uintptr_t new; 489 unsigned old_len; 490 491 putp = &stdp->std_put; 492 new = (uintptr_t)mbuf; 493 494 do { 495 old = *putp; 496 if (old) { 497 struct mbuf *mp = (struct mbuf *)old; 498 old_len = mp->m_pkthdr.csum_data; 499 } else 500 old_len = 0; 501 if (old_len >= SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT) 502 return (ENOBUFS); 503 mbuf->m_pkthdr.csum_data = old_len + 1; 504 mbuf->m_nextpkt = (void *)old; 505 } while (atomic_cmpset_ptr(putp, old, new) == 0); 506 } 507 508 return (0); 509 } 510 511 /* 512 * Called from if_transmit - will try to grab the txq lock and enqueue to the 513 * put list if it succeeds, otherwise will push onto the defer list. 514 */ 515 int 516 sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m) 517 { 518 int locked; 519 int rc; 520 521 if (!SFXGE_LINK_UP(txq->sc)) { 522 rc = ENETDOWN; 523 goto fail; 524 } 525 526 /* 527 * Try to grab the txq lock. If we are able to get the lock, 528 * the packet will be appended to the "get list" of the deferred 529 * packet list. Otherwise, it will be pushed on the "put list". 530 */ 531 locked = mtx_trylock(&txq->lock); 532 533 if (sfxge_tx_qdpl_put(txq, m, locked) != 0) { 534 if (locked) 535 mtx_unlock(&txq->lock); 536 rc = ENOBUFS; 537 goto fail; 538 } 539 540 /* 541 * Try to grab the lock again. 542 * 543 * If we are able to get the lock, we need to process the deferred 544 * packet list. If we are not able to get the lock, another thread 545 * is processing the list. 546 */ 547 if (!locked) 548 locked = mtx_trylock(&txq->lock); 549 550 if (locked) { 551 /* Try to service the list. */ 552 sfxge_tx_qdpl_service(txq); 553 /* Lock has been dropped. */ 554 } 555 556 return (0); 557 558 fail: 559 m_freem(m); 560 atomic_add_long(&txq->early_drops, 1); 561 return (rc); 562 563 } 564 565 static void 566 sfxge_tx_qdpl_flush(struct sfxge_txq *txq) 567 { 568 struct sfxge_tx_dpl *stdp = &txq->dpl; 569 struct mbuf *mbuf, *next; 570 571 mtx_lock(&txq->lock); 572 573 sfxge_tx_qdpl_swizzle(txq); 574 for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) { 575 next = mbuf->m_nextpkt; 576 m_freem(mbuf); 577 } 578 stdp->std_get = NULL; 579 stdp->std_count = 0; 580 stdp->std_getp = &stdp->std_get; 581 582 mtx_unlock(&txq->lock); 583 } 584 585 void 586 sfxge_if_qflush(struct ifnet *ifp) 587 { 588 struct sfxge_softc *sc; 589 int i; 590 591 sc = ifp->if_softc; 592 593 for (i = 0; i < SFXGE_TX_SCALE(sc); i++) 594 sfxge_tx_qdpl_flush(sc->txq[i]); 595 } 596 597 /* 598 * TX start -- called by the stack. 599 */ 600 int 601 sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m) 602 { 603 struct sfxge_softc *sc; 604 struct sfxge_txq *txq; 605 int rc; 606 607 sc = (struct sfxge_softc *)ifp->if_softc; 608 609 KASSERT(ifp->if_flags & IFF_UP, ("interface not up")); 610 611 /* Pick the desired transmit queue. 
	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) {
		int index = 0;

		if (m->m_flags & M_FLOWID) {
			uint32_t hash = m->m_pkthdr.flowid;

			index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX];
		}
		txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index];
	} else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
		txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
	} else {
		txq = sc->txq[SFXGE_TXQ_NON_CKSUM];
	}

	rc = sfxge_tx_packet_add(txq, m);

	return (rc);
}

#else /* !SFXGE_HAVE_MQ */

static void sfxge_if_start_locked(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;
	struct sfxge_txq *txq;
	struct mbuf *mbuf;
	unsigned int pushed[SFXGE_TXQ_NTYPES];
	unsigned int q_index;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (!sc->port.link_up)
		return;

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		pushed[q_index] = txq->added;
	}

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, mbuf);
		if (mbuf == NULL)
			break;

		ETHER_BPF_MTAP(ifp, mbuf); /* packet capture */

		/* Pick the desired transmit queue. */
		if (mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO))
			q_index = SFXGE_TXQ_IP_TCP_UDP_CKSUM;
		else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP)
			q_index = SFXGE_TXQ_IP_CKSUM;
		else
			q_index = SFXGE_TXQ_NON_CKSUM;
		txq = sc->txq[q_index];

		if (sfxge_tx_queue_mbuf(txq, mbuf) != 0)
			continue;

		if (txq->blocked) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed[q_index] >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);
			pushed[q_index] = txq->added;
		}
	}

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		if (txq->added != pushed[q_index])
			efx_tx_qpush(txq->common, txq->added);
	}
}

void sfxge_if_start(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;

	mtx_lock(&sc->tx_lock);
	sfxge_if_start_locked(ifp);
	mtx_unlock(&sc->tx_lock);
}

static inline void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc = txq->sc;
	struct ifnet *ifp = sc->ifnet;

	mtx_assert(&sc->tx_lock, MA_OWNED);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sfxge_if_start_locked(ifp);
	mtx_unlock(&sc->tx_lock);
}

#endif /* SFXGE_HAVE_MQ */

/*
 * Software "TSO".  Not quite as good as doing it in hardware, but
 * still faster than segmenting in the stack.
 */
718 */ 719 720 struct sfxge_tso_state { 721 /* Output position */ 722 unsigned out_len; /* Remaining length in current segment */ 723 unsigned seqnum; /* Current sequence number */ 724 unsigned packet_space; /* Remaining space in current packet */ 725 726 /* Input position */ 727 unsigned dma_seg_i; /* Current DMA segment number */ 728 uint64_t dma_addr; /* DMA address of current position */ 729 unsigned in_len; /* Remaining length in current mbuf */ 730 731 const struct mbuf *mbuf; /* Input mbuf (head of chain) */ 732 u_short protocol; /* Network protocol (after VLAN decap) */ 733 ssize_t nh_off; /* Offset of network header */ 734 ssize_t tcph_off; /* Offset of TCP header */ 735 unsigned header_len; /* Number of bytes of header */ 736 int full_packet_size; /* Number of bytes to put in each outgoing 737 * segment */ 738 }; 739 740 static inline const struct ip *tso_iph(const struct sfxge_tso_state *tso) 741 { 742 KASSERT(tso->protocol == htons(ETHERTYPE_IP), 743 ("tso_iph() in non-IPv4 state")); 744 return (const struct ip *)(tso->mbuf->m_data + tso->nh_off); 745 } 746 static inline const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso) 747 { 748 KASSERT(tso->protocol == htons(ETHERTYPE_IPV6), 749 ("tso_ip6h() in non-IPv6 state")); 750 return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off); 751 } 752 static inline const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso) 753 { 754 return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off); 755 } 756 757 /* Size of preallocated TSO header buffers. Larger blocks must be 758 * allocated from the heap. 759 */ 760 #define TSOH_STD_SIZE 128 761 762 /* At most half the descriptors in the queue at any time will refer to 763 * a TSO header buffer, since they must always be followed by a 764 * payload descriptor referring to an mbuf. 
765 */ 766 #define TSOH_COUNT (SFXGE_NDESCS / 2u) 767 #define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE) 768 #define TSOH_PAGE_COUNT ((TSOH_COUNT + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE) 769 770 static int tso_init(struct sfxge_txq *txq) 771 { 772 struct sfxge_softc *sc = txq->sc; 773 int i, rc; 774 775 /* Allocate TSO header buffers */ 776 txq->tsoh_buffer = malloc(TSOH_PAGE_COUNT * sizeof(txq->tsoh_buffer[0]), 777 M_SFXGE, M_WAITOK); 778 779 for (i = 0; i < TSOH_PAGE_COUNT; i++) { 780 rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]); 781 if (rc) 782 goto fail; 783 } 784 785 return 0; 786 787 fail: 788 while (i-- > 0) 789 sfxge_dma_free(&txq->tsoh_buffer[i]); 790 free(txq->tsoh_buffer, M_SFXGE); 791 txq->tsoh_buffer = NULL; 792 return rc; 793 } 794 795 static void tso_fini(struct sfxge_txq *txq) 796 { 797 int i; 798 799 if (txq->tsoh_buffer) { 800 for (i = 0; i < TSOH_PAGE_COUNT; i++) 801 sfxge_dma_free(&txq->tsoh_buffer[i]); 802 free(txq->tsoh_buffer, M_SFXGE); 803 } 804 } 805 806 static void tso_start(struct sfxge_tso_state *tso, struct mbuf *mbuf) 807 { 808 struct ether_header *eh = mtod(mbuf, struct ether_header *); 809 810 tso->mbuf = mbuf; 811 812 /* Find network protocol and header */ 813 tso->protocol = eh->ether_type; 814 if (tso->protocol == htons(ETHERTYPE_VLAN)) { 815 struct ether_vlan_header *veh = 816 mtod(mbuf, struct ether_vlan_header *); 817 tso->protocol = veh->evl_proto; 818 tso->nh_off = sizeof(*veh); 819 } else { 820 tso->nh_off = sizeof(*eh); 821 } 822 823 /* Find TCP header */ 824 if (tso->protocol == htons(ETHERTYPE_IP)) { 825 KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP, 826 ("TSO required on non-TCP packet")); 827 tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl; 828 } else { 829 KASSERT(tso->protocol == htons(ETHERTYPE_IPV6), 830 ("TSO required on non-IP packet")); 831 KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP, 832 ("TSO required on non-TCP packet")); 833 tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr); 834 } 835 836 /* We assume all headers are linear in the head mbuf */ 837 tso->header_len = tso->tcph_off + 4 * tso_tcph(tso)->th_off; 838 KASSERT(tso->header_len <= mbuf->m_len, ("packet headers fragmented")); 839 tso->full_packet_size = tso->header_len + mbuf->m_pkthdr.tso_segsz; 840 841 tso->seqnum = ntohl(tso_tcph(tso)->th_seq); 842 843 /* These flags must not be duplicated */ 844 KASSERT(!(tso_tcph(tso)->th_flags & (TH_URG | TH_SYN | TH_RST)), 845 ("incompatible TCP flag on TSO packet")); 846 847 tso->out_len = mbuf->m_pkthdr.len - tso->header_len; 848 } 849 850 /* 851 * tso_fill_packet_with_fragment - form descriptors for the current fragment 852 * 853 * Form descriptors for the current fragment, until we reach the end 854 * of fragment or end-of-packet. Return 0 on success, 1 if not enough 855 * space. 
856 */ 857 static void tso_fill_packet_with_fragment(struct sfxge_txq *txq, 858 struct sfxge_tso_state *tso) 859 { 860 efx_buffer_t *desc; 861 int n; 862 863 if (tso->in_len == 0 || tso->packet_space == 0) 864 return; 865 866 KASSERT(tso->in_len > 0, ("TSO input length went negative")); 867 KASSERT(tso->packet_space > 0, ("TSO packet space went negative")); 868 869 n = min(tso->in_len, tso->packet_space); 870 871 tso->packet_space -= n; 872 tso->out_len -= n; 873 tso->in_len -= n; 874 875 desc = &txq->pend_desc[txq->n_pend_desc++]; 876 desc->eb_addr = tso->dma_addr; 877 desc->eb_size = n; 878 desc->eb_eop = tso->out_len == 0 || tso->packet_space == 0; 879 880 tso->dma_addr += n; 881 } 882 883 /* Callback from bus_dmamap_load() for long TSO headers. */ 884 static void tso_map_long_header(void *dma_addr_ret, 885 bus_dma_segment_t *segs, int nseg, 886 int error) 887 { 888 *(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) && 889 __predict_true(nseg == 1)) ? 890 segs->ds_addr : 0); 891 } 892 893 /* 894 * tso_start_new_packet - generate a new header and prepare for the new packet 895 * 896 * Generate a new header and prepare for the new packet. Return 0 on 897 * success, or an error code if failed to alloc header. 898 */ 899 static int tso_start_new_packet(struct sfxge_txq *txq, 900 struct sfxge_tso_state *tso, 901 unsigned int id) 902 { 903 struct sfxge_tx_mapping *stmp = &txq->stmp[id]; 904 struct tcphdr *tsoh_th; 905 unsigned ip_length; 906 caddr_t header; 907 uint64_t dma_addr; 908 bus_dmamap_t map; 909 efx_buffer_t *desc; 910 int rc; 911 912 /* Allocate a DMA-mapped header buffer. */ 913 if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) { 914 unsigned int page_index = (id / 2) / TSOH_PER_PAGE; 915 unsigned int buf_index = (id / 2) % TSOH_PER_PAGE; 916 917 header = (txq->tsoh_buffer[page_index].esm_base + 918 buf_index * TSOH_STD_SIZE); 919 dma_addr = (txq->tsoh_buffer[page_index].esm_addr + 920 buf_index * TSOH_STD_SIZE); 921 map = txq->tsoh_buffer[page_index].esm_map; 922 923 stmp->flags = 0; 924 } else { 925 /* We cannot use bus_dmamem_alloc() as that may sleep */ 926 header = malloc(tso->header_len, M_SFXGE, M_NOWAIT); 927 if (__predict_false(!header)) 928 return ENOMEM; 929 rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map, 930 header, tso->header_len, 931 tso_map_long_header, &dma_addr, 932 BUS_DMA_NOWAIT); 933 if (__predict_false(dma_addr == 0)) { 934 if (rc == 0) { 935 /* Succeeded but got >1 segment */ 936 bus_dmamap_unload(txq->packet_dma_tag, 937 stmp->map); 938 rc = EINVAL; 939 } 940 free(header, M_SFXGE); 941 return rc; 942 } 943 map = stmp->map; 944 945 txq->tso_long_headers++; 946 stmp->u.heap_buf = header; 947 stmp->flags = TX_BUF_UNMAP; 948 } 949 950 tsoh_th = (struct tcphdr *)(header + tso->tcph_off); 951 952 /* Copy and update the headers. */ 953 memcpy(header, tso->mbuf->m_data, tso->header_len); 954 955 tsoh_th->th_seq = htonl(tso->seqnum); 956 tso->seqnum += tso->mbuf->m_pkthdr.tso_segsz; 957 if (tso->out_len > tso->mbuf->m_pkthdr.tso_segsz) { 958 /* This packet will not finish the TSO burst. */ 959 ip_length = tso->full_packet_size - tso->nh_off; 960 tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH); 961 } else { 962 /* This packet will be the last in the TSO burst. 
		ip_length = tso->header_len - tso->nh_off + tso->out_len;
	}

	if (tso->protocol == htons(ETHERTYPE_IP)) {
		struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off);
		tsoh_iph->ip_len = htons(ip_length);
		/* XXX We should increment ip_id, but FreeBSD doesn't
		 * currently allocate extra IDs for multiple segments.
		 */
	} else {
		struct ip6_hdr *tsoh_iph =
			(struct ip6_hdr *)(header + tso->nh_off);
		tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph));
	}

	/* Make the header visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE);

	tso->packet_space = tso->mbuf->m_pkthdr.tso_segsz;
	txq->tso_packets++;

	/* Form a descriptor for this header. */
	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = dma_addr;
	desc->eb_size = tso->header_len;
	desc->eb_eop = 0;

	return 0;
}

static int
sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
		   const bus_dma_segment_t *dma_seg, int n_dma_seg)
{
	struct sfxge_tso_state tso;
	unsigned int id, next_id;

	tso_start(&tso, mbuf);

	/* Grab the first payload fragment. */
	if (dma_seg->ds_len == tso.header_len) {
		--n_dma_seg;
		KASSERT(n_dma_seg, ("no payload found in TSO packet"));
		++dma_seg;
		tso.in_len = dma_seg->ds_len;
		tso.dma_addr = dma_seg->ds_addr;
	} else {
		tso.in_len = dma_seg->ds_len - tso.header_len;
		tso.dma_addr = dma_seg->ds_addr + tso.header_len;
	}

	id = txq->added & (SFXGE_NDESCS - 1);
	if (__predict_false(tso_start_new_packet(txq, &tso, id)))
		return -1;

	while (1) {
		id = (id + 1) & (SFXGE_NDESCS - 1);
		tso_fill_packet_with_fragment(txq, &tso);

		/* Move onto the next fragment? */
		if (tso.in_len == 0) {
			--n_dma_seg;
			if (n_dma_seg == 0)
				break;
			++dma_seg;
			tso.in_len = dma_seg->ds_len;
			tso.dma_addr = dma_seg->ds_addr;
		}

		/* End of packet? */
		if (tso.packet_space == 0) {
			/* If the queue is now full due to tiny MSS,
			 * or we can't create another header, discard
			 * the remainder of the input mbuf but do not
			 * roll back the work we have done.
			 */
1038 */ 1039 if (txq->n_pend_desc > 1040 SFXGE_TSO_MAX_DESC - (1 + SFXGE_TX_MAPPING_MAX_SEG)) 1041 break; 1042 next_id = (id + 1) & (SFXGE_NDESCS - 1); 1043 if (__predict_false(tso_start_new_packet(txq, &tso, 1044 next_id))) 1045 break; 1046 id = next_id; 1047 } 1048 } 1049 1050 txq->tso_bursts++; 1051 return id; 1052 } 1053 1054 static void 1055 sfxge_tx_qunblock(struct sfxge_txq *txq) 1056 { 1057 struct sfxge_softc *sc; 1058 struct sfxge_evq *evq; 1059 1060 sc = txq->sc; 1061 evq = sc->evq[txq->evq_index]; 1062 1063 mtx_assert(&evq->lock, MA_OWNED); 1064 1065 if (txq->init_state != SFXGE_TXQ_STARTED) 1066 return; 1067 1068 mtx_lock(SFXGE_TXQ_LOCK(txq)); 1069 1070 if (txq->blocked) { 1071 unsigned int level; 1072 1073 level = txq->added - txq->completed; 1074 if (level <= SFXGE_TXQ_UNBLOCK_LEVEL) 1075 txq->blocked = 0; 1076 } 1077 1078 sfxge_tx_qdpl_service(txq); 1079 /* note: lock has been dropped */ 1080 } 1081 1082 void 1083 sfxge_tx_qflush_done(struct sfxge_txq *txq) 1084 { 1085 1086 txq->flush_state = SFXGE_FLUSH_DONE; 1087 } 1088 1089 static void 1090 sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index) 1091 { 1092 struct sfxge_txq *txq; 1093 struct sfxge_evq *evq; 1094 unsigned int count; 1095 1096 txq = sc->txq[index]; 1097 evq = sc->evq[txq->evq_index]; 1098 1099 mtx_lock(SFXGE_TXQ_LOCK(txq)); 1100 1101 KASSERT(txq->init_state == SFXGE_TXQ_STARTED, 1102 ("txq->init_state != SFXGE_TXQ_STARTED")); 1103 1104 txq->init_state = SFXGE_TXQ_INITIALIZED; 1105 txq->flush_state = SFXGE_FLUSH_PENDING; 1106 1107 /* Flush the transmit queue. */ 1108 efx_tx_qflush(txq->common); 1109 1110 mtx_unlock(SFXGE_TXQ_LOCK(txq)); 1111 1112 count = 0; 1113 do { 1114 /* Spin for 100ms. */ 1115 DELAY(100000); 1116 1117 if (txq->flush_state != SFXGE_FLUSH_PENDING) 1118 break; 1119 } while (++count < 20); 1120 1121 mtx_lock(&evq->lock); 1122 mtx_lock(SFXGE_TXQ_LOCK(txq)); 1123 1124 KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED, 1125 ("txq->flush_state == SFXGE_FLUSH_FAILED")); 1126 1127 txq->flush_state = SFXGE_FLUSH_DONE; 1128 1129 txq->blocked = 0; 1130 txq->pending = txq->added; 1131 1132 sfxge_tx_qcomplete(txq); 1133 KASSERT(txq->completed == txq->added, 1134 ("txq->completed != txq->added")); 1135 1136 sfxge_tx_qreap(txq); 1137 KASSERT(txq->reaped == txq->completed, 1138 ("txq->reaped != txq->completed")); 1139 1140 txq->added = 0; 1141 txq->pending = 0; 1142 txq->completed = 0; 1143 txq->reaped = 0; 1144 1145 /* Destroy the common code transmit queue. */ 1146 efx_tx_qdestroy(txq->common); 1147 txq->common = NULL; 1148 1149 efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, 1150 EFX_TXQ_NBUFS(SFXGE_NDESCS)); 1151 1152 mtx_unlock(&evq->lock); 1153 mtx_unlock(SFXGE_TXQ_LOCK(txq)); 1154 } 1155 1156 static int 1157 sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index) 1158 { 1159 struct sfxge_txq *txq; 1160 efsys_mem_t *esmp; 1161 uint16_t flags; 1162 struct sfxge_evq *evq; 1163 int rc; 1164 1165 txq = sc->txq[index]; 1166 esmp = &txq->mem; 1167 evq = sc->evq[txq->evq_index]; 1168 1169 KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, 1170 ("txq->init_state != SFXGE_TXQ_INITIALIZED")); 1171 KASSERT(evq->init_state == SFXGE_EVQ_STARTED, 1172 ("evq->init_state != SFXGE_EVQ_STARTED")); 1173 1174 /* Program the buffer table. */ 1175 if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp, 1176 EFX_TXQ_NBUFS(SFXGE_NDESCS))) != 0) 1177 return rc; 1178 1179 /* Determine the kind of queue we are creating. 
	switch (txq->type) {
	case SFXGE_TXQ_NON_CKSUM:
		flags = 0;
		break;
	case SFXGE_TXQ_IP_CKSUM:
		flags = EFX_CKSUM_IPV4;
		break;
	case SFXGE_TXQ_IP_TCP_UDP_CKSUM:
		flags = EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP;
		break;
	default:
		KASSERT(0, ("Impossible TX queue"));
		flags = 0;
		break;
	}

	/* Create the common code transmit queue. */
	if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
	    SFXGE_NDESCS, txq->buf_base_id, flags, evq->common,
	    &txq->common)) != 0)
		goto fail;

	mtx_lock(SFXGE_TXQ_LOCK(txq));

	/* Enable the transmit queue. */
	efx_tx_qenable(txq->common);

	txq->init_state = SFXGE_TXQ_STARTED;

	mtx_unlock(SFXGE_TXQ_LOCK(txq));

	return (0);

fail:
	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
	    EFX_TXQ_NBUFS(SFXGE_NDESCS));
	return rc;
}

void
sfxge_tx_stop(struct sfxge_softc *sc)
{
	const efx_nic_cfg_t *encp;
	int index;

	index = SFXGE_TX_SCALE(sc);
	while (--index >= 0)
		sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);

	encp = efx_nic_cfg_get(sc->enp);
	sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);

	/* Tear down the transmit module */
	efx_tx_fini(sc->enp);
}

int
sfxge_tx_start(struct sfxge_softc *sc)
{
	int index;
	int rc;

	/* Initialize the common code transmit module. */
	if ((rc = efx_tx_init(sc->enp)) != 0)
		return (rc);

	if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_NON_CKSUM)) != 0)
		goto fail;

	if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_CKSUM)) != 0)
		goto fail2;

	for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
		if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM +
		    index)) != 0)
			goto fail3;
	}

	return (0);

fail3:
	while (--index >= 0)
		sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);

fail2:
	sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);

fail:
	efx_tx_fini(sc->enp);

	return (rc);
}

/**
 * Destroy a transmit queue.
 */
static void
sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	unsigned int nmaps = SFXGE_NDESCS;

	txq = sc->txq[index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
	    ("txq->init_state != SFXGE_TXQ_INITIALIZED"));

	if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM)
		tso_fini(txq);

	/* Free the context arrays. */
	free(txq->pend_desc, M_SFXGE);
	while (nmaps--)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);

	/* Release DMA memory mapping. */
	sfxge_dma_free(&txq->mem);

	sc->txq[index] = NULL;

#ifdef SFXGE_HAVE_MQ
	mtx_destroy(&txq->lock);
#endif

	free(txq, M_SFXGE);
}

static int
sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
	       enum sfxge_txq_type type, unsigned int evq_index)
{
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
#ifdef SFXGE_HAVE_MQ
	struct sfxge_tx_dpl *stdp;
#endif
	efsys_mem_t *esmp;
	unsigned int nmaps;
	int rc;

	txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
	txq->sc = sc;

	sc->txq[txq_index] = txq;
	esmp = &txq->mem;

	evq = sc->evq[evq_index];

	/* Allocate and zero DMA space for the descriptor ring. */
	if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(SFXGE_NDESCS), esmp)) != 0)
		return (rc);
	(void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(SFXGE_NDESCS));

	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(SFXGE_NDESCS),
	    &txq->buf_base_id);

	/* Create a DMA tag for packet mappings. */
	if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000,
	    MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL,
	    NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL,
	    &txq->packet_dma_tag) != 0) {
		device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");
		rc = ENOMEM;
		goto fail;
	}

	/* Allocate pending descriptor array for batching writes. */
	txq->pend_desc = malloc(sizeof(efx_buffer_t) * SFXGE_NDESCS,
	    M_SFXGE, M_ZERO | M_WAITOK);

	/* Allocate and initialise mbuf DMA mapping array. */
	txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * SFXGE_NDESCS,
	    M_SFXGE, M_ZERO | M_WAITOK);
	for (nmaps = 0; nmaps < SFXGE_NDESCS; nmaps++) {
		rc = bus_dmamap_create(txq->packet_dma_tag, 0,
		    &txq->stmp[nmaps].map);
		if (rc != 0)
			goto fail2;
	}

	if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM &&
	    (rc = tso_init(txq)) != 0)
		goto fail3;

#ifdef SFXGE_HAVE_MQ
	/* Initialize the deferred packet list. */
	stdp = &txq->dpl;
	stdp->std_getp = &stdp->std_get;

	mtx_init(&txq->lock, "txq", NULL, MTX_DEF);
#endif

	txq->type = type;
	txq->evq_index = evq_index;
	txq->txq_index = txq_index;
	txq->init_state = SFXGE_TXQ_INITIALIZED;

	return (0);

fail3:
	free(txq->pend_desc, M_SFXGE);
fail2:
	while (nmaps--)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);
	bus_dma_tag_destroy(txq->packet_dma_tag);

fail:
	sfxge_dma_free(esmp);

	return (rc);
}

static const struct {
	const char *name;
	size_t offset;
} sfxge_tx_stats[] = {
#define SFXGE_TX_STAT(name, member) \
	{ #name, offsetof(struct sfxge_txq, member) }
	SFXGE_TX_STAT(tso_bursts, tso_bursts),
	SFXGE_TX_STAT(tso_packets, tso_packets),
	SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
	SFXGE_TX_STAT(tx_collapses, collapses),
	SFXGE_TX_STAT(tx_drops, drops),
	SFXGE_TX_STAT(tx_early_drops, early_drops),
};

static int
sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS)
{
	struct sfxge_softc *sc = arg1;
	unsigned int id = arg2;
	unsigned long sum;
	unsigned int index;

	/* Sum across all TX queues */
	sum = 0;
	for (index = 0;
	     index < SFXGE_TXQ_IP_TCP_UDP_CKSUM + SFXGE_TX_SCALE(sc);
	     index++)
		sum += *(unsigned long *)((caddr_t)sc->txq[index] +
					  sfxge_tx_stats[id].offset);

	return SYSCTL_OUT(req, &sum, sizeof(sum));
}

static void
sfxge_tx_stat_init(struct sfxge_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *stat_list;
	unsigned int id;

	stat_list = SYSCTL_CHILDREN(sc->stats_node);

	for (id = 0;
	     id < sizeof(sfxge_tx_stats) / sizeof(sfxge_tx_stats[0]);
	     id++) {
		SYSCTL_ADD_PROC(
			ctx, stat_list,
			OID_AUTO, sfxge_tx_stats[id].name,
			CTLTYPE_ULONG|CTLFLAG_RD,
			sc, id, sfxge_tx_stat_handler, "LU",
			"");
	}
}

void
sfxge_tx_fini(struct sfxge_softc *sc)
{
	int index;

	index = SFXGE_TX_SCALE(sc);
	while (--index >= 0)
		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);
	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);
}

int
sfxge_tx_init(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	int index;
	int rc;

	intr = &sc->intr;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
	    ("intr->state != SFXGE_INTR_INITIALIZED"));

	/* Initialize the transmit queues */
	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
	    SFXGE_TXQ_NON_CKSUM, 0)) != 0)
		goto fail;

	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM,
	    SFXGE_TXQ_IP_CKSUM, 0)) != 0)
		goto fail2;

	for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
		if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index,
		    SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0)
			goto fail3;
	}

	sfxge_tx_stat_init(sc);

	return (0);

fail3:
	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);

	while (--index >= 0)
		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

fail2:
	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);

fail:
	return (rc);
}