/*-
 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Theory of operation:
 *
 * Tx queue allocation and mapping
 *
 * One Tx queue with checksum offload enabled is allocated per Rx channel
 * (event queue).  In addition, 2 Tx queues (one without checksum offload
 * and one with IP checksum offload only) are allocated and bound to event
 * queue 0.  sfxge_txq_type is used as the Tx queue label.
 *
 * So the mapping from (event queue, label) to Tx queue index is:
 * if the event queue index is 0, TxQ-index = TxQ-label (in [0..SFXGE_TXQ_NTYPES))
 * else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
 * See sfxge_get_txq_by_label() in sfxge_ev.c
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include "common/efx.h"

#include "sfxge.h"
#include "sfxge_tx.h"

/* Set the block level to ensure there is space to generate a
 * large number of descriptors for TSO.  With minimum MSS and
 * maximum mbuf length we might need more than a ring-ful of
 * descriptors, but this should not happen in practice except
 * due to deliberate attack.  In that case we will truncate
 * the output at a packet boundary.  Allow for a reasonable
 * minimum MSS of 512.
 */
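/* Worked example of the bound below: a 65535-byte payload split with a
 * 512-byte MSS yields at most 65535 / 512 = 127 output packets, each
 * needing a header descriptor and at least one payload descriptor (hence
 * the factor of 2), plus up to SFXGE_TX_MAPPING_MAX_SEG - 1 extra payload
 * descriptors where DMA segment boundaries do not line up with packet
 * boundaries.
 */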
#define SFXGE_TSO_MAX_DESC ((65535 / 512) * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1)
#define SFXGE_TXQ_BLOCK_LEVEL(_entries) ((_entries) - SFXGE_TSO_MAX_DESC)

#ifdef SFXGE_HAVE_MQ

#define	SFXGE_PARAM_TX_DPL_GET_MAX	SFXGE_PARAM(tx_dpl_get_max)
static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_max, 0,
	   "Maximum number of packets in deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_PUT_MAX	SFXGE_PARAM(tx_dpl_put_max)
static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_put_max, 0,
	   "Maximum number of packets in deferred packet put-list");

#endif

/* Forward declarations. */
static inline void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
static void sfxge_tx_qunblock(struct sfxge_txq *txq);
static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
			      const bus_dma_segment_t *dma_seg, int n_dma_seg);

void
sfxge_tx_qcomplete(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_evq *evq;
	unsigned int completed;

	sc = txq->sc;
	evq = sc->evq[txq->evq_index];

	mtx_assert(&evq->lock, MA_OWNED);

	completed = txq->completed;
	while (completed != txq->pending) {
		struct sfxge_tx_mapping *stmp;
		unsigned int id;

		id = completed++ & txq->ptr_mask;

		stmp = &txq->stmp[id];
		if (stmp->flags & TX_BUF_UNMAP) {
			bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
			if (stmp->flags & TX_BUF_MBUF) {
				struct mbuf *m = stmp->u.mbuf;
				do
					m = m_free(m);
				while (m != NULL);
			} else {
				free(stmp->u.heap_buf, M_SFXGE);
			}
			stmp->flags = 0;
		}
	}
	txq->completed = completed;

	/* Check whether we need to unblock the queue. */
	mb();
	if (txq->blocked) {
		unsigned int level;

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
			sfxge_tx_qunblock(txq);
	}
}

#ifdef SFXGE_HAVE_MQ

/*
 * Reorder the put list and append it to the get list.
 */
static void
sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *get_next, **get_tailp;
	volatile uintptr_t *putp;
	uintptr_t put;
	unsigned int count;

	mtx_assert(&txq->lock, MA_OWNED);

	stdp = &txq->dpl;

	/* Acquire the put list. */
	putp = &stdp->std_put;
	put = atomic_readandclear_ptr(putp);
	mbuf = (void *)put;

	if (mbuf == NULL)
		return;

	/* Reverse the put list. */
	get_tailp = &mbuf->m_nextpkt;
	get_next = NULL;

	count = 0;
	do {
		struct mbuf *put_next;

		put_next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = get_next;
		get_next = mbuf;
		mbuf = put_next;

		count++;
	} while (mbuf != NULL);

	/* Append the reversed put list to the get list. */
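	/* At this point get_next heads the reversed chain (oldest packet
	 * first) and get_tailp points at the m_nextpkt field of what is now
	 * the last packet, so the chain can be spliced onto the tail of the
	 * get list below.
	 */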
	KASSERT(*get_tailp == NULL, ("*get_tailp != NULL"));
	*stdp->std_getp = get_next;
	stdp->std_getp = get_tailp;
	stdp->std_get_count += count;
}

#endif /* SFXGE_HAVE_MQ */

static void
sfxge_tx_qreap(struct sfxge_txq *txq)
{
	mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);

	txq->reaped = txq->completed;
}

static void
sfxge_tx_qlist_post(struct sfxge_txq *txq)
{
	unsigned int old_added;
	unsigned int level;
	int rc;

	mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);

	KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
	KASSERT(txq->n_pend_desc <= SFXGE_TSO_MAX_DESC,
		("txq->n_pend_desc too large"));
	KASSERT(!txq->blocked, ("txq->blocked"));

	old_added = txq->added;

	/* Post the fragment list. */
	rc = efx_tx_qpost(txq->common, txq->pend_desc, txq->n_pend_desc,
			  txq->reaped, &txq->added);
	KASSERT(rc == 0, ("efx_tx_qpost() failed"));

	/* If efx_tx_qpost() had to refragment, our information about
	 * buffers to free may be associated with the wrong
	 * descriptors.
	 */
	KASSERT(txq->added - old_added == txq->n_pend_desc,
		("efx_tx_qpost() refragmented descriptors"));

	level = txq->added - txq->reaped;
	KASSERT(level <= txq->entries, ("overfilled TX queue"));

	/* Clear the fragment list. */
	txq->n_pend_desc = 0;

	/* Have we reached the block level? */
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
		return;

	/* Reap, and check again */
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
		return;

	txq->blocked = 1;

	/*
	 * Avoid a race with completion interrupt handling that could leave
	 * the queue blocked.
	 */
	mb();
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries)) {
		mb();
		txq->blocked = 0;
	}
}

static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
{
	bus_dmamap_t *used_map;
	bus_dmamap_t map;
	bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG];
	unsigned int id;
	struct sfxge_tx_mapping *stmp;
	efx_buffer_t *desc;
	int n_dma_seg;
	int rc;
	int i;

	KASSERT(!txq->blocked, ("txq->blocked"));

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
		prefetch_read_many(mbuf->m_data);

	if (txq->init_state != SFXGE_TXQ_STARTED) {
		rc = EINTR;
		goto reject;
	}

	/* Load the packet for DMA. */
	id = txq->added & txq->ptr_mask;
	stmp = &txq->stmp[id];
	rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
				     mbuf, dma_seg, &n_dma_seg, 0);
	if (rc == EFBIG) {
		/* Try again. */
		struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT,
						   SFXGE_TX_MAPPING_MAX_SEG);
		if (new_mbuf == NULL)
			goto reject;
		++txq->collapses;
		mbuf = new_mbuf;
		rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag,
					     stmp->map, mbuf,
					     dma_seg, &n_dma_seg, 0);
	}
	if (rc != 0)
		goto reject;

	/* Make the packet visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE);

	used_map = &stmp->map;

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
		rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg);
		if (rc < 0)
			goto reject_mapped;
		stmp = &txq->stmp[rc];
	} else {
		/* Add the mapping to the fragment list, and set flags
		 * for the buffer.
		 */
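		/* stmp advances in step with the descriptors written below,
		 * so after the loop it refers to the entry for the last
		 * descriptor of the packet; that is where the mbuf and
		 * unmap flags are attached further down.
		 */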
		i = 0;
		for (;;) {
			desc = &txq->pend_desc[i];
			desc->eb_addr = dma_seg[i].ds_addr;
			desc->eb_size = dma_seg[i].ds_len;
			if (i == n_dma_seg - 1) {
				desc->eb_eop = 1;
				break;
			}
			desc->eb_eop = 0;
			i++;

			stmp->flags = 0;
			if (__predict_false(stmp ==
					    &txq->stmp[txq->ptr_mask]))
				stmp = &txq->stmp[0];
			else
				stmp++;
		}
		txq->n_pend_desc = n_dma_seg;
	}

	/*
	 * If the mapping required more than one descriptor
	 * then we need to associate the DMA map with the last
	 * descriptor, not the first.
	 */
	if (used_map != &stmp->map) {
		map = stmp->map;
		stmp->map = *used_map;
		*used_map = map;
	}

	stmp->u.mbuf = mbuf;
	stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF;

	/* Post the fragment list. */
	sfxge_tx_qlist_post(txq);

	return (0);

reject_mapped:
	bus_dmamap_unload(txq->packet_dma_tag, *used_map);
reject:
	/* Drop the packet on the floor. */
	m_freem(mbuf);
	++txq->drops;

	return (rc);
}

#ifdef SFXGE_HAVE_MQ

/*
 * Drain the deferred packet list into the transmit queue.
 */
static void
sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *next;
	unsigned int count;
	unsigned int pushed;
	int rc;

	mtx_assert(&txq->lock, MA_OWNED);

	sc = txq->sc;
	stdp = &txq->dpl;
	pushed = txq->added;

	prefetch_read_many(sc->enp);
	prefetch_read_many(txq->common);

	mbuf = stdp->std_get;
	count = stdp->std_get_count;

	while (count != 0) {
		KASSERT(mbuf != NULL, ("mbuf == NULL"));

		next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = NULL;

		ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */

		if (next != NULL)
			prefetch_read_many(next);

		rc = sfxge_tx_queue_mbuf(txq, mbuf);
		--count;
		mbuf = next;
		if (rc != 0)
			continue;

		if (txq->blocked)
			break;

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);
			pushed = txq->added;
		}
	}

	if (count == 0) {
		KASSERT(mbuf == NULL, ("mbuf != NULL"));
		stdp->std_get = NULL;
		stdp->std_get_count = 0;
		stdp->std_getp = &stdp->std_get;
	} else {
		stdp->std_get = mbuf;
		stdp->std_get_count = count;
	}

	if (txq->added != pushed)
		efx_tx_qpush(txq->common, txq->added);

	KASSERT(txq->blocked || stdp->std_get_count == 0,
		("queue unblocked but count is non-zero"));
}

#define SFXGE_TX_QDPL_PENDING(_txq)	\
	((_txq)->dpl.std_put != 0)

/*
 * Service the deferred packet list.
 *
 * NOTE: drops the txq mutex!
 */
static inline void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
	mtx_assert(&txq->lock, MA_OWNED);

	do {
		if (SFXGE_TX_QDPL_PENDING(txq))
			sfxge_tx_qdpl_swizzle(txq);

		if (!txq->blocked)
			sfxge_tx_qdpl_drain(txq);

		mtx_unlock(&txq->lock);
	} while (SFXGE_TX_QDPL_PENDING(txq) &&
		 mtx_trylock(&txq->lock));
}

/*
 * Put a packet on the deferred packet list.
 *
 * If we are called with the txq lock held, we put the packet on the "get
 * list", otherwise we atomically push it on the "put list".  The swizzle
 * function takes care of ordering.
 *
 * The length of the put list is bounded by SFXGE_TX_MAX_DEFFERED.  We
 * overload the csum_data field in the mbuf to keep track of this length
 * because there is no cheap alternative to avoid races.
 */
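/* Note: only the csum_data of the mbuf currently at the head of the put
 * list is meaningful; it holds the put-list length including that mbuf.
 * Once the list has been swizzled onto the get list the field is ignored
 * and std_get_count is maintained under the txq lock instead.
 */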
static inline int
sfxge_tx_qdpl_put(struct sfxge_txq *txq, struct mbuf *mbuf, int locked)
{
	struct sfxge_tx_dpl *stdp;

	stdp = &txq->dpl;

	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));

	if (locked) {
		mtx_assert(&txq->lock, MA_OWNED);

		sfxge_tx_qdpl_swizzle(txq);

		if (stdp->std_get_count >= stdp->std_get_max)
			return (ENOBUFS);

		*(stdp->std_getp) = mbuf;
		stdp->std_getp = &mbuf->m_nextpkt;
		stdp->std_get_count++;
	} else {
		volatile uintptr_t *putp;
		uintptr_t old;
		uintptr_t new;
		unsigned old_len;

		putp = &stdp->std_put;
		new = (uintptr_t)mbuf;

		do {
			old = *putp;
			if (old != 0) {
				struct mbuf *mp = (struct mbuf *)old;
				old_len = mp->m_pkthdr.csum_data;
			} else
				old_len = 0;
			if (old_len >= stdp->std_put_max)
				return (ENOBUFS);
			mbuf->m_pkthdr.csum_data = old_len + 1;
			mbuf->m_nextpkt = (void *)old;
		} while (atomic_cmpset_ptr(putp, old, new) == 0);
	}

	return (0);
}

/*
 * Called from if_transmit - will try to grab the txq lock.  If successful,
 * the packet is appended to the "get list" of the deferred packet list;
 * otherwise it is pushed onto the "put list".
 */
int
sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
{
	int locked;
	int rc;

	if (!SFXGE_LINK_UP(txq->sc)) {
		rc = ENETDOWN;
		goto fail;
	}

	/*
	 * Try to grab the txq lock.  If we are able to get the lock,
	 * the packet will be appended to the "get list" of the deferred
	 * packet list.  Otherwise, it will be pushed on the "put list".
	 */
	locked = mtx_trylock(&txq->lock);

	if (sfxge_tx_qdpl_put(txq, m, locked) != 0) {
		if (locked)
			mtx_unlock(&txq->lock);
		rc = ENOBUFS;
		goto fail;
	}

	/*
	 * Try to grab the lock again.
	 *
	 * If we are able to get the lock, we need to process the deferred
	 * packet list.  If we are not able to get the lock, another thread
	 * is processing the list.
	 */
	if (!locked)
		locked = mtx_trylock(&txq->lock);

	if (locked) {
		/* Try to service the list. */
		sfxge_tx_qdpl_service(txq);
		/* Lock has been dropped. */
	}

	return (0);

fail:
	m_freem(m);
	atomic_add_long(&txq->early_drops, 1);
	return (rc);
}

static void
sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp = &txq->dpl;
	struct mbuf *mbuf, *next;

	mtx_lock(&txq->lock);

	sfxge_tx_qdpl_swizzle(txq);
	for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) {
		next = mbuf->m_nextpkt;
		m_freem(mbuf);
	}
	stdp->std_get = NULL;
	stdp->std_get_count = 0;
	stdp->std_getp = &stdp->std_get;

	mtx_unlock(&txq->lock);
}

void
sfxge_if_qflush(struct ifnet *ifp)
{
	struct sfxge_softc *sc;
	int i;

	sc = ifp->if_softc;

	for (i = 0; i < SFXGE_TX_SCALE(sc); i++)
		sfxge_tx_qdpl_flush(sc->txq[i]);
}

/*
 * TX start -- called by the stack.
 */
int
sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct sfxge_softc *sc;
	struct sfxge_txq *txq;
	int rc;

	sc = (struct sfxge_softc *)ifp->if_softc;

	KASSERT(ifp->if_flags & IFF_UP, ("interface not up"));

	/* Pick the desired transmit queue. */
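	/* TSO and TCP/UDP-checksum packets are spread over the per-event-
	 * queue checksum-offload queues using the flowid (via the RSS
	 * indirection table); everything else shares the two queues bound
	 * to event queue 0, as described in the theory of operation above.
	 */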
	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) {
		int index = 0;

		/* check if flowid is set */
		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			uint32_t hash = m->m_pkthdr.flowid;

			index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX];
		}
		txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index];
	} else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
		txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
	} else {
		txq = sc->txq[SFXGE_TXQ_NON_CKSUM];
	}

	rc = sfxge_tx_packet_add(txq, m);

	return (rc);
}

#else /* !SFXGE_HAVE_MQ */

static void sfxge_if_start_locked(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;
	struct sfxge_txq *txq;
	struct mbuf *mbuf;
	unsigned int pushed[SFXGE_TXQ_NTYPES];
	unsigned int q_index;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (!sc->port.link_up)
		return;

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		pushed[q_index] = txq->added;
	}

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, mbuf);
		if (mbuf == NULL)
			break;

		ETHER_BPF_MTAP(ifp, mbuf); /* packet capture */

		/* Pick the desired transmit queue. */
		if (mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO))
			q_index = SFXGE_TXQ_IP_TCP_UDP_CKSUM;
		else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP)
			q_index = SFXGE_TXQ_IP_CKSUM;
		else
			q_index = SFXGE_TXQ_NON_CKSUM;
		txq = sc->txq[q_index];

		if (sfxge_tx_queue_mbuf(txq, mbuf) != 0)
			continue;

		if (txq->blocked) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed[q_index] >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);
			pushed[q_index] = txq->added;
		}
	}

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		if (txq->added != pushed[q_index])
			efx_tx_qpush(txq->common, txq->added);
	}
}

void sfxge_if_start(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;

	mtx_lock(&sc->tx_lock);
	sfxge_if_start_locked(ifp);
	mtx_unlock(&sc->tx_lock);
}

static inline void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc = txq->sc;
	struct ifnet *ifp = sc->ifnet;

	mtx_assert(&sc->tx_lock, MA_OWNED);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sfxge_if_start_locked(ifp);
	mtx_unlock(&sc->tx_lock);
}

#endif /* SFXGE_HAVE_MQ */

/*
 * Software "TSO".  Not quite as good as doing it in hardware, but
 * still faster than segmenting in the stack.
 */
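/* Outline of the software TSO path below: tso_start() parses the packet
 * headers once, then for each outgoing segment tso_start_new_packet()
 * writes an updated copy of the headers into a DMA-mapped buffer and
 * tso_fill_packet_with_fragment() emits payload descriptors that point
 * into the original mbuf's DMA segments.
 */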
struct sfxge_tso_state {
	/* Output position */
	unsigned out_len;	/* Remaining length in current segment */
	unsigned seqnum;	/* Current sequence number */
	unsigned packet_space;	/* Remaining space in current packet */

	/* Input position */
	unsigned dma_seg_i;	/* Current DMA segment number */
	uint64_t dma_addr;	/* DMA address of current position */
	unsigned in_len;	/* Remaining length in current mbuf */

	const struct mbuf *mbuf; /* Input mbuf (head of chain) */
	u_short protocol;	/* Network protocol (after VLAN decap) */
	ssize_t nh_off;		/* Offset of network header */
	ssize_t tcph_off;	/* Offset of TCP header */
	unsigned header_len;	/* Number of bytes of header */
	int full_packet_size;	/* Number of bytes to put in each outgoing
				 * segment */
};

static inline const struct ip *tso_iph(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IP),
		("tso_iph() in non-IPv4 state"));
	return (const struct ip *)(tso->mbuf->m_data + tso->nh_off);
}
static inline const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
		("tso_ip6h() in non-IPv6 state"));
	return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off);
}
static inline const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso)
{
	return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off);
}

/* Size of preallocated TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define TSOH_STD_SIZE	128

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an mbuf.
 */
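/* For example, with the standard 128-byte header buffers and a 4 KB page
 * (an assumption; PAGE_SIZE is platform-dependent), TSOH_PER_PAGE is 32,
 * so a 1024-entry ring needs 512 header buffers packed into 16 pages.
 */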
#define TSOH_COUNT(_txq_entries)	((_txq_entries) / 2u)
#define TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
#define TSOH_PAGE_COUNT(_txq_entries)	\
	((TSOH_COUNT(_txq_entries) + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)

static int tso_init(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc = txq->sc;
	unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries);
	int i, rc;

	/* Allocate TSO header buffers */
	txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),
				  M_SFXGE, M_WAITOK);

	for (i = 0; i < tsoh_page_count; i++) {
		rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);
		if (rc != 0)
			goto fail;
	}

	return (0);

fail:
	while (i-- > 0)
		sfxge_dma_free(&txq->tsoh_buffer[i]);
	free(txq->tsoh_buffer, M_SFXGE);
	txq->tsoh_buffer = NULL;
	return (rc);
}

static void tso_fini(struct sfxge_txq *txq)
{
	int i;

	if (txq->tsoh_buffer != NULL) {
		for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
			sfxge_dma_free(&txq->tsoh_buffer[i]);
		free(txq->tsoh_buffer, M_SFXGE);
	}
}

static void tso_start(struct sfxge_tso_state *tso, struct mbuf *mbuf)
{
	struct ether_header *eh = mtod(mbuf, struct ether_header *);

	tso->mbuf = mbuf;

	/* Find network protocol and header */
	tso->protocol = eh->ether_type;
	if (tso->protocol == htons(ETHERTYPE_VLAN)) {
		struct ether_vlan_header *veh =
			mtod(mbuf, struct ether_vlan_header *);
		tso->protocol = veh->evl_proto;
		tso->nh_off = sizeof(*veh);
	} else {
		tso->nh_off = sizeof(*eh);
	}

	/* Find TCP header */
	if (tso->protocol == htons(ETHERTYPE_IP)) {
		KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl;
	} else {
		KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
			("TSO required on non-IP packet"));
		KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr);
	}

	/* We assume all headers are linear in the head mbuf */
	tso->header_len = tso->tcph_off + 4 * tso_tcph(tso)->th_off;
	KASSERT(tso->header_len <= mbuf->m_len, ("packet headers fragmented"));
	tso->full_packet_size = tso->header_len + mbuf->m_pkthdr.tso_segsz;

	tso->seqnum = ntohl(tso_tcph(tso)->th_seq);

	/* These flags must not be duplicated */
	KASSERT(!(tso_tcph(tso)->th_flags & (TH_URG | TH_SYN | TH_RST)),
		("incompatible TCP flag on TSO packet"));

	tso->out_len = mbuf->m_pkthdr.len - tso->header_len;
}

/*
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 *
 * Form descriptors for the current fragment, until we reach the end
 * of the fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct sfxge_txq *txq,
					  struct sfxge_tso_state *tso)
{
	efx_buffer_t *desc;
	int n;

	if (tso->in_len == 0 || tso->packet_space == 0)
		return;

	KASSERT(tso->in_len > 0, ("TSO input length went negative"));
	KASSERT(tso->packet_space > 0, ("TSO packet space went negative"));

	n = min(tso->in_len, tso->packet_space);

	tso->packet_space -= n;
	tso->out_len -= n;
	tso->in_len -= n;

	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = tso->dma_addr;
	desc->eb_size = n;
	desc->eb_eop = tso->out_len == 0 || tso->packet_space == 0;

	tso->dma_addr += n;
}

/* Callback from bus_dmamap_load() for long TSO headers. */
static void tso_map_long_header(void *dma_addr_ret,
				bus_dma_segment_t *segs, int nseg,
				int error)
{
	*(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) &&
				      __predict_true(nseg == 1)) ?
				     segs->ds_addr : 0);
}

/*
 * tso_start_new_packet - generate a new header and prepare for the new packet
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or an error code if failed to alloc header.
 */
static int tso_start_new_packet(struct sfxge_txq *txq,
				struct sfxge_tso_state *tso,
				unsigned int id)
{
	struct sfxge_tx_mapping *stmp = &txq->stmp[id];
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	caddr_t header;
	uint64_t dma_addr;
	bus_dmamap_t map;
	efx_buffer_t *desc;
	int rc;

	/* Allocate a DMA-mapped header buffer. */
	if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) {
		unsigned int page_index = (id / 2) / TSOH_PER_PAGE;
		unsigned int buf_index = (id / 2) % TSOH_PER_PAGE;

		header = (txq->tsoh_buffer[page_index].esm_base +
			  buf_index * TSOH_STD_SIZE);
		dma_addr = (txq->tsoh_buffer[page_index].esm_addr +
			    buf_index * TSOH_STD_SIZE);
		map = txq->tsoh_buffer[page_index].esm_map;

		stmp->flags = 0;
	} else {
		/* We cannot use bus_dmamem_alloc() as that may sleep */
		header = malloc(tso->header_len, M_SFXGE, M_NOWAIT);
		if (__predict_false(!header))
			return (ENOMEM);
		rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map,
				     header, tso->header_len,
				     tso_map_long_header, &dma_addr,
				     BUS_DMA_NOWAIT);
		if (__predict_false(dma_addr == 0)) {
			if (rc == 0) {
				/* Succeeded but got >1 segment */
				bus_dmamap_unload(txq->packet_dma_tag,
						  stmp->map);
				rc = EINVAL;
			}
			free(header, M_SFXGE);
			return (rc);
		}
		map = stmp->map;

		txq->tso_long_headers++;
		stmp->u.heap_buf = header;
		stmp->flags = TX_BUF_UNMAP;
	}

	tsoh_th = (struct tcphdr *)(header + tso->tcph_off);

	/* Copy and update the headers. */
	memcpy(header, tso->mbuf->m_data, tso->header_len);

	tsoh_th->th_seq = htonl(tso->seqnum);
	tso->seqnum += tso->mbuf->m_pkthdr.tso_segsz;
	if (tso->out_len > tso->mbuf->m_pkthdr.tso_segsz) {
		/* This packet will not finish the TSO burst. */
		ip_length = tso->full_packet_size - tso->nh_off;
		tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH);
	} else {
		/* This packet will be the last in the TSO burst. */
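		/* out_len has not yet been reduced for this segment, so
		 * header length (minus the Ethernet/VLAN part) plus out_len
		 * is the length of the final IP datagram.
		 */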
		ip_length = tso->header_len - tso->nh_off + tso->out_len;
	}

	if (tso->protocol == htons(ETHERTYPE_IP)) {
		struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off);
		tsoh_iph->ip_len = htons(ip_length);
		/* XXX We should increment ip_id, but FreeBSD doesn't
		 * currently allocate extra IDs for multiple segments.
		 */
	} else {
		struct ip6_hdr *tsoh_iph =
			(struct ip6_hdr *)(header + tso->nh_off);
		tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph));
	}

	/* Make the header visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE);

	tso->packet_space = tso->mbuf->m_pkthdr.tso_segsz;
	txq->tso_packets++;

	/* Form a descriptor for this header. */
	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = dma_addr;
	desc->eb_size = tso->header_len;
	desc->eb_eop = 0;

	return (0);
}

static int
sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
		   const bus_dma_segment_t *dma_seg, int n_dma_seg)
{
	struct sfxge_tso_state tso;
	unsigned int id, next_id;

	tso_start(&tso, mbuf);

	/* Grab the first payload fragment. */
	if (dma_seg->ds_len == tso.header_len) {
		--n_dma_seg;
		KASSERT(n_dma_seg, ("no payload found in TSO packet"));
		++dma_seg;
		tso.in_len = dma_seg->ds_len;
		tso.dma_addr = dma_seg->ds_addr;
	} else {
		tso.in_len = dma_seg->ds_len - tso.header_len;
		tso.dma_addr = dma_seg->ds_addr + tso.header_len;
	}

	id = txq->added & txq->ptr_mask;
	if (__predict_false(tso_start_new_packet(txq, &tso, id)))
		return (-1);

	while (1) {
		id = (id + 1) & txq->ptr_mask;
		tso_fill_packet_with_fragment(txq, &tso);

		/* Move onto the next fragment? */
		if (tso.in_len == 0) {
			--n_dma_seg;
			if (n_dma_seg == 0)
				break;
			++dma_seg;
			tso.in_len = dma_seg->ds_len;
			tso.dma_addr = dma_seg->ds_addr;
		}

		/* End of packet? */
		if (tso.packet_space == 0) {
			/* If the queue is now full due to tiny MSS,
			 * or we can't create another header, discard
			 * the remainder of the input mbuf but do not
			 * roll back the work we have done.
			 */
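			/* Headroom check: a further packet needs one header
			 * descriptor plus up to SFXGE_TX_MAPPING_MAX_SEG
			 * payload descriptors, hence the margin below.
			 */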
			if (txq->n_pend_desc >
			    SFXGE_TSO_MAX_DESC - (1 + SFXGE_TX_MAPPING_MAX_SEG))
				break;
			next_id = (id + 1) & txq->ptr_mask;
			if (__predict_false(tso_start_new_packet(txq, &tso,
								 next_id)))
				break;
			id = next_id;
		}
	}

	txq->tso_bursts++;
	return (id);
}

static void
sfxge_tx_qunblock(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_evq *evq;

	sc = txq->sc;
	evq = sc->evq[txq->evq_index];

	mtx_assert(&evq->lock, MA_OWNED);

	if (txq->init_state != SFXGE_TXQ_STARTED)
		return;

	mtx_lock(SFXGE_TXQ_LOCK(txq));

	if (txq->blocked) {
		unsigned int level;

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
			txq->blocked = 0;
	}

	sfxge_tx_qdpl_service(txq);
	/* note: lock has been dropped */
}

void
sfxge_tx_qflush_done(struct sfxge_txq *txq)
{

	txq->flush_state = SFXGE_FLUSH_DONE;
}

static void
sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
	unsigned int count;

	txq = sc->txq[index];
	evq = sc->evq[txq->evq_index];

	mtx_lock(SFXGE_TXQ_LOCK(txq));

	KASSERT(txq->init_state == SFXGE_TXQ_STARTED,
		("txq->init_state != SFXGE_TXQ_STARTED"));

	txq->init_state = SFXGE_TXQ_INITIALIZED;
	txq->flush_state = SFXGE_FLUSH_PENDING;

	/* Flush the transmit queue. */
	efx_tx_qflush(txq->common);

	mtx_unlock(SFXGE_TXQ_LOCK(txq));

	count = 0;
	do {
		/* Spin for 100ms. */
		DELAY(100000);

		if (txq->flush_state != SFXGE_FLUSH_PENDING)
			break;
	} while (++count < 20);

	mtx_lock(&evq->lock);
	mtx_lock(SFXGE_TXQ_LOCK(txq));

	KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED,
		("txq->flush_state == SFXGE_FLUSH_FAILED"));

	txq->flush_state = SFXGE_FLUSH_DONE;

	txq->blocked = 0;
	txq->pending = txq->added;

	sfxge_tx_qcomplete(txq);
	KASSERT(txq->completed == txq->added,
		("txq->completed != txq->added"));

	sfxge_tx_qreap(txq);
	KASSERT(txq->reaped == txq->completed,
		("txq->reaped != txq->completed"));

	txq->added = 0;
	txq->pending = 0;
	txq->completed = 0;
	txq->reaped = 0;

	/* Destroy the common code transmit queue. */
	efx_tx_qdestroy(txq->common);
	txq->common = NULL;

	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
			       EFX_TXQ_NBUFS(sc->txq_entries));

	mtx_unlock(&evq->lock);
	mtx_unlock(SFXGE_TXQ_LOCK(txq));
}

static int
sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	efsys_mem_t *esmp;
	uint16_t flags;
	struct sfxge_evq *evq;
	int rc;

	txq = sc->txq[index];
	esmp = &txq->mem;
	evq = sc->evq[txq->evq_index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
		("txq->init_state != SFXGE_TXQ_INITIALIZED"));
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
		("evq->init_state != SFXGE_EVQ_STARTED"));

	/* Program the buffer table. */
	if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
	    EFX_TXQ_NBUFS(sc->txq_entries))) != 0)
		return (rc);

	/* Determine the kind of queue we are creating. */
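	/* The queue type doubles as its label (see the theory of operation
	 * comment at the top of the file); here it also selects which
	 * checksum offloads the hardware queue is created with.
	 */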
	switch (txq->type) {
	case SFXGE_TXQ_NON_CKSUM:
		flags = 0;
		break;
	case SFXGE_TXQ_IP_CKSUM:
		flags = EFX_CKSUM_IPV4;
		break;
	case SFXGE_TXQ_IP_TCP_UDP_CKSUM:
		flags = EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP;
		break;
	default:
		KASSERT(0, ("Impossible TX queue"));
		flags = 0;
		break;
	}

	/* Create the common code transmit queue. */
	if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
	    sc->txq_entries, txq->buf_base_id, flags, evq->common,
	    &txq->common)) != 0)
		goto fail;

	mtx_lock(SFXGE_TXQ_LOCK(txq));

	/* Enable the transmit queue. */
	efx_tx_qenable(txq->common);

	txq->init_state = SFXGE_TXQ_STARTED;

	mtx_unlock(SFXGE_TXQ_LOCK(txq));

	return (0);

fail:
	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
			       EFX_TXQ_NBUFS(sc->txq_entries));
	return (rc);
}

void
sfxge_tx_stop(struct sfxge_softc *sc)
{
	const efx_nic_cfg_t *encp;
	int index;

	index = SFXGE_TX_SCALE(sc);
	while (--index >= 0)
		sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);

	encp = efx_nic_cfg_get(sc->enp);
	sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);

	/* Tear down the transmit module */
	efx_tx_fini(sc->enp);
}

int
sfxge_tx_start(struct sfxge_softc *sc)
{
	int index;
	int rc;

	/* Initialize the common code transmit module. */
	if ((rc = efx_tx_init(sc->enp)) != 0)
		return (rc);

	if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_NON_CKSUM)) != 0)
		goto fail;

	if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_CKSUM)) != 0)
		goto fail2;

	for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
		if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM +
		    index)) != 0)
			goto fail3;
	}

	return (0);

fail3:
	while (--index >= 0)
		sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);

fail2:
	sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);

fail:
	efx_tx_fini(sc->enp);

	return (rc);
}

/**
 * Destroy a transmit queue.
 */
static void
sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	unsigned int nmaps;

	txq = sc->txq[index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
		("txq->init_state != SFXGE_TXQ_INITIALIZED"));

	if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM)
		tso_fini(txq);

	/* Free the context arrays. */
	free(txq->pend_desc, M_SFXGE);
	nmaps = sc->txq_entries;
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);

	/* Release DMA memory mapping. */
	sfxge_dma_free(&txq->mem);

	sc->txq[index] = NULL;

#ifdef SFXGE_HAVE_MQ
	mtx_destroy(&txq->lock);
#endif

	free(txq, M_SFXGE);
}

static int
sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
	       enum sfxge_txq_type type, unsigned int evq_index)
{
	char name[16];
	struct sysctl_oid *txq_node;
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
#ifdef SFXGE_HAVE_MQ
	struct sfxge_tx_dpl *stdp;
#endif
	efsys_mem_t *esmp;
	unsigned int nmaps;
	int rc;

	txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
	txq->sc = sc;
	txq->entries = sc->txq_entries;
	txq->ptr_mask = txq->entries - 1;

	sc->txq[txq_index] = txq;
	esmp = &txq->mem;

	evq = sc->evq[evq_index];

	/* Allocate and zero DMA space for the descriptor ring. */
	if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0)
		return (rc);
	(void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(sc->txq_entries));

	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries),
				 &txq->buf_base_id);

	/* Create a DMA tag for packet mappings. */
	if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000,
	    MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL,
	    NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL,
	    &txq->packet_dma_tag) != 0) {
		device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");
		rc = ENOMEM;
		goto fail;
	}

	/* Allocate pending descriptor array for batching writes. */
	txq->pend_desc = malloc(sizeof(efx_buffer_t) * sc->txq_entries,
				M_SFXGE, M_ZERO | M_WAITOK);

	/* Allocate and initialise mbuf DMA mapping array. */
	txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries,
			   M_SFXGE, M_ZERO | M_WAITOK);
	for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) {
		rc = bus_dmamap_create(txq->packet_dma_tag, 0,
				       &txq->stmp[nmaps].map);
		if (rc != 0)
			goto fail2;
	}

	snprintf(name, sizeof(name), "%u", txq_index);
	txq_node = SYSCTL_ADD_NODE(
		device_get_sysctl_ctx(sc->dev),
		SYSCTL_CHILDREN(sc->txqs_node),
		OID_AUTO, name, CTLFLAG_RD, NULL, "");
	if (txq_node == NULL) {
		rc = ENOMEM;
		goto fail_txq_node;
	}

	if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM &&
	    (rc = tso_init(txq)) != 0)
		goto fail3;

#ifdef SFXGE_HAVE_MQ
	if (sfxge_tx_dpl_get_max <= 0) {
		log(LOG_ERR, "%s=%d must be greater than 0",
		    SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max);
		rc = EINVAL;
		goto fail_tx_dpl_get_max;
	}
	if (sfxge_tx_dpl_put_max < 0) {
		log(LOG_ERR, "%s=%d must be greater or equal to 0",
		    SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max);
		rc = EINVAL;
		goto fail_tx_dpl_put_max;
	}

	/* Initialize the deferred packet list. */
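	/* The get list is only manipulated under the txq lock, while the
	 * put list is a lock-free LIFO for threads that fail to take the
	 * lock; the bounds come from the tunables validated above.
	 */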
	stdp = &txq->dpl;
	stdp->std_put_max = sfxge_tx_dpl_put_max;
	stdp->std_get_max = sfxge_tx_dpl_get_max;
	stdp->std_getp = &stdp->std_get;

	mtx_init(&txq->lock, "txq", NULL, MTX_DEF);

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
			SYSCTL_CHILDREN(txq_node), OID_AUTO,
			"dpl_get_count", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_count, 0, "");
#endif

	txq->type = type;
	txq->evq_index = evq_index;
	txq->txq_index = txq_index;
	txq->init_state = SFXGE_TXQ_INITIALIZED;

	return (0);

fail_tx_dpl_put_max:
fail_tx_dpl_get_max:
fail3:
fail_txq_node:
	free(txq->pend_desc, M_SFXGE);
fail2:
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);
	bus_dma_tag_destroy(txq->packet_dma_tag);

fail:
	sfxge_dma_free(esmp);

	return (rc);
}

static const struct {
	const char *name;
	size_t offset;
} sfxge_tx_stats[] = {
#define SFXGE_TX_STAT(name, member) \
	{ #name, offsetof(struct sfxge_txq, member) }
	SFXGE_TX_STAT(tso_bursts, tso_bursts),
	SFXGE_TX_STAT(tso_packets, tso_packets),
	SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
	SFXGE_TX_STAT(tx_collapses, collapses),
	SFXGE_TX_STAT(tx_drops, drops),
	SFXGE_TX_STAT(tx_early_drops, early_drops),
};

static int
sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS)
{
	struct sfxge_softc *sc = arg1;
	unsigned int id = arg2;
	unsigned long sum;
	unsigned int index;

	/* Sum across all TX queues */
	sum = 0;
	for (index = 0;
	     index < SFXGE_TXQ_IP_TCP_UDP_CKSUM + SFXGE_TX_SCALE(sc);
	     index++)
		sum += *(unsigned long *)((caddr_t)sc->txq[index] +
					  sfxge_tx_stats[id].offset);

	return (SYSCTL_OUT(req, &sum, sizeof(sum)));
}

static void
sfxge_tx_stat_init(struct sfxge_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *stat_list;
	unsigned int id;

	stat_list = SYSCTL_CHILDREN(sc->stats_node);

	for (id = 0;
	     id < sizeof(sfxge_tx_stats) / sizeof(sfxge_tx_stats[0]);
	     id++) {
		SYSCTL_ADD_PROC(
			ctx, stat_list,
			OID_AUTO, sfxge_tx_stats[id].name,
			CTLTYPE_ULONG|CTLFLAG_RD,
			sc, id, sfxge_tx_stat_handler, "LU",
			"");
	}
}

void
sfxge_tx_fini(struct sfxge_softc *sc)
{
	int index;

	index = SFXGE_TX_SCALE(sc);
	while (--index >= 0)
		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);
	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);
}

int
sfxge_tx_init(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	int index;
	int rc;

	intr = &sc->intr;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
		("intr->state != SFXGE_INTR_INITIALIZED"));

	sc->txqs_node = SYSCTL_ADD_NODE(
		device_get_sysctl_ctx(sc->dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
		OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues");
	if (sc->txqs_node == NULL) {
		rc = ENOMEM;
		goto fail_txq_node;
	}

	/* Initialize the transmit queues */
	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
	    SFXGE_TXQ_NON_CKSUM, 0)) != 0)
		goto fail;

	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM,
	    SFXGE_TXQ_IP_CKSUM, 0)) != 0)
		goto fail2;
	for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
		if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index,
		    SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0)
			goto fail3;
	}

	sfxge_tx_stat_init(sc);

	return (0);

fail3:
	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);

	while (--index >= 0)
		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

fail2:
	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);

fail:
fail_txq_node:
	return (rc);
}