Lines Matching +full:kind +full:- +full:of +full:- +full:intr

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2010-2016 Solarflare Communications Inc.
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
16 * this list of conditions and the following disclaimer in the documentation
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * those of the authors and should not be interpreted as representing official
33 * policies, either expressed or implied, of the FreeBSD Project.
36 /* Theory of operation:
46 * if event queue index is 0, TxQ-index = TxQ-label * [0..SFXGE_TXQ_NTYPES)
47 * else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
59 * TxQ-index = EvQ-index
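The index mapping in the theory-of-operation comment above can be restated as a small helper. This is only an illustrative sketch, not driver code: SFXGE_TXQ_NTYPES and boolean_t come from the sfxge/FreeBSD headers, while the function name and the explicit capability argument are assumptions made for the example.

static unsigned int
example_txq_index(unsigned int evq_index, unsigned int txq_type,
    boolean_t dynamic_cksum_toggle_supported)
{
	/*
	 * When option descriptors can retune checksum offload on the fly,
	 * there is exactly one TxQ per event queue.
	 */
	if (dynamic_cksum_toggle_supported)
		return (evq_index);

	/* Event queue 0 hosts one TxQ per type; label == type == index. */
	if (evq_index == 0)
		return (txq_type);	/* 0 .. SFXGE_TXQ_NTYPES - 1 */

	/* Every other event queue hosts a single checksum-offload TxQ. */
	return (SFXGE_TXQ_NTYPES + evq_index - 1);
}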
98 "Maximum number of any packets in deferred packet get-list");
107 "Maximum number of non-TCP packets in deferred packet get-list");
114 "Maximum number of any packets in deferred packet put-list");
121 "Bitmask of FW-assisted TSO allowed to use if supported by NIC firmware");
153 KASSERT((*pstmp)->flags == 0, ("stmp flags are not 0")); in sfxge_next_stmp()
155 &txq->stmp[txq->ptr_mask])) in sfxge_next_stmp()
156 *pstmp = &txq->stmp[0]; in sfxge_next_stmp()
168 if (mbuf->m_pkthdr.csum_flags & in sfxge_tx_maybe_toggle_cksum_offload()
173 * cause any trouble in case of IPv6 traffic anyway. in sfxge_tx_maybe_toggle_cksum_offload()
176 } else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) { in sfxge_tx_maybe_toggle_cksum_offload()
182 if (new_hw_cksum_flags == txq->hw_cksum_flags) in sfxge_tx_maybe_toggle_cksum_offload()
185 desc = &txq->pend_desc[txq->n_pend_desc]; in sfxge_tx_maybe_toggle_cksum_offload()
186 efx_tx_qdesc_checksum_create(txq->common, new_hw_cksum_flags, desc); in sfxge_tx_maybe_toggle_cksum_offload()
187 txq->hw_cksum_flags = new_hw_cksum_flags; in sfxge_tx_maybe_toggle_cksum_offload()
188 txq->n_pend_desc++; in sfxge_tx_maybe_toggle_cksum_offload()
199 uint16_t this_tag = ((mbuf->m_flags & M_VLANTAG) ? in sfxge_tx_maybe_insert_tag()
200 mbuf->m_pkthdr.ether_vtag : in sfxge_tx_maybe_insert_tag()
204 if (this_tag == txq->hw_vlan_tci) in sfxge_tx_maybe_insert_tag()
207 desc = &txq->pend_desc[txq->n_pend_desc]; in sfxge_tx_maybe_insert_tag()
208 efx_tx_qdesc_vlantci_create(txq->common, bswap16(this_tag), desc); in sfxge_tx_maybe_insert_tag()
209 txq->hw_vlan_tci = this_tag; in sfxge_tx_maybe_insert_tag()
210 txq->n_pend_desc++; in sfxge_tx_maybe_insert_tag()
224 completed = txq->completed; in sfxge_tx_qcomplete()
225 while (completed != txq->pending) { in sfxge_tx_qcomplete()
229 id = completed++ & txq->ptr_mask; in sfxge_tx_qcomplete()
231 stmp = &txq->stmp[id]; in sfxge_tx_qcomplete()
232 if (stmp->flags & TX_BUF_UNMAP) { in sfxge_tx_qcomplete()
233 bus_dmamap_unload(txq->packet_dma_tag, stmp->map); in sfxge_tx_qcomplete()
234 if (stmp->flags & TX_BUF_MBUF) { in sfxge_tx_qcomplete()
235 struct mbuf *m = stmp->u.mbuf; in sfxge_tx_qcomplete()
240 free(stmp->u.heap_buf, M_SFXGE); in sfxge_tx_qcomplete()
242 stmp->flags = 0; in sfxge_tx_qcomplete()
245 txq->completed = completed; in sfxge_tx_qcomplete()
249 if (txq->blocked) { in sfxge_tx_qcomplete()
252 level = txq->added - txq->completed; in sfxge_tx_qcomplete()
253 if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) in sfxge_tx_qcomplete()
261 /* Absence of TCP checksum flags does not mean that it is non-TCP in sfxge_is_mbuf_non_tcp()
264 return (!(mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))); in sfxge_is_mbuf_non_tcp()
282 stdp = &txq->dpl; in sfxge_tx_qdpl_swizzle()
285 putp = &stdp->std_put; in sfxge_tx_qdpl_swizzle()
293 get_tailp = &mbuf->m_nextpkt; in sfxge_tx_qdpl_swizzle()
302 put_next = mbuf->m_nextpkt; in sfxge_tx_qdpl_swizzle()
303 mbuf->m_nextpkt = get_next; in sfxge_tx_qdpl_swizzle()
310 if (count > stdp->std_put_hiwat) in sfxge_tx_qdpl_swizzle()
311 stdp->std_put_hiwat = count; in sfxge_tx_qdpl_swizzle()
315 *stdp->std_getp = get_next; in sfxge_tx_qdpl_swizzle()
316 stdp->std_getp = get_tailp; in sfxge_tx_qdpl_swizzle()
317 stdp->std_get_count += count; in sfxge_tx_qdpl_swizzle()
318 stdp->std_get_non_tcp_count += non_tcp_count; in sfxge_tx_qdpl_swizzle()
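The put-to-get "swizzle" shown above detaches the lock-free put-list (a LIFO chain) and reverses it before splicing it onto the FIFO get-list. Below is a self-contained sketch of just the reversal and splice, with illustrative names; the real function detaches the list atomically and also folds packet and non-TCP counters into the get-list totals.

struct pkt {
	struct pkt *m_nextpkt;
};

static void
putlist_swizzle(struct pkt **put_head, struct pkt ***get_tailpp)
{
	struct pkt *mbuf = *put_head;	/* the driver detaches this atomically */
	struct pkt *get_next = NULL;
	struct pkt **get_tailp;

	*put_head = NULL;
	if (mbuf == NULL)
		return;

	/* The head of the put-list becomes the new tail of the get-list. */
	get_tailp = &mbuf->m_nextpkt;

	/* Reverse the chain so packets come out in arrival order. */
	do {
		struct pkt *put_next = mbuf->m_nextpkt;

		mbuf->m_nextpkt = get_next;
		get_next = mbuf;
		mbuf = put_next;
	} while (mbuf != NULL);

	/* Splice the reversed chain onto the current get-list tail. */
	**get_tailpp = get_next;
	*get_tailpp = get_tailp;
}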
326 txq->reaped = txq->completed; in sfxge_tx_qreap()
339 KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0")); in sfxge_tx_qlist_post()
340 KASSERT(txq->n_pend_desc <= txq->max_pkt_desc, in sfxge_tx_qlist_post()
341 ("txq->n_pend_desc too large")); in sfxge_tx_qlist_post()
342 KASSERT(!txq->blocked, ("txq->blocked")); in sfxge_tx_qlist_post()
344 old_added = txq->added; in sfxge_tx_qlist_post()
347 rc = efx_tx_qdesc_post(txq->common, txq->pend_desc, txq->n_pend_desc, in sfxge_tx_qlist_post()
348 txq->reaped, &txq->added); in sfxge_tx_qlist_post()
355 KASSERT(txq->added - old_added == txq->n_pend_desc, in sfxge_tx_qlist_post()
358 level = txq->added - txq->reaped; in sfxge_tx_qlist_post()
359 KASSERT(level <= txq->entries, ("overfilled TX queue")); in sfxge_tx_qlist_post()
362 txq->n_pend_desc = 0; in sfxge_tx_qlist_post()
366 * large number of descriptors for TSO. in sfxge_tx_qlist_post()
368 block_level = EFX_TXQ_LIMIT(txq->entries) - txq->max_pkt_desc; in sfxge_tx_qlist_post()
376 level = txq->added - txq->reaped; in sfxge_tx_qlist_post()
380 txq->blocked = 1; in sfxge_tx_qlist_post()
388 level = txq->added - txq->reaped; in sfxge_tx_qlist_post()
391 txq->blocked = 0; in sfxge_tx_qlist_post()
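The checks above implement a fill-level hysteresis: posting stops when one more worst-case packet might not fit, and restarts only after completions drain the queue back below a lower watermark. Restated as illustrative helpers; EFX_TXQ_LIMIT() and SFXGE_TXQ_UNBLOCK_LEVEL() are the existing libefx/driver macros already used in the fragments, while the helper names and the exact threshold comparisons are assumptions for the sketch.

static inline int
example_should_block(unsigned int added, unsigned int reaped,
    unsigned int entries, unsigned int max_pkt_desc)
{
	/* Block once fewer than one worst-case packet's descriptors remain. */
	unsigned int level = added - reaped;
	unsigned int block_level = EFX_TXQ_LIMIT(entries) - max_pkt_desc;

	return (level >= block_level);
}

static inline int
example_should_unblock(unsigned int added, unsigned int completed,
    unsigned int entries)
{
	/* Unblock only after completions fall below the low watermark. */
	return (added - completed <= SFXGE_TXQ_UNBLOCK_LEVEL(entries));
}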
411 KASSERT(!txq->blocked, ("txq->blocked")); in sfxge_tx_queue_mbuf()
418 if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) && in sfxge_tx_queue_mbuf()
419 (txq->tso_fw_assisted == 0)) in sfxge_tx_queue_mbuf()
420 prefetch_read_many(mbuf->m_data); in sfxge_tx_queue_mbuf()
426 if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) in sfxge_tx_queue_mbuf()
427 prefetch_read_many(mbuf->m_data); in sfxge_tx_queue_mbuf()
430 if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) { in sfxge_tx_queue_mbuf()
436 id = txq->added & txq->ptr_mask; in sfxge_tx_queue_mbuf()
437 stmp = &txq->stmp[id]; in sfxge_tx_queue_mbuf()
438 rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map, in sfxge_tx_queue_mbuf()
446 ++txq->collapses; in sfxge_tx_queue_mbuf()
448 rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, in sfxge_tx_queue_mbuf()
449 stmp->map, mbuf, in sfxge_tx_queue_mbuf()
456 bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE); in sfxge_tx_queue_mbuf()
458 used_map = &stmp->map; in sfxge_tx_queue_mbuf()
460 hw_cksum_flags_prev = txq->hw_cksum_flags; in sfxge_tx_queue_mbuf()
461 hw_vlan_tci_prev = txq->hw_vlan_tci; in sfxge_tx_queue_mbuf()
464 * The order of option descriptors, which are used to leverage VLAN tag in sfxge_tx_queue_mbuf()
471 if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) { in sfxge_tx_queue_mbuf()
476 stmp = &txq->stmp[(rc - 1) & txq->ptr_mask]; in sfxge_tx_queue_mbuf()
484 desc = &txq->pend_desc[i + n_extra_descs]; in sfxge_tx_queue_mbuf()
485 eop = (i == n_dma_seg - 1); in sfxge_tx_queue_mbuf()
486 efx_tx_qdesc_dma_create(txq->common, in sfxge_tx_queue_mbuf()
496 txq->n_pend_desc = n_dma_seg + n_extra_descs; in sfxge_tx_queue_mbuf()
504 if (used_map != &stmp->map) { in sfxge_tx_queue_mbuf()
505 map = stmp->map; in sfxge_tx_queue_mbuf()
506 stmp->map = *used_map; in sfxge_tx_queue_mbuf()
510 stmp->u.mbuf = mbuf; in sfxge_tx_queue_mbuf()
511 stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF; in sfxge_tx_queue_mbuf()
519 txq->hw_vlan_tci = hw_vlan_tci_prev; in sfxge_tx_queue_mbuf()
520 txq->hw_cksum_flags = hw_cksum_flags_prev; in sfxge_tx_queue_mbuf()
521 bus_dmamap_unload(txq->packet_dma_tag, *used_map); in sfxge_tx_queue_mbuf()
525 ++txq->drops; in sfxge_tx_queue_mbuf()
546 sc = txq->sc; in sfxge_tx_qdpl_drain()
547 stdp = &txq->dpl; in sfxge_tx_qdpl_drain()
548 pushed = txq->added; in sfxge_tx_qdpl_drain()
550 if (__predict_true(txq->init_state == SFXGE_TXQ_STARTED)) { in sfxge_tx_qdpl_drain()
551 prefetch_read_many(sc->enp); in sfxge_tx_qdpl_drain()
552 prefetch_read_many(txq->common); in sfxge_tx_qdpl_drain()
555 mbuf = stdp->std_get; in sfxge_tx_qdpl_drain()
556 count = stdp->std_get_count; in sfxge_tx_qdpl_drain()
557 non_tcp_count = stdp->std_get_non_tcp_count; in sfxge_tx_qdpl_drain()
559 if (count > stdp->std_get_hiwat) in sfxge_tx_qdpl_drain()
560 stdp->std_get_hiwat = count; in sfxge_tx_qdpl_drain()
565 next = mbuf->m_nextpkt; in sfxge_tx_qdpl_drain()
566 mbuf->m_nextpkt = NULL; in sfxge_tx_qdpl_drain()
568 ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */ in sfxge_tx_qdpl_drain()
574 --count; in sfxge_tx_qdpl_drain()
575 non_tcp_count -= sfxge_is_mbuf_non_tcp(mbuf); in sfxge_tx_qdpl_drain()
580 if (txq->blocked) in sfxge_tx_qdpl_drain()
584 if (txq->added - pushed >= SFXGE_TX_BATCH) { in sfxge_tx_qdpl_drain()
585 efx_tx_qpush(txq->common, txq->added, pushed); in sfxge_tx_qdpl_drain()
586 pushed = txq->added; in sfxge_tx_qdpl_drain()
593 ("inconsistent TCP/non-TCP detection")); in sfxge_tx_qdpl_drain()
594 stdp->std_get = NULL; in sfxge_tx_qdpl_drain()
595 stdp->std_get_count = 0; in sfxge_tx_qdpl_drain()
596 stdp->std_get_non_tcp_count = 0; in sfxge_tx_qdpl_drain()
597 stdp->std_getp = &stdp->std_get; in sfxge_tx_qdpl_drain()
599 stdp->std_get = mbuf; in sfxge_tx_qdpl_drain()
600 stdp->std_get_count = count; in sfxge_tx_qdpl_drain()
601 stdp->std_get_non_tcp_count = non_tcp_count; in sfxge_tx_qdpl_drain()
604 if (txq->added != pushed) in sfxge_tx_qdpl_drain()
605 efx_tx_qpush(txq->common, txq->added, pushed); in sfxge_tx_qdpl_drain()
607 KASSERT(txq->blocked || stdp->std_get_count == 0, in sfxge_tx_qdpl_drain()
608 ("queue unblocked but count is non-zero")); in sfxge_tx_qdpl_drain()
611 #define SFXGE_TX_QDPL_PENDING(_txq) ((_txq)->dpl.std_put != 0)
627 if (!txq->blocked) in sfxge_tx_qdpl_service()
636 * Put a packet on the deferred packet get-list.
643 stdp = &txq->dpl; in sfxge_tx_qdpl_put_locked()
645 KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL")); in sfxge_tx_qdpl_put_locked()
649 if (stdp->std_get_count >= stdp->std_get_max) { in sfxge_tx_qdpl_put_locked()
650 txq->get_overflow++; in sfxge_tx_qdpl_put_locked()
654 if (stdp->std_get_non_tcp_count >= in sfxge_tx_qdpl_put_locked()
655 stdp->std_get_non_tcp_max) { in sfxge_tx_qdpl_put_locked()
656 txq->get_non_tcp_overflow++; in sfxge_tx_qdpl_put_locked()
659 stdp->std_get_non_tcp_count++; in sfxge_tx_qdpl_put_locked()
662 *(stdp->std_getp) = mbuf; in sfxge_tx_qdpl_put_locked()
663 stdp->std_getp = &mbuf->m_nextpkt; in sfxge_tx_qdpl_put_locked()
664 stdp->std_get_count++; in sfxge_tx_qdpl_put_locked()
670 * Put a packet on the deferred packet put-list.
672 * We overload the csum_data field in the mbuf to keep track of this length
684 KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL")); in sfxge_tx_qdpl_put_unlocked()
688 stdp = &txq->dpl; in sfxge_tx_qdpl_put_unlocked()
689 putp = &stdp->std_put; in sfxge_tx_qdpl_put_unlocked()
696 put_count = mp->m_pkthdr.csum_data; in sfxge_tx_qdpl_put_unlocked()
699 if (put_count >= stdp->std_put_max) { in sfxge_tx_qdpl_put_unlocked()
700 atomic_add_long(&txq->put_overflow, 1); in sfxge_tx_qdpl_put_unlocked()
703 mbuf->m_pkthdr.csum_data = put_count + 1; in sfxge_tx_qdpl_put_unlocked()
704 mbuf->m_nextpkt = (void *)old; in sfxge_tx_qdpl_put_unlocked()
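A minimal sketch of the lock-free put-list push shown above, written with C11 atomics instead of the kernel's <machine/atomic.h> helpers. The struct and function names are illustrative; the part taken from the fragments is the trick of keeping the current list length in the head packet's csum_data field and failing the push once it reaches the put-list limit.

#include <stdatomic.h>
#include <stdint.h>

struct pkt {
	struct pkt	*m_nextpkt;
	unsigned int	 csum_data;	/* reused as "list length so far" */
};

static int
putlist_push(_Atomic(uintptr_t) *putp, struct pkt *m, unsigned int put_max)
{
	uintptr_t old, new_head = (uintptr_t)m;

	old = atomic_load(putp);
	do {
		struct pkt *head = (struct pkt *)old;
		unsigned int count = (head != NULL) ? head->csum_data : 0;

		if (count >= put_max)
			return (1);	/* overflow: caller records a drop */

		m->csum_data = count + 1;	/* length travels with the head */
		m->m_nextpkt = head;
	} while (!atomic_compare_exchange_weak(putp, &old, new_head));

	return (0);
}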
711 * Called from if_transmit - will try to grab the txq lock and enqueue to the
719 if (!SFXGE_LINK_UP(txq->sc)) { in sfxge_tx_packet_add()
720 atomic_add_long(&txq->netdown_drops, 1); in sfxge_tx_packet_add()
726 * the packet will be appended to the "get list" of the deferred in sfxge_tx_packet_add()
730 /* First swizzle put-list to get-list to keep order */ in sfxge_tx_packet_add()
762 struct sfxge_tx_dpl *stdp = &txq->dpl; in sfxge_tx_qdpl_flush()
768 for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) { in sfxge_tx_qdpl_flush()
769 next = mbuf->m_nextpkt; in sfxge_tx_qdpl_flush()
772 stdp->std_get = NULL; in sfxge_tx_qdpl_flush()
773 stdp->std_get_count = 0; in sfxge_tx_qdpl_flush()
774 stdp->std_get_non_tcp_count = 0; in sfxge_tx_qdpl_flush()
775 stdp->std_getp = &stdp->std_get; in sfxge_tx_qdpl_flush()
788 for (i = 0; i < sc->txq_count; i++) in sfxge_if_qflush()
789 sfxge_tx_qdpl_flush(sc->txq[i]); in sfxge_if_qflush()
797 * The fields are 8-bit, but it's ok, no header may be longer than 255 bytes.
800 #define TSO_MBUF_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
801 /* We abuse l5hlen here because PH_loc can hold only 64 bits of data */
802 #define TSO_MBUF_FLAGS(_mbuf) ((_mbuf)->m_pkthdr.l5hlen)
803 #define TSO_MBUF_PACKETID(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
804 #define TSO_MBUF_SEQNUM(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.thirtytwo[1])
813 TSO_MBUF_PROTO(mbuf) = eh->ether_type; in sfxge_parse_tx_packet()
817 TSO_MBUF_PROTO(mbuf) = veh->evl_proto; in sfxge_parse_tx_packet()
818 mbuf->m_pkthdr.l2hlen = sizeof(*veh); in sfxge_parse_tx_packet()
820 mbuf->m_pkthdr.l2hlen = sizeof(*eh); in sfxge_parse_tx_packet()
825 const struct ip *iph = (const struct ip *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen); in sfxge_parse_tx_packet()
827 KASSERT(iph->ip_p == IPPROTO_TCP, in sfxge_parse_tx_packet()
828 ("TSO required on non-TCP packet")); in sfxge_parse_tx_packet()
829 mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + 4 * iph->ip_hl; in sfxge_parse_tx_packet()
830 TSO_MBUF_PACKETID(mbuf) = iph->ip_id; in sfxge_parse_tx_packet()
833 ("TSO required on non-IP packet")); in sfxge_parse_tx_packet()
834 KASSERT(((const struct ip6_hdr *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen))->ip6_nxt == in sfxge_parse_tx_packet()
836 ("TSO required on non-TCP packet")); in sfxge_parse_tx_packet()
837 mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + sizeof(struct ip6_hdr); in sfxge_parse_tx_packet()
841 KASSERT(mbuf->m_len >= mbuf->m_pkthdr.l3hlen, in sfxge_parse_tx_packet()
845 if (mbuf->m_len < mbuf->m_pkthdr.l3hlen + offsetof(struct tcphdr, th_win)) { in sfxge_parse_tx_packet()
846 m_copydata(mbuf, mbuf->m_pkthdr.l3hlen, sizeof(th_copy), in sfxge_parse_tx_packet()
850 th = (const struct tcphdr *)mtodo(mbuf, mbuf->m_pkthdr.l3hlen); in sfxge_parse_tx_packet()
853 mbuf->m_pkthdr.l4hlen = mbuf->m_pkthdr.l3hlen + 4 * th->th_off; in sfxge_parse_tx_packet()
854 TSO_MBUF_SEQNUM(mbuf) = ntohl(th->th_seq); in sfxge_parse_tx_packet()
870 * TX start -- called by the stack.
883 * point of view, but not yet up (in progress) from the driver in sfxge_if_transmit()
884 * point of view. I.e. link aggregation bring up. in sfxge_if_transmit()
886 * point of view, but already down from the kernel point of in sfxge_if_transmit()
889 KASSERT((if_getflags(ifp) & IFF_UP) || (sc->if_flags & IFF_UP), in sfxge_if_transmit()
893 if (sc->txq_dynamic_cksum_toggle_supported | in sfxge_if_transmit()
894 (m->m_pkthdr.csum_flags & in sfxge_if_transmit()
904 * TX and RX parts of the flow to the same CPU in sfxge_if_transmit()
907 index = bucket_id % (sc->txq_count - (SFXGE_TXQ_NTYPES - 1)); in sfxge_if_transmit()
911 uint32_t hash = m->m_pkthdr.flowid; in sfxge_if_transmit()
912 uint32_t idx = hash % nitems(sc->rx_indir_table); in sfxge_if_transmit()
914 index = sc->rx_indir_table[idx]; in sfxge_if_transmit()
918 if (m->m_pkthdr.csum_flags & CSUM_TSO) in sfxge_if_transmit()
921 index += (sc->txq_dynamic_cksum_toggle_supported == B_FALSE) ? in sfxge_if_transmit()
923 txq = sc->txq[index]; in sfxge_if_transmit()
924 } else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) { in sfxge_if_transmit()
925 txq = sc->txq[SFXGE_TXQ_IP_CKSUM]; in sfxge_if_transmit()
927 txq = sc->txq[SFXGE_TXQ_NON_CKSUM]; in sfxge_if_transmit()
947 unsigned segs_space; /* Remaining number of DMA segments
951 uint64_t dma_addr; /* DMA address of current position */
954 const struct mbuf *mbuf; /* Input mbuf (head of chain) */
956 ssize_t nh_off; /* Offset of network header */
957 ssize_t tcph_off; /* Offset of TCP header */
958 unsigned header_len; /* Number of bytes of header */
960 int fw_assisted; /* Use FW-assisted TSO */
964 * FW-assisted TSO */
970 KASSERT(tso->protocol == htons(ETHERTYPE_IP), in tso_iph()
971 ("tso_iph() in non-IPv4 state")); in tso_iph()
972 return (const struct ip *)(tso->mbuf->m_data + tso->nh_off); in tso_iph()
977 KASSERT(tso->protocol == htons(ETHERTYPE_IPV6), in tso_ip6h()
978 ("tso_ip6h() in non-IPv6 state")); in tso_ip6h()
979 return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off); in tso_ip6h()
984 return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off); in tso_tcph()
988 /* Size of preallocated TSO header buffers. Larger blocks must be
1004 struct sfxge_softc *sc = txq->sc; in tso_init()
1005 unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries); in tso_init()
1009 txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]), in tso_init()
1013 rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]); in tso_init()
1021 while (i-- > 0) in tso_init()
1022 sfxge_dma_free(&txq->tsoh_buffer[i]); in tso_init()
1023 free(txq->tsoh_buffer, M_SFXGE); in tso_init()
1024 txq->tsoh_buffer = NULL; in tso_init()
1032 if (txq->tsoh_buffer != NULL) { in tso_fini()
1033 for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++) in tso_fini()
1034 sfxge_dma_free(&txq->tsoh_buffer[i]); in tso_fini()
1035 free(txq->tsoh_buffer, M_SFXGE); in tso_fini()
1043 const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->sc->enp); in tso_start()
1050 tso->fw_assisted = txq->tso_fw_assisted; in tso_start()
1051 tso->mbuf = mbuf; in tso_start()
1055 tso->protocol = eh->ether_type; in tso_start()
1056 if (tso->protocol == htons(ETHERTYPE_VLAN)) { in tso_start()
1059 tso->protocol = veh->evl_proto; in tso_start()
1060 tso->nh_off = sizeof(*veh); in tso_start()
1062 tso->nh_off = sizeof(*eh); in tso_start()
1065 tso->protocol = TSO_MBUF_PROTO(mbuf); in tso_start()
1066 tso->nh_off = mbuf->m_pkthdr.l2hlen; in tso_start()
1067 tso->tcph_off = mbuf->m_pkthdr.l3hlen; in tso_start()
1068 tso->packet_id = ntohs(TSO_MBUF_PACKETID(mbuf)); in tso_start()
1073 if (tso->protocol == htons(ETHERTYPE_IP)) { in tso_start()
1074 KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP, in tso_start()
1075 ("TSO required on non-TCP packet")); in tso_start()
1076 tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl; in tso_start()
1077 tso->packet_id = ntohs(tso_iph(tso)->ip_id); in tso_start()
1079 KASSERT(tso->protocol == htons(ETHERTYPE_IPV6), in tso_start()
1080 ("TSO required on non-IP packet")); in tso_start()
1081 KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP, in tso_start()
1082 ("TSO required on non-TCP packet")); in tso_start()
1083 tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr); in tso_start()
1084 tso->packet_id = 0; in tso_start()
1088 if (tso->fw_assisted && in tso_start()
1089 __predict_false(tso->tcph_off > in tso_start()
1090 encp->enc_tx_tso_tcp_header_offset_limit)) { in tso_start()
1091 tso->fw_assisted = 0; in tso_start()
1095 KASSERT(mbuf->m_len >= tso->tcph_off, in tso_start()
1098 if (mbuf->m_len < tso->tcph_off + offsetof(struct tcphdr, th_win)) { in tso_start()
1099 m_copydata(tso->mbuf, tso->tcph_off, sizeof(th_copy), in tso_start()
1105 tso->header_len = tso->tcph_off + 4 * th->th_off; in tso_start()
1107 tso->header_len = mbuf->m_pkthdr.l4hlen; in tso_start()
1109 tso->seg_size = mbuf->m_pkthdr.tso_segsz; in tso_start()
1112 tso->seqnum = ntohl(th->th_seq); in tso_start()
1123 tso->tcp_flags = tcp_get_flags(th); in tso_start()
1125 tso->seqnum = TSO_MBUF_SEQNUM(mbuf); in tso_start()
1126 tso->tcp_flags = TSO_MBUF_FLAGS(mbuf); in tso_start()
1129 tso->out_len = mbuf->m_pkthdr.len - tso->header_len; in tso_start()
1131 if (tso->fw_assisted) { in tso_start()
1132 if (hdr_dma_seg->ds_len >= tso->header_len) in tso_start()
1133 efx_tx_qdesc_dma_create(txq->common, in tso_start()
1134 hdr_dma_seg->ds_addr, in tso_start()
1135 tso->header_len, in tso_start()
1137 &tso->header_desc); in tso_start()
1139 tso->fw_assisted = 0; in tso_start()
1144 * tso_fill_packet_with_fragment - form descriptors for the current fragment
1147 * of fragment or end-of-packet. Return 0 on success, 1 if not enough
1155 uint64_t dma_addr = tso->dma_addr; in tso_fill_packet_with_fragment()
1158 if (tso->in_len == 0 || tso->packet_space == 0) in tso_fill_packet_with_fragment()
1161 KASSERT(tso->in_len > 0, ("TSO input length went negative")); in tso_fill_packet_with_fragment()
1162 KASSERT(tso->packet_space > 0, ("TSO packet space went negative")); in tso_fill_packet_with_fragment()
1164 if (tso->fw_assisted & SFXGE_FATSOV2) { in tso_fill_packet_with_fragment()
1165 n = tso->in_len; in tso_fill_packet_with_fragment()
1166 tso->out_len -= n; in tso_fill_packet_with_fragment()
1167 tso->seqnum += n; in tso_fill_packet_with_fragment()
1168 tso->in_len = 0; in tso_fill_packet_with_fragment()
1169 if (n < tso->packet_space) { in tso_fill_packet_with_fragment()
1170 tso->packet_space -= n; in tso_fill_packet_with_fragment()
1171 tso->segs_space--; in tso_fill_packet_with_fragment()
1173 tso->packet_space = tso->seg_size - in tso_fill_packet_with_fragment()
1174 (n - tso->packet_space) % tso->seg_size; in tso_fill_packet_with_fragment()
1175 tso->segs_space = in tso_fill_packet_with_fragment()
1176 EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1 - in tso_fill_packet_with_fragment()
1177 (tso->packet_space != tso->seg_size); in tso_fill_packet_with_fragment()
1180 n = min(tso->in_len, tso->packet_space); in tso_fill_packet_with_fragment()
1181 tso->packet_space -= n; in tso_fill_packet_with_fragment()
1182 tso->out_len -= n; in tso_fill_packet_with_fragment()
1183 tso->dma_addr += n; in tso_fill_packet_with_fragment()
1184 tso->in_len -= n; in tso_fill_packet_with_fragment()
1191 eop = (tso->out_len == 0) | (tso->packet_space == 0) | in tso_fill_packet_with_fragment()
1192 (tso->segs_space == 0); in tso_fill_packet_with_fragment()
1194 desc = &txq->pend_desc[txq->n_pend_desc++]; in tso_fill_packet_with_fragment()
1195 efx_tx_qdesc_dma_create(txq->common, dma_addr, n, eop, desc); in tso_fill_packet_with_fragment()
1205 segs->ds_addr : 0); in tso_map_long_header()
1209 * tso_start_new_packet - generate a new header and prepare for the new packet
1227 if (tso->fw_assisted) { in tso_start_new_packet()
1228 if (tso->fw_assisted & SFXGE_FATSOV2) { in tso_start_new_packet()
1230 desc = &txq->pend_desc[txq->n_pend_desc]; in tso_start_new_packet()
1231 efx_tx_qdesc_tso2_create(txq->common, in tso_start_new_packet()
1232 tso->packet_id, in tso_start_new_packet()
1234 tso->seqnum, in tso_start_new_packet()
1235 tso->seg_size, in tso_start_new_packet()
1239 txq->n_pend_desc += EFX_TX_FATSOV2_OPT_NDESCS; in tso_start_new_packet()
1240 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); in tso_start_new_packet()
1241 id = (id + EFX_TX_FATSOV2_OPT_NDESCS) & txq->ptr_mask; in tso_start_new_packet()
1243 tso->segs_space = in tso_start_new_packet()
1244 EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1; in tso_start_new_packet()
1246 uint8_t tcp_flags = tso->tcp_flags; in tso_start_new_packet()
1248 if (tso->out_len > tso->seg_size) in tso_start_new_packet()
1252 desc = &txq->pend_desc[txq->n_pend_desc++]; in tso_start_new_packet()
1253 efx_tx_qdesc_tso_create(txq->common, in tso_start_new_packet()
1254 tso->packet_id, in tso_start_new_packet()
1255 tso->seqnum, in tso_start_new_packet()
1258 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); in tso_start_new_packet()
1259 id = (id + 1) & txq->ptr_mask; in tso_start_new_packet()
1261 tso->seqnum += tso->seg_size; in tso_start_new_packet()
1262 tso->segs_space = UINT_MAX; in tso_start_new_packet()
1266 *desc = tso->header_desc; in tso_start_new_packet()
1267 txq->n_pend_desc++; in tso_start_new_packet()
1268 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); in tso_start_new_packet()
1269 id = (id + 1) & txq->ptr_mask; in tso_start_new_packet()
1271 /* Allocate a DMA-mapped header buffer. */ in tso_start_new_packet()
1272 if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) { in tso_start_new_packet()
1276 header = (txq->tsoh_buffer[page_index].esm_base + in tso_start_new_packet()
1278 dma_addr = (txq->tsoh_buffer[page_index].esm_addr + in tso_start_new_packet()
1280 map = txq->tsoh_buffer[page_index].esm_map; in tso_start_new_packet()
1282 KASSERT(txq->stmp[id].flags == 0, in tso_start_new_packet()
1285 struct sfxge_tx_mapping *stmp = &txq->stmp[id]; in tso_start_new_packet()
1288 header = malloc(tso->header_len, M_SFXGE, M_NOWAIT); in tso_start_new_packet()
1291 rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map, in tso_start_new_packet()
1292 header, tso->header_len, in tso_start_new_packet()
1298 bus_dmamap_unload(txq->packet_dma_tag, in tso_start_new_packet()
1299 stmp->map); in tso_start_new_packet()
1305 map = stmp->map; in tso_start_new_packet()
1307 txq->tso_long_headers++; in tso_start_new_packet()
1308 stmp->u.heap_buf = header; in tso_start_new_packet()
1309 stmp->flags = TX_BUF_UNMAP; in tso_start_new_packet()
1312 tsoh_th = (struct tcphdr *)(header + tso->tcph_off); in tso_start_new_packet()
1315 m_copydata(tso->mbuf, 0, tso->header_len, header); in tso_start_new_packet()
1317 tsoh_th->th_seq = htonl(tso->seqnum); in tso_start_new_packet()
1318 tso->seqnum += tso->seg_size; in tso_start_new_packet()
1319 if (tso->out_len > tso->seg_size) { in tso_start_new_packet()
1321 ip_length = tso->header_len - tso->nh_off + tso->seg_size; in tso_start_new_packet()
1325 ip_length = tso->header_len - tso->nh_off + tso->out_len; in tso_start_new_packet()
1328 if (tso->protocol == htons(ETHERTYPE_IP)) { in tso_start_new_packet()
1329 struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off); in tso_start_new_packet()
1330 tsoh_iph->ip_len = htons(ip_length); in tso_start_new_packet()
1336 (struct ip6_hdr *)(header + tso->nh_off); in tso_start_new_packet()
1337 tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph)); in tso_start_new_packet()
1341 bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE); in tso_start_new_packet()
1344 desc = &txq->pend_desc[txq->n_pend_desc++]; in tso_start_new_packet()
1345 efx_tx_qdesc_dma_create(txq->common, in tso_start_new_packet()
1347 tso->header_len, in tso_start_new_packet()
1350 id = (id + 1) & txq->ptr_mask; in tso_start_new_packet()
1352 tso->segs_space = UINT_MAX; in tso_start_new_packet()
1354 tso->packet_space = tso->seg_size; in tso_start_new_packet()
1355 txq->tso_packets++; in tso_start_new_packet()
1372 while (dma_seg->ds_len + skipped <= tso.header_len) { in sfxge_tx_queue_tso()
1373 skipped += dma_seg->ds_len; in sfxge_tx_queue_tso()
1374 --n_dma_seg; in sfxge_tx_queue_tso()
1378 tso.in_len = dma_seg->ds_len - (tso.header_len - skipped); in sfxge_tx_queue_tso()
1379 tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped); in sfxge_tx_queue_tso()
1381 id = (txq->added + n_extra_descs) & txq->ptr_mask; in sfxge_tx_queue_tso()
1383 return (-1); in sfxge_tx_queue_tso()
1388 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); in sfxge_tx_queue_tso()
1389 id = (id + 1) & txq->ptr_mask; in sfxge_tx_queue_tso()
1393 --n_dma_seg; in sfxge_tx_queue_tso()
1397 tso.in_len = dma_seg->ds_len; in sfxge_tx_queue_tso()
1398 tso.dma_addr = dma_seg->ds_addr; in sfxge_tx_queue_tso()
1401 /* End of packet? */ in sfxge_tx_queue_tso()
1410 * the remainder of the input mbuf but do not in sfxge_tx_queue_tso()
1413 if (txq->n_pend_desc + n_fatso_opt_desc + in sfxge_tx_queue_tso()
1414 1 /* header */ + n_dma_seg > txq->max_pkt_desc) { in sfxge_tx_queue_tso()
1415 txq->tso_pdrop_too_many++; in sfxge_tx_queue_tso()
1420 txq->tso_pdrop_no_rsrc++; in sfxge_tx_queue_tso()
1426 txq->tso_bursts++; in sfxge_tx_queue_tso()
1436 sc = txq->sc; in sfxge_tx_qunblock()
1437 evq = sc->evq[txq->evq_index]; in sfxge_tx_qunblock()
1441 if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) in sfxge_tx_qunblock()
1446 if (txq->blocked) { in sfxge_tx_qunblock()
1449 level = txq->added - txq->completed; in sfxge_tx_qunblock()
1450 if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) { in sfxge_tx_qunblock()
1453 txq->blocked = 0; in sfxge_tx_qunblock()
1465 txq->flush_state = SFXGE_FLUSH_DONE; in sfxge_tx_qflush_done()
1477 txq = sc->txq[index]; in sfxge_tx_qstop()
1478 evq = sc->evq[txq->evq_index]; in sfxge_tx_qstop()
1483 KASSERT(txq->init_state == SFXGE_TXQ_STARTED, in sfxge_tx_qstop()
1484 ("txq->init_state != SFXGE_TXQ_STARTED")); in sfxge_tx_qstop()
1486 txq->init_state = SFXGE_TXQ_INITIALIZED; in sfxge_tx_qstop()
1488 if (txq->flush_state != SFXGE_FLUSH_DONE) { in sfxge_tx_qstop()
1489 txq->flush_state = SFXGE_FLUSH_PENDING; in sfxge_tx_qstop()
1495 if (efx_tx_qflush(txq->common) != 0) { in sfxge_tx_qstop()
1497 device_get_nameunit(sc->dev), index); in sfxge_tx_qstop()
1498 txq->flush_state = SFXGE_FLUSH_DONE; in sfxge_tx_qstop()
1504 if (txq->flush_state != SFXGE_FLUSH_PENDING) in sfxge_tx_qstop()
1511 KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED, in sfxge_tx_qstop()
1512 ("txq->flush_state == SFXGE_FLUSH_FAILED")); in sfxge_tx_qstop()
1514 if (txq->flush_state != SFXGE_FLUSH_DONE) { in sfxge_tx_qstop()
1517 device_get_nameunit(sc->dev), index); in sfxge_tx_qstop()
1518 txq->flush_state = SFXGE_FLUSH_DONE; in sfxge_tx_qstop()
1522 txq->blocked = 0; in sfxge_tx_qstop()
1523 txq->pending = txq->added; in sfxge_tx_qstop()
1526 KASSERT(txq->completed == txq->added, in sfxge_tx_qstop()
1527 ("txq->completed != txq->added")); in sfxge_tx_qstop()
1530 KASSERT(txq->reaped == txq->completed, in sfxge_tx_qstop()
1531 ("txq->reaped != txq->completed")); in sfxge_tx_qstop()
1533 txq->added = 0; in sfxge_tx_qstop()
1534 txq->pending = 0; in sfxge_tx_qstop()
1535 txq->completed = 0; in sfxge_tx_qstop()
1536 txq->reaped = 0; in sfxge_tx_qstop()
1539 efx_tx_qdestroy(txq->common); in sfxge_tx_qstop()
1540 txq->common = NULL; in sfxge_tx_qstop()
1542 efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, in sfxge_tx_qstop()
1543 EFX_TXQ_NBUFS(sc->txq_entries)); in sfxge_tx_qstop()
1545 txq->hw_cksum_flags = 0; in sfxge_tx_qstop()
1552 * Estimate maximum number of Tx descriptors required for TSO packet.
1554 * than a ring-ful of descriptors), but this should not happen in
1569 if (sc->txq_dynamic_cksum_toggle_supported) in sfxge_tx_max_pkt_desc()
1573 if (efx_nic_cfg_get(sc->enp)->enc_hw_tx_insert_vlan_enabled) in sfxge_tx_max_pkt_desc()
1583 sw_tso_max_descs = SFXGE_TSO_MAX_SEGS * 2 - 1; in sfxge_tx_max_pkt_desc()
1592 * descriptors per superframe limited by number of DMA fetches in sfxge_tx_max_pkt_desc()
1598 EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1) * in sfxge_tx_max_pkt_desc()
1599 (EFX_TX_FATSOV2_OPT_NDESCS + 1) - 1; in sfxge_tx_max_pkt_desc()
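The budget computed above combines the option descriptors (checksum toggle, VLAN insertion) with the worst-case descriptor count of whichever TSO method may end up being used; software TSO remains a fallback even when FW-assisted TSO is enabled, so taking the maximum over the variants is the safe estimate. The following is a parameterized sketch of that shape only; the helper name, the argument split and the use of a plain maximum are assumptions, not the driver's exact arithmetic.

static unsigned int
example_tso_pkt_desc_budget(unsigned int n_option_descs,
    unsigned int n_payload_descs, unsigned int sw_tso_max_descs,
    unsigned int fa_tso_v1_max_descs, unsigned int fa_tso_v2_max_descs)
{
	unsigned int tso_descs = sw_tso_max_descs;

	/* Worst case over the TSO methods that may be used at run time. */
	if (fa_tso_v1_max_descs > tso_descs)
		tso_descs = fa_tso_v1_max_descs;
	if (fa_tso_v2_max_descs > tso_descs)
		tso_descs = fa_tso_v2_max_descs;

	return (n_option_descs + n_payload_descs + tso_descs);
}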
1623 txq = sc->txq[index]; in sfxge_tx_qstart()
1624 esmp = &txq->mem; in sfxge_tx_qstart()
1625 evq = sc->evq[txq->evq_index]; in sfxge_tx_qstart()
1627 KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, in sfxge_tx_qstart()
1628 ("txq->init_state != SFXGE_TXQ_INITIALIZED")); in sfxge_tx_qstart()
1629 KASSERT(evq->init_state == SFXGE_EVQ_STARTED, in sfxge_tx_qstart()
1630 ("evq->init_state != SFXGE_EVQ_STARTED")); in sfxge_tx_qstart()
1633 if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp, in sfxge_tx_qstart()
1634 EFX_TXQ_NBUFS(sc->txq_entries))) != 0) in sfxge_tx_qstart()
1637 /* Determine the kind of queue we are creating. */ in sfxge_tx_qstart()
1639 switch (txq->type) { in sfxge_tx_qstart()
1648 tso_fw_assisted = sc->tso_fw_assisted; in sfxge_tx_qstart()
1658 label = (sc->txq_dynamic_cksum_toggle_supported) ? 0 : txq->type; in sfxge_tx_qstart()
1661 if ((rc = efx_tx_qcreate(sc->enp, index, label, esmp, in sfxge_tx_qstart()
1662 sc->txq_entries, txq->buf_base_id, flags, evq->common, in sfxge_tx_qstart()
1663 &txq->common, &desc_index)) != 0) { in sfxge_tx_qstart()
1671 if ((rc = efx_tx_qcreate(sc->enp, index, label, esmp, in sfxge_tx_qstart()
1672 sc->txq_entries, txq->buf_base_id, flags, evq->common, in sfxge_tx_qstart()
1673 &txq->common, &desc_index)) != 0) in sfxge_tx_qstart()
1678 txq->added = txq->pending = txq->completed = txq->reaped = desc_index; in sfxge_tx_qstart()
1683 efx_tx_qenable(txq->common); in sfxge_tx_qstart()
1685 txq->init_state = SFXGE_TXQ_STARTED; in sfxge_tx_qstart()
1686 txq->flush_state = SFXGE_FLUSH_REQUIRED; in sfxge_tx_qstart()
1687 txq->tso_fw_assisted = tso_fw_assisted; in sfxge_tx_qstart()
1689 txq->max_pkt_desc = sfxge_tx_max_pkt_desc(sc, txq->type, in sfxge_tx_qstart()
1692 txq->hw_vlan_tci = 0; in sfxge_tx_qstart()
1694 txq->hw_cksum_flags = flags & in sfxge_tx_qstart()
1702 efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, in sfxge_tx_qstart()
1703 EFX_TXQ_NBUFS(sc->txq_entries)); in sfxge_tx_qstart()
1712 index = sc->txq_count; in sfxge_tx_stop()
1713 while (--index >= 0) in sfxge_tx_stop()
1717 efx_tx_fini(sc->enp); in sfxge_tx_stop()
1727 if ((rc = efx_tx_init(sc->enp)) != 0) in sfxge_tx_start()
1730 for (index = 0; index < sc->txq_count; index++) { in sfxge_tx_start()
1738 while (--index >= 0) in sfxge_tx_start()
1741 efx_tx_fini(sc->enp); in sfxge_tx_start()
1749 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(txq->sc->dev); in sfxge_txq_stat_init()
1778 txq = sc->txq[index]; in sfxge_tx_qfini()
1780 KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, in sfxge_tx_qfini()
1781 ("txq->init_state != SFXGE_TXQ_INITIALIZED")); in sfxge_tx_qfini()
1783 if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) in sfxge_tx_qfini()
1787 free(txq->pend_desc, M_SFXGE); in sfxge_tx_qfini()
1788 nmaps = sc->txq_entries; in sfxge_tx_qfini()
1789 while (nmaps-- != 0) in sfxge_tx_qfini()
1790 bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); in sfxge_tx_qfini()
1791 free(txq->stmp, M_SFXGE); in sfxge_tx_qfini()
1794 sfxge_dma_free(&txq->mem); in sfxge_tx_qfini()
1796 sc->txq[index] = NULL; in sfxge_tx_qfini()
1807 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp); in sfxge_tx_qinit()
1809 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); in sfxge_tx_qinit()
1819 txq->sc = sc; in sfxge_tx_qinit()
1820 txq->entries = sc->txq_entries; in sfxge_tx_qinit()
1821 txq->ptr_mask = txq->entries - 1; in sfxge_tx_qinit()
1823 sc->txq[txq_index] = txq; in sfxge_tx_qinit()
1824 esmp = &txq->mem; in sfxge_tx_qinit()
1827 if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0) in sfxge_tx_qinit()
1831 sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries), in sfxge_tx_qinit()
1832 &txq->buf_base_id); in sfxge_tx_qinit()
1835 if (bus_dma_tag_create(sc->parent_dma_tag, 1, in sfxge_tx_qinit()
1836 encp->enc_tx_dma_desc_boundary, in sfxge_tx_qinit()
1839 encp->enc_tx_dma_desc_size_max, 0, NULL, NULL, in sfxge_tx_qinit()
1840 &txq->packet_dma_tag) != 0) { in sfxge_tx_qinit()
1841 device_printf(sc->dev, "Couldn't allocate txq DMA tag\n"); in sfxge_tx_qinit()
1847 txq->pend_desc = malloc(sizeof(efx_desc_t) * sc->txq_entries, in sfxge_tx_qinit()
1851 txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries, in sfxge_tx_qinit()
1853 for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) { in sfxge_tx_qinit()
1854 rc = bus_dmamap_create(txq->packet_dma_tag, 0, in sfxge_tx_qinit()
1855 &txq->stmp[nmaps].map); in sfxge_tx_qinit()
1861 txq_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->txqs_node), in sfxge_tx_qinit()
1873 stdp = &txq->dpl; in sfxge_tx_qinit()
1874 stdp->std_put_max = sfxge_tx_dpl_put_max; in sfxge_tx_qinit()
1875 stdp->std_get_max = sfxge_tx_dpl_get_max; in sfxge_tx_qinit()
1876 stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max; in sfxge_tx_qinit()
1877 stdp->std_getp = &stdp->std_get; in sfxge_tx_qinit()
1879 SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index); in sfxge_tx_qinit()
1891 &stdp->std_get_count, 0, ""); in sfxge_tx_qinit()
1894 &stdp->std_get_non_tcp_count, 0, ""); in sfxge_tx_qinit()
1897 &stdp->std_get_hiwat, 0, ""); in sfxge_tx_qinit()
1900 &stdp->std_put_hiwat, 0, ""); in sfxge_tx_qinit()
1906 txq->type = type; in sfxge_tx_qinit()
1907 txq->evq_index = evq_index; in sfxge_tx_qinit()
1908 txq->init_state = SFXGE_TXQ_INITIALIZED; in sfxge_tx_qinit()
1916 free(txq->pend_desc, M_SFXGE); in sfxge_tx_qinit()
1918 while (nmaps-- != 0) in sfxge_tx_qinit()
1919 bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); in sfxge_tx_qinit()
1920 free(txq->stmp, M_SFXGE); in sfxge_tx_qinit()
1921 bus_dma_tag_destroy(txq->packet_dma_tag); in sfxge_tx_qinit()
1939 for (index = 0; index < sc->txq_count; index++) in sfxge_tx_stat_handler()
1940 sum += *(unsigned long *)((caddr_t)sc->txq[index] + in sfxge_tx_stat_handler()
1949 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); in sfxge_tx_stat_init()
1953 stat_list = SYSCTL_CHILDREN(sc->stats_node); in sfxge_tx_stat_init()
1971 for (index = 0; index < sc->txq_count; index++) { in sfxge_tx_get_drops()
1972 txq = sc->txq[index]; in sfxge_tx_get_drops()
1974 * In theory, txq->put_overflow and txq->netdown_drops in sfxge_tx_get_drops()
1978 drops += txq->drops + txq->get_overflow + in sfxge_tx_get_drops()
1979 txq->get_non_tcp_overflow + in sfxge_tx_get_drops()
1980 txq->put_overflow + txq->netdown_drops + in sfxge_tx_get_drops()
1981 txq->tso_pdrop_too_many + txq->tso_pdrop_no_rsrc; in sfxge_tx_get_drops()
1991 index = sc->txq_count; in sfxge_tx_fini()
1992 while (--index >= 0) in sfxge_tx_fini()
1995 sc->txq_count = 0; in sfxge_tx_fini()
2001 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp); in sfxge_tx_init()
2002 struct sfxge_intr *intr __diagused; in sfxge_tx_init()
2006 intr = &sc->intr; in sfxge_tx_init()
2008 KASSERT(intr->state == SFXGE_INTR_INITIALIZED, in sfxge_tx_init()
2009 ("intr->state != SFXGE_INTR_INITIALIZED")); in sfxge_tx_init()
2031 sc->txq_count = SFXGE_EVQ0_N_TXQ(sc) - 1 + sc->intr.n_alloc; in sfxge_tx_init()
2033 sc->tso_fw_assisted = sfxge_tso_fw_assisted; in sfxge_tx_init()
2034 if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO) || in sfxge_tx_init()
2035 (!encp->enc_fw_assisted_tso_enabled)) in sfxge_tx_init()
2036 sc->tso_fw_assisted &= ~SFXGE_FATSOV1; in sfxge_tx_init()
2037 if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO_V2) || in sfxge_tx_init()
2038 (!encp->enc_fw_assisted_tso_v2_enabled)) in sfxge_tx_init()
2039 sc->tso_fw_assisted &= ~SFXGE_FATSOV2; in sfxge_tx_init()
2041 sc->txqs_node = SYSCTL_ADD_NODE(device_get_sysctl_ctx(sc->dev), in sfxge_tx_init()
2042 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, in sfxge_tx_init()
2044 if (sc->txqs_node == NULL) { in sfxge_tx_init()
2050 if (sc->txq_dynamic_cksum_toggle_supported == B_FALSE) { in sfxge_tx_init()
2061 index < sc->txq_count - SFXGE_EVQ0_N_TXQ(sc) + 1; in sfxge_tx_init()
2063 if ((rc = sfxge_tx_qinit(sc, SFXGE_EVQ0_N_TXQ(sc) - 1 + index, in sfxge_tx_init()
2073 while (--index >= 0) in sfxge_tx_init()
2083 sc->txq_count = 0; in sfxge_tx_init()