xref: /freebsd/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c (revision e23731db48ef9c6568d4768b1f87d48514339faa)
1dc7e38acSHans Petter Selasky /*-
22d5e5a0dSHans Petter Selasky  * Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
3ebdb7006SHans Petter Selasky  * Copyright (c) 2022 NVIDIA corporation & affiliates.
4dc7e38acSHans Petter Selasky  *
5dc7e38acSHans Petter Selasky  * Redistribution and use in source and binary forms, with or without
6dc7e38acSHans Petter Selasky  * modification, are permitted provided that the following conditions
7dc7e38acSHans Petter Selasky  * are met:
8dc7e38acSHans Petter Selasky  * 1. Redistributions of source code must retain the above copyright
9dc7e38acSHans Petter Selasky  *    notice, this list of conditions and the following disclaimer.
10dc7e38acSHans Petter Selasky  * 2. Redistributions in binary form must reproduce the above copyright
11dc7e38acSHans Petter Selasky  *    notice, this list of conditions and the following disclaimer in the
12dc7e38acSHans Petter Selasky  *    documentation and/or other materials provided with the distribution.
13dc7e38acSHans Petter Selasky  *
14dc7e38acSHans Petter Selasky  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
15dc7e38acSHans Petter Selasky  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16dc7e38acSHans Petter Selasky  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17dc7e38acSHans Petter Selasky  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
18dc7e38acSHans Petter Selasky  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19dc7e38acSHans Petter Selasky  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20dc7e38acSHans Petter Selasky  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21dc7e38acSHans Petter Selasky  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22dc7e38acSHans Petter Selasky  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23dc7e38acSHans Petter Selasky  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24dc7e38acSHans Petter Selasky  * SUCH DAMAGE.
25dc7e38acSHans Petter Selasky  */
26dc7e38acSHans Petter Selasky 
277272f9cdSHans Petter Selasky #include "opt_kern_tls.h"
28b984b956SKonstantin Belousov #include "opt_rss.h"
29b984b956SKonstantin Belousov #include "opt_ratelimit.h"
307272f9cdSHans Petter Selasky 
3189918a23SKonstantin Belousov #include <dev/mlx5/mlx5_en/en.h>
32dc7e38acSHans Petter Selasky #include <machine/atomic.h>
33*e23731dbSKonstantin Belousov #include <dev/mlx5/mlx5_accel/ipsec.h>
34dc7e38acSHans Petter Selasky 
35376bcf63SHans Petter Selasky static inline bool
367272f9cdSHans Petter Selasky mlx5e_do_send_cqe_inline(struct mlx5e_sq *sq)
37376bcf63SHans Petter Selasky {
38376bcf63SHans Petter Selasky 	sq->cev_counter++;
39376bcf63SHans Petter Selasky 	/* interleave the CQEs */
40376bcf63SHans Petter Selasky 	if (sq->cev_counter >= sq->cev_factor) {
41376bcf63SHans Petter Selasky 		sq->cev_counter = 0;
427272f9cdSHans Petter Selasky 		return (true);
43376bcf63SHans Petter Selasky 	}
447272f9cdSHans Petter Selasky 	return (false);
457272f9cdSHans Petter Selasky }
467272f9cdSHans Petter Selasky 
477272f9cdSHans Petter Selasky bool
487272f9cdSHans Petter Selasky mlx5e_do_send_cqe(struct mlx5e_sq *sq)
497272f9cdSHans Petter Selasky {
507272f9cdSHans Petter Selasky 
517272f9cdSHans Petter Selasky 	return (mlx5e_do_send_cqe_inline(sq));
52376bcf63SHans Petter Selasky }
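/*
 * Editorial example (illustrative, not part of the driver source):
 * the two functions above implement completion event moderation.
 * With a send queue configured for sq->cev_factor == 4, only every
 * 4th call asks the hardware for a completion:
 *
 *	for (int i = 1; i <= 8; i++)
 *		printf("%d: %d\n", i, mlx5e_do_send_cqe(sq));
 *
 * would print 1 (request a CQE) on iterations 4 and 8 only, so one
 * CQE is generated per 4 send WQEs.
 */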
53376bcf63SHans Petter Selasky 
54dc7e38acSHans Petter Selasky void
55af89c4afSHans Petter Selasky mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
56dc7e38acSHans Petter Selasky {
57dc7e38acSHans Petter Selasky 	u16 pi = sq->pc & sq->wq.sz_m1;
58dc7e38acSHans Petter Selasky 	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
59dc7e38acSHans Petter Selasky 
60dc7e38acSHans Petter Selasky 	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));
61dc7e38acSHans Petter Selasky 
62dc7e38acSHans Petter Selasky 	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
63dc7e38acSHans Petter Selasky 	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
647272f9cdSHans Petter Selasky 	if (mlx5e_do_send_cqe_inline(sq))
65dc7e38acSHans Petter Selasky 		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
66376bcf63SHans Petter Selasky 	else
67376bcf63SHans Petter Selasky 		wqe->ctrl.fm_ce_se = 0;
68dc7e38acSHans Petter Selasky 
69af89c4afSHans Petter Selasky 	/* Copy data for doorbell */
70af89c4afSHans Petter Selasky 	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
71af89c4afSHans Petter Selasky 
72dc7e38acSHans Petter Selasky 	sq->mbuf[pi].mbuf = NULL;
73dc7e38acSHans Petter Selasky 	sq->mbuf[pi].num_bytes = 0;
74dc7e38acSHans Petter Selasky 	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
75dc7e38acSHans Petter Selasky 	sq->pc += sq->mbuf[pi].num_wqebbs;
76dc7e38acSHans Petter Selasky }
77dc7e38acSHans Petter Selasky 
78dc7e38acSHans Petter Selasky static uint32_t mlx5e_hash_value;
79dc7e38acSHans Petter Selasky 
80dc7e38acSHans Petter Selasky static void
81dc7e38acSHans Petter Selasky mlx5e_hash_init(void *arg)
82dc7e38acSHans Petter Selasky {
83dc7e38acSHans Petter Selasky 	mlx5e_hash_value = m_ether_tcpip_hash_init();
84dc7e38acSHans Petter Selasky }
85dc7e38acSHans Petter Selasky 
86dc7e38acSHans Petter Selasky /* Make the kernel call mlx5e_hash_init() after the random stack has finished initializing */
87dc7e38acSHans Petter Selasky SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
88dc7e38acSHans Petter Selasky 
89dc7e38acSHans Petter Selasky static struct mlx5e_sq *
905dc00f00SJustin Hibbits mlx5e_select_queue_by_send_tag(if_t ifp, struct mbuf *mb)
91cc971b22SSlava Shwartsman {
927272f9cdSHans Petter Selasky 	struct m_snd_tag *mb_tag;
93cc971b22SSlava Shwartsman 	struct mlx5e_sq *sq;
94cc971b22SSlava Shwartsman 
957272f9cdSHans Petter Selasky 	mb_tag = mb->m_pkthdr.snd_tag;
967272f9cdSHans Petter Selasky 
977272f9cdSHans Petter Selasky #ifdef KERN_TLS
987272f9cdSHans Petter Selasky top:
997272f9cdSHans Petter Selasky #endif
100cc971b22SSlava Shwartsman 	/* get pointer to sendqueue */
101c782ea8bSJohn Baldwin 	switch (mb_tag->sw->type) {
102cc971b22SSlava Shwartsman #ifdef RATELIMIT
103cc971b22SSlava Shwartsman 	case IF_SND_TAG_TYPE_RATE_LIMIT:
10456fb710fSJohn Baldwin 		sq = container_of(mb_tag,
105cc971b22SSlava Shwartsman 		    struct mlx5e_rl_channel, tag)->sq;
106cc971b22SSlava Shwartsman 		break;
107b7d92a66SJohn Baldwin #ifdef KERN_TLS
1087272f9cdSHans Petter Selasky 	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
10956fb710fSJohn Baldwin 		mb_tag = container_of(mb_tag, struct mlx5e_tls_tag, tag)->rl_tag;
1107272f9cdSHans Petter Selasky 		goto top;
1117272f9cdSHans Petter Selasky #endif
112cc971b22SSlava Shwartsman #endif
113cc971b22SSlava Shwartsman 	case IF_SND_TAG_TYPE_UNLIMITED:
11456fb710fSJohn Baldwin 		sq = &container_of(mb_tag,
115cc971b22SSlava Shwartsman 		    struct mlx5e_channel, tag)->sq[0];
11656fb710fSJohn Baldwin 		KASSERT((mb_tag->refcount > 0),
117cc971b22SSlava Shwartsman 		    ("mlx5e_select_queue: Channel refs are zero for unlimited tag"));
118cc971b22SSlava Shwartsman 		break;
1197272f9cdSHans Petter Selasky #ifdef KERN_TLS
1207272f9cdSHans Petter Selasky 	case IF_SND_TAG_TYPE_TLS:
12156fb710fSJohn Baldwin 		mb_tag = container_of(mb_tag, struct mlx5e_tls_tag, tag)->rl_tag;
1227272f9cdSHans Petter Selasky 		goto top;
1237272f9cdSHans Petter Selasky #endif
124cc971b22SSlava Shwartsman 	default:
125cc971b22SSlava Shwartsman 		sq = NULL;
126cc971b22SSlava Shwartsman 		break;
127cc971b22SSlava Shwartsman 	}
128cc971b22SSlava Shwartsman 
129cc971b22SSlava Shwartsman 	/* check if valid */
130cc971b22SSlava Shwartsman 	if (sq != NULL && READ_ONCE(sq->running) != 0)
131cc971b22SSlava Shwartsman 		return (sq);
132cc971b22SSlava Shwartsman 
133cc971b22SSlava Shwartsman 	return (NULL);
134cc971b22SSlava Shwartsman }
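/*
 * Editorial note (illustrative, not part of the driver source): a TLS
 * send tag does not carry a send queue of its own, so the switch
 * above may iterate twice via the "top" label, e.g.
 *
 *	IF_SND_TAG_TYPE_TLS -> mlx5e_tls_tag.rl_tag
 *	    -> IF_SND_TAG_TYPE_UNLIMITED -> mlx5e_channel.sq[0]
 *
 * The resolved queue is only returned when READ_ONCE(sq->running) is
 * non-zero.
 */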
135cc971b22SSlava Shwartsman 
136cc971b22SSlava Shwartsman static struct mlx5e_sq *
1375dc00f00SJustin Hibbits mlx5e_select_queue(if_t ifp, struct mbuf *mb)
138dc7e38acSHans Petter Selasky {
1395dc00f00SJustin Hibbits 	struct mlx5e_priv *priv = if_getsoftc(ifp);
1403230c29dSSlava Shwartsman 	struct mlx5e_sq *sq;
141dc7e38acSHans Petter Selasky 	u32 ch;
142dc7e38acSHans Petter Selasky 	u32 tc;
143dc7e38acSHans Petter Selasky 
144dc7e38acSHans Petter Selasky 	/* obtain VLAN information if present */
145dc7e38acSHans Petter Selasky 	if (mb->m_flags & M_VLANTAG) {
146dc7e38acSHans Petter Selasky 		tc = (mb->m_pkthdr.ether_vtag >> 13);
147dc7e38acSHans Petter Selasky 		if (tc >= priv->num_tc)
148dc7e38acSHans Petter Selasky 			tc = priv->default_vlan_prio;
149dc7e38acSHans Petter Selasky 	} else {
150dc7e38acSHans Petter Selasky 		tc = priv->default_vlan_prio;
151dc7e38acSHans Petter Selasky 	}
152dc7e38acSHans Petter Selasky 
153dc7e38acSHans Petter Selasky 	ch = priv->params.num_channels;
154dc7e38acSHans Petter Selasky 
155dc7e38acSHans Petter Selasky 	/* check if flowid is set */
156dc7e38acSHans Petter Selasky 	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
157278ce1c9SHans Petter Selasky #ifdef RSS
158278ce1c9SHans Petter Selasky 		u32 temp;
159278ce1c9SHans Petter Selasky 
160278ce1c9SHans Petter Selasky 		if (rss_hash2bucket(mb->m_pkthdr.flowid,
161278ce1c9SHans Petter Selasky 		    M_HASHTYPE_GET(mb), &temp) == 0)
162278ce1c9SHans Petter Selasky 			ch = temp % ch;
163278ce1c9SHans Petter Selasky 		else
164278ce1c9SHans Petter Selasky #endif
165dc7e38acSHans Petter Selasky 			ch = (mb->m_pkthdr.flowid % 128) % ch;
166dc7e38acSHans Petter Selasky 	} else {
167dc7e38acSHans Petter Selasky 		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
168dc7e38acSHans Petter Selasky 		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
169dc7e38acSHans Petter Selasky 	}
170dc7e38acSHans Petter Selasky 
1713230c29dSSlava Shwartsman 	/* check if send queue is running */
1723230c29dSSlava Shwartsman 	sq = &priv->channel[ch].sq[tc];
1733230c29dSSlava Shwartsman 	if (likely(READ_ONCE(sq->running) != 0))
1743230c29dSSlava Shwartsman 		return (sq);
175dc7e38acSHans Petter Selasky 	return (NULL);
176dc7e38acSHans Petter Selasky }
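/*
 * Editorial worked example (hypothetical values, non-RSS path): queue
 * selection combines the VLAN priority and the flow hash. For an mbuf
 * with M_VLANTAG set, ether_vtag 0x6001 and flowid 77 on a port with
 * 8 channels and 4 traffic classes:
 *
 *	tc = 0x6001 >> 13 = 3;		// VLAN PCP selects the TC
 *	ch = (77 % 128) % 8 = 5;	// flow hash selects the channel
 *
 * so the packet goes to &priv->channel[5].sq[3], provided that send
 * queue is running.
 */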
177dc7e38acSHans Petter Selasky 
178dc7e38acSHans Petter Selasky static inline u16
1793e581cabSSlava Shwartsman mlx5e_get_l2_header_size(struct mlx5e_sq *sq, struct mbuf *mb)
180dc7e38acSHans Petter Selasky {
1813e581cabSSlava Shwartsman 	struct ether_vlan_header *eh;
1823e581cabSSlava Shwartsman 	uint16_t eth_type;
1833e581cabSSlava Shwartsman 	int min_inline;
18405399002SHans Petter Selasky 
1853e581cabSSlava Shwartsman 	eh = mtod(mb, struct ether_vlan_header *);
1863e581cabSSlava Shwartsman 	if (unlikely(mb->m_len < ETHER_HDR_LEN)) {
1873e581cabSSlava Shwartsman 		goto max_inline;
1883e581cabSSlava Shwartsman 	} else if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1893e581cabSSlava Shwartsman 		if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
1903e581cabSSlava Shwartsman 			goto max_inline;
1913e581cabSSlava Shwartsman 		eth_type = ntohs(eh->evl_proto);
1923e581cabSSlava Shwartsman 		min_inline = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1933e581cabSSlava Shwartsman 	} else {
1943e581cabSSlava Shwartsman 		eth_type = ntohs(eh->evl_encap_proto);
1953e581cabSSlava Shwartsman 		min_inline = ETHER_HDR_LEN;
19605399002SHans Petter Selasky 	}
1973e581cabSSlava Shwartsman 
1983e581cabSSlava Shwartsman 	switch (eth_type) {
1993e581cabSSlava Shwartsman 	case ETHERTYPE_IP:
2003e581cabSSlava Shwartsman 	case ETHERTYPE_IPV6:
2013e581cabSSlava Shwartsman 		/*
2023e581cabSSlava Shwartsman 		 * Make sure the TOS (IPv4) or traffic class (IPv6)
2033e581cabSSlava Shwartsman 		 * field gets inlined, else the SQ may stall.
2043e581cabSSlava Shwartsman 		 */
2053e581cabSSlava Shwartsman 		min_inline += 4;
2063e581cabSSlava Shwartsman 		break;
2073e581cabSSlava Shwartsman 	default:
2083e581cabSSlava Shwartsman 		goto max_inline;
2093e581cabSSlava Shwartsman 	}
2103e581cabSSlava Shwartsman 
2113e581cabSSlava Shwartsman 	/*
2123e581cabSSlava Shwartsman 	 * m_copydata() will be used on the remaining header which
2133e581cabSSlava Shwartsman 	 * does not need to reside within the first m_len bytes of
2143e581cabSSlava Shwartsman 	 * data:
2153e581cabSSlava Shwartsman 	 */
2163e581cabSSlava Shwartsman 	if (mb->m_pkthdr.len < min_inline)
2173e581cabSSlava Shwartsman 		goto max_inline;
2183e581cabSSlava Shwartsman 	return (min_inline);
2193e581cabSSlava Shwartsman 
2203e581cabSSlava Shwartsman max_inline:
2213e581cabSSlava Shwartsman 	return (MIN(mb->m_pkthdr.len, sq->max_inline));
222dc7e38acSHans Petter Selasky }
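/*
 * Editorial worked example: for a VLAN-tagged IPv4 frame the function
 * above inlines just enough of the header so that the TOS byte is
 * covered:
 *
 *	min_inline = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 4
 *		   = 14 + 4 + 4 = 22 bytes
 *
 * For other ethertypes, or frames shorter than that, it falls back to
 * MIN(mb->m_pkthdr.len, sq->max_inline).
 */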
223dc7e38acSHans Petter Selasky 
2247272f9cdSHans Petter Selasky /*
2257272f9cdSHans Petter Selasky  * This function parses IPv4 and IPv6 packets looking for TCP and UDP
2267272f9cdSHans Petter Selasky  * headers.
2277272f9cdSHans Petter Selasky  *
2287272f9cdSHans Petter Selasky  * Upon return, the pointer that the "ppth" argument points to is set
2297272f9cdSHans Petter Selasky  * to the location of the TCP header, or to NULL if no TCP header is
2307272f9cdSHans Petter Selasky  * present.
2317272f9cdSHans Petter Selasky  *
2327272f9cdSHans Petter Selasky  * The return value indicates the number of bytes from the beginning
2337272f9cdSHans Petter Selasky  * of the packet until the first byte after the TCP or UDP header. If
2347272f9cdSHans Petter Selasky  * this function returns zero, the parsing failed.
2357272f9cdSHans Petter Selasky  */
2367272f9cdSHans Petter Selasky int
2379eb1e4aaSHans Petter Selasky mlx5e_get_full_header_size(const struct mbuf *mb, const struct tcphdr **ppth)
238dc7e38acSHans Petter Selasky {
2399eb1e4aaSHans Petter Selasky 	const struct ether_vlan_header *eh;
2409eb1e4aaSHans Petter Selasky 	const struct tcphdr *th;
2419eb1e4aaSHans Petter Selasky 	const struct ip *ip;
242dc7e38acSHans Petter Selasky 	int ip_hlen, tcp_hlen;
2439eb1e4aaSHans Petter Selasky 	const struct ip6_hdr *ip6;
244dc7e38acSHans Petter Selasky 	uint16_t eth_type;
245dc7e38acSHans Petter Selasky 	int eth_hdr_len;
246dc7e38acSHans Petter Selasky 
2479eb1e4aaSHans Petter Selasky 	eh = mtod(mb, const struct ether_vlan_header *);
248b63b61ccSHans Petter Selasky 	if (unlikely(mb->m_len < ETHER_HDR_LEN))
2497272f9cdSHans Petter Selasky 		goto failure;
250dc7e38acSHans Petter Selasky 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
25116816f96SKonstantin Belousov 		if (unlikely(mb->m_len < ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
2527272f9cdSHans Petter Selasky 			goto failure;
253dc7e38acSHans Petter Selasky 		eth_type = ntohs(eh->evl_proto);
254dc7e38acSHans Petter Selasky 		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
255dc7e38acSHans Petter Selasky 	} else {
256dc7e38acSHans Petter Selasky 		eth_type = ntohs(eh->evl_encap_proto);
257dc7e38acSHans Petter Selasky 		eth_hdr_len = ETHER_HDR_LEN;
258dc7e38acSHans Petter Selasky 	}
2597272f9cdSHans Petter Selasky 
260dc7e38acSHans Petter Selasky 	switch (eth_type) {
261dc7e38acSHans Petter Selasky 	case ETHERTYPE_IP:
2629eb1e4aaSHans Petter Selasky 		ip = (const struct ip *)(mb->m_data + eth_hdr_len);
263b63b61ccSHans Petter Selasky 		if (unlikely(mb->m_len < eth_hdr_len + sizeof(*ip)))
2647272f9cdSHans Petter Selasky 			goto failure;
2653e581cabSSlava Shwartsman 		switch (ip->ip_p) {
2663e581cabSSlava Shwartsman 		case IPPROTO_TCP:
267dc7e38acSHans Petter Selasky 			ip_hlen = ip->ip_hl << 2;
268dc7e38acSHans Petter Selasky 			eth_hdr_len += ip_hlen;
2697272f9cdSHans Petter Selasky 			goto tcp_packet;
2703e581cabSSlava Shwartsman 		case IPPROTO_UDP:
2713e581cabSSlava Shwartsman 			ip_hlen = ip->ip_hl << 2;
2721b36b386SKonstantin Belousov 			eth_hdr_len += ip_hlen + sizeof(struct udphdr);
2737272f9cdSHans Petter Selasky 			th = NULL;
2747272f9cdSHans Petter Selasky 			goto udp_packet;
2753e581cabSSlava Shwartsman 		default:
2767272f9cdSHans Petter Selasky 			goto failure;
2773e581cabSSlava Shwartsman 		}
2783e581cabSSlava Shwartsman 		break;
279dc7e38acSHans Petter Selasky 	case ETHERTYPE_IPV6:
2809eb1e4aaSHans Petter Selasky 		ip6 = (const struct ip6_hdr *)(mb->m_data + eth_hdr_len);
281b63b61ccSHans Petter Selasky 		if (unlikely(mb->m_len < eth_hdr_len + sizeof(*ip6)))
2827272f9cdSHans Petter Selasky 			goto failure;
2833e581cabSSlava Shwartsman 		switch (ip6->ip6_nxt) {
2843e581cabSSlava Shwartsman 		case IPPROTO_TCP:
285dc7e38acSHans Petter Selasky 			eth_hdr_len += sizeof(*ip6);
2867272f9cdSHans Petter Selasky 			goto tcp_packet;
2873e581cabSSlava Shwartsman 		case IPPROTO_UDP:
2881b36b386SKonstantin Belousov 			eth_hdr_len += sizeof(*ip6) + sizeof(struct udphdr);
2897272f9cdSHans Petter Selasky 			th = NULL;
2907272f9cdSHans Petter Selasky 			goto udp_packet;
2913e581cabSSlava Shwartsman 		default:
2927272f9cdSHans Petter Selasky 			goto failure;
2933e581cabSSlava Shwartsman 		}
2943e581cabSSlava Shwartsman 		break;
295dc7e38acSHans Petter Selasky 	default:
2967272f9cdSHans Petter Selasky 		goto failure;
297dc7e38acSHans Petter Selasky 	}
2987272f9cdSHans Petter Selasky tcp_packet:
2996fe9e470SHans Petter Selasky 	if (unlikely(mb->m_len < eth_hdr_len + sizeof(*th))) {
3006fe9e470SHans Petter Selasky 		const struct mbuf *m_th = mb->m_next;
3016fe9e470SHans Petter Selasky 		if (unlikely(mb->m_len != eth_hdr_len ||
3026fe9e470SHans Petter Selasky 		    m_th == NULL || m_th->m_len < sizeof(*th)))
3037272f9cdSHans Petter Selasky 			goto failure;
3046fe9e470SHans Petter Selasky 		th = (const struct tcphdr *)(m_th->m_data);
3056fe9e470SHans Petter Selasky 	} else {
3069eb1e4aaSHans Petter Selasky 		th = (const struct tcphdr *)(mb->m_data + eth_hdr_len);
3076fe9e470SHans Petter Selasky 	}
308dc7e38acSHans Petter Selasky 	tcp_hlen = th->th_off << 2;
309dc7e38acSHans Petter Selasky 	eth_hdr_len += tcp_hlen;
3107272f9cdSHans Petter Selasky udp_packet:
3113e581cabSSlava Shwartsman 	/*
3123e581cabSSlava Shwartsman 	 * m_copydata() will be used on the remaining header which
3133e581cabSSlava Shwartsman 	 * does not need to reside within the first m_len bytes of
3143e581cabSSlava Shwartsman 	 * data:
3153e581cabSSlava Shwartsman 	 */
316b63b61ccSHans Petter Selasky 	if (unlikely(mb->m_pkthdr.len < eth_hdr_len))
3177272f9cdSHans Petter Selasky 		goto failure;
3187272f9cdSHans Petter Selasky 	if (ppth != NULL)
3197272f9cdSHans Petter Selasky 		*ppth = th;
320dc7e38acSHans Petter Selasky 	return (eth_hdr_len);
3217272f9cdSHans Petter Selasky failure:
3227272f9cdSHans Petter Selasky 	if (ppth != NULL)
3237272f9cdSHans Petter Selasky 		*ppth = NULL;
3247272f9cdSHans Petter Selasky 	return (0);
325dc7e38acSHans Petter Selasky }
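/*
 * Editorial worked example: for an untagged IPv4/TCP packet without
 * IP or TCP options the function above returns
 *
 *	ETHER_HDR_LEN + sizeof(struct ip) + sizeof(struct tcphdr)
 *	= 14 + 20 + 20 = 54 bytes
 *
 * and sets *ppth to the TCP header. For an untagged IPv4/UDP packet
 * it returns 14 + 20 + 8 = 42 bytes and sets *ppth to NULL.
 */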
326dc7e38acSHans Petter Selasky 
3277c9febf9SKonstantin Belousov /*
3287c9febf9SKonstantin Belousov  * Locate a pointer inside a mbuf chain. Returns NULL upon failure.
3297c9febf9SKonstantin Belousov  */
3307c9febf9SKonstantin Belousov static inline void *
3317c9febf9SKonstantin Belousov mlx5e_parse_mbuf_chain(const struct mbuf **mb, int *poffset, int eth_hdr_len,
3327c9febf9SKonstantin Belousov     int min_len)
3337c9febf9SKonstantin Belousov {
3347c9febf9SKonstantin Belousov 	if (unlikely(mb[0]->m_len == eth_hdr_len)) {
3357c9febf9SKonstantin Belousov 		poffset[0] = eth_hdr_len;
3367c9febf9SKonstantin Belousov 		if (unlikely((mb[0] = mb[0]->m_next) == NULL))
3377c9febf9SKonstantin Belousov 			return (NULL);
3387c9febf9SKonstantin Belousov 	}
3397c9febf9SKonstantin Belousov 	if (unlikely(mb[0]->m_len < eth_hdr_len - poffset[0] + min_len))
3407c9febf9SKonstantin Belousov 		return (NULL);
3417c9febf9SKonstantin Belousov 	return (mb[0]->m_data + eth_hdr_len - poffset[0]);
3427c9febf9SKonstantin Belousov }
3437c9febf9SKonstantin Belousov 
3447c9febf9SKonstantin Belousov /*
3457c9febf9SKonstantin Belousov  * This function parses IPv4 and IPv6 packets looking for UDP, VXLAN
3467c9febf9SKonstantin Belousov  * and TCP headers.
3477c9febf9SKonstantin Belousov  *
3487c9febf9SKonstantin Belousov  * The return value indicates the number of bytes from the beginning
3497c9febf9SKonstantin Belousov  * of the packet until the first byte after the TCP header. If this
3507c9febf9SKonstantin Belousov  * function returns zero, the parsing failed.
3517c9febf9SKonstantin Belousov  */
3527c9febf9SKonstantin Belousov static int
3537c9febf9SKonstantin Belousov mlx5e_get_vxlan_header_size(const struct mbuf *mb, struct mlx5e_tx_wqe *wqe,
3547c9febf9SKonstantin Belousov     uint8_t cs_mask, uint8_t opcode)
3557c9febf9SKonstantin Belousov {
3567c9febf9SKonstantin Belousov 	const struct ether_vlan_header *eh;
3577c9febf9SKonstantin Belousov 	struct ip *ip4;
3587c9febf9SKonstantin Belousov 	struct ip6_hdr *ip6;
3597c9febf9SKonstantin Belousov 	struct tcphdr *th;
3607c9febf9SKonstantin Belousov 	struct udphdr *udp;
3617c9febf9SKonstantin Belousov 	bool has_outer_vlan_tag;
3627c9febf9SKonstantin Belousov 	uint16_t eth_type;
3637c9febf9SKonstantin Belousov 	uint8_t ip_type;
3647c9febf9SKonstantin Belousov 	int pkt_hdr_len;
3657c9febf9SKonstantin Belousov 	int eth_hdr_len;
3667c9febf9SKonstantin Belousov 	int tcp_hlen;
3677c9febf9SKonstantin Belousov 	int ip_hlen;
3687c9febf9SKonstantin Belousov 	int offset;
3697c9febf9SKonstantin Belousov 
3707c9febf9SKonstantin Belousov 	pkt_hdr_len = mb->m_pkthdr.len;
3717c9febf9SKonstantin Belousov 	has_outer_vlan_tag = (mb->m_flags & M_VLANTAG) != 0;
3727c9febf9SKonstantin Belousov 	offset = 0;
3737c9febf9SKonstantin Belousov 
3747c9febf9SKonstantin Belousov 	eh = mtod(mb, const struct ether_vlan_header *);
3757c9febf9SKonstantin Belousov 	if (unlikely(mb->m_len < ETHER_HDR_LEN))
3767c9febf9SKonstantin Belousov 		return (0);
3777c9febf9SKonstantin Belousov 
3787c9febf9SKonstantin Belousov 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3797c9febf9SKonstantin Belousov 		if (unlikely(mb->m_len < ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
3807c9febf9SKonstantin Belousov 			return (0);
3817c9febf9SKonstantin Belousov 		eth_type = eh->evl_proto;
3827c9febf9SKonstantin Belousov 		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3837c9febf9SKonstantin Belousov 	} else {
3847c9febf9SKonstantin Belousov 		eth_type = eh->evl_encap_proto;
3857c9febf9SKonstantin Belousov 		eth_hdr_len = ETHER_HDR_LEN;
3867c9febf9SKonstantin Belousov 	}
3877c9febf9SKonstantin Belousov 
3887c9febf9SKonstantin Belousov 	switch (eth_type) {
3897c9febf9SKonstantin Belousov 	case htons(ETHERTYPE_IP):
3907c9febf9SKonstantin Belousov 		ip4 = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
3917c9febf9SKonstantin Belousov 		    sizeof(*ip4));
3927c9febf9SKonstantin Belousov 		if (unlikely(ip4 == NULL))
3937c9febf9SKonstantin Belousov 			return (0);
3947c9febf9SKonstantin Belousov 		ip_type = ip4->ip_p;
3957c9febf9SKonstantin Belousov 		if (unlikely(ip_type != IPPROTO_UDP))
3967c9febf9SKonstantin Belousov 			return (0);
3977c9febf9SKonstantin Belousov 		wqe->eth.swp_outer_l3_offset = eth_hdr_len / 2;
3987c9febf9SKonstantin Belousov 		wqe->eth.cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
3997c9febf9SKonstantin Belousov 		ip_hlen = ip4->ip_hl << 2;
4007c9febf9SKonstantin Belousov 		eth_hdr_len += ip_hlen;
4017c9febf9SKonstantin Belousov 		udp = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
4027c9febf9SKonstantin Belousov 		    sizeof(*udp));
4037c9febf9SKonstantin Belousov 		if (unlikely(udp == NULL))
4047c9febf9SKonstantin Belousov 			return (0);
4057c9febf9SKonstantin Belousov 		wqe->eth.swp_outer_l4_offset = eth_hdr_len / 2;
4067c9febf9SKonstantin Belousov 		wqe->eth.swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_TYPE;
4077c9febf9SKonstantin Belousov 		eth_hdr_len += sizeof(*udp);
4087c9febf9SKonstantin Belousov 		break;
4097c9febf9SKonstantin Belousov 	case htons(ETHERTYPE_IPV6):
4107c9febf9SKonstantin Belousov 		ip6 = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
4117c9febf9SKonstantin Belousov 		    sizeof(*ip6));
4127c9febf9SKonstantin Belousov 		if (unlikely(ip6 == NULL))
4137c9febf9SKonstantin Belousov 			return (0);
4147c9febf9SKonstantin Belousov 		ip_type = ip6->ip6_nxt;
4157c9febf9SKonstantin Belousov 		if (unlikely(ip_type != IPPROTO_UDP))
4167c9febf9SKonstantin Belousov 			return (0);
4177c9febf9SKonstantin Belousov 		wqe->eth.swp_outer_l3_offset = eth_hdr_len / 2;
4181918b253SHans Petter Selasky 		wqe->eth.cs_flags = MLX5_ETH_WQE_L4_CSUM;
4197c9febf9SKonstantin Belousov 		eth_hdr_len += sizeof(*ip6);
4207c9febf9SKonstantin Belousov 		udp = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
4217c9febf9SKonstantin Belousov 		    sizeof(*udp));
4227c9febf9SKonstantin Belousov 		if (unlikely(udp == NULL))
4237c9febf9SKonstantin Belousov 			return (0);
4247c9febf9SKonstantin Belousov 		wqe->eth.swp_outer_l4_offset = eth_hdr_len / 2;
4257c9febf9SKonstantin Belousov 		wqe->eth.swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_TYPE |
4267c9febf9SKonstantin Belousov 		    MLX5_ETH_WQE_SWP_OUTER_L3_TYPE;
4277c9febf9SKonstantin Belousov 		eth_hdr_len += sizeof(*udp);
4287c9febf9SKonstantin Belousov 		break;
4297c9febf9SKonstantin Belousov 	default:
4307c9febf9SKonstantin Belousov 		return (0);
4317c9febf9SKonstantin Belousov 	}
4327c9febf9SKonstantin Belousov 
4337c9febf9SKonstantin Belousov 	/*
4347c9febf9SKonstantin Belousov 	 * If the hardware is not computing the inner IP checksum, then
4357c9febf9SKonstantin Belousov 	 * skip inlining the VXLAN header and the inner headers:
4367c9febf9SKonstantin Belousov 	 */
4377c9febf9SKonstantin Belousov 	if (unlikely((cs_mask & MLX5_ETH_WQE_L3_INNER_CSUM) == 0))
4387c9febf9SKonstantin Belousov 		goto done;
4397c9febf9SKonstantin Belousov 	if (unlikely(mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
4407c9febf9SKonstantin Belousov 	    8) == NULL))
4417c9febf9SKonstantin Belousov 		return (0);
4427c9febf9SKonstantin Belousov 	eth_hdr_len += 8;
4437c9febf9SKonstantin Belousov 
4447c9febf9SKonstantin Belousov 	/* Check for ethernet header again. */
4457c9febf9SKonstantin Belousov 	eh = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len, ETHER_HDR_LEN);
4467c9febf9SKonstantin Belousov 	if (unlikely(eh == NULL))
4477c9febf9SKonstantin Belousov 		return (0);
4487c9febf9SKonstantin Belousov 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4497c9febf9SKonstantin Belousov 		if (unlikely(mb->m_len < eth_hdr_len - offset + ETHER_HDR_LEN +
4507c9febf9SKonstantin Belousov 		    ETHER_VLAN_ENCAP_LEN))
4517c9febf9SKonstantin Belousov 			return (0);
4527c9febf9SKonstantin Belousov 		eth_type = eh->evl_proto;
4537c9febf9SKonstantin Belousov 		eth_hdr_len += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4547c9febf9SKonstantin Belousov 	} else {
4557c9febf9SKonstantin Belousov 		eth_type = eh->evl_encap_proto;
4567c9febf9SKonstantin Belousov 		eth_hdr_len += ETHER_HDR_LEN;
4577c9febf9SKonstantin Belousov 	}
4587c9febf9SKonstantin Belousov 
4597c9febf9SKonstantin Belousov 	/* Check for IP header again. */
4607c9febf9SKonstantin Belousov 	switch (eth_type) {
4617c9febf9SKonstantin Belousov 	case htons(ETHERTYPE_IP):
4627c9febf9SKonstantin Belousov 		ip4 = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
4637c9febf9SKonstantin Belousov 		    sizeof(*ip4));
4647c9febf9SKonstantin Belousov 		if (unlikely(ip4 == NULL))
4657c9febf9SKonstantin Belousov 			return (0);
4667c9febf9SKonstantin Belousov 		wqe->eth.swp_inner_l3_offset = eth_hdr_len / 2;
4677c9febf9SKonstantin Belousov 		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
4687c9febf9SKonstantin Belousov 		ip_type = ip4->ip_p;
4697c9febf9SKonstantin Belousov 		ip_hlen = ip4->ip_hl << 2;
4707c9febf9SKonstantin Belousov 		eth_hdr_len += ip_hlen;
4717c9febf9SKonstantin Belousov 		break;
4727c9febf9SKonstantin Belousov 	case htons(ETHERTYPE_IPV6):
4737c9febf9SKonstantin Belousov 		ip6 = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
4747c9febf9SKonstantin Belousov 		    sizeof(*ip6));
4757c9febf9SKonstantin Belousov 		if (unlikely(ip6 == NULL))
4767c9febf9SKonstantin Belousov 			return (0);
4777c9febf9SKonstantin Belousov 		wqe->eth.swp_inner_l3_offset = eth_hdr_len / 2;
4787c9febf9SKonstantin Belousov 		wqe->eth.swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_TYPE;
4797c9febf9SKonstantin Belousov 		ip_type = ip6->ip6_nxt;
4807c9febf9SKonstantin Belousov 		eth_hdr_len += sizeof(*ip6);
4817c9febf9SKonstantin Belousov 		break;
4827c9febf9SKonstantin Belousov 	default:
4837c9febf9SKonstantin Belousov 		return (0);
4847c9febf9SKonstantin Belousov 	}
4857c9febf9SKonstantin Belousov 
4867c9febf9SKonstantin Belousov 	/*
4877c9febf9SKonstantin Belousov 	 * If the hardware is not computing inner UDP/TCP checksum,
4887c9febf9SKonstantin Belousov 	 * then skip inlining the inner UDP/TCP header:
4897c9febf9SKonstantin Belousov 	 */
4907c9febf9SKonstantin Belousov 	if (unlikely((cs_mask & MLX5_ETH_WQE_L4_INNER_CSUM) == 0))
4917c9febf9SKonstantin Belousov 		goto done;
4927c9febf9SKonstantin Belousov 
4937c9febf9SKonstantin Belousov 	switch (ip_type) {
4947c9febf9SKonstantin Belousov 	case IPPROTO_UDP:
4957c9febf9SKonstantin Belousov 		udp = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
4967c9febf9SKonstantin Belousov 		    sizeof(*udp));
4977c9febf9SKonstantin Belousov 		if (unlikely(udp == NULL))
4987c9febf9SKonstantin Belousov 			return (0);
4997c9febf9SKonstantin Belousov 		wqe->eth.swp_inner_l4_offset = (eth_hdr_len / 2);
5007c9febf9SKonstantin Belousov 		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
5017c9febf9SKonstantin Belousov 		wqe->eth.swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_TYPE;
5027c9febf9SKonstantin Belousov 		eth_hdr_len += sizeof(*udp);
5037c9febf9SKonstantin Belousov 		break;
5047c9febf9SKonstantin Belousov 	case IPPROTO_TCP:
5057c9febf9SKonstantin Belousov 		th = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
5067c9febf9SKonstantin Belousov 		    sizeof(*th));
5077c9febf9SKonstantin Belousov 		if (unlikely(th == NULL))
5087c9febf9SKonstantin Belousov 			return (0);
5097c9febf9SKonstantin Belousov 		wqe->eth.swp_inner_l4_offset = eth_hdr_len / 2;
5107c9febf9SKonstantin Belousov 		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
5117c9febf9SKonstantin Belousov 		tcp_hlen = th->th_off << 2;
5127c9febf9SKonstantin Belousov 		eth_hdr_len += tcp_hlen;
5137c9febf9SKonstantin Belousov 		break;
5147c9febf9SKonstantin Belousov 	default:
5157c9febf9SKonstantin Belousov 		return (0);
5167c9febf9SKonstantin Belousov 	}
5177c9febf9SKonstantin Belousov done:
5187c9febf9SKonstantin Belousov 	if (unlikely(pkt_hdr_len < eth_hdr_len))
5197c9febf9SKonstantin Belousov 		return (0);
5207c9febf9SKonstantin Belousov 
5217c9febf9SKonstantin Belousov 	/* Account for software inserted VLAN tag, if any. */
5227c9febf9SKonstantin Belousov 	if (unlikely(has_outer_vlan_tag)) {
5237c9febf9SKonstantin Belousov 		wqe->eth.swp_outer_l3_offset += ETHER_VLAN_ENCAP_LEN / 2;
5247c9febf9SKonstantin Belousov 		wqe->eth.swp_outer_l4_offset += ETHER_VLAN_ENCAP_LEN / 2;
5257c9febf9SKonstantin Belousov 		wqe->eth.swp_inner_l3_offset += ETHER_VLAN_ENCAP_LEN / 2;
5267c9febf9SKonstantin Belousov 		wqe->eth.swp_inner_l4_offset += ETHER_VLAN_ENCAP_LEN / 2;
5277c9febf9SKonstantin Belousov 	}
5287c9febf9SKonstantin Belousov 
5297c9febf9SKonstantin Belousov 	/*
5307c9febf9SKonstantin Belousov 	 * When inner checksums are set, outer L4 checksum flag must
5317c9febf9SKonstantin Belousov 	 * be disabled.
5327c9febf9SKonstantin Belousov 	 */
5337c9febf9SKonstantin Belousov 	if (wqe->eth.cs_flags & (MLX5_ETH_WQE_L3_INNER_CSUM |
5347c9febf9SKonstantin Belousov 	    MLX5_ETH_WQE_L4_INNER_CSUM))
5357c9febf9SKonstantin Belousov 		wqe->eth.cs_flags &= ~MLX5_ETH_WQE_L4_CSUM;
5367c9febf9SKonstantin Belousov 
5377c9febf9SKonstantin Belousov 	return (eth_hdr_len);
5387c9febf9SKonstantin Belousov }
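/*
 * Editorial note with a worked example: the software parser (SWP)
 * offsets filled in above are expressed in 2-byte units, hence the
 * "eth_hdr_len / 2" assignments. For an untagged outer IPv4/UDP/VXLAN
 * encapsulation with a 20-byte outer IP header:
 *
 *	swp_outer_l3_offset = 14 / 2 = 7;	  // outer IP header
 *	swp_outer_l4_offset = (14 + 20) / 2 = 17; // outer UDP header
 */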
5397c9febf9SKonstantin Belousov 
5407272f9cdSHans Petter Selasky struct mlx5_wqe_dump_seg {
5417272f9cdSHans Petter Selasky 	struct mlx5_wqe_ctrl_seg ctrl;
5427272f9cdSHans Petter Selasky 	struct mlx5_wqe_data_seg data;
5437272f9cdSHans Petter Selasky } __aligned(MLX5_SEND_WQE_BB);
5447272f9cdSHans Petter Selasky 
5457272f9cdSHans Petter Selasky CTASSERT(DIV_ROUND_UP(2, MLX5_SEND_WQEBB_NUM_DS) == 1);
5467272f9cdSHans Petter Selasky 
5477272f9cdSHans Petter Selasky int
5487272f9cdSHans Petter Selasky mlx5e_sq_dump_xmit(struct mlx5e_sq *sq, struct mlx5e_xmit_args *parg, struct mbuf **mbp)
5497272f9cdSHans Petter Selasky {
5507272f9cdSHans Petter Selasky 	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
5517272f9cdSHans Petter Selasky 	struct mlx5_wqe_dump_seg *wqe;
5527272f9cdSHans Petter Selasky 	struct mlx5_wqe_dump_seg *wqe_last;
5537272f9cdSHans Petter Selasky 	int nsegs;
5547272f9cdSHans Petter Selasky 	int xsegs;
5557272f9cdSHans Petter Selasky 	u32 off;
5567272f9cdSHans Petter Selasky 	u32 msb;
5577272f9cdSHans Petter Selasky 	int err;
5587272f9cdSHans Petter Selasky 	int x;
5597272f9cdSHans Petter Selasky 	struct mbuf *mb;
5607272f9cdSHans Petter Selasky 	const u32 ds_cnt = 2;
5617272f9cdSHans Petter Selasky 	u16 pi;
5627272f9cdSHans Petter Selasky 	const u8 opcode = MLX5_OPCODE_DUMP;
5637272f9cdSHans Petter Selasky 
5647272f9cdSHans Petter Selasky 	/* get pointer to mbuf */
5657272f9cdSHans Petter Selasky 	mb = *mbp;
5667272f9cdSHans Petter Selasky 
5677272f9cdSHans Petter Selasky 	/* get producer index */
5687272f9cdSHans Petter Selasky 	pi = sq->pc & sq->wq.sz_m1;
5697272f9cdSHans Petter Selasky 
5707272f9cdSHans Petter Selasky 	sq->mbuf[pi].num_bytes = mb->m_pkthdr.len;
5717272f9cdSHans Petter Selasky 	sq->mbuf[pi].num_wqebbs = 0;
5727272f9cdSHans Petter Selasky 
5737272f9cdSHans Petter Selasky 	/* check number of segments in mbuf */
5747272f9cdSHans Petter Selasky 	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
5757272f9cdSHans Petter Selasky 	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
5767272f9cdSHans Petter Selasky 	if (err == EFBIG) {
5777272f9cdSHans Petter Selasky 		/* update statistics */
5787272f9cdSHans Petter Selasky 		sq->stats.defragged++;
5797272f9cdSHans Petter Selasky 		/* too many mbuf fragments */
5807272f9cdSHans Petter Selasky 		mb = m_defrag(*mbp, M_NOWAIT);
5817272f9cdSHans Petter Selasky 		if (mb == NULL) {
5827272f9cdSHans Petter Selasky 			mb = *mbp;
5837272f9cdSHans Petter Selasky 			goto tx_drop;
5847272f9cdSHans Petter Selasky 		}
5857272f9cdSHans Petter Selasky 		/* try again */
5867272f9cdSHans Petter Selasky 		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
5877272f9cdSHans Petter Selasky 		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
5887272f9cdSHans Petter Selasky 	}
5897272f9cdSHans Petter Selasky 
5907272f9cdSHans Petter Selasky 	if (err != 0)
5917272f9cdSHans Petter Selasky 		goto tx_drop;
5927272f9cdSHans Petter Selasky 
5937272f9cdSHans Petter Selasky 	/* make sure all mbuf data, if any, is visible to the bus */
5947272f9cdSHans Petter Selasky 	bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
5957272f9cdSHans Petter Selasky 	    BUS_DMASYNC_PREWRITE);
5967272f9cdSHans Petter Selasky 
5977272f9cdSHans Petter Selasky 	/* compute number of real DUMP segments */
5987272f9cdSHans Petter Selasky 	msb = sq->priv->params_ethtool.hw_mtu_msb;
5997272f9cdSHans Petter Selasky 	for (x = xsegs = 0; x != nsegs; x++)
6007272f9cdSHans Petter Selasky 		xsegs += howmany((u32)segs[x].ds_len, msb);
6017272f9cdSHans Petter Selasky 
6027272f9cdSHans Petter Selasky 	/* check if there are no segments */
6037272f9cdSHans Petter Selasky 	if (unlikely(xsegs == 0)) {
6047272f9cdSHans Petter Selasky 		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
6057272f9cdSHans Petter Selasky 		m_freem(mb);
6067272f9cdSHans Petter Selasky 		*mbp = NULL;	/* safety clear */
6077272f9cdSHans Petter Selasky 		return (0);
6087272f9cdSHans Petter Selasky 	}
6097272f9cdSHans Petter Selasky 
6107272f9cdSHans Petter Selasky 	/* return ENOBUFS if the queue is full */
6117272f9cdSHans Petter Selasky 	if (unlikely(!mlx5e_sq_has_room_for(sq, xsegs))) {
612f5049490SHans Petter Selasky 		sq->stats.enobuf++;
6137272f9cdSHans Petter Selasky 		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
6147272f9cdSHans Petter Selasky 		m_freem(mb);
6157272f9cdSHans Petter Selasky 		*mbp = NULL;	/* safety clear */
6167272f9cdSHans Petter Selasky 		return (ENOBUFS);
6177272f9cdSHans Petter Selasky 	}
6187272f9cdSHans Petter Selasky 
6197272f9cdSHans Petter Selasky 	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
6207272f9cdSHans Petter Selasky 	wqe_last = mlx5_wq_cyc_get_wqe(&sq->wq, sq->wq.sz_m1);
6217272f9cdSHans Petter Selasky 
6227272f9cdSHans Petter Selasky 	for (x = 0; x != nsegs; x++) {
6237272f9cdSHans Petter Selasky 		for (off = 0; off < segs[x].ds_len; off += msb) {
6247272f9cdSHans Petter Selasky 			u32 len = segs[x].ds_len - off;
6257272f9cdSHans Petter Selasky 
6267272f9cdSHans Petter Selasky 			/* limit length */
6277272f9cdSHans Petter Selasky 			if (likely(len > msb))
6287272f9cdSHans Petter Selasky 				len = msb;
6297272f9cdSHans Petter Selasky 
6307272f9cdSHans Petter Selasky 			memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));
6317272f9cdSHans Petter Selasky 
6327272f9cdSHans Petter Selasky 			/* fill control segment */
6337272f9cdSHans Petter Selasky 			wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
6347272f9cdSHans Petter Selasky 			wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
6357272f9cdSHans Petter Selasky 			wqe->ctrl.imm = cpu_to_be32(parg->tisn << 8);
6367272f9cdSHans Petter Selasky 
6377272f9cdSHans Petter Selasky 			/* fill data segment */
6387272f9cdSHans Petter Selasky 			wqe->data.addr = cpu_to_be64((uint64_t)segs[x].ds_addr + off);
6397272f9cdSHans Petter Selasky 			wqe->data.lkey = sq->mkey_be;
6407272f9cdSHans Petter Selasky 			wqe->data.byte_count = cpu_to_be32(len);
6417272f9cdSHans Petter Selasky 
6427272f9cdSHans Petter Selasky 			/* advance to next building block */
6437272f9cdSHans Petter Selasky 			if (unlikely(wqe == wqe_last))
6447272f9cdSHans Petter Selasky 				wqe = mlx5_wq_cyc_get_wqe(&sq->wq, 0);
6457272f9cdSHans Petter Selasky 			else
6467272f9cdSHans Petter Selasky 				wqe++;
6477272f9cdSHans Petter Selasky 
6487272f9cdSHans Petter Selasky 			sq->mbuf[pi].num_wqebbs++;
6497272f9cdSHans Petter Selasky 			sq->pc++;
6507272f9cdSHans Petter Selasky 		}
6517272f9cdSHans Petter Selasky 	}
6527272f9cdSHans Petter Selasky 
6537272f9cdSHans Petter Selasky 	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
6547272f9cdSHans Petter Selasky 	wqe_last = mlx5_wq_cyc_get_wqe(&sq->wq, (sq->pc - 1) & sq->wq.sz_m1);
6557272f9cdSHans Petter Selasky 
6567272f9cdSHans Petter Selasky 	/* put in place data fence */
6577272f9cdSHans Petter Selasky 	wqe->ctrl.fm_ce_se |= MLX5_FENCE_MODE_INITIATOR_SMALL;
6587272f9cdSHans Petter Selasky 
6597272f9cdSHans Petter Selasky 	/* check if we should generate a completion event */
6607272f9cdSHans Petter Selasky 	if (mlx5e_do_send_cqe_inline(sq))
6617272f9cdSHans Petter Selasky 		wqe_last->ctrl.fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
6627272f9cdSHans Petter Selasky 
6637272f9cdSHans Petter Selasky 	/* copy data for doorbell */
6647272f9cdSHans Petter Selasky 	memcpy(sq->doorbell.d32, wqe_last, sizeof(sq->doorbell.d32));
6657272f9cdSHans Petter Selasky 
6667272f9cdSHans Petter Selasky 	/* store pointer to mbuf */
6677272f9cdSHans Petter Selasky 	sq->mbuf[pi].mbuf = mb;
668ebdb7006SHans Petter Selasky 	sq->mbuf[pi].mst = m_snd_tag_ref(parg->mst);
6697272f9cdSHans Petter Selasky 
6707272f9cdSHans Petter Selasky 	/* count all traffic going out */
6717272f9cdSHans Petter Selasky 	sq->stats.packets++;
6727272f9cdSHans Petter Selasky 	sq->stats.bytes += sq->mbuf[pi].num_bytes;
6737272f9cdSHans Petter Selasky 
6747272f9cdSHans Petter Selasky 	*mbp = NULL;	/* safety clear */
6757272f9cdSHans Petter Selasky 	return (0);
6767272f9cdSHans Petter Selasky 
6777272f9cdSHans Petter Selasky tx_drop:
6787272f9cdSHans Petter Selasky 	sq->stats.dropped++;
6797272f9cdSHans Petter Selasky 	*mbp = NULL;
6807272f9cdSHans Petter Selasky 	m_freem(mb);
6817272f9cdSHans Petter Selasky 	return err;
6827272f9cdSHans Petter Selasky }
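/*
 * Editorial worked example: the DUMP path above splits every DMA
 * segment into chunks of at most hw_mtu_msb bytes, one DUMP WQE per
 * chunk. A single 9000-byte segment with hw_mtu_msb == 4096 yields
 *
 *	xsegs = howmany(9000, 4096) = 3
 *
 * DUMP WQEs carrying 4096, 4096 and 808 bytes respectively.
 */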
6837272f9cdSHans Petter Selasky 
6847272f9cdSHans Petter Selasky int
685dc7e38acSHans Petter Selasky mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
686dc7e38acSHans Petter Selasky {
687dc7e38acSHans Petter Selasky 	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
6887272f9cdSHans Petter Selasky 	struct mlx5e_xmit_args args = {};
689dc7e38acSHans Petter Selasky 	struct mlx5_wqe_data_seg *dseg;
690dc7e38acSHans Petter Selasky 	struct mlx5e_tx_wqe *wqe;
6915dc00f00SJustin Hibbits 	if_t ifp;
692dc7e38acSHans Petter Selasky 	int nsegs;
693dc7e38acSHans Petter Selasky 	int err;
694dc7e38acSHans Petter Selasky 	int x;
6957272f9cdSHans Petter Selasky 	struct mbuf *mb;
696dc7e38acSHans Petter Selasky 	u16 ds_cnt;
697dc7e38acSHans Petter Selasky 	u16 pi;
698dc7e38acSHans Petter Selasky 	u8 opcode;
699dc7e38acSHans Petter Selasky 
7007272f9cdSHans Petter Selasky #ifdef KERN_TLS
7017272f9cdSHans Petter Selasky top:
7027272f9cdSHans Petter Selasky #endif
703d51ced5fSSlava Shwartsman 	/* Return ENOBUFS if the queue is full */
704f5049490SHans Petter Selasky 	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
705f5049490SHans Petter Selasky 		sq->stats.enobuf++;
706dc7e38acSHans Petter Selasky 		return (ENOBUFS);
707f5049490SHans Petter Selasky 	}
708dc7e38acSHans Petter Selasky 
709dc7e38acSHans Petter Selasky 	/* Align SQ edge with NOPs to avoid WQE wrap around */
710dc7e38acSHans Petter Selasky 	pi = ((~sq->pc) & sq->wq.sz_m1);
711dc7e38acSHans Petter Selasky 	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
712bb3853c6SHans Petter Selasky 		/* Send one multi NOP message instead of many */
713af89c4afSHans Petter Selasky 		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
714dc7e38acSHans Petter Selasky 		pi = ((~sq->pc) & sq->wq.sz_m1);
715f5049490SHans Petter Selasky 		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
716f5049490SHans Petter Selasky 			sq->stats.enobuf++;
717dc7e38acSHans Petter Selasky 			return (ENOMEM);
718dc7e38acSHans Petter Selasky 		}
719f5049490SHans Petter Selasky 	}
720dc7e38acSHans Petter Selasky 
7217272f9cdSHans Petter Selasky #ifdef KERN_TLS
7227272f9cdSHans Petter Selasky 	/* Special handling for TLS packets, if any */
7237272f9cdSHans Petter Selasky 	switch (mlx5e_sq_tls_xmit(sq, &args, mbp)) {
7247272f9cdSHans Petter Selasky 	case MLX5E_TLS_LOOP:
7257272f9cdSHans Petter Selasky 		goto top;
7267272f9cdSHans Petter Selasky 	case MLX5E_TLS_FAILURE:
7277272f9cdSHans Petter Selasky 		mb = *mbp;
7287272f9cdSHans Petter Selasky 		err = ENOMEM;
7297272f9cdSHans Petter Selasky 		goto tx_drop;
7307272f9cdSHans Petter Selasky 	case MLX5E_TLS_DEFERRED:
7317272f9cdSHans Petter Selasky 		return (0);
7327272f9cdSHans Petter Selasky 	case MLX5E_TLS_CONTINUE:
7337272f9cdSHans Petter Selasky 	default:
7347272f9cdSHans Petter Selasky 		break;
7357272f9cdSHans Petter Selasky 	}
7367272f9cdSHans Petter Selasky #endif
7377272f9cdSHans Petter Selasky 
738dc7e38acSHans Petter Selasky 	/* Setup local variables */
739dc7e38acSHans Petter Selasky 	pi = sq->pc & sq->wq.sz_m1;
740dc7e38acSHans Petter Selasky 	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
74198626886SHans Petter Selasky 	ifp = sq->ifp;
742dc7e38acSHans Petter Selasky 
743dc7e38acSHans Petter Selasky 	memset(wqe, 0, sizeof(*wqe));
744dc7e38acSHans Petter Selasky 
7457272f9cdSHans Petter Selasky 	/* get pointer to mbuf */
7467272f9cdSHans Petter Selasky 	mb = *mbp;
7477272f9cdSHans Petter Selasky 
748*e23731dbSKonstantin Belousov 	mlx5e_accel_ipsec_handle_tx(mb, wqe);
749*e23731dbSKonstantin Belousov 
750bb3853c6SHans Petter Selasky 	/* Send a copy of the frame to the BPF listener, if any */
7512439ae94SZhenlei Huang 	if (ifp != NULL)
752dc7e38acSHans Petter Selasky 		ETHER_BPF_MTAP(ifp, mb);
753dc7e38acSHans Petter Selasky 
754dc7e38acSHans Petter Selasky 	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
755dc7e38acSHans Petter Selasky 		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
756dc7e38acSHans Petter Selasky 	}
757dc7e38acSHans Petter Selasky 	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
758dc7e38acSHans Petter Selasky 		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
759dc7e38acSHans Petter Selasky 	}
760dc7e38acSHans Petter Selasky 	if (wqe->eth.cs_flags == 0) {
761dc7e38acSHans Petter Selasky 		sq->stats.csum_offload_none++;
762dc7e38acSHans Petter Selasky 	}
763dc7e38acSHans Petter Selasky 	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
764dc7e38acSHans Petter Selasky 		u32 payload_len;
765dc7e38acSHans Petter Selasky 		u32 mss = mb->m_pkthdr.tso_segsz;
766dc7e38acSHans Petter Selasky 		u32 num_pkts;
767dc7e38acSHans Petter Selasky 
768dc7e38acSHans Petter Selasky 		wqe->eth.mss = cpu_to_be16(mss);
769dc7e38acSHans Petter Selasky 		opcode = MLX5_OPCODE_LSO;
7707272f9cdSHans Petter Selasky 		if (args.ihs == 0)
7717272f9cdSHans Petter Selasky 			args.ihs = mlx5e_get_full_header_size(mb, NULL);
7727272f9cdSHans Petter Selasky 		if (unlikely(args.ihs == 0)) {
7733e581cabSSlava Shwartsman 			err = EINVAL;
7743e581cabSSlava Shwartsman 			goto tx_drop;
7753e581cabSSlava Shwartsman 		}
7767272f9cdSHans Petter Selasky 		payload_len = mb->m_pkthdr.len - args.ihs;
777dc7e38acSHans Petter Selasky 		if (payload_len == 0)
778dc7e38acSHans Petter Selasky 			num_pkts = 1;
779dc7e38acSHans Petter Selasky 		else
780dc7e38acSHans Petter Selasky 			num_pkts = DIV_ROUND_UP(payload_len, mss);
7817272f9cdSHans Petter Selasky 		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * args.ihs);
782dc7e38acSHans Petter Selasky 
7837c9febf9SKonstantin Belousov 
784dc7e38acSHans Petter Selasky 		sq->stats.tso_packets++;
785dc7e38acSHans Petter Selasky 		sq->stats.tso_bytes += payload_len;
7867c9febf9SKonstantin Belousov 	} else if (mb->m_pkthdr.csum_flags & CSUM_ENCAP_VXLAN) {
7877c9febf9SKonstantin Belousov 		/* check for inner TCP TSO first */
7887c9febf9SKonstantin Belousov 		if (mb->m_pkthdr.csum_flags & (CSUM_INNER_IP_TSO |
7897c9febf9SKonstantin Belousov 		    CSUM_INNER_IP6_TSO)) {
7907c9febf9SKonstantin Belousov 			u32 payload_len;
7917c9febf9SKonstantin Belousov 			u32 mss = mb->m_pkthdr.tso_segsz;
7927c9febf9SKonstantin Belousov 			u32 num_pkts;
7937c9febf9SKonstantin Belousov 
7947c9febf9SKonstantin Belousov 			wqe->eth.mss = cpu_to_be16(mss);
7957c9febf9SKonstantin Belousov 			opcode = MLX5_OPCODE_LSO;
7967c9febf9SKonstantin Belousov 
7977c9febf9SKonstantin Belousov 			if (likely(args.ihs == 0)) {
7987c9febf9SKonstantin Belousov 				args.ihs = mlx5e_get_vxlan_header_size(mb, wqe,
7997c9febf9SKonstantin Belousov 				       MLX5_ETH_WQE_L3_INNER_CSUM |
8007c9febf9SKonstantin Belousov 				       MLX5_ETH_WQE_L4_INNER_CSUM |
8017c9febf9SKonstantin Belousov 				       MLX5_ETH_WQE_L4_CSUM |
8027c9febf9SKonstantin Belousov 				       MLX5_ETH_WQE_L3_CSUM,
8037c9febf9SKonstantin Belousov 				       opcode);
8047c9febf9SKonstantin Belousov 				if (unlikely(args.ihs == 0)) {
8057c9febf9SKonstantin Belousov 					err = EINVAL;
8067c9febf9SKonstantin Belousov 					goto tx_drop;
8077c9febf9SKonstantin Belousov 				}
8087c9febf9SKonstantin Belousov 			}
8097c9febf9SKonstantin Belousov 
8107c9febf9SKonstantin Belousov 			payload_len = mb->m_pkthdr.len - args.ihs;
8117c9febf9SKonstantin Belousov 			if (payload_len == 0)
8127c9febf9SKonstantin Belousov 				num_pkts = 1;
8137c9febf9SKonstantin Belousov 			else
8147c9febf9SKonstantin Belousov 				num_pkts = DIV_ROUND_UP(payload_len, mss);
8157c9febf9SKonstantin Belousov 			sq->mbuf[pi].num_bytes = payload_len +
8167c9febf9SKonstantin Belousov 			    num_pkts * args.ihs;
8177c9febf9SKonstantin Belousov 
8187c9febf9SKonstantin Belousov 			sq->stats.tso_packets++;
8197c9febf9SKonstantin Belousov 			sq->stats.tso_bytes += payload_len;
8207c9febf9SKonstantin Belousov 		} else {
8217c9febf9SKonstantin Belousov 			opcode = MLX5_OPCODE_SEND;
8227c9febf9SKonstantin Belousov 
8237c9febf9SKonstantin Belousov 			if (likely(args.ihs == 0)) {
8247c9febf9SKonstantin Belousov 				uint8_t cs_mask;
8257c9febf9SKonstantin Belousov 
8267c9febf9SKonstantin Belousov 				if (mb->m_pkthdr.csum_flags &
8271918b253SHans Petter Selasky 				    (CSUM_INNER_IP_TCP | CSUM_INNER_IP_UDP |
8281918b253SHans Petter Selasky 				     CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_UDP)) {
8297c9febf9SKonstantin Belousov 					cs_mask =
8307c9febf9SKonstantin Belousov 					    MLX5_ETH_WQE_L3_INNER_CSUM |
8317c9febf9SKonstantin Belousov 					    MLX5_ETH_WQE_L4_INNER_CSUM |
8327c9febf9SKonstantin Belousov 					    MLX5_ETH_WQE_L4_CSUM |
8337c9febf9SKonstantin Belousov 					    MLX5_ETH_WQE_L3_CSUM;
8347c9febf9SKonstantin Belousov 				} else if (mb->m_pkthdr.csum_flags & CSUM_INNER_IP) {
8357c9febf9SKonstantin Belousov 					cs_mask =
8367c9febf9SKonstantin Belousov 					    MLX5_ETH_WQE_L3_INNER_CSUM |
8377c9febf9SKonstantin Belousov 					    MLX5_ETH_WQE_L4_CSUM |
8387c9febf9SKonstantin Belousov 					    MLX5_ETH_WQE_L3_CSUM;
8397c9febf9SKonstantin Belousov 				} else {
8407c9febf9SKonstantin Belousov 					cs_mask =
8417c9febf9SKonstantin Belousov 					    MLX5_ETH_WQE_L4_CSUM |
8427c9febf9SKonstantin Belousov 					    MLX5_ETH_WQE_L3_CSUM;
8437c9febf9SKonstantin Belousov 				}
8447c9febf9SKonstantin Belousov 				args.ihs = mlx5e_get_vxlan_header_size(mb, wqe,
8457c9febf9SKonstantin Belousov 				    cs_mask, opcode);
8467c9febf9SKonstantin Belousov 				if (unlikely(args.ihs == 0)) {
8477c9febf9SKonstantin Belousov 					err = EINVAL;
8487c9febf9SKonstantin Belousov 					goto tx_drop;
8497c9febf9SKonstantin Belousov 				}
8507c9febf9SKonstantin Belousov 			}
8517c9febf9SKonstantin Belousov 
8527c9febf9SKonstantin Belousov 			sq->mbuf[pi].num_bytes = max_t (unsigned int,
8537c9febf9SKonstantin Belousov 			    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
8547c9febf9SKonstantin Belousov 		}
855dc7e38acSHans Petter Selasky 	} else {
856dc7e38acSHans Petter Selasky 		opcode = MLX5_OPCODE_SEND;
8573e581cabSSlava Shwartsman 
8587272f9cdSHans Petter Selasky 		if (args.ihs == 0) {
8593e581cabSSlava Shwartsman 			switch (sq->min_inline_mode) {
8603e581cabSSlava Shwartsman 			case MLX5_INLINE_MODE_IP:
8613e581cabSSlava Shwartsman 			case MLX5_INLINE_MODE_TCP_UDP:
8627272f9cdSHans Petter Selasky 				args.ihs = mlx5e_get_full_header_size(mb, NULL);
8637272f9cdSHans Petter Selasky 				if (unlikely(args.ihs == 0))
8647272f9cdSHans Petter Selasky 					args.ihs = mlx5e_get_l2_header_size(sq, mb);
8653e581cabSSlava Shwartsman 				break;
8663e581cabSSlava Shwartsman 			case MLX5_INLINE_MODE_L2:
8677272f9cdSHans Petter Selasky 				args.ihs = mlx5e_get_l2_header_size(sq, mb);
8683e581cabSSlava Shwartsman 				break;
8693e581cabSSlava Shwartsman 			case MLX5_INLINE_MODE_NONE:
8703e581cabSSlava Shwartsman 				/* FALLTHROUGH */
8713e581cabSSlava Shwartsman 			default:
8723e581cabSSlava Shwartsman 				if ((mb->m_flags & M_VLANTAG) != 0 &&
8733e581cabSSlava Shwartsman 				    (sq->min_insert_caps & MLX5E_INSERT_VLAN) != 0) {
8743e581cabSSlava Shwartsman 					/* inlining VLAN data is not required */
8753e581cabSSlava Shwartsman 					wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
8763e581cabSSlava Shwartsman 					wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
8777272f9cdSHans Petter Selasky 					args.ihs = 0;
8783e581cabSSlava Shwartsman 				} else if ((mb->m_flags & M_VLANTAG) == 0 &&
8793e581cabSSlava Shwartsman 				    (sq->min_insert_caps & MLX5E_INSERT_NON_VLAN) != 0) {
8803e581cabSSlava Shwartsman 					/* inlining non-VLAN data is not required */
8817272f9cdSHans Petter Selasky 					args.ihs = 0;
8823e581cabSSlava Shwartsman 				} else {
8833e581cabSSlava Shwartsman 					/* we are forced to inline the L2 header, if any */
8847272f9cdSHans Petter Selasky 					args.ihs = mlx5e_get_l2_header_size(sq, mb);
8853e581cabSSlava Shwartsman 				}
8863e581cabSSlava Shwartsman 				break;
8873e581cabSSlava Shwartsman 			}
8887272f9cdSHans Petter Selasky 		}
889dc7e38acSHans Petter Selasky 		sq->mbuf[pi].num_bytes = max_t (unsigned int,
890dc7e38acSHans Petter Selasky 		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
891dc7e38acSHans Petter Selasky 	}
8923e581cabSSlava Shwartsman 
8937272f9cdSHans Petter Selasky 	if (likely(args.ihs == 0)) {
8943e581cabSSlava Shwartsman 		/* nothing to inline */
8953e581cabSSlava Shwartsman 	} else if ((mb->m_flags & M_VLANTAG) != 0) {
8963e581cabSSlava Shwartsman 		struct ether_vlan_header *eh = (struct ether_vlan_header *)
8973e581cabSSlava Shwartsman 		    wqe->eth.inline_hdr_start;
898bb3853c6SHans Petter Selasky 
899bb3853c6SHans Petter Selasky 		/* Range checks */
900cd1442c0SHans Petter Selasky 		if (unlikely(args.ihs > (sq->max_inline - ETHER_VLAN_ENCAP_LEN))) {
9017c9febf9SKonstantin Belousov 			if (mb->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_ENCAP_VXLAN)) {
902cd1442c0SHans Petter Selasky 				err = EINVAL;
903cd1442c0SHans Petter Selasky 				goto tx_drop;
904cd1442c0SHans Petter Selasky 			}
905cd1442c0SHans Petter Selasky 			args.ihs = (sq->max_inline - ETHER_VLAN_ENCAP_LEN);
906cd1442c0SHans Petter Selasky 		} else if (unlikely(args.ihs < ETHER_HDR_LEN)) {
907dc7e38acSHans Petter Selasky 			err = EINVAL;
908dc7e38acSHans Petter Selasky 			goto tx_drop;
909dc7e38acSHans Petter Selasky 		}
910dc7e38acSHans Petter Selasky 		m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
911dc7e38acSHans Petter Selasky 		m_adj(mb, ETHER_HDR_LEN);
912bb3853c6SHans Petter Selasky 		/* Insert a 4-byte VLAN tag into the data stream */
913dc7e38acSHans Petter Selasky 		eh->evl_proto = eh->evl_encap_proto;
914dc7e38acSHans Petter Selasky 		eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
915dc7e38acSHans Petter Selasky 		eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
916bb3853c6SHans Petter Selasky 		/* Copy rest of header data, if any */
9177272f9cdSHans Petter Selasky 		m_copydata(mb, 0, args.ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
9187272f9cdSHans Petter Selasky 		m_adj(mb, args.ihs - ETHER_HDR_LEN);
919bb3853c6SHans Petter Selasky 		/* Extend header by 4 bytes */
9207272f9cdSHans Petter Selasky 		args.ihs += ETHER_VLAN_ENCAP_LEN;
9217272f9cdSHans Petter Selasky 		wqe->eth.inline_hdr_sz = cpu_to_be16(args.ihs);
922dc7e38acSHans Petter Selasky 	} else {
923cd1442c0SHans Petter Selasky 		/* check if inline header size is too big */
924cd1442c0SHans Petter Selasky 		if (unlikely(args.ihs > sq->max_inline)) {
9257c9febf9SKonstantin Belousov 			if (unlikely(mb->m_pkthdr.csum_flags & (CSUM_TSO |
9267c9febf9SKonstantin Belousov 			    CSUM_ENCAP_VXLAN))) {
927cd1442c0SHans Petter Selasky 				err = EINVAL;
928cd1442c0SHans Petter Selasky 				goto tx_drop;
929cd1442c0SHans Petter Selasky 			}
930cd1442c0SHans Petter Selasky 			args.ihs = sq->max_inline;
931cd1442c0SHans Petter Selasky 		}
9327272f9cdSHans Petter Selasky 		m_copydata(mb, 0, args.ihs, wqe->eth.inline_hdr_start);
9337272f9cdSHans Petter Selasky 		m_adj(mb, args.ihs);
9347272f9cdSHans Petter Selasky 		wqe->eth.inline_hdr_sz = cpu_to_be16(args.ihs);
93505399002SHans Petter Selasky 	}
936dc7e38acSHans Petter Selasky 
937dc7e38acSHans Petter Selasky 	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
9387272f9cdSHans Petter Selasky 	if (args.ihs > sizeof(wqe->eth.inline_hdr_start)) {
9397272f9cdSHans Petter Selasky 		ds_cnt += DIV_ROUND_UP(args.ihs - sizeof(wqe->eth.inline_hdr_start),
940dc7e38acSHans Petter Selasky 		    MLX5_SEND_WQE_DS);
941dc7e38acSHans Petter Selasky 	}
942dc7e38acSHans Petter Selasky 	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;
943dc7e38acSHans Petter Selasky 
944dc7e38acSHans Petter Selasky 	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
945dc7e38acSHans Petter Selasky 	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
946dc7e38acSHans Petter Selasky 	if (err == EFBIG) {
947dc7e38acSHans Petter Selasky 		/* Update statistics */
948dc7e38acSHans Petter Selasky 		sq->stats.defragged++;
949dc7e38acSHans Petter Selasky 		/* Too many mbuf fragments */
950dc7e38acSHans Petter Selasky 		mb = m_defrag(*mbp, M_NOWAIT);
951dc7e38acSHans Petter Selasky 		if (mb == NULL) {
952dc7e38acSHans Petter Selasky 			mb = *mbp;
953dc7e38acSHans Petter Selasky 			goto tx_drop;
954dc7e38acSHans Petter Selasky 		}
955dc7e38acSHans Petter Selasky 		/* Try again */
956dc7e38acSHans Petter Selasky 		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
957dc7e38acSHans Petter Selasky 		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
958dc7e38acSHans Petter Selasky 	}
959bb3853c6SHans Petter Selasky 	/* Catch errors */
960a2c320d7SHans Petter Selasky 	if (err != 0)
961dc7e38acSHans Petter Selasky 		goto tx_drop;
962dc7e38acSHans Petter Selasky 
9637272f9cdSHans Petter Selasky 	/* Make sure all mbuf data, if any, is visible to the bus */
9647d69d339SHans Petter Selasky 	if (nsegs != 0) {
9657d69d339SHans Petter Selasky 		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
9667d69d339SHans Petter Selasky 		    BUS_DMASYNC_PREWRITE);
9677d69d339SHans Petter Selasky 	} else {
9687d69d339SHans Petter Selasky 		/* All data was inlined, free the mbuf. */
9697d69d339SHans Petter Selasky 		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
9707d69d339SHans Petter Selasky 		m_freem(mb);
9717d69d339SHans Petter Selasky 		mb = NULL;
9727d69d339SHans Petter Selasky 	}
9737d69d339SHans Petter Selasky 
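	/* Fill in one data segment per DMA segment, skipping empty ones. */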
974dc7e38acSHans Petter Selasky 	for (x = 0; x != nsegs; x++) {
975dc7e38acSHans Petter Selasky 		if (segs[x].ds_len == 0)
976dc7e38acSHans Petter Selasky 			continue;
977dc7e38acSHans Petter Selasky 		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
978dc7e38acSHans Petter Selasky 		dseg->lkey = sq->mkey_be;
979dc7e38acSHans Petter Selasky 		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
980dc7e38acSHans Petter Selasky 		dseg++;
981dc7e38acSHans Petter Selasky 	}
9827d69d339SHans Petter Selasky 
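	/*
	 * Compute the final data segment count and fill in the control
	 * segment: opcode and producer counter, SQ number and segment
	 * count, and the TIS number, if any, in the immediate field.
	 * A completion (CQE) is requested only when
	 * mlx5e_do_send_cqe_inline() indicates one is due.
	 */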
983dc7e38acSHans Petter Selasky 	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));
984dc7e38acSHans Petter Selasky 
985dc7e38acSHans Petter Selasky 	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
986dc7e38acSHans Petter Selasky 	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
9877272f9cdSHans Petter Selasky 	wqe->ctrl.imm = cpu_to_be32(args.tisn << 8);
9887272f9cdSHans Petter Selasky 
9897272f9cdSHans Petter Selasky 	if (mlx5e_do_send_cqe_inline(sq))
990dc7e38acSHans Petter Selasky 		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
991376bcf63SHans Petter Selasky 	else
992376bcf63SHans Petter Selasky 		wqe->ctrl.fm_ce_se = 0;
993dc7e38acSHans Petter Selasky 
994af89c4afSHans Petter Selasky 	/* Copy data for doorbell */
995af89c4afSHans Petter Selasky 	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
996af89c4afSHans Petter Selasky 
997bb3853c6SHans Petter Selasky 	/* Store pointer to mbuf */
998dc7e38acSHans Petter Selasky 	sq->mbuf[pi].mbuf = mb;
999dc7e38acSHans Petter Selasky 	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
1000ebdb7006SHans Petter Selasky 	if (unlikely(args.mst != NULL))
1001ebdb7006SHans Petter Selasky 		sq->mbuf[pi].mst = m_snd_tag_ref(args.mst);
1002ebdb7006SHans Petter Selasky 	else
1003ebdb7006SHans Petter Selasky 		MPASS(sq->mbuf[pi].mst == NULL);
1004ebdb7006SHans Petter Selasky 
1005dc7e38acSHans Petter Selasky 	sq->pc += sq->mbuf[pi].num_wqebbs;
1006dc7e38acSHans Petter Selasky 
100701f02abfSSlava Shwartsman 	/* Count all traffic going out */
1008dc7e38acSHans Petter Selasky 	sq->stats.packets++;
100901f02abfSSlava Shwartsman 	sq->stats.bytes += sq->mbuf[pi].num_bytes;
101001f02abfSSlava Shwartsman 
1011a2c320d7SHans Petter Selasky 	*mbp = NULL;	/* safety clear */
1012dc7e38acSHans Petter Selasky 	return (0);
1013dc7e38acSHans Petter Selasky 
1014dc7e38acSHans Petter Selasky tx_drop:
1015dc7e38acSHans Petter Selasky 	sq->stats.dropped++;
1016dc7e38acSHans Petter Selasky 	*mbp = NULL;
1017dc7e38acSHans Petter Selasky 	m_freem(mb);
1018dc7e38acSHans Petter Selasky 	return (err);
1019dc7e38acSHans Petter Selasky }
1020dc7e38acSHans Petter Selasky 
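/*
 * Reclaim completed transmit WQEs: for each CQE, free the associated
 * mbufs, release any send tag references and advance the consumer
 * counter, until either the CQ is empty or the WQE budget is spent.
 */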
1021dc7e38acSHans Petter Selasky static void
1022dc7e38acSHans Petter Selasky mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
1023dc7e38acSHans Petter Selasky {
1024dc7e38acSHans Petter Selasky 	u16 sqcc;
1025dc7e38acSHans Petter Selasky 
1026dc7e38acSHans Petter Selasky 	/*
1027dc7e38acSHans Petter Selasky 	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
1028dc7e38acSHans Petter Selasky 	 * otherwise a cq overrun may occur
1029dc7e38acSHans Petter Selasky 	 */
1030dc7e38acSHans Petter Selasky 	sqcc = sq->cc;
1031dc7e38acSHans Petter Selasky 
1032376bcf63SHans Petter Selasky 	while (budget > 0) {
1033dc7e38acSHans Petter Selasky 		struct mlx5_cqe64 *cqe;
1034ebdb7006SHans Petter Selasky 		struct m_snd_tag *mst;
1035dc7e38acSHans Petter Selasky 		struct mbuf *mb;
10364f4739a7SHans Petter Selasky 		bool match;
10374f4739a7SHans Petter Selasky 		u16 sqcc_this;
10384f4739a7SHans Petter Selasky 		u16 delta;
1039376bcf63SHans Petter Selasky 		u16 x;
1040dc7e38acSHans Petter Selasky 		u16 ci;
1041dc7e38acSHans Petter Selasky 
1042dc7e38acSHans Petter Selasky 		cqe = mlx5e_get_cqe(&sq->cq);
1043dc7e38acSHans Petter Selasky 		if (!cqe)
1044dc7e38acSHans Petter Selasky 			break;
1045dc7e38acSHans Petter Selasky 
104690cc1c77SHans Petter Selasky 		mlx5_cqwq_pop(&sq->cq.wq);
104790cc1c77SHans Petter Selasky 
10484f4739a7SHans Petter Selasky 		/* check if the completion event indicates an error */
1049bc531a1fSHans Petter Selasky 		if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
1050bc531a1fSHans Petter Selasky 			mlx5e_dump_err_cqe(&sq->cq, sq->sqn, (const void *)cqe);
10514f4739a7SHans Petter Selasky 			sq->stats.cqe_err++;
1052bc531a1fSHans Petter Selasky 		}
10534f4739a7SHans Petter Selasky 
10544f4739a7SHans Petter Selasky 		/* setup local variables */
10554f4739a7SHans Petter Selasky 		sqcc_this = be16toh(cqe->wqe_counter);
10564f4739a7SHans Petter Selasky 		match = false;
10574f4739a7SHans Petter Selasky 
1058376bcf63SHans Petter Selasky 		/* update budget according to the event factor */
1059376bcf63SHans Petter Selasky 		budget -= sq->cev_factor;
1060376bcf63SHans Petter Selasky 
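		/*
		 * Walk the send queue entries covered by this CQE. The
		 * WQE counter reported by the CQE identifies the newest
		 * completed WQE; free entries until the consumer counter
		 * reaches it, giving up after cev_factor entries if no
		 * match is found.
		 */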
10614f4739a7SHans Petter Selasky 		for (x = 0;; x++) {
10624f4739a7SHans Petter Selasky 			if (unlikely(match != false)) {
10634f4739a7SHans Petter Selasky 				break;
10644f4739a7SHans Petter Selasky 			} else if (unlikely(x == sq->cev_factor)) {
10654f4739a7SHans Petter Selasky 				/* WQE counter match not found */
10664f4739a7SHans Petter Selasky 				sq->stats.cqe_err++;
10674f4739a7SHans Petter Selasky 				break;
10684f4739a7SHans Petter Selasky 			}
1069dc7e38acSHans Petter Selasky 			ci = sqcc & sq->wq.sz_m1;
10704f4739a7SHans Petter Selasky 			delta = sqcc_this - sqcc;
10714f4739a7SHans Petter Selasky 			match = (delta < sq->mbuf[ci].num_wqebbs);
1072dc7e38acSHans Petter Selasky 			mb = sq->mbuf[ci].mbuf;
10738b1b42c1SHans Petter Selasky 			sq->mbuf[ci].mbuf = NULL;
1074ebdb7006SHans Petter Selasky 			mst = sq->mbuf[ci].mst;
1075ebdb7006SHans Petter Selasky 			sq->mbuf[ci].mst = NULL;
1076dc7e38acSHans Petter Selasky 
1077ebdb7006SHans Petter Selasky 			if (unlikely(mb == NULL)) {
10784f4739a7SHans Petter Selasky 				if (unlikely(sq->mbuf[ci].num_bytes == 0))
1079dc7e38acSHans Petter Selasky 					sq->stats.nop++;
1080dc7e38acSHans Petter Selasky 			} else {
1081dc7e38acSHans Petter Selasky 				bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
1082dc7e38acSHans Petter Selasky 				    BUS_DMASYNC_POSTWRITE);
1083dc7e38acSHans Petter Selasky 				bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);
1084dc7e38acSHans Petter Selasky 
1085bb3853c6SHans Petter Selasky 				/* Free transmitted mbuf */
1086dc7e38acSHans Petter Selasky 				m_freem(mb);
1087dc7e38acSHans Petter Selasky 			}
1088ebdb7006SHans Petter Selasky 
1089ebdb7006SHans Petter Selasky 			if (unlikely(mst != NULL))
1090ebdb7006SHans Petter Selasky 				m_snd_tag_rele(mst);
1091ebdb7006SHans Petter Selasky 
1092dc7e38acSHans Petter Selasky 			sqcc += sq->mbuf[ci].num_wqebbs;
1093dc7e38acSHans Petter Selasky 		}
1094376bcf63SHans Petter Selasky 	}
1095dc7e38acSHans Petter Selasky 
1096dc7e38acSHans Petter Selasky 	mlx5_cqwq_update_db_record(&sq->cq.wq);
1097dc7e38acSHans Petter Selasky 
1098bb3853c6SHans Petter Selasky 	/* Ensure cq space is freed before enabling more cqes */
10999cb83c46SHans Petter Selasky 	atomic_thread_fence_rel();
1100dc7e38acSHans Petter Selasky 
1101dc7e38acSHans Petter Selasky 	sq->cc = sqcc;
1102dc7e38acSHans Petter Selasky }
1103dc7e38acSHans Petter Selasky 
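/*
 * Transmit a single mbuf on the given send queue; called with the send
 * queue lock held. Any pending doorbell record is written even if the
 * transmit attempt fails, and the completion event timer is started
 * when completion events are being coalesced.
 */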
1104dc7e38acSHans Petter Selasky static int
11055dc00f00SJustin Hibbits mlx5e_xmit_locked(if_t ifp, struct mlx5e_sq *sq, struct mbuf *mb)
1106dc7e38acSHans Petter Selasky {
1107b98ba640SHans Petter Selasky 	int err = 0;
1108b98ba640SHans Petter Selasky 
11095dc00f00SJustin Hibbits 	if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
11103230c29dSSlava Shwartsman 	    READ_ONCE(sq->running) == 0)) {
1111b98ba640SHans Petter Selasky 		m_freem(mb);
1112b98ba640SHans Petter Selasky 		return (ENETDOWN);
1113b98ba640SHans Petter Selasky 	}
1114b98ba640SHans Petter Selasky 
1115b98ba640SHans Petter Selasky 	/* Do transmit */
1116b98ba640SHans Petter Selasky 	if (mlx5e_sq_xmit(sq, &mb) != 0) {
1117b98ba640SHans Petter Selasky 		/* NOTE: m_freem() is NULL safe */
1118b98ba640SHans Petter Selasky 		m_freem(mb);
1119b98ba640SHans Petter Selasky 		err = ENOBUFS;
1120b98ba640SHans Petter Selasky 	}
1121b98ba640SHans Petter Selasky 
11222d5e5a0dSHans Petter Selasky 	/* Write the doorbell record, if any. */
11232d5e5a0dSHans Petter Selasky 	mlx5e_tx_notify_hw(sq, false);
1124b98ba640SHans Petter Selasky 
1125b98ba640SHans Petter Selasky 	/*
1126b98ba640SHans Petter Selasky 	 * Check if we need to start the event timer which flushes the
1127b98ba640SHans Petter Selasky 	 * transmit ring on timeout:
1128b98ba640SHans Petter Selasky 	 */
1129b98ba640SHans Petter Selasky 	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
1130b98ba640SHans Petter Selasky 	    sq->cev_factor != 1)) {
1131b98ba640SHans Petter Selasky 		/* start the timer */
1132b98ba640SHans Petter Selasky 		mlx5e_sq_cev_timeout(sq);
1133b98ba640SHans Petter Selasky 	} else {
1134b98ba640SHans Petter Selasky 		/* don't send NOPs yet */
1135b98ba640SHans Petter Selasky 		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
1136b98ba640SHans Petter Selasky 	}
1137b98ba640SHans Petter Selasky 	return (err);
1138b98ba640SHans Petter Selasky }
1139b98ba640SHans Petter Selasky 
1140dc7e38acSHans Petter Selasky int
11415dc00f00SJustin Hibbits mlx5e_xmit(if_t ifp, struct mbuf *mb)
1142dc7e38acSHans Petter Selasky {
1143dc7e38acSHans Petter Selasky 	struct mlx5e_sq *sq;
1144dc7e38acSHans Petter Selasky 	int ret;
1145dc7e38acSHans Petter Selasky 
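	/*
	 * If the mbuf carries a send tag, prefer the send queue bound to
	 * that tag; otherwise, or when no such queue is available, fall
	 * back to regular queue selection.
	 */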
1146fb3bc596SJohn Baldwin 	if (mb->m_pkthdr.csum_flags & CSUM_SND_TAG) {
1147fb3bc596SJohn Baldwin 		MPASS(mb->m_pkthdr.snd_tag->ifp == ifp);
1148cc971b22SSlava Shwartsman 		sq = mlx5e_select_queue_by_send_tag(ifp, mb);
1149dc7e38acSHans Petter Selasky 		if (unlikely(sq == NULL)) {
1150cc971b22SSlava Shwartsman 			goto select_queue;
1151cc971b22SSlava Shwartsman 		}
1152cc971b22SSlava Shwartsman 	} else {
1153cc971b22SSlava Shwartsman select_queue:
1154cc971b22SSlava Shwartsman 		sq = mlx5e_select_queue(ifp, mb);
1155cc971b22SSlava Shwartsman 		if (unlikely(sq == NULL)) {
115638535d6cSHans Petter Selasky 			/* Free mbuf */
115738535d6cSHans Petter Selasky 			m_freem(mb);
115838535d6cSHans Petter Selasky 
115938535d6cSHans Petter Selasky 			/* Invalid send queue */
1160dc7e38acSHans Petter Selasky 			return (ENXIO);
1161dc7e38acSHans Petter Selasky 		}
1162cc971b22SSlava Shwartsman 	}
1163b98ba640SHans Petter Selasky 
1164b98ba640SHans Petter Selasky 	mtx_lock(&sq->lock);
1165dc7e38acSHans Petter Selasky 	ret = mlx5e_xmit_locked(ifp, sq, mb);
1166dc7e38acSHans Petter Selasky 	mtx_unlock(&sq->lock);
1167dc7e38acSHans Petter Selasky 
1168dc7e38acSHans Petter Selasky 	return (ret);
1169dc7e38acSHans Petter Selasky }
1170dc7e38acSHans Petter Selasky 
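/*
 * Send queue completion event handler: drain completed WQEs from the
 * CQ and re-arm it for the next completion event.
 */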
1171dc7e38acSHans Petter Selasky void
1172f34f0a65SHans Petter Selasky mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe __unused)
1173dc7e38acSHans Petter Selasky {
1174dc7e38acSHans Petter Selasky 	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);
1175dc7e38acSHans Petter Selasky 
1176dc7e38acSHans Petter Selasky 	mtx_lock(&sq->comp_lock);
1177dc7e38acSHans Petter Selasky 	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
1178e5d6b589SHans Petter Selasky 	mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
1179dc7e38acSHans Petter Selasky 	mtx_unlock(&sq->comp_lock);
1180dc7e38acSHans Petter Selasky }