xref: /freebsd/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c (revision 6683132d54bd6d589889e43dabdc53d35e38a028)
/*-
 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "en.h"
#include <machine/atomic.h>

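/*
 * Completion event moderation: request a completion event (CQE) only
 * for every "cev_factor"-th work request instead of for each one.
 * The completion path relies on this fixed factor when it walks the
 * send queue (see mlx5e_poll_tx_cq() below).
 */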
static inline bool
mlx5e_do_send_cqe(struct mlx5e_sq *sq)
{
	sq->cev_counter++;
	/* interleave the CQEs */
	if (sq->cev_counter >= sq->cev_factor) {
		sq->cev_counter = 0;
		return (true);
	}
	return (false);
}

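/*
 * Post a NOP work request covering "ds_cnt" data segments. Within
 * this file it is used to pad the send queue up to the ring edge so
 * that a real WQE never wraps around (see mlx5e_sq_xmit()).
 */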
void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}

#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

static void
mlx5e_hash_init(void *arg)
{
	mlx5e_hash_value = m_ether_tcpip_hash_init();
}

/* Make the kernel call mlx5e_hash_init() after the random subsystem has finished initializing */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
#endif

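/*
 * Resolve the send queue from the send tag attached to the mbuf.
 * Rate-limit tags map to the SQ of their rate-limit channel;
 * unlimited tags map to the first SQ of the tagged channel. NULL is
 * returned when the tag type is unknown or the selected queue is not
 * running.
 */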
static struct mlx5e_sq *
mlx5e_select_queue_by_send_tag(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_snd_tag *ptag;
	struct mlx5e_sq *sq;

	/* get pointer to the send queue */
	ptag = container_of(mb->m_pkthdr.snd_tag,
	    struct mlx5e_snd_tag, m_snd_tag);

	switch (ptag->type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_RATE_LIMIT:
		sq = container_of(ptag,
		    struct mlx5e_rl_channel, tag)->sq;
		break;
#endif
	case IF_SND_TAG_TYPE_UNLIMITED:
		sq = &container_of(ptag,
		    struct mlx5e_channel, tag)->sq[0];
		KASSERT(({
		    struct mlx5e_priv *priv = ifp->if_softc;
		    priv->channel_refs > 0; }),
		    ("mlx5e_select_queue_by_send_tag: Channel refs are zero for unlimited tag"));
		break;
	default:
		sq = NULL;
		break;
	}

	/* check if the send queue is valid and running */
	if (sq != NULL && READ_ONCE(sq->running) != 0)
		return (sq);

	return (NULL);
}

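/*
 * Default send queue selection. The traffic class (TC) is taken from
 * the VLAN priority bits when present. The channel comes from the
 * mbuf flow ID (via the RSS bucket when RSS is compiled in); mbufs
 * without a flow ID are hashed in software on their L3/L4 headers.
 * NULL is returned when the selected queue is not running.
 */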
static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	struct mlx5e_sq *sq;
	u32 ch;
	u32 tc;

	/* obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
	} else {
		tc = priv->default_vlan_prio;
	}

	ch = priv->params.num_channels;

	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(mb->m_pkthdr.flowid,
		    M_HASHTYPE_GET(mb), &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (mb->m_pkthdr.flowid % 128) % ch;
	} else {
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
		/*
		 * m_ether_tcpip_hash() is not present in the stable
		 * branches, so just put unhashed mbufs on queue 0
		 */
		ch = 0;
#endif
	}

	/* check if the send queue is running */
	sq = &priv->channel[ch].sq[tc];
	if (likely(READ_ONCE(sq->running) != 0))
		return (sq);
	return (NULL);
}

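/*
 * Compute the number of bytes of L2 header to inline into the WQE.
 * For IPv4/IPv6 frames, four extra bytes are included so that the
 * TOS/traffic class field is always inlined; otherwise, or when the
 * headers cannot be parsed, up to "sq->max_inline" bytes are inlined.
 */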
static inline u16
mlx5e_get_l2_header_size(struct mlx5e_sq *sq, struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	uint16_t eth_type;
	int min_inline;

	eh = mtod(mb, struct ether_vlan_header *);
	if (unlikely(mb->m_len < ETHER_HDR_LEN)) {
		goto max_inline;
	} else if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
			goto max_inline;
		eth_type = ntohs(eh->evl_proto);
		min_inline = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		min_inline = ETHER_HDR_LEN;
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		/*
		 * Make sure the TOS (IPv4) or traffic class (IPv6)
		 * field gets inlined. Otherwise the SQ may stall.
		 */
		min_inline += 4;
		break;
	default:
		goto max_inline;
	}

	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (mb->m_pkthdr.len < min_inline)
		goto max_inline;
	return (min_inline);

max_inline:
	return (MIN(mb->m_pkthdr.len, sq->max_inline));
}

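/*
 * Compute the complete Ethernet + IP + TCP/UDP header length for
 * inlining. The fixed 8-byte UDP header is accounted for directly;
 * the TCP header length is taken from the data offset field. A
 * return value of 0 means the headers could not be parsed from the
 * first mbuf; callers fall back to L2-only inlining or, for TSO,
 * drop the packet.
 */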
static int
mlx5e_get_full_header_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	int ip_hlen, tcp_hlen;
	struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
			return (0);
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		switch (ip->ip_p) {
		case IPPROTO_TCP:
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen;
			break;
		case IPPROTO_UDP:
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen + 8;
			goto done;
		default:
			return (0);
		}
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		switch (ip6->ip6_nxt) {
		case IPPROTO_TCP:
			eth_hdr_len += sizeof(*ip6);
			break;
		case IPPROTO_UDP:
			eth_hdr_len += sizeof(*ip6) + 8;
			goto done;
		default:
			return (0);
		}
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
done:
	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	 * data:
	 */
	if (mb->m_pkthdr.len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}

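/*
 * Transmit a single mbuf chain. This reserves ring space (padding
 * with a NOP when the WQE would wrap), builds the WQE checksum, TSO
 * and inline header fields, DMA-maps the remaining payload and
 * prepares the doorbell record. On success the ring owns the mbuf
 * (or it is already freed when fully inlined) and *mbp is cleared;
 * on error the mbuf is freed and an errno value is returned.
 */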
static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	struct ifnet *ifp;
	int nsegs;
	int err;
	int x;
	struct mbuf *mb = *mbp;
	u16 ds_cnt;
	u16 ihs;
	u16 pi;
	u8 opcode;

	/* Return ENOBUFS if the queue is full */
	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS)))
		return (ENOBUFS);

	/* Align SQ edge with NOPs to avoid WQE wrap-around */
	pi = ((~sq->pc) & sq->wq.sz_m1);
	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
		/* Send one multi NOP message instead of many */
		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
		pi = ((~sq->pc) & sq->wq.sz_m1);
		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1))
			return (ENOMEM);
	}

	/* Setup local variables */
	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	ifp = sq->ifp;

	memset(wqe, 0, sizeof(*wqe));

	/* Send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
	}
	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
	}
	if (wqe->eth.cs_flags == 0) {
		sq->stats.csum_offload_none++;
	}
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		wqe->eth.mss = cpu_to_be16(mss);
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_get_full_header_size(mb);
		if (unlikely(ihs == 0)) {
			err = EINVAL;
			goto tx_drop;
		}
		payload_len = mb->m_pkthdr.len - ihs;
		if (payload_len == 0)
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);

		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		opcode = MLX5_OPCODE_SEND;

		switch (sq->min_inline_mode) {
		case MLX5_INLINE_MODE_IP:
		case MLX5_INLINE_MODE_TCP_UDP:
			ihs = mlx5e_get_full_header_size(mb);
			if (unlikely(ihs == 0))
				ihs = mlx5e_get_l2_header_size(sq, mb);
			break;
		case MLX5_INLINE_MODE_L2:
			ihs = mlx5e_get_l2_header_size(sq, mb);
			break;
		case MLX5_INLINE_MODE_NONE:
			/* FALLTHROUGH */
		default:
			if ((mb->m_flags & M_VLANTAG) != 0 &&
			    (sq->min_insert_caps & MLX5E_INSERT_VLAN) != 0) {
				/* inlining VLAN data is not required */
				wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
				wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
				ihs = 0;
			} else if ((mb->m_flags & M_VLANTAG) == 0 &&
				   (sq->min_insert_caps & MLX5E_INSERT_NON_VLAN) != 0) {
				/* inlining non-VLAN data is not required */
				ihs = 0;
			} else {
				/* we are forced to inline the L2 header, if any */
				ihs = mlx5e_get_l2_header_size(sq, mb);
			}
			break;
		}
		sq->mbuf[pi].num_bytes = max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	}

	if (likely(ihs == 0)) {
		/* nothing to inline */
	} else if (unlikely(ihs > sq->max_inline)) {
		/* inline header size is too big */
		err = EINVAL;
		goto tx_drop;
	} else if ((mb->m_flags & M_VLANTAG) != 0) {
		struct ether_vlan_header *eh = (struct ether_vlan_header *)
		    wqe->eth.inline_hdr_start;

		/* Range checks */
		if (unlikely(ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN)))
			ihs = (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN);
		else if (unlikely(ihs < ETHER_HDR_LEN)) {
			err = EINVAL;
			goto tx_drop;
		}
		m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
		m_adj(mb, ETHER_HDR_LEN);
		/* Insert a 4-byte VLAN tag into the data stream */
		eh->evl_proto = eh->evl_encap_proto;
		eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
		/* Copy the rest of the header data, if any */
		m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
		m_adj(mb, ihs - ETHER_HDR_LEN);
		/* Extend the header by 4 bytes */
		ihs += ETHER_VLAN_ENCAP_LEN;
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	} else {
		m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
		m_adj(mb, ihs);
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	}

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs > sizeof(wqe->eth.inline_hdr_start)) {
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
		    MLX5_SEND_WQE_DS);
	}
	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;

	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		/* Update statistics */
		sq->stats.defragged++;
		/* Too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);
		if (mb == NULL) {
			mb = *mbp;
			goto tx_drop;
		}
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	/* Catch errors */
	if (err != 0)
		goto tx_drop;

	/* Make sure all mbuf data, if any, is written to RAM */
	if (nsegs != 0) {
		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		/* All data was inlined, free the mbuf. */
		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
		m_freem(mb);
		mb = NULL;
	}

	for (x = 0; x != nsegs; x++) {
		if (segs[x].ds_len == 0)
			continue;
		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
		dseg++;
	}

	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	/* Store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	/* Count all traffic going out */
	sq->stats.packets++;
	sq->stats.bytes += sq->mbuf[pi].num_bytes;

	*mbp = NULL;	/* safety clear */
	return (0);

tx_drop:
	sq->stats.dropped++;
	*mbp = NULL;
	m_freem(mb);
	return (err);
}

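/*
 * Reclaim completed transmissions. Completions are moderated (see
 * mlx5e_do_send_cqe() above), so every CQE accounts for "cev_factor"
 * work requests: that many ring entries are unmapped and freed per
 * CQE popped.
 */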
static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
	u16 sqcc;

	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a CQ overrun may occur.
	 */
	sqcc = sq->cc;

	while (budget > 0) {
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		u16 x;
		u16 ci;

		cqe = mlx5e_get_cqe(&sq->cq);
		if (cqe == NULL)
			break;

		mlx5_cqwq_pop(&sq->cq.wq);

		/* update the budget according to the event factor */
		budget -= sq->cev_factor;

		for (x = 0; x != sq->cev_factor; x++) {
			ci = sqcc & sq->wq.sz_m1;
			mb = sq->mbuf[ci].mbuf;
			sq->mbuf[ci].mbuf = NULL;

			if (mb == NULL) {
				if (sq->mbuf[ci].num_bytes == 0) {
					/* NOP */
					sq->stats.nop++;
				}
			} else {
				bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

				/* Free the transmitted mbuf */
				m_freem(mb);
			}
			sqcc += sq->mbuf[ci].num_wqebbs;
		}
	}

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* Ensure CQ space is freed before enabling more CQEs */
	atomic_thread_fence_rel();

	sq->cc = sqcc;
}

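/*
 * Transmit with the send queue lock held: push the mbuf into the
 * ring, ring the doorbell when a WQE was queued, and start the
 * completion event timer when completion moderation is active
 * (cev_factor != 1) and the timer is not running yet.
 */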
static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	int err = 0;

	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    READ_ONCE(sq->running) == 0)) {
		m_freem(mb);
		return (ENETDOWN);
	}

	/* Do transmit */
	if (mlx5e_sq_xmit(sq, &mb) != 0) {
		/* NOTE: m_freem() is NULL safe */
		m_freem(mb);
		err = ENOBUFS;
	}

	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}

	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}

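/*
 * Per-packet transmit entry point: select a send queue, either from
 * the mbuf's send tag or by the default hash-based selection, and
 * transmit under the queue lock.
 */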
int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_sq *sq;
	int ret;

	if (mb->m_pkthdr.csum_flags & CSUM_SND_TAG) {
		MPASS(mb->m_pkthdr.snd_tag->ifp == ifp);
		sq = mlx5e_select_queue_by_send_tag(ifp, mb);
		if (unlikely(sq == NULL)) {
			goto select_queue;
		}
	} else {
select_queue:
		sq = mlx5e_select_queue(ifp, mb);
		if (unlikely(sq == NULL)) {
			/* Free mbuf */
			m_freem(mb);

			/* Invalid send queue */
			return (ENXIO);
		}
	}

	mtx_lock(&sq->lock);
	ret = mlx5e_xmit_locked(ifp, sq, mb);
	mtx_unlock(&sq->lock);

	return (ret);
}

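/*
 * TX completion queue callback: poll the CQ under the completion
 * lock and re-arm it for the next completion event.
 */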
void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
	mtx_unlock(&sq->comp_lock);
}
643