/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "en.h"
#include <machine/atomic.h>

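/*
 * Completion event moderation: instead of requesting a completion
 * event (CQE) for every transmitted WQE, request one only for every
 * "cev_factor"-th WQE.  The completion handler, mlx5e_poll_tx_cq(),
 * then recycles all WQEs covered by one CQE in a single pass, which
 * reduces completion interrupt and CQ processing overhead.
 */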
static inline bool
mlx5e_do_send_cqe(struct mlx5e_sq *sq)
{
	sq->cev_counter++;
	/* interleave the CQEs */
	if (sq->cev_counter >= sq->cev_factor) {
		sq->cev_counter = 0;
		return (true);
	}
	return (false);
}

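/*
 * Post a NOP WQE spanning "ds_cnt" data segments at the current
 * producer index.  This is used to pad the send queue up to the ring
 * edge so that a real WQE never wraps around, and, judging by the
 * event timer logic further below, also to force out pending
 * completion events on otherwise idle queues.
 */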
void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}

#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

static void
mlx5e_hash_init(void *arg)
{
	mlx5e_hash_value = m_ether_tcpip_hash_init();
}

/* Make the kernel call mlx5e_hash_init() after the random subsystem has finished initializing */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
#endif

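/*
 * Select the send queue for an outgoing mbuf: the channel is chosen
 * from the mbuf's flow ID (or a software L3/L4 hash when no flow ID
 * is set) and the traffic class from the VLAN priority bits, if any.
 * Returns NULL when no usable send queue exists.
 */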
static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	struct mlx5e_channel * volatile *ppch;
	struct mlx5e_channel *pch;
	u32 ch;
	u32 tc;

	ppch = priv->channel;

	/* check if channels are successfully opened */
	if (unlikely(ppch == NULL))
		return (NULL);

	/* obtain VLAN information, if present */
	if (mb->m_flags & M_VLANTAG) {
		/* the three topmost TCI bits carry the 802.1p priority */
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
	} else {
		tc = priv->default_vlan_prio;
	}

	ch = priv->params.num_channels;

#ifdef RATELIMIT
	if (mb->m_pkthdr.snd_tag != NULL) {
		struct mlx5e_sq *sq;

		/* check for route change */
		if (mb->m_pkthdr.snd_tag->ifp != ifp)
			return (NULL);

		/* get pointer to sendqueue */
		sq = container_of(mb->m_pkthdr.snd_tag,
		    struct mlx5e_rl_channel, m_snd_tag)->sq;

		/* check if valid */
		if (sq != NULL && sq->stopped == 0)
			return (sq);

		/* FALLTHROUGH */
	}
#endif
	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(mb->m_pkthdr.flowid,
		    M_HASHTYPE_GET(mb), &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (mb->m_pkthdr.flowid % 128) % ch;
	} else {
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
		/*
		 * m_ether_tcpip_hash() is not present in older stable
		 * branches, so just put unhashed mbufs on queue 0.
		 */
		ch = 0;
#endif
	}

	/* check if channel is allocated and not stopped */
	pch = ppch[ch];
	if (likely(pch != NULL && pch->sq[tc].stopped == 0))
		return (&pch->sq[tc]);
	return (NULL);
}

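/*
 * Number of packet header bytes to copy directly into the WQE's
 * inline segment, presumably so that the device can parse the headers
 * without an extra DMA read.  Capped at MLX5E_MAX_TX_INLINE.
 */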
static inline u16
mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, struct mbuf *mb)
{
	return (MIN(MLX5E_MAX_TX_INLINE, mb->m_len));
}

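/*
 * Compute the combined Ethernet/IP/TCP header length for TSO.
 * Returns 0 if the packet is not TCP or if the headers are not
 * contiguous in the first mbuf.
 */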
static int
mlx5e_get_header_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	int ip_hlen, tcp_hlen;
	struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	if (mb->m_len < eth_hdr_len)
		return (0);
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		if (ip->ip_p != IPPROTO_TCP)
			return (0);
		ip_hlen = ip->ip_hl << 2;
		eth_hdr_len += ip_hlen;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (0);
		eth_hdr_len += sizeof(*ip6);
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
	if (mb->m_len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}

/*
 * Errors from this function are not passed back to the network stack
 * directly; packets go through the drbr and failures are handled by
 * the caller, mlx5e_xmit_locked().
 */
static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	struct ifnet *ifp;
	int nsegs;
	int err;
	int x;
	struct mbuf *mb = *mbp;
	u16 ds_cnt;
	u16 ihs;
	u16 pi;
	u8 opcode;

	/*
	 * Return ENOBUFS if the queue is full; this may trigger
	 * reinsertion of the mbuf into the drbr (see mlx5e_xmit_locked).
	 */
	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
		return (ENOBUFS);
	}

	/* Align SQ edge with NOPs to avoid WQE wrap around */
	pi = ((~sq->pc) & sq->wq.sz_m1);
	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
		/* Send one multi NOP message instead of many */
		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
		pi = ((~sq->pc) & sq->wq.sz_m1);
		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1))
			return (ENOMEM);
	}

	/* Setup local variables */
	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	ifp = sq->ifp;

	memset(wqe, 0, sizeof(*wqe));

	/* Send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
	}
	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
	}
	if (wqe->eth.cs_flags == 0) {
		sq->stats.csum_offload_none++;
	}
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		wqe->eth.mss = cpu_to_be16(mss);
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_get_header_size(mb);
		payload_len = mb->m_pkthdr.len - ihs;
		if (payload_len == 0)
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);

		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		opcode = MLX5_OPCODE_SEND;
		ihs = mlx5e_get_inline_hdr_size(sq, mb);
		sq->mbuf[pi].num_bytes = max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	}
	if (mb->m_flags & M_VLANTAG) {
		struct ether_vlan_header *eh =
		    (struct ether_vlan_header *)wqe->eth.inline_hdr_start;

		/* Range checks */
		if (ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN))
			ihs = (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN);
		else if (ihs < ETHER_HDR_LEN) {
			err = EINVAL;
			goto tx_drop;
		}
		m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
		m_adj(mb, ETHER_HDR_LEN);
		/* Insert a 4-byte VLAN tag into the data stream */
		eh->evl_proto = eh->evl_encap_proto;
		eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
		/* Copy rest of header data, if any */
		m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
		m_adj(mb, ihs - ETHER_HDR_LEN);
		/* Extend header by 4 bytes */
		ihs += ETHER_VLAN_ENCAP_LEN;
	} else {
		m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
		m_adj(mb, ihs);
	}

	wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (likely(ihs > sizeof(wqe->eth.inline_hdr_start))) {
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
		    MLX5_SEND_WQE_DS);
	}
	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;

	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		/* Update statistics */
		sq->stats.defragged++;
		/* Too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);
		if (mb == NULL) {
			mb = *mbp;
			goto tx_drop;
		}
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	/* Catch errors */
	if (err != 0)
		goto tx_drop;

	/* Make sure all mbuf data, if any, is written to RAM */
	if (nsegs != 0) {
		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		/* All data was inlined, free the mbuf. */
		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
		m_freem(mb);
		mb = NULL;
	}

	for (x = 0; x != nsegs; x++) {
		if (segs[x].ds_len == 0)
			continue;
		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
		dseg++;
	}

	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell; the doorbell itself is written by the caller */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	/* Store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	sq->stats.packets++;
	*mbp = NULL;	/* safety clear */
	return (0);

tx_drop:
	sq->stats.dropped++;
	*mbp = NULL;
	m_freem(mb);
	return (err);
}

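/*
 * Reclaim completed transmit work.  Each CQE accounts for up to
 * "cev_factor" WQEs because of the completion event moderation above,
 * so the budget is decremented accordingly and all covered mbufs are
 * unloaded and freed in one pass.
 */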
static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
	u16 sqcc;

	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a CQ overrun may occur.
	 */
	sqcc = sq->cc;

	while (budget > 0) {
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		u16 x;
		u16 ci;

		cqe = mlx5e_get_cqe(&sq->cq);
		if (cqe == NULL)
			break;

		mlx5_cqwq_pop(&sq->cq.wq);

		/* update budget according to the event factor */
		budget -= sq->cev_factor;

		for (x = 0; x != sq->cev_factor; x++) {
			ci = sqcc & sq->wq.sz_m1;
			mb = sq->mbuf[ci].mbuf;
			sq->mbuf[ci].mbuf = NULL;	/* Safety clear */

			if (mb == NULL) {
				if (sq->mbuf[ci].num_bytes == 0) {
					/* NOP */
					sq->stats.nop++;
				}
			} else {
				bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

				/* Free transmitted mbuf */
				m_freem(mb);
			}
			sqcc += sq->mbuf[ci].num_wqebbs;
		}
	}

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* Ensure cq space is freed before enabling more cqes */
	atomic_thread_fence_rel();

	sq->cc = sqcc;

	if (sq->sq_tq != NULL &&
	    atomic_cmpset_int(&sq->queue_state, MLX5E_SQ_FULL, MLX5E_SQ_READY))
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
}

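/*
 * Enqueue the mbuf into the drbr and drain the ring while holding the
 * send queue lock.  The doorbell is written once, after the loop, so
 * that several WQEs can be posted with a single doorbell write.
 */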
static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	struct mbuf *next;
	int err = 0;

	if (likely(mb != NULL)) {
		/*
		 * If we can't insert the mbuf into the drbr, try to
		 * transmit anyway.  Keep the error so it can be
		 * returned after the transmit attempt.
		 */
		err = drbr_enqueue(ifp, sq->br, mb);
	}

	/*
	 * Check if the network interface is closed or if the SQ is
	 * being stopped:
	 */
	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sq->stopped != 0))
		return (err);

	/* Process the queue */
	while ((next = drbr_peek(ifp, sq->br)) != NULL) {
		if (mlx5e_sq_xmit(sq, &next) != 0) {
			if (next == NULL) {
				drbr_advance(ifp, sq->br);
			} else {
				drbr_putback(ifp, sq->br, next);
				atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_FULL);
			}
			break;
		}
		drbr_advance(ifp, sq->br);
	}
	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}
	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}

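/*
 * Same as mlx5e_xmit_locked(), but for send queues without a buf_ring
 * (apparently only the rate-limit queues); the mbuf is transmitted
 * directly or dropped.
 */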
static int
mlx5e_xmit_locked_no_br(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	int err = 0;

	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sq->stopped != 0)) {
		m_freem(mb);
		return (ENETDOWN);
	}

	/* Do transmit */
	if (mlx5e_sq_xmit(sq, &mb) != 0) {
		/* NOTE: m_freem() is NULL safe */
		m_freem(mb);
		err = ENOBUFS;
	}

	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}

	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}

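/*
 * Transmit entry point (if_transmit).  If the send queue lock is
 * contended, the mbuf is queued to the drbr and the transmit
 * taskqueue takes over, so that callers never block here.
 */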
int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_sq *sq;
	int ret;

	sq = mlx5e_select_queue(ifp, mb);
	if (unlikely(sq == NULL)) {
#ifdef RATELIMIT
		/* Check for route change */
		if (mb->m_pkthdr.snd_tag != NULL &&
		    mb->m_pkthdr.snd_tag->ifp != ifp) {
			/* Free mbuf */
			m_freem(mb);

			/*
			 * Tell upper layers about route change and to
			 * re-transmit this packet:
			 */
			return (EAGAIN);
		}
#endif
		/* Free mbuf */
		m_freem(mb);

		/* Invalid send queue */
		return (ENXIO);
	}

	if (unlikely(sq->br == NULL)) {
		/* rate limited traffic */
		mtx_lock(&sq->lock);
		ret = mlx5e_xmit_locked_no_br(ifp, sq, mb);
		mtx_unlock(&sq->lock);
	} else if (mtx_trylock(&sq->lock)) {
		ret = mlx5e_xmit_locked(ifp, sq, mb);
		mtx_unlock(&sq->lock);
	} else {
		ret = drbr_enqueue(ifp, sq->br, mb);
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
	}

	return (ret);
}

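/*
 * Completion interrupt handler: poll the transmit CQ and re-arm it
 * for the next completion event.
 */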
void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
	mtx_unlock(&sq->comp_lock);
}

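/* Transmit taskqueue handler: drain any mbufs deferred by mlx5e_xmit(). */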
void
mlx5e_tx_que(void *context, int pending)
{
	struct mlx5e_sq *sq = context;
	struct ifnet *ifp = sq->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sq->lock);
		if (!drbr_empty(ifp, sq->br))
			mlx5e_xmit_locked(ifp, sq, NULL);
		mtx_unlock(&sq->lock);
	}
}