xref: /freebsd/sys/dev/ena/ena_datapath.c (revision 0f7b8f79f67b25cb0727c7b7d604eb1eec91fef1)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 #include <sys/cdefs.h>
31 #include "opt_rss.h"
32 #include "ena.h"
33 #include "ena_datapath.h"
34 #ifdef DEV_NETMAP
35 #include "ena_netmap.h"
36 #endif /* DEV_NETMAP */
37 #include <net/rss_config.h>
38 
39 #include <netinet6/ip6_var.h>
40 
41 /*********************************************************************
42  *  Static function prototypes
43  *********************************************************************/
44 
45 static bool ena_tx_cleanup(struct ena_ring *);
46 static bool ena_rx_cleanup(struct ena_ring *);
47 static inline int ena_get_tx_req_id(struct ena_ring *tx_ring,
48     struct ena_com_io_cq *io_cq, uint16_t *req_id);
49 static void ena_rx_hash_mbuf(struct ena_ring *, struct ena_com_rx_ctx *,
50     struct mbuf *);
51 static struct mbuf *ena_rx_mbuf(struct ena_ring *, struct ena_com_rx_buf_info *,
52     struct ena_com_rx_ctx *, uint16_t *);
53 static inline void ena_rx_checksum(struct ena_ring *, struct ena_com_rx_ctx *,
54     struct mbuf *);
55 static void ena_tx_csum(struct ena_com_tx_ctx *, struct mbuf *, bool);
56 static int ena_check_and_collapse_mbuf(struct ena_ring *tx_ring,
57     struct mbuf **mbuf);
58 static int ena_xmit_mbuf(struct ena_ring *, struct mbuf **);
59 static void ena_start_xmit(struct ena_ring *);
60 
61 /*********************************************************************
62  *  Global functions
63  *********************************************************************/
64 
65 void
66 ena_cleanup(void *arg, int pending)
67 {
68 	struct ena_que *que = arg;
69 	struct ena_adapter *adapter = que->adapter;
70 	if_t ifp = adapter->ifp;
71 	struct ena_ring *tx_ring;
72 	struct ena_ring *rx_ring;
73 	struct ena_com_io_cq *io_cq;
74 	struct ena_eth_io_intr_reg intr_reg;
75 	int qid, ena_qid;
76 	int i;
77 	bool rx_again, tx_again;
78 
79 	tx_ring = que->tx_ring;
80 	rx_ring = que->rx_ring;
81 	qid = que->id;
82 	ena_qid = ENA_IO_TXQ_IDX(qid);
83 	io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
84 
85 	atomic_store_8(&tx_ring->cleanup_running, 1);
86 	/* Need to make sure that ENA_FLAG_TRIGGER_RESET is visible to ena_cleanup() and
87 	 * that cleanup_running is visible to check_missing_comp_in_tx_queue() to
88 	 * prevent the case of accessing CQ concurrently with check_cdesc_in_tx_cq()
89 	 */
90 	mb();
91 	if (unlikely(((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) ||
92 	    (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))))
93 		return;
94 
95 	ena_log_io(adapter->pdev, DBG, "MSI-X TX/RX routine\n");
96 
97 	atomic_store_8(&tx_ring->first_interrupt, 1);
98 	atomic_store_8(&rx_ring->first_interrupt, 1);
99 
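	/*
	 * Alternate between RX and TX cleanup; stop early once neither side
	 * reports pending work, or after ENA_CLEAN_BUDGET iterations.
	 */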
100 	for (i = 0; i < ENA_CLEAN_BUDGET; ++i) {
101 		rx_again = ena_rx_cleanup(rx_ring);
102 		tx_again = ena_tx_cleanup(tx_ring);
103 
104 		if (unlikely(((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) ||
105 		    (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))))
106 			return;
107 
108 		if (!rx_again && !tx_again)
109 			break;
110 	}
111 
112 	/* Signal that work is done and unmask interrupt */
113 	ena_com_update_intr_reg(&intr_reg, ENA_RX_IRQ_INTERVAL,
114 	    ENA_TX_IRQ_INTERVAL, true, false);
115 	counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1);
116 	ena_com_unmask_intr(io_cq, &intr_reg);
117 	atomic_store_8(&tx_ring->cleanup_running, 0);
118 }
119 
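/*
 * Deferred transmit handler, run from the ring's enqueue taskqueue when
 * ena_mq_start() could not drain the buf_ring directly (e.g. because the
 * ring lock was contended or drbr_enqueue() failed).
 */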
120 void
121 ena_deferred_mq_start(void *arg, int pending)
122 {
123 	struct ena_ring *tx_ring = (struct ena_ring *)arg;
124 	if_t ifp = tx_ring->adapter->ifp;
125 
126 	while (!drbr_empty(ifp, tx_ring->br) && tx_ring->running &&
127 	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
128 		ENA_RING_MTX_LOCK(tx_ring);
129 		ena_start_xmit(tx_ring);
130 		ENA_RING_MTX_UNLOCK(tx_ring);
131 	}
132 }
133 
134 int
135 ena_mq_start(if_t ifp, struct mbuf *m)
136 {
137 	struct ena_adapter *adapter = if_getsoftc(ifp);
138 	struct ena_ring *tx_ring;
139 	int ret, is_drbr_empty;
140 	uint32_t i;
141 #ifdef RSS
142 	uint32_t bucket_id;
143 #endif
144 
145 	if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0))
146 		return (ENODEV);
147 
148 	/* Which queue to use */
149 	/*
150 	 * If everything is set up correctly, it should be the
151 	 * same bucket as the one the current CPU is assigned to,
152 	 * which should improve performance.
153 	 */
154 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
155 #ifdef RSS
156 		if (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
157 		    &bucket_id) == 0)
158 			i = bucket_id % adapter->num_io_queues;
159 		else
160 #endif
161 			i = m->m_pkthdr.flowid % adapter->num_io_queues;
162 	} else {
163 		i = curcpu % adapter->num_io_queues;
164 	}
165 	tx_ring = &adapter->tx_ring[i];
166 
167 	/* Check if drbr is empty before putting packet */
168 	is_drbr_empty = drbr_empty(ifp, tx_ring->br);
169 	ret = drbr_enqueue(ifp, tx_ring->br, m);
170 	if (unlikely(ret != 0)) {
171 		taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
172 		return (ret);
173 	}
174 
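	/*
	 * If the ring was empty, try to transmit immediately; otherwise, or if
	 * the ring lock is contended, defer the work to the enqueue taskqueue.
	 */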
175 	if (is_drbr_empty && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) {
176 		ena_start_xmit(tx_ring);
177 		ENA_RING_MTX_UNLOCK(tx_ring);
178 	} else {
179 		taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
180 	}
181 
182 	return (0);
183 }
184 
185 void
186 ena_qflush(if_t ifp)
187 {
188 	struct ena_adapter *adapter = if_getsoftc(ifp);
189 	struct ena_ring *tx_ring = adapter->tx_ring;
190 	int i;
191 
192 	for (i = 0; i < adapter->num_io_queues; ++i, ++tx_ring)
193 		if (!drbr_empty(ifp, tx_ring->br)) {
194 			ENA_RING_MTX_LOCK(tx_ring);
195 			drbr_flush(ifp, tx_ring->br);
196 			ENA_RING_MTX_UNLOCK(tx_ring);
197 		}
198 
199 	if_qflush(ifp);
200 }
201 
202 /*********************************************************************
203  *  Static functions
204  *********************************************************************/
205 
206 static inline int
207 ena_get_tx_req_id(struct ena_ring *tx_ring, struct ena_com_io_cq *io_cq,
208     uint16_t *req_id)
209 {
210 	struct ena_adapter *adapter = tx_ring->adapter;
211 	int rc = ena_com_tx_comp_req_id_get(io_cq, req_id);
212 
213 	if (unlikely(rc == ENA_COM_TRY_AGAIN))
214 		return (EAGAIN);
215 
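	/* Validate the completed req_id before using it to index tx_buffer_info. */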
216 	rc = validate_tx_req_id(tx_ring, *req_id, rc);
217 
218 	if (unlikely(tx_ring->tx_buffer_info[*req_id].mbuf == NULL)) {
219 		ena_log(adapter->pdev, ERR,
220 		    "tx_info doesn't have valid mbuf. req_id %hu qid %hu\n",
221 		    *req_id, tx_ring->qid);
222 		ena_trigger_reset(adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
223 		rc = EFAULT;
224 	}
225 
226 	return (rc);
227 }
228 
229 /**
230  * ena_tx_cleanup - clear sent packets and corresponding descriptors
231  * @tx_ring: ring for which we want to clean packets
232  *
233  * Once packets are sent, we ask the device in a loop for descriptors that are
234  * no longer used. We find the related mbuf chain in a map (indexed in an
235  * array), free it, and then update the ring state.
236  * This is performed in an "endless" loop, updating the ring pointers every
237  * TX_COMMIT descriptors. The first check for free descriptors is performed
238  * before the actual loop, and it is then repeated at the end of the loop.
239  **/
240 static bool
241 ena_tx_cleanup(struct ena_ring *tx_ring)
242 {
243 	struct ena_adapter *adapter;
244 	struct ena_com_io_cq *io_cq;
245 	uint16_t next_to_clean;
246 	uint16_t req_id;
247 	uint16_t ena_qid;
248 	unsigned int total_done = 0;
249 	int rc;
250 	int commit = ENA_TX_COMMIT;
251 	int budget = ENA_TX_BUDGET;
252 	bool above_thresh;
253 
254 	adapter = tx_ring->que->adapter;
255 	ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
256 	io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
257 	next_to_clean = tx_ring->next_to_clean;
258 
259 #ifdef DEV_NETMAP
260 	if (netmap_tx_irq(adapter->ifp, tx_ring->qid) != NM_IRQ_PASS)
261 		return (0);
262 #endif /* DEV_NETMAP */
263 
264 	do {
265 		struct ena_tx_buffer *tx_info;
266 		struct mbuf *mbuf;
267 
268 		rc = ena_get_tx_req_id(tx_ring, io_cq, &req_id);
269 		if (unlikely(rc != 0))
270 			break;
271 
272 		tx_info = &tx_ring->tx_buffer_info[req_id];
273 
274 		mbuf = tx_info->mbuf;
275 
276 		tx_info->mbuf = NULL;
277 		bintime_clear(&tx_info->timestamp);
278 
279 		bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
280 		    BUS_DMASYNC_POSTWRITE);
281 		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);
282 
283 		ena_log_io(adapter->pdev, DBG, "tx: q %d mbuf %p completed\n",
284 		    tx_ring->qid, mbuf);
285 
286 		m_freem(mbuf);
287 
288 		total_done += tx_info->tx_descs;
289 
290 		tx_ring->free_tx_ids[next_to_clean] = req_id;
291 		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
292 		    tx_ring->ring_size);
293 
294 		if (unlikely(--commit == 0)) {
295 			commit = ENA_TX_COMMIT;
296 			/* update ring state every ENA_TX_COMMIT descriptors */
297 			tx_ring->next_to_clean = next_to_clean;
298 			ena_com_comp_ack(
299 			    &adapter->ena_dev->io_sq_queues[ena_qid],
300 			    total_done);
301 			total_done = 0;
302 		}
303 	} while (likely(--budget));
304 
305 	ena_log_io(adapter->pdev, DBG, "tx: q %d done. total pkts: %d\n",
306 	    tx_ring->qid, ENA_TX_BUDGET - budget);
307 
308 	/* If there is still something to commit, update the ring state */
309 	if (likely(commit != ENA_TX_COMMIT)) {
310 		tx_ring->next_to_clean = next_to_clean;
311 		ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid],
312 		    total_done);
313 	}
314 
315 	/*
316 	 * Need to make the ring's circular update visible to
317 	 * ena_xmit_mbuf() before checking tx_ring->running.
318 	 */
319 	mb();
320 
321 	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
322 	    ENA_TX_RESUME_THRESH);
323 	if (unlikely(!tx_ring->running && above_thresh)) {
324 		ENA_RING_MTX_LOCK(tx_ring);
325 		above_thresh = ena_com_sq_have_enough_space(
326 		    tx_ring->ena_com_io_sq, ENA_TX_RESUME_THRESH);
327 		if (!tx_ring->running && above_thresh) {
328 			tx_ring->running = true;
329 			counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1);
330 			taskqueue_enqueue(tx_ring->enqueue_tq,
331 			    &tx_ring->enqueue_task);
332 		}
333 		ENA_RING_MTX_UNLOCK(tx_ring);
334 	}
335 
336 	tx_ring->tx_last_cleanup_ticks = ticks;
337 
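	/* A used-up budget means more TX completions may still be pending. */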
338 	return (budget == 0);
339 }
340 
341 static void
342 ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
343     struct mbuf *mbuf)
344 {
345 	struct ena_adapter *adapter = rx_ring->adapter;
346 
347 	if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) {
348 		mbuf->m_pkthdr.flowid = ena_rx_ctx->hash;
349 
350 		/*
351 		 * Hardware and software RSS are in agreement only when both are
352 		 * configured to Toeplitz algorithm.  This driver configures
353 		 * that algorithm only when software RSS is enabled and uses it.
354 		 */
355 		if (adapter->ena_dev->rss.hash_func != ENA_ADMIN_TOEPLITZ &&
356 		    ena_rx_ctx->l3_proto != ENA_ETH_IO_L3_PROTO_UNKNOWN) {
357 			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
358 			return;
359 		}
360 
361 		if (ena_rx_ctx->frag &&
362 		    (ena_rx_ctx->l3_proto != ENA_ETH_IO_L3_PROTO_UNKNOWN)) {
363 			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
364 			return;
365 		}
366 
367 		switch (ena_rx_ctx->l3_proto) {
368 		case ENA_ETH_IO_L3_PROTO_IPV4:
369 			switch (ena_rx_ctx->l4_proto) {
370 			case ENA_ETH_IO_L4_PROTO_TCP:
371 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
372 				break;
373 			case ENA_ETH_IO_L4_PROTO_UDP:
374 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
375 				break;
376 			default:
377 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
378 			}
379 			break;
380 		case ENA_ETH_IO_L3_PROTO_IPV6:
381 			switch (ena_rx_ctx->l4_proto) {
382 			case ENA_ETH_IO_L4_PROTO_TCP:
383 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
384 				break;
385 			case ENA_ETH_IO_L4_PROTO_UDP:
386 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
387 				break;
388 			default:
389 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
390 			}
391 			break;
392 		case ENA_ETH_IO_L3_PROTO_UNKNOWN:
393 			M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
394 			break;
395 		default:
396 			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
397 		}
398 	} else {
399 		mbuf->m_pkthdr.flowid = rx_ring->qid;
400 		M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
401 	}
402 }
403 
404 /**
405  * ena_rx_mbuf - assemble mbuf from descriptors
406  * @rx_ring: ring for which we want to clean packets
407  * @ena_bufs: buffer info
408  * @ena_rx_ctx: metadata for this packet(s)
409  * @next_to_clean: ring pointer, will be updated only upon success
410  *
411  **/
412 static struct mbuf *
413 ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs,
414     struct ena_com_rx_ctx *ena_rx_ctx, uint16_t *next_to_clean)
415 {
416 	struct mbuf *mbuf;
417 	struct ena_rx_buffer *rx_info;
418 	struct ena_adapter *adapter;
419 	device_t pdev;
420 	unsigned int descs = ena_rx_ctx->descs;
421 	uint16_t ntc, len, req_id, buf = 0;
422 
423 	ntc = *next_to_clean;
424 	adapter = rx_ring->adapter;
425 	pdev = adapter->pdev;
426 
427 	len = ena_bufs[buf].len;
428 	req_id = ena_bufs[buf].req_id;
429 	rx_info = &rx_ring->rx_buffer_info[req_id];
430 	if (unlikely(rx_info->mbuf == NULL)) {
431 		ena_log(pdev, ERR, "NULL mbuf in rx_info. qid %u req_id %u\n",
432 		    rx_ring->qid, req_id);
433 		ena_trigger_reset(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
434 		return (NULL);
435 	}
436 
437 	ena_log_io(pdev, DBG, "rx_info %p, mbuf %p, paddr %jx\n", rx_info,
438 	    rx_info->mbuf, (uintmax_t)rx_info->ena_buf.paddr);
439 
440 	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
441 	    BUS_DMASYNC_POSTREAD);
442 	mbuf = rx_info->mbuf;
443 	mbuf->m_flags |= M_PKTHDR;
444 	mbuf->m_pkthdr.len = len;
445 	mbuf->m_len = len;
446 	/* Only for the first segment does the data start at a specific offset */
447 	mbuf->m_data = mtodo(mbuf, ena_rx_ctx->pkt_offset);
448 	ena_log_io(pdev, DBG, "Mbuf data offset=%u\n", ena_rx_ctx->pkt_offset);
449 	mbuf->m_pkthdr.rcvif = rx_ring->que->adapter->ifp;
450 
451 	/* Fill mbuf with the hash key and its interpretation for optimization */
452 	ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf);
453 
454 	ena_log_io(pdev, DBG, "rx mbuf 0x%p, flags=0x%x, len: %d\n", mbuf,
455 	    mbuf->m_flags, mbuf->m_pkthdr.len);
456 
457 	/* DMA address is not needed anymore, unmap it */
458 	bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map);
459 
460 	rx_info->mbuf = NULL;
461 	rx_ring->free_rx_ids[ntc] = req_id;
462 	ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size);
463 
464 	/*
465 	 * While there is more than one descriptor for the received packet, append
466 	 * the other mbufs to the main one.
467 	 */
468 	while (--descs) {
469 		++buf;
470 		len = ena_bufs[buf].len;
471 		req_id = ena_bufs[buf].req_id;
472 		rx_info = &rx_ring->rx_buffer_info[req_id];
473 
474 		if (unlikely(rx_info->mbuf == NULL)) {
475 			ena_log(pdev, ERR, "NULL mbuf in rx_info. qid %u req_id %u\n",
476 			    rx_ring->qid, req_id);
477 			/*
478 			 * If one of the required mbufs was not allocated yet,
479 			 * we can break here.
480 			 * All earlier used descriptors will be reallocated
481 			 * later and unused mbufs can be reused.
482 			 * The next_to_clean pointer will not be updated in case
483 			 * of an error, so the caller should advance it manually
484 			 * in the error handling routine to keep it up to date
485 			 * with the hw ring.
486 			 */
487 			m_freem(mbuf);
488 			ena_trigger_reset(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
489 			return (NULL);
490 		}
491 
492 		bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
493 		    BUS_DMASYNC_POSTREAD);
494 		if (unlikely(m_append(mbuf, len, rx_info->mbuf->m_data) == 0)) {
495 			counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
496 			ena_log_io(pdev, WARN, "Failed to append Rx mbuf %p\n",
497 			    mbuf);
498 		}
499 
500 		ena_log_io(pdev, DBG, "rx mbuf updated. len %d\n",
501 		    mbuf->m_pkthdr.len);
502 
503 		/* Free the already appended mbuf; it won't be useful anymore */
504 		bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map);
505 		m_freem(rx_info->mbuf);
506 		rx_info->mbuf = NULL;
507 
508 		rx_ring->free_rx_ids[ntc] = req_id;
509 		ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size);
510 	}
511 
512 	*next_to_clean = ntc;
513 
514 	return (mbuf);
515 }
516 
517 /**
518  * ena_rx_checksum - indicate in mbuf if hw indicated a good cksum
519  **/
520 static inline void
521 ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
522     struct mbuf *mbuf)
523 {
524 	device_t pdev = rx_ring->adapter->pdev;
525 
526 	/* if IP and error */
527 	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
528 	    ena_rx_ctx->l3_csum_err)) {
529 		/* ipv4 checksum error */
530 		mbuf->m_pkthdr.csum_flags = 0;
531 		counter_u64_add(rx_ring->rx_stats.csum_bad, 1);
532 		ena_log_io(pdev, DBG, "RX IPv4 header checksum error\n");
533 		return;
534 	}
535 
536 	/* if TCP/UDP */
537 	if ((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
538 	    (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)) {
539 		if (ena_rx_ctx->l4_csum_err) {
540 			/* TCP/UDP checksum error */
541 			mbuf->m_pkthdr.csum_flags = 0;
542 			counter_u64_add(rx_ring->rx_stats.csum_bad, 1);
543 			ena_log_io(pdev, DBG, "RX L4 checksum error\n");
544 		} else {
545 			mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
546 			mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
547 			counter_u64_add(rx_ring->rx_stats.csum_good, 1);
548 		}
549 	}
550 }
551 
552 /**
553  * ena_rx_cleanup - handle rx irq
554  * @rx_ring: ring for which the irq is being handled
555  **/
556 static bool
557 ena_rx_cleanup(struct ena_ring *rx_ring)
558 {
559 	struct ena_adapter *adapter;
560 	device_t pdev;
561 	struct mbuf *mbuf;
562 	struct ena_com_rx_ctx ena_rx_ctx;
563 	struct ena_com_io_cq *io_cq;
564 	struct ena_com_io_sq *io_sq;
565 	enum ena_regs_reset_reason_types reset_reason;
566 	if_t ifp;
567 	uint16_t ena_qid;
568 	uint16_t next_to_clean;
569 	uint32_t refill_required;
570 	uint32_t refill_threshold;
571 	uint32_t do_if_input = 0;
572 	unsigned int qid;
573 	int rc, i;
574 	int budget = (ENA_RX_DESC_BUDGET == -1) ? INT_MAX : ENA_RX_DESC_BUDGET;
575 #ifdef DEV_NETMAP
576 	int done;
577 #endif /* DEV_NETMAP */
578 
579 	adapter = rx_ring->que->adapter;
580 	pdev = adapter->pdev;
581 	ifp = adapter->ifp;
582 	qid = rx_ring->que->id;
583 	ena_qid = ENA_IO_RXQ_IDX(qid);
584 	io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
585 	io_sq = &adapter->ena_dev->io_sq_queues[ena_qid];
586 	next_to_clean = rx_ring->next_to_clean;
587 
588 #ifdef DEV_NETMAP
589 	if (netmap_rx_irq(adapter->ifp, rx_ring->qid, &done) != NM_IRQ_PASS)
590 		return (0);
591 #endif /* DEV_NETMAP */
592 
593 	ena_log_io(pdev, DBG, "rx: qid %d\n", qid);
594 
595 	do {
596 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
597 		ena_rx_ctx.max_bufs = adapter->max_rx_sgl_size;
598 		ena_rx_ctx.descs = 0;
599 		ena_rx_ctx.pkt_offset = 0;
600 
601 		bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag,
602 		    io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD);
603 		rc = ena_com_rx_pkt(io_cq, io_sq, &ena_rx_ctx);
604 		if (unlikely(rc != 0)) {
605 			if (rc == ENA_COM_NO_SPACE) {
606 				counter_u64_add(rx_ring->rx_stats.bad_desc_num,
607 				    1);
608 				reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
609 			} else if (rc == ENA_COM_FAULT) {
610 				reset_reason = ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED;
611 			} else {
612 				counter_u64_add(rx_ring->rx_stats.bad_req_id,
613 				    1);
614 				reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
615 			}
616 			ena_trigger_reset(adapter, reset_reason);
617 			return (0);
618 		}
619 
620 		if (unlikely(ena_rx_ctx.descs == 0))
621 			break;
622 
623 		ena_log_io(pdev, DBG,
624 		    "rx: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
625 		    rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
626 		    ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
627 
628 		/* Receive mbuf from the ring */
629 		mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs, &ena_rx_ctx,
630 		    &next_to_clean);
631 		bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag,
632 		    io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD);
633 		/* Exit if we failed to retrieve a buffer */
634 		if (unlikely(mbuf == NULL)) {
635 			for (i = 0; i < ena_rx_ctx.descs; ++i) {
636 				rx_ring->free_rx_ids[next_to_clean] =
637 				    rx_ring->ena_bufs[i].req_id;
638 				next_to_clean = ENA_RX_RING_IDX_NEXT(
639 				    next_to_clean, rx_ring->ring_size);
640 			}
641 			break;
642 		}
643 
644 		if (((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) ||
645 		    ((if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6) != 0)) {
646 			ena_rx_checksum(rx_ring, &ena_rx_ctx, mbuf);
647 		}
648 
649 		counter_enter();
650 		counter_u64_add_protected(rx_ring->rx_stats.bytes,
651 		    mbuf->m_pkthdr.len);
652 		counter_u64_add_protected(adapter->hw_stats.rx_bytes,
653 		    mbuf->m_pkthdr.len);
654 		counter_exit();
655 		/*
656 		 * LRO is only for IP/TCP packets, and the TCP checksum of the
657 		 * packet should be computed by the hardware.
658 		 */
659 		do_if_input = 1;
660 		if (((if_getcapenable(ifp) & IFCAP_LRO) != 0)  &&
661 		    ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) &&
662 		    (ena_rx_ctx.l4_proto == ENA_ETH_IO_L4_PROTO_TCP)) {
663 			/*
664 			 * Send to the stack if:
665 			 *  - LRO not enabled, or
666 			 *  - no LRO resources, or
667 			 *  - lro enqueue fails
668 			 */
669 			if ((rx_ring->lro.lro_cnt != 0) &&
670 			    (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0))
671 				do_if_input = 0;
672 		}
673 		if (do_if_input != 0) {
674 			ena_log_io(pdev, DBG,
675 			    "calling if_input() with mbuf %p\n", mbuf);
676 			if_input(ifp, mbuf);
677 		}
678 
679 		counter_enter();
680 		counter_u64_add_protected(rx_ring->rx_stats.cnt, 1);
681 		counter_u64_add_protected(adapter->hw_stats.rx_packets, 1);
682 		counter_exit();
683 
684 		/*
685 		 * Adjust our budget; note that we count descriptors, not
686 		 * packets, since we need to ensure we don't run out of rx
687 		 * buffers when receiving jumbos.
688 		 */
689 		budget -= ena_rx_ctx.descs;
690 	} while (budget > 0);
691 
692 	rx_ring->next_to_clean = next_to_clean;
693 
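	/*
	 * Refill RX buffers only after enough descriptors have been consumed,
	 * to amortize the cost of the refill.
	 */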
694 	refill_required = ena_com_free_q_entries(io_sq);
695 	refill_threshold = min_t(int,
696 	    rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
697 	    ENA_RX_REFILL_THRESH_PACKET);
698 
699 	if (refill_required > refill_threshold) {
700 		ena_refill_rx_bufs(rx_ring, refill_required);
701 	}
702 
703 	tcp_lro_flush_all(&rx_ring->lro);
704 
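	/* A depleted budget means more RX descriptors may still be pending. */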
705 	return (budget <= 0);
706 }
707 
708 static void
709 ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct mbuf *mbuf,
710     bool disable_meta_caching)
711 {
712 	struct ena_com_tx_meta *ena_meta;
713 	struct ether_vlan_header *eh;
714 	struct mbuf *mbuf_next;
715 	u32 mss;
716 	bool offload;
717 	uint16_t etype;
718 	int ehdrlen;
719 	struct ip *ip;
720 	int ipproto;
721 	int iphlen;
722 	struct tcphdr *th;
723 	int offset;
724 
725 	offload = false;
726 	ena_meta = &ena_tx_ctx->ena_meta;
727 	mss = mbuf->m_pkthdr.tso_segsz;
728 
729 	if (mss != 0)
730 		offload = true;
731 
732 	if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0)
733 		offload = true;
734 
735 	if ((mbuf->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
736 		offload = true;
737 
738 	if ((mbuf->m_pkthdr.csum_flags & CSUM6_OFFLOAD) != 0)
739 		offload = true;
740 
741 	if (!offload) {
742 		if (disable_meta_caching) {
743 			memset(ena_meta, 0, sizeof(*ena_meta));
744 			ena_tx_ctx->meta_valid = 1;
745 		} else {
746 			ena_tx_ctx->meta_valid = 0;
747 		}
748 		return;
749 	}
750 
751 	/* Determine where frame payload starts. */
752 	eh = mtod(mbuf, struct ether_vlan_header *);
753 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
754 		etype = ntohs(eh->evl_proto);
755 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
756 	} else {
757 		etype = ntohs(eh->evl_encap_proto);
758 		ehdrlen = ETHER_HDR_LEN;
759 	}
760 
761 	mbuf_next = m_getptr(mbuf, ehdrlen, &offset);
762 
763 	switch (etype) {
764 	case ETHERTYPE_IP:
765 		ip = (struct ip *)(mtodo(mbuf_next, offset));
766 		iphlen = ip->ip_hl << 2;
767 		ipproto = ip->ip_p;
768 		ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
769 		if ((ip->ip_off & htons(IP_DF)) != 0)
770 			ena_tx_ctx->df = 1;
771 		break;
772 	case ETHERTYPE_IPV6:
773 		ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
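		/*
		 * ip6_lasthdr() returns the offset of the upper-layer header
		 * from the start of the mbuf; subtracting the Ethernet header
		 * length yields the full IPv6 header chain length.
		 */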
774 		iphlen = ip6_lasthdr(mbuf, ehdrlen, IPPROTO_IPV6, &ipproto);
775 		iphlen -= ehdrlen;
776 		ena_tx_ctx->df = 1;
777 		break;
778 	default:
779 		iphlen = 0;
780 		ipproto = 0;
781 		break;
782 	}
783 
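	/*
	 * Locate the L4 header; th is dereferenced only in the TSO case,
	 * where the protocol is TCP.
	 */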
784 	mbuf_next = m_getptr(mbuf, iphlen + ehdrlen, &offset);
785 	th = (struct tcphdr *)(mtodo(mbuf_next, offset));
786 
787 	if ((mbuf->m_pkthdr.csum_flags & CSUM_IP) != 0) {
788 		ena_tx_ctx->l3_csum_enable = 1;
789 	}
790 	if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
791 		ena_tx_ctx->tso_enable = 1;
792 		ena_meta->l4_hdr_len = (th->th_off);
793 	}
794 
795 	if (ipproto == IPPROTO_TCP) {
796 		ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
797 		if ((mbuf->m_pkthdr.csum_flags &
798 		    (CSUM_IP_TCP | CSUM_IP6_TCP)) != 0)
799 			ena_tx_ctx->l4_csum_enable = 1;
800 		else
801 			ena_tx_ctx->l4_csum_enable = 0;
802 	} else if (ipproto == IPPROTO_UDP) {
803 		ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
804 		if ((mbuf->m_pkthdr.csum_flags &
805 		    (CSUM_IP_UDP | CSUM_IP6_UDP)) != 0)
806 			ena_tx_ctx->l4_csum_enable = 1;
807 		else
808 			ena_tx_ctx->l4_csum_enable = 0;
809 	} else {
810 		ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
811 		ena_tx_ctx->l4_csum_enable = 0;
812 	}
813 
814 	ena_meta->mss = mss;
815 	ena_meta->l3_hdr_len = iphlen;
816 	ena_meta->l3_hdr_offset = ehdrlen;
817 	ena_tx_ctx->meta_valid = 1;
818 }
819 
820 static int
821 ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
822 {
823 	struct ena_adapter *adapter;
824 	struct mbuf *collapsed_mbuf;
825 	int num_frags;
826 
827 	adapter = tx_ring->adapter;
828 	num_frags = ena_mbuf_count(*mbuf);
829 
830 	/* One segment must be reserved for configuration descriptor. */
831 	if (num_frags < adapter->max_tx_sgl_size)
832 		return (0);
833 
834 	if ((num_frags == adapter->max_tx_sgl_size) &&
835 	    ((*mbuf)->m_pkthdr.len < tx_ring->tx_max_header_size))
836 		return (0);
837 
838 	counter_u64_add(tx_ring->tx_stats.collapse, 1);
839 
840 	collapsed_mbuf = m_collapse(*mbuf, M_NOWAIT,
841 	    adapter->max_tx_sgl_size - 1);
842 	if (unlikely(collapsed_mbuf == NULL)) {
843 		counter_u64_add(tx_ring->tx_stats.collapse_err, 1);
844 		return (ENOMEM);
845 	}
846 
847 	/* If the mbuf was collapsed successfully, the original mbuf is released. */
848 	*mbuf = collapsed_mbuf;
849 
850 	return (0);
851 }
852 
853 static int
854 ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info,
855     struct mbuf *mbuf, void **push_hdr, u16 *header_len)
856 {
857 	struct ena_adapter *adapter = tx_ring->adapter;
858 	struct ena_com_buf *ena_buf;
859 	bus_dma_segment_t segs[ENA_BUS_DMA_SEGS];
860 	size_t iseg = 0;
861 	uint32_t mbuf_head_len;
862 	uint16_t offset;
863 	int rc, nsegs;
864 
865 	mbuf_head_len = mbuf->m_len;
866 	tx_info->mbuf = mbuf;
867 	ena_buf = tx_info->bufs;
868 
869 	/*
870 	 * For easier maintaining of the DMA map, map the whole mbuf even if
871 	 * the LLQ is used. The descriptors will be filled using the segments.
872 	 */
873 	rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag,
874 	    tx_info->dmamap, mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
875 	if (unlikely((rc != 0) || (nsegs == 0))) {
876 		ena_log_io(adapter->pdev, WARN,
877 		    "dmamap load failed! err: %d nsegs: %d\n", rc, nsegs);
878 		goto dma_error;
879 	}
880 
881 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
882 		/*
883 		 * When the device is in LLQ mode, the driver will copy
884 		 * the header into the device memory space.
885 		 * The ena_com layer assumes the header is in a linear
886 		 * memory space.
887 		 * This assumption might be wrong, since part of the header
888 		 * can be in the fragmented buffers.
889 		 * First check if the header fits in the mbuf. If not, copy it
890 		 * to a separate buffer that will hold the linearized data.
891 		 */
892 		*header_len = min_t(uint32_t, mbuf->m_pkthdr.len,
893 		    tx_ring->tx_max_header_size);
894 
895 		/* If header is in linear space, just point into mbuf's data. */
896 		if (likely(*header_len <= mbuf_head_len)) {
897 			*push_hdr = mbuf->m_data;
898 		/*
899 		 * Otherwise, copy the whole header from multiple
900 		 * mbufs into the intermediate buffer.
901 		 */
902 		} else {
903 			m_copydata(mbuf, 0, *header_len,
904 			    tx_ring->push_buf_intermediate_buf);
905 			*push_hdr = tx_ring->push_buf_intermediate_buf;
906 
907 			counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);
908 		}
909 
910 		ena_log_io(adapter->pdev, DBG,
911 		    "mbuf: %p header_buf->vaddr: %p push_len: %d\n",
912 		    mbuf, *push_hdr, *header_len);
913 
914 		/* If the packet fits in the LLQ header, no DMA segments are needed. */
915 		if (mbuf->m_pkthdr.len <= tx_ring->tx_max_header_size) {
916 			return (0);
917 		} else {
918 			offset = tx_ring->tx_max_header_size;
919 			/*
920 			 * As the header part is mapped to the LLQ header, we can
921 			 * skip it and just map the rest of the mbuf to DMA
922 			 * segments.
923 			 */
924 			while (offset > 0) {
925 				if (offset >= segs[iseg].ds_len) {
926 					offset -= segs[iseg].ds_len;
927 				} else {
928 					ena_buf->paddr = segs[iseg].ds_addr +
929 					    offset;
930 					ena_buf->len = segs[iseg].ds_len -
931 					    offset;
932 					ena_buf++;
933 					tx_info->num_of_bufs++;
934 					offset = 0;
935 				}
936 				iseg++;
937 			}
938 		}
939 	} else {
940 		*push_hdr = NULL;
941 		/*
942 		 * header_len is just a hint for the device. Because FreeBSD does
943 		 * not give us information about the packet header length and it
944 		 * is not guaranteed that all packet headers will be in the 1st
945 		 * mbuf, setting header_len to 0 makes the device ignore
946 		 * this value and resolve the header on its own.
947 		 */
948 		*header_len = 0;
949 	}
950 
951 	/* Map rest of the mbuf */
952 	while (iseg < nsegs) {
953 		ena_buf->paddr = segs[iseg].ds_addr;
954 		ena_buf->len = segs[iseg].ds_len;
955 		ena_buf++;
956 		iseg++;
957 		tx_info->num_of_bufs++;
958 	}
959 
960 	return (0);
961 
962 dma_error:
963 	counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1);
964 	tx_info->mbuf = NULL;
965 	return (rc);
966 }
967 
968 static int
969 ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
970 {
971 	struct ena_adapter *adapter;
972 	device_t pdev;
973 	struct ena_tx_buffer *tx_info;
974 	struct ena_com_tx_ctx ena_tx_ctx;
975 	struct ena_com_dev *ena_dev;
976 	struct ena_com_io_sq *io_sq;
977 	void *push_hdr;
978 	uint16_t next_to_use;
979 	uint16_t req_id;
980 	uint16_t ena_qid;
981 	uint16_t header_len;
982 	int rc;
983 	int nb_hw_desc;
984 
985 	ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
986 	adapter = tx_ring->que->adapter;
987 	pdev = adapter->pdev;
988 	ena_dev = adapter->ena_dev;
989 	io_sq = &ena_dev->io_sq_queues[ena_qid];
990 
991 	rc = ena_check_and_collapse_mbuf(tx_ring, mbuf);
992 	if (unlikely(rc != 0)) {
993 		ena_log_io(pdev, WARN, "Failed to collapse mbuf! err: %d\n",
994 		    rc);
995 		return (rc);
996 	}
997 
998 	ena_log_io(pdev, DBG, "Tx: %d bytes\n", (*mbuf)->m_pkthdr.len);
999 
1000 	next_to_use = tx_ring->next_to_use;
1001 	req_id = tx_ring->free_tx_ids[next_to_use];
1002 	tx_info = &tx_ring->tx_buffer_info[req_id];
1003 	tx_info->num_of_bufs = 0;
1004 
1005 	ENA_WARN(tx_info->mbuf != NULL, adapter->ena_dev,
1006 	    "mbuf isn't NULL for req_id %d\n", req_id);
1007 
1008 	rc = ena_tx_map_mbuf(tx_ring, tx_info, *mbuf, &push_hdr, &header_len);
1009 	if (unlikely(rc != 0)) {
1010 		ena_log_io(pdev, WARN, "Failed to map TX mbuf\n");
1011 		return (rc);
1012 	}
1013 	memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
1014 	ena_tx_ctx.ena_bufs = tx_info->bufs;
1015 	ena_tx_ctx.push_header = push_hdr;
1016 	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
1017 	ena_tx_ctx.req_id = req_id;
1018 	ena_tx_ctx.header_len = header_len;
1019 
1020 	/* Set flags and meta data */
1021 	ena_tx_csum(&ena_tx_ctx, *mbuf, adapter->disable_meta_caching);
1022 
1023 	if (tx_ring->acum_pkts == ENA_DB_THRESHOLD ||
1024 	    ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx)) {
1025 		ena_log_io(pdev, DBG,
1026 		    "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
1027 		    tx_ring->que->id);
1028 		ena_ring_tx_doorbell(tx_ring);
1029 	}
1030 
1031 	/* Prepare the packet's descriptors and send them to device */
1032 	rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc);
1033 	if (unlikely(rc != 0)) {
1034 		if (likely(rc == ENA_COM_NO_MEM)) {
1035 			ena_log_io(pdev, DBG, "tx ring[%d] is out of space\n",
1036 			    tx_ring->que->id);
1037 		} else {
1038 			ena_log(pdev, ERR, "failed to prepare tx bufs\n");
1039 			ena_trigger_reset(adapter,
1040 			    ENA_REGS_RESET_DRIVER_INVALID_STATE);
1041 		}
1042 		counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);
1043 		goto dma_error;
1044 	}
1045 
1046 	counter_enter();
1047 	counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
1048 	counter_u64_add_protected(tx_ring->tx_stats.bytes,
1049 	    (*mbuf)->m_pkthdr.len);
1050 
1051 	counter_u64_add_protected(adapter->hw_stats.tx_packets, 1);
1052 	counter_u64_add_protected(adapter->hw_stats.tx_bytes,
1053 	    (*mbuf)->m_pkthdr.len);
1054 	counter_exit();
1055 
1056 	tx_info->tx_descs = nb_hw_desc;
1057 	getbinuptime(&tx_info->timestamp);
1058 	tx_info->print_once = true;
1059 
1060 	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
1061 	    tx_ring->ring_size);
1062 
1063 	/* Stop the queue when no more space is available; the packet can take up
1064 	 * to sgl_size + 2 descriptors: one for the meta descriptor and one for the
1065 	 * header (if the header is larger than tx_max_header_size).
1066 	 */
1067 	if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
1068 	    adapter->max_tx_sgl_size + 2))) {
1069 		ena_log_io(pdev, DBG, "Stop queue %d\n", tx_ring->que->id);
1070 
1071 		tx_ring->running = false;
1072 		counter_u64_add(tx_ring->tx_stats.queue_stop, 1);
1073 
1074 		/* There is a rare condition where this function decides to
1075 		 * stop the queue but meanwhile tx_cleanup() updates
1076 		 * next_to_completion and terminates.
1077 		 * The queue will remain stopped forever.
1078 		 * To solve this issue, this function performs an mb(), checks
1079 		 * the wakeup condition, and wakes up the queue if needed.
1080 		 */
1081 		mb();
1082 
1083 		if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
1084 		    ENA_TX_RESUME_THRESH)) {
1085 			tx_ring->running = true;
1086 			counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1);
1087 		}
1088 	}
1089 
1090 	bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
1091 	    BUS_DMASYNC_PREWRITE);
1092 
1093 	return (0);
1094 
1095 dma_error:
1096 	tx_info->mbuf = NULL;
1097 	bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);
1098 
1099 	return (rc);
1100 }
1101 
1102 static void
1103 ena_start_xmit(struct ena_ring *tx_ring)
1104 {
1105 	struct mbuf *mbuf;
1106 	struct ena_adapter *adapter = tx_ring->adapter;
1107 	int ret = 0;
1108 
1109 	ENA_RING_MTX_ASSERT(tx_ring);
1110 
1111 	if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0))
1112 		return;
1113 
1114 	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)))
1115 		return;
1116 
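	/*
	 * Drain the buf_ring: peek at each mbuf and advance the ring only
	 * after it has been posted to the device, so a packet that cannot be
	 * sent now can be put back and retried later.
	 */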
1117 	while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) {
1118 		ena_log_io(adapter->pdev, DBG,
1119 		    "\ndequeued mbuf %p with flags %#x and header csum flags %#jx\n",
1120 		    mbuf, mbuf->m_flags, (uint64_t)mbuf->m_pkthdr.csum_flags);
1121 
1122 		if (unlikely(!tx_ring->running)) {
1123 			drbr_putback(adapter->ifp, tx_ring->br, mbuf);
1124 			break;
1125 		}
1126 
1127 		if (unlikely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0)) {
1128 			if (ret == ENA_COM_NO_MEM) {
1129 				drbr_putback(adapter->ifp, tx_ring->br, mbuf);
1130 			} else if (ret == ENA_COM_NO_SPACE) {
1131 				drbr_putback(adapter->ifp, tx_ring->br, mbuf);
1132 			} else {
1133 				m_freem(mbuf);
1134 				drbr_advance(adapter->ifp, tx_ring->br);
1135 			}
1136 
1137 			break;
1138 		}
1139 
1140 		drbr_advance(adapter->ifp, tx_ring->br);
1141 
1142 		if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0))
1143 			return;
1144 
1145 		tx_ring->acum_pkts++;
1146 
1147 		BPF_MTAP(adapter->ifp, mbuf);
1148 	}
1149 
1150 	if (likely(tx_ring->acum_pkts != 0)) {
1151 		/* Trigger the dma engine */
1152 		ena_ring_tx_doorbell(tx_ring);
1153 	}
1154 
1155 	if (unlikely(!tx_ring->running))
1156 		taskqueue_enqueue(tx_ring->que->cleanup_tq,
1157 		    &tx_ring->que->cleanup_task);
1158 }
1159