xref: /freebsd/sys/dev/ena/ena_netmap.c (revision 2f17afd19a3534dc1755c52edb0c2f70ea0eb1e4)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 #include <sys/cdefs.h>
31 #ifdef DEV_NETMAP
32 
33 #include "ena.h"
34 #include "ena_netmap.h"
35 
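/*
 * Return values used by ena_netmap_rx_frame() to tell the Rx loop whether
 * another completed frame may still be pending, and an upper bound on how
 * many frames a single rxsync call may process before giving up.
 */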
36 #define ENA_NETMAP_MORE_FRAMES		1
37 #define ENA_NETMAP_NO_MORE_FRAMES	0
38 #define ENA_MAX_FRAMES			16384
39 
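/*
 * Working context shared by the Tx/Rx sync paths: the netmap kring and its
 * slot array on one side, and the matching ENA ring with its I/O SQ/CQ pair
 * on the other. nm_i tracks the current netmap slot, nt the hardware ring
 * index, and lim is nkr_num_slots - 1 (the last valid slot index).
 */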
40 struct ena_netmap_ctx {
41 	struct netmap_kring *kring;
42 	struct ena_adapter *adapter;
43 	struct netmap_adapter *na;
44 	struct netmap_slot *slots;
45 	struct ena_ring *ring;
46 	struct ena_com_io_cq *io_cq;
47 	struct ena_com_io_sq *io_sq;
48 	u_int nm_i;
49 	uint16_t nt;
50 	uint16_t lim;
51 };
52 
53 /* Netmap callbacks */
54 static int ena_netmap_reg(struct netmap_adapter *, int);
55 static int ena_netmap_txsync(struct netmap_kring *, int);
56 static int ena_netmap_rxsync(struct netmap_kring *, int);
57 
58 /* Helper functions */
59 static int ena_netmap_tx_frames(struct ena_netmap_ctx *);
60 static int ena_netmap_tx_frame(struct ena_netmap_ctx *);
61 static inline uint16_t ena_netmap_count_slots(struct ena_netmap_ctx *);
62 static inline uint16_t ena_netmap_packet_len(struct netmap_slot *, u_int,
63     uint16_t);
64 static int ena_netmap_copy_data(struct netmap_adapter *, struct netmap_slot *,
65     u_int, uint16_t, uint16_t, void *);
66 static int ena_netmap_map_single_slot(struct netmap_adapter *,
67     struct netmap_slot *, bus_dma_tag_t, bus_dmamap_t, void **, uint64_t *);
68 static int ena_netmap_tx_map_slots(struct ena_netmap_ctx *,
69     struct ena_tx_buffer *, void **, uint16_t *, uint16_t *);
70 static void ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *,
71     struct ena_tx_buffer *);
72 static void ena_netmap_tx_cleanup(struct ena_netmap_ctx *);
73 static uint16_t ena_netmap_tx_clean_one(struct ena_netmap_ctx *, uint16_t);
74 static int ena_netmap_rx_frames(struct ena_netmap_ctx *);
75 static int ena_netmap_rx_frame(struct ena_netmap_ctx *);
76 static int ena_netmap_rx_load_desc(struct ena_netmap_ctx *, uint16_t, int *);
77 static void ena_netmap_rx_cleanup(struct ena_netmap_ctx *);
78 static void ena_netmap_fill_ctx(struct netmap_kring *, struct ena_netmap_ctx *,
79     uint16_t);
80 
81 int
82 ena_netmap_attach(struct ena_adapter *adapter)
83 {
84 	struct netmap_adapter na;
85 
86 	ena_log_nm(adapter->pdev, INFO, "netmap attach\n");
87 
88 	bzero(&na, sizeof(na));
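	/*
	 * NAF_MOREFRAG advertises multi-slot frame support, so a single
	 * packet may span several netmap slots chained with NS_MOREFRAG.
	 */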
89 	na.na_flags = NAF_MOREFRAG;
90 	na.ifp = adapter->ifp;
91 	na.num_tx_desc = adapter->requested_tx_ring_size;
92 	na.num_rx_desc = adapter->requested_rx_ring_size;
93 	na.num_tx_rings = adapter->num_io_queues;
94 	na.num_rx_rings = adapter->num_io_queues;
95 	na.rx_buf_maxsize = adapter->buf_ring_size;
96 	na.nm_txsync = ena_netmap_txsync;
97 	na.nm_rxsync = ena_netmap_rxsync;
98 	na.nm_register = ena_netmap_reg;
99 
100 	return (netmap_attach(&na));
101 }
102 
103 int
104 ena_netmap_alloc_rx_slot(struct ena_adapter *adapter, struct ena_ring *rx_ring,
105     struct ena_rx_buffer *rx_info)
106 {
107 	struct netmap_adapter *na = NA(adapter->ifp);
108 	struct netmap_kring *kring;
109 	struct netmap_ring *ring;
110 	struct netmap_slot *slot;
111 	void *addr;
112 	uint64_t paddr;
113 	int nm_i, qid, head, lim, rc;
114 
115 	/* Do nothing if a previously allocated buffer has not been used yet */
116 	if (unlikely(rx_info->netmap_buf_idx != 0))
117 		return (0);
118 
119 	qid = rx_ring->qid;
120 	kring = na->rx_rings[qid];
121 	nm_i = kring->nr_hwcur;
122 	head = kring->rhead;
123 
124 	ena_log_nm(adapter->pdev, DBG,
125 	    "nr_hwcur: %d, nr_hwtail: %d, rhead: %d, rcur: %d, rtail: %d\n",
126 	    kring->nr_hwcur, kring->nr_hwtail, kring->rhead, kring->rcur,
127 	    kring->rtail);
128 
129 	if ((nm_i == head) && rx_ring->initialized) {
130 		ena_log_nm(adapter->pdev, ERR,
131 		    "No free slots in netmap ring\n");
132 		return (ENOMEM);
133 	}
134 
135 	ring = kring->ring;
136 	if (ring == NULL) {
137 		ena_log_nm(adapter->pdev, ERR, "Rx ring %d is NULL\n", qid);
138 		return (EFAULT);
139 	}
140 	slot = &ring->slot[nm_i];
141 
142 	addr = PNMB(na, slot, &paddr);
143 	if (addr == NETMAP_BUF_BASE(na)) {
144 		ena_log_nm(adapter->pdev, ERR, "Bad buff in slot\n");
145 		return (EFAULT);
146 	}
147 
148 	rc = netmap_load_map(na, adapter->rx_buf_tag, rx_info->map, addr);
149 	if (rc != 0) {
150 		ena_log_nm(adapter->pdev, WARN, "DMA mapping error\n");
151 		return (rc);
152 	}
153 	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);
154 
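	/*
	 * Lend the slot's netmap buffer to the device: remember its index in
	 * rx_info and clear it in the slot so netmap does not reuse it until
	 * the Rx path hands it back.
	 */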
155 	rx_info->ena_buf.paddr = paddr;
156 	rx_info->ena_buf.len = ring->nr_buf_size;
157 	rx_info->mbuf = NULL;
158 	rx_info->netmap_buf_idx = slot->buf_idx;
159 
160 	slot->buf_idx = 0;
161 
162 	lim = kring->nkr_num_slots - 1;
163 	kring->nr_hwcur = nm_next(nm_i, lim);
164 
165 	return (0);
166 }
167 
168 void
169 ena_netmap_free_rx_slot(struct ena_adapter *adapter, struct ena_ring *rx_ring,
170     struct ena_rx_buffer *rx_info)
171 {
172 	struct netmap_adapter *na;
173 	struct netmap_kring *kring;
174 	struct netmap_slot *slot;
175 	int nm_i, qid, lim;
176 
177 	na = NA(adapter->ifp);
178 	if (na == NULL) {
179 		ena_log_nm(adapter->pdev, ERR, "netmap adapter is NULL\n");
180 		return;
181 	}
182 
183 	if (na->rx_rings == NULL) {
184 		ena_log_nm(adapter->pdev, ERR, "netmap rings are NULL\n");
185 		return;
186 	}
187 
188 	qid = rx_ring->qid;
189 	kring = na->rx_rings[qid];
190 	if (kring == NULL) {
191 		ena_log_nm(adapter->pdev, ERR,
192 		    "netmap kernel ring %d is NULL\n", qid);
193 		return;
194 	}
195 
196 	lim = kring->nkr_num_slots - 1;
197 	nm_i = nm_prev(kring->nr_hwcur, lim);
198 
199 	if (kring->nr_mode != NKR_NETMAP_ON)
200 		return;
201 
202 	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
203 	    BUS_DMASYNC_POSTREAD);
204 	netmap_unload_map(na, adapter->rx_buf_tag, rx_info->map);
205 
206 	KASSERT(kring->ring != NULL, ("Netmap Rx ring is NULL\n"));
207 
208 	slot = &kring->ring->slot[nm_i];
209 
210 	ENA_WARN(slot->buf_idx != 0, adapter->ena_dev, "Overwrite slot buf\n");
211 	slot->buf_idx = rx_info->netmap_buf_idx;
212 	slot->flags = NS_BUF_CHANGED;
213 
214 	rx_info->netmap_buf_idx = 0;
215 	kring->nr_hwcur = nm_i;
216 }
217 
218 static bool
219 ena_ring_in_netmap(struct ena_adapter *adapter, int qid, enum txrx x)
220 {
221 	struct netmap_adapter *na;
222 	struct netmap_kring *kring;
223 
224 	if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
225 		na = NA(adapter->ifp);
226 		kring = (x == NR_RX) ? na->rx_rings[qid] : na->tx_rings[qid];
227 		if (kring->nr_mode == NKR_NETMAP_ON)
228 			return true;
229 	}
230 	return false;
231 }
232 
233 bool
234 ena_tx_ring_in_netmap(struct ena_adapter *adapter, int qid)
235 {
236 	return ena_ring_in_netmap(adapter, qid, NR_TX);
237 }
238 
239 bool
240 ena_rx_ring_in_netmap(struct ena_adapter *adapter, int qid)
241 {
242 	return ena_ring_in_netmap(adapter, qid, NR_RX);
243 }
244 
245 static void
246 ena_netmap_reset_ring(struct ena_adapter *adapter, int qid, enum txrx x)
247 {
248 	if (!ena_ring_in_netmap(adapter, qid, x))
249 		return;
250 
251 	netmap_reset(NA(adapter->ifp), x, qid, 0);
252 	ena_log_nm(adapter->pdev, INFO, "%s ring %d is in netmap mode\n",
253 	    (x == NR_TX) ? "Tx" : "Rx", qid);
254 }
255 
256 void
257 ena_netmap_reset_rx_ring(struct ena_adapter *adapter, int qid)
258 {
259 	ena_netmap_reset_ring(adapter, qid, NR_RX);
260 }
261 
262 void
263 ena_netmap_reset_tx_ring(struct ena_adapter *adapter, int qid)
264 {
265 	ena_netmap_reset_ring(adapter, qid, NR_TX);
266 }
267 
268 static int
269 ena_netmap_reg(struct netmap_adapter *na, int onoff)
270 {
271 	if_t ifp = na->ifp;
272 	struct ena_adapter *adapter = if_getsoftc(ifp);
273 	device_t pdev = adapter->pdev;
274 	struct netmap_kring *kring;
275 	enum txrx t;
276 	int rc, i;
277 
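	/*
	 * Switching netmap mode requires a full interface restart: bring the
	 * interface down, flip the per-kring mode flags, and bring it back
	 * up. If that fails, fall back to a device reset and restore.
	 */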
278 	ENA_LOCK_LOCK();
279 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
280 	ena_down(adapter);
281 
282 	if (onoff) {
283 		ena_log_nm(pdev, INFO, "netmap on\n");
284 		for_rx_tx(t) {
285 			for (i = 0; i <= nma_get_nrings(na, t); i++) {
286 				kring = NMR(na, t)[i];
287 				if (nm_kring_pending_on(kring)) {
288 					kring->nr_mode = NKR_NETMAP_ON;
289 				}
290 			}
291 		}
292 		nm_set_native_flags(na);
293 	} else {
294 		ena_log_nm(pdev, INFO, "netmap off\n");
295 		nm_clear_native_flags(na);
296 		for_rx_tx(t) {
297 			for (i = 0; i <= nma_get_nrings(na, t); i++) {
298 				kring = NMR(na, t)[i];
299 				if (nm_kring_pending_off(kring)) {
300 					kring->nr_mode = NKR_NETMAP_OFF;
301 				}
302 			}
303 		}
304 	}
305 
306 	rc = ena_up(adapter);
307 	if (rc != 0) {
308 		ena_log_nm(pdev, WARN, "ena_up failed with rc=%d\n", rc);
309 		adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
310 		nm_clear_native_flags(na);
311 		ena_destroy_device(adapter, false);
312 		ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
313 		rc = ena_restore_device(adapter);
314 	}
315 	ENA_LOCK_UNLOCK();
316 
317 	return (rc);
318 }
319 
320 static int
321 ena_netmap_txsync(struct netmap_kring *kring, int flags)
322 {
323 	struct ena_netmap_ctx ctx;
324 	int rc = 0;
325 
326 	ena_netmap_fill_ctx(kring, &ctx, ENA_IO_TXQ_IDX(kring->ring_id));
327 	ctx.ring = &ctx.adapter->tx_ring[kring->ring_id];
328 
329 	ENA_RING_MTX_LOCK(ctx.ring);
330 	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, ctx.adapter)))
331 		goto txsync_end;
332 
333 	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
334 		goto txsync_end;
335 
336 	rc = ena_netmap_tx_frames(&ctx);
337 	ena_netmap_tx_cleanup(&ctx);
338 
339 txsync_end:
340 	ENA_RING_MTX_UNLOCK(ctx.ring);
341 	return (rc);
342 }
343 
344 static int
345 ena_netmap_tx_frames(struct ena_netmap_ctx *ctx)
346 {
347 	struct ena_ring *tx_ring = ctx->ring;
348 	int rc = 0;
349 
350 	ctx->nm_i = ctx->kring->nr_hwcur;
351 	ctx->nt = ctx->ring->next_to_use;
352 
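	/* Transmit every frame the application queued between nr_hwcur and rhead. */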
353 	__builtin_prefetch(&ctx->slots[ctx->nm_i]);
354 
355 	while (ctx->nm_i != ctx->kring->rhead) {
356 		if ((rc = ena_netmap_tx_frame(ctx)) != 0) {
357 			/*
358 			 * When there is no free space in the Tx ring, an error is
359 			 * still returned. It should not be propagated to netmap,
360 			 * as the application can see the current ring state from
361 			 * the netmap ring pointers. Returning an error here could
362 			 * cause the application to exit, even though a full Tx
363 			 * ring is a common condition.
364 			 */
365 			if (rc == ENA_COM_NO_MEM)
366 				rc = 0;
367 			break;
368 		}
369 		tx_ring->acum_pkts++;
370 	}
371 
372 	/* If any packet was sent... */
373 	if (likely(ctx->nm_i != ctx->kring->nr_hwcur)) {
374 		/* ...send the doorbell to the device. */
375 		ena_ring_tx_doorbell(tx_ring);
376 
377 		ctx->ring->next_to_use = ctx->nt;
378 		ctx->kring->nr_hwcur = ctx->nm_i;
379 	}
380 
381 	return (rc);
382 }
383 
384 static int
385 ena_netmap_tx_frame(struct ena_netmap_ctx *ctx)
386 {
387 	struct ena_com_tx_ctx ena_tx_ctx;
388 	struct ena_adapter *adapter;
389 	struct ena_ring *tx_ring;
390 	struct ena_tx_buffer *tx_info;
391 	uint16_t req_id;
392 	uint16_t header_len;
393 	uint16_t packet_len;
394 	int nb_hw_desc;
395 	int rc;
396 	void *push_hdr;
397 
398 	adapter = ctx->adapter;
399 	if (ena_netmap_count_slots(ctx) > adapter->max_tx_sgl_size) {
400 		ena_log_nm(adapter->pdev, WARN, "Too many slots per packet\n");
401 		return (EINVAL);
402 	}
403 
404 	tx_ring = ctx->ring;
405 
406 	req_id = tx_ring->free_tx_ids[ctx->nt];
407 	tx_info = &tx_ring->tx_buffer_info[req_id];
408 	tx_info->num_of_bufs = 0;
409 	tx_info->nm_info.sockets_used = 0;
410 
411 	rc = ena_netmap_tx_map_slots(ctx, tx_info, &push_hdr, &header_len,
412 	    &packet_len);
413 	if (unlikely(rc != 0)) {
414 		ena_log_nm(adapter->pdev, ERR, "Failed to map Tx slot\n");
415 		return (rc);
416 	}
417 
418 	bzero(&ena_tx_ctx, sizeof(struct ena_com_tx_ctx));
419 	ena_tx_ctx.ena_bufs = tx_info->bufs;
420 	ena_tx_ctx.push_header = push_hdr;
421 	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
422 	ena_tx_ctx.req_id = req_id;
423 	ena_tx_ctx.header_len = header_len;
424 	ena_tx_ctx.meta_valid = adapter->disable_meta_caching;
425 
426 	/* There are no offloads, as netmap does not support them */
427 
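	/*
	 * Write the doorbell early if enough packets have accumulated since
	 * the last one, or if the device requires it before this packet can
	 * be queued.
	 */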
428 	if (tx_ring->acum_pkts == ENA_DB_THRESHOLD ||
429 	    ena_com_is_doorbell_needed(ctx->io_sq, &ena_tx_ctx))
430 		ena_ring_tx_doorbell(tx_ring);
431 
432 	rc = ena_com_prepare_tx(ctx->io_sq, &ena_tx_ctx, &nb_hw_desc);
433 	if (unlikely(rc != 0)) {
434 		if (likely(rc == ENA_COM_NO_MEM)) {
435 			ena_log_nm(adapter->pdev, DBG,
436 			    "Tx ring[%d] is out of space\n", tx_ring->que->id);
437 		} else {
438 			ena_log_nm(adapter->pdev, ERR,
439 			    "Failed to prepare Tx bufs\n");
440 			ena_trigger_reset(adapter,
441 			    ENA_REGS_RESET_DRIVER_INVALID_STATE);
442 		}
443 		counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);
444 
445 		ena_netmap_unmap_last_socket_chain(ctx, tx_info);
446 		return (rc);
447 	}
448 
449 	counter_enter();
450 	counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
451 	counter_u64_add_protected(tx_ring->tx_stats.bytes, packet_len);
452 	counter_u64_add_protected(adapter->hw_stats.tx_packets, 1);
453 	counter_u64_add_protected(adapter->hw_stats.tx_bytes, packet_len);
454 	counter_exit();
455 
456 	tx_info->tx_descs = nb_hw_desc;
457 
458 	ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);
459 
460 	for (unsigned int i = 0; i < tx_info->num_of_bufs; i++)
461 		bus_dmamap_sync(adapter->tx_buf_tag,
462 		    tx_info->nm_info.map_seg[i], BUS_DMASYNC_PREWRITE);
463 
464 	return (0);
465 }
466 
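/* Count how many netmap slots the frame starting at ctx->nm_i occupies. */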
467 static inline uint16_t
468 ena_netmap_count_slots(struct ena_netmap_ctx *ctx)
469 {
470 	uint16_t slots = 1;
471 	uint16_t nm = ctx->nm_i;
472 
473 	while ((ctx->slots[nm].flags & NS_MOREFRAG) != 0) {
474 		slots++;
475 		nm = nm_next(nm, ctx->lim);
476 	}
477 
478 	return slots;
479 }
480 
481 static inline uint16_t
482 ena_netmap_packet_len(struct netmap_slot *slots, u_int slot_index,
483     uint16_t limit)
484 {
485 	struct netmap_slot *nm_slot;
486 	uint16_t packet_size = 0;
487 
488 	do {
489 		nm_slot = &slots[slot_index];
490 		packet_size += nm_slot->len;
491 		slot_index = nm_next(slot_index, limit);
492 	} while ((nm_slot->flags & NS_MOREFRAG) != 0);
493 
494 	return packet_size;
495 }
496 
497 static int
498 ena_netmap_copy_data(struct netmap_adapter *na, struct netmap_slot *slots,
499     u_int slot_index, uint16_t limit, uint16_t bytes_to_copy, void *destination)
500 {
501 	struct netmap_slot *nm_slot;
502 	void *slot_vaddr;
503 	uint16_t data_amount;
504 
505 	do {
506 		nm_slot = &slots[slot_index];
507 		slot_vaddr = NMB(na, nm_slot);
508 		if (unlikely(slot_vaddr == NULL))
509 			return (EINVAL);
510 
511 		data_amount = min_t(uint16_t, bytes_to_copy, nm_slot->len);
512 		memcpy(destination, slot_vaddr, data_amount);
513 		bytes_to_copy -= data_amount;
514 
515 		slot_index = nm_next(slot_index, limit);
516 	} while ((nm_slot->flags & NS_MOREFRAG) != 0 && bytes_to_copy > 0);
517 
518 	return (0);
519 }
520 
521 static int
522 ena_netmap_map_single_slot(struct netmap_adapter *na, struct netmap_slot *slot,
523     bus_dma_tag_t dmatag, bus_dmamap_t dmamap, void **vaddr, uint64_t *paddr)
524 {
525 	device_t pdev;
526 	int rc;
527 
528 	pdev = ((struct ena_adapter *)if_getsoftc(na->ifp))->pdev;
529 
530 	*vaddr = PNMB(na, slot, paddr);
531 	if (unlikely(*vaddr == NULL)) {
532 		ena_log_nm(pdev, ERR, "Slot address is NULL\n");
533 		return (EINVAL);
534 	}
535 
536 	rc = netmap_load_map(na, dmatag, dmamap, *vaddr);
537 	if (unlikely(rc != 0)) {
538 		ena_log_nm(pdev, ERR, "Failed to map slot %d for DMA\n",
539 		    slot->buf_idx);
540 		return (EINVAL);
541 	}
542 
543 	return (0);
544 }
545 
546 static int
547 ena_netmap_tx_map_slots(struct ena_netmap_ctx *ctx,
548     struct ena_tx_buffer *tx_info, void **push_hdr, uint16_t *header_len,
549     uint16_t *packet_len)
550 {
551 	struct netmap_slot *slot;
552 	struct ena_com_buf *ena_buf;
553 	struct ena_adapter *adapter;
554 	struct ena_ring *tx_ring;
555 	struct ena_netmap_tx_info *nm_info;
556 	bus_dmamap_t *nm_maps;
557 	void *vaddr;
558 	uint64_t paddr;
559 	uint32_t *nm_buf_idx;
560 	uint32_t slot_head_len;
561 	uint32_t frag_len;
562 	uint32_t remaining_len;
563 	uint16_t push_len;
564 	uint16_t delta;
565 	int rc;
566 
567 	adapter = ctx->adapter;
568 	tx_ring = ctx->ring;
569 	ena_buf = tx_info->bufs;
570 	nm_info = &tx_info->nm_info;
571 	nm_maps = nm_info->map_seg;
572 	nm_buf_idx = nm_info->socket_buf_idx;
573 	slot = &ctx->slots[ctx->nm_i];
574 
575 	slot_head_len = slot->len;
576 	*packet_len = ena_netmap_packet_len(ctx->slots, ctx->nm_i, ctx->lim);
577 	remaining_len = *packet_len;
578 	delta = 0;
579 
580 	__builtin_prefetch(&ctx->slots[nm_next(ctx->nm_i, ctx->lim)]);
581 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
582 		/*
583 		 * When the device is in LLQ mode, the driver will copy
584 		 * the header into the device memory space.
585 		 * The ena_com layer assumes that the header is in a linear
586 		 * memory space.
587 		 * This assumption might be wrong since part of the header
588 		 * can reside in fragmented buffers.
589 		 * First, check if the header fits in the first slot. If not,
590 		 * copy it into a separate buffer holding the linearized data.
591 		 */
592 		push_len = min_t(uint32_t, *packet_len,
593 		    tx_ring->tx_max_header_size);
594 		*header_len = push_len;
595 		/* If header is in linear space, just point to socket's data. */
596 		if (likely(push_len <= slot_head_len)) {
597 			*push_hdr = NMB(ctx->na, slot);
598 			if (unlikely(*push_hdr == NULL)) {
599 				ena_log_nm(adapter->pdev, ERR,
600 				    "Slot vaddress is NULL\n");
601 				return (EINVAL);
602 			}
603 		/*
604 		 * Otherwise, copy the whole header from multiple slots
605 		 * into the intermediate buffer.
606 		 */
607 		} else {
608 			rc = ena_netmap_copy_data(ctx->na, ctx->slots,
609 			    ctx->nm_i, ctx->lim, push_len,
610 			    tx_ring->push_buf_intermediate_buf);
611 			if (unlikely(rc)) {
612 				ena_log_nm(adapter->pdev, ERR,
613 				    "Failed to copy data from slots to push_buf\n");
614 				return (EINVAL);
615 			}
616 
617 			*push_hdr = tx_ring->push_buf_intermediate_buf;
618 			counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);
619 
620 			delta = push_len - slot_head_len;
621 		}
622 
623 		ena_log_nm(adapter->pdev, DBG,
624 		    "slot: %d header_buf->vaddr: %p push_len: %d\n",
625 		    slot->buf_idx, *push_hdr, push_len);
626 
627 		/*
628 		 * If the header was in linear memory space, map the rest of
629 		 * the data in the first slot for DMA.
630 		 */
631 		if (slot_head_len > push_len) {
632 			rc = ena_netmap_map_single_slot(ctx->na, slot,
633 			    adapter->tx_buf_tag, *nm_maps, &vaddr, &paddr);
634 			if (unlikely(rc != 0)) {
635 				ena_log_nm(adapter->pdev, ERR,
636 				    "DMA mapping error\n");
637 				return (rc);
638 			}
639 			nm_maps++;
640 
641 			ena_buf->paddr = paddr + push_len;
642 			ena_buf->len = slot->len - push_len;
643 			ena_buf++;
644 
645 			tx_info->num_of_bufs++;
646 		}
647 
648 		remaining_len -= slot->len;
649 
650 		/* Save buf idx before advancing */
651 		*nm_buf_idx = slot->buf_idx;
652 		nm_buf_idx++;
653 		slot->buf_idx = 0;
654 
655 		/* Advance to the next socket */
656 		ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
657 		slot = &ctx->slots[ctx->nm_i];
658 		nm_info->sockets_used++;
659 
660 		/*
661 		 * If the header is in non-linear space (delta > 0), skip the
662 		 * slots containing only header data and map the last one that
663 		 * holds both the header remainder and packet data.
664 		 * The first segment has already been counted.
665 		 */
666 		while (delta > 0) {
667 			__builtin_prefetch(&ctx->slots[nm_next(ctx->nm_i, ctx->lim)]);
668 			frag_len = slot->len;
669 
670 			/*
671 			 * If the whole segment contains only header data,
672 			 * just move to the next one and reduce delta.
673 			 */
674 			if (unlikely(delta >= frag_len)) {
675 				delta -= frag_len;
676 			} else {
677 				/*
678 				 * Map the data and then apply the
679 				 * header offset.
680 				 */
681 				rc = ena_netmap_map_single_slot(ctx->na, slot,
682 				    adapter->tx_buf_tag, *nm_maps, &vaddr,
683 				    &paddr);
684 				if (unlikely(rc != 0)) {
685 					ena_log_nm(adapter->pdev, ERR,
686 					    "DMA mapping error\n");
687 					goto error_map;
688 				}
689 				nm_maps++;
690 
691 				ena_buf->paddr = paddr + delta;
692 				ena_buf->len = slot->len - delta;
693 				ena_buf++;
694 
695 				tx_info->num_of_bufs++;
696 				delta = 0;
697 			}
698 
699 			remaining_len -= slot->len;
700 
701 			/* Save buf idx before advancing */
702 			*nm_buf_idx = slot->buf_idx;
703 			nm_buf_idx++;
704 			slot->buf_idx = 0;
705 
706 			/* Advance to the next socket */
707 			ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
708 			slot = &ctx->slots[ctx->nm_i];
709 			nm_info->sockets_used++;
710 		}
711 	} else {
712 		*push_hdr = NULL;
713 		/*
714 		 * header_len is just a hint for the device. Because netmap does
715 		 * not give us any information about the packet header length and
716 		 * it is not guaranteed that all packet headers will be in the
717 		 * 1st slot, setting header_len to 0 makes the device ignore
718 		 * this value and resolve the header on its own.
719 		 */
720 		*header_len = 0;
721 	}
722 
723 	/* Map all remaining data (regular routine for non-LLQ mode) */
724 	while (remaining_len > 0) {
725 		__builtin_prefetch(&ctx->slots[nm_next(ctx->nm_i, ctx->lim)]);
726 
727 		rc = ena_netmap_map_single_slot(ctx->na, slot,
728 		    adapter->tx_buf_tag, *nm_maps, &vaddr, &paddr);
729 		if (unlikely(rc != 0)) {
730 			ena_log_nm(adapter->pdev, ERR, "DMA mapping error\n");
731 			goto error_map;
732 		}
733 		nm_maps++;
734 
735 		ena_buf->paddr = paddr;
736 		ena_buf->len = slot->len;
737 		ena_buf++;
738 
739 		tx_info->num_of_bufs++;
740 
741 		remaining_len -= slot->len;
742 
743 		/* Save buf idx before advancing */
744 		*nm_buf_idx = slot->buf_idx;
745 		nm_buf_idx++;
746 		slot->buf_idx = 0;
747 
748 		/* Advance to the next socket */
749 		ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
750 		slot = &ctx->slots[ctx->nm_i];
751 		nm_info->sockets_used++;
752 	}
753 
754 	return (0);
755 
756 error_map:
757 	ena_netmap_unmap_last_socket_chain(ctx, tx_info);
758 
759 	return (rc);
760 }
761 
762 static void
763 ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *ctx,
764     struct ena_tx_buffer *tx_info)
765 {
766 	struct ena_netmap_tx_info *nm_info;
767 	int n;
768 
769 	nm_info = &tx_info->nm_info;
770 
771 	/**
772 	 * As the number of used sockets need not be equal to the number of
773 	 * buffers used in LLQ mode, they must be treated separately.
774 	 * First, unmap the DMA maps.
775 	 */
776 	n = tx_info->num_of_bufs;
777 	while (n--) {
778 		netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
779 		    nm_info->map_seg[n]);
780 	}
781 	tx_info->num_of_bufs = 0;
782 
783 	/* Next, return the sockets back to the userspace */
784 	n = nm_info->sockets_used;
785 	while (n--) {
786 		ctx->nm_i = nm_prev(ctx->nm_i, ctx->lim);
787 		ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
788 		ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
789 		nm_info->socket_buf_idx[n] = 0;
790 	}
791 	nm_info->sockets_used = 0;
792 }
793 
794 static void
795 ena_netmap_tx_cleanup(struct ena_netmap_ctx *ctx)
796 {
797 	struct ena_ring *tx_ring = ctx->ring;
798 	int rc;
799 	uint16_t req_id;
800 	uint16_t total_tx_descs = 0;
801 
802 	ctx->nm_i = ctx->kring->nr_hwtail;
803 	ctx->nt = tx_ring->next_to_clean;
804 
805 	/* Reclaim buffers for completed transmissions */
806 	do {
807 		rc = ena_com_tx_comp_req_id_get(ctx->io_cq, &req_id);
808 		if (unlikely(rc == ENA_COM_TRY_AGAIN))
809 			break;
810 
811 		rc = validate_tx_req_id(tx_ring, req_id, rc);
812 		if (unlikely(rc != 0))
813 			break;
814 
815 		total_tx_descs += ena_netmap_tx_clean_one(ctx, req_id);
816 	} while (1);
817 
818 	ctx->kring->nr_hwtail = ctx->nm_i;
819 
820 	if (total_tx_descs > 0) {
821 		/* acknowledge completion of sent packets */
822 		tx_ring->next_to_clean = ctx->nt;
823 		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
824 	}
825 }
826 
827 static uint16_t
828 ena_netmap_tx_clean_one(struct ena_netmap_ctx *ctx, uint16_t req_id)
829 {
830 	struct ena_tx_buffer *tx_info;
831 	struct ena_netmap_tx_info *nm_info;
832 	int n;
833 
834 	tx_info = &ctx->ring->tx_buffer_info[req_id];
835 	nm_info = &tx_info->nm_info;
836 
837 	/**
838 	 * As the number of used sockets need not be equal to the number of
839 	 * buffers used in LLQ mode, they must be treated separately.
840 	 * First, unmap the DMA maps.
841 	 */
843 	for (n = 0; n < tx_info->num_of_bufs; n++) {
844 		netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
845 		    nm_info->map_seg[n]);
846 	}
847 	tx_info->num_of_bufs = 0;
848 
849 	/* Next, return the sockets back to the userspace */
850 	for (n = 0; n < nm_info->sockets_used; n++) {
851 		ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
852 		ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0,
853 		    ctx->adapter->ena_dev, "Tx idx is not 0.\n");
854 		ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
855 		ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
856 		nm_info->socket_buf_idx[n] = 0;
857 	}
858 	nm_info->sockets_used = 0;
859 
860 	ctx->ring->free_tx_ids[ctx->nt] = req_id;
861 	ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->lim);
862 
863 	return tx_info->tx_descs;
864 }
865 
866 static int
867 ena_netmap_rxsync(struct netmap_kring *kring, int flags)
868 {
869 	struct ena_netmap_ctx ctx;
870 	int rc;
871 
872 	ena_netmap_fill_ctx(kring, &ctx, ENA_IO_RXQ_IDX(kring->ring_id));
873 	ctx.ring = &ctx.adapter->rx_ring[kring->ring_id];
874 
875 	if (ctx.kring->rhead > ctx.lim) {
876 		/* Probably not needed to release slots from RX ring. */
877 		return (netmap_ring_reinit(ctx.kring));
878 	}
879 
880 	if (unlikely((if_getdrvflags(ctx.na->ifp) & IFF_DRV_RUNNING) == 0))
881 		return (0);
882 
883 	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
884 		return (0);
885 
886 	if ((rc = ena_netmap_rx_frames(&ctx)) != 0)
887 		return (rc);
888 
889 	ena_netmap_rx_cleanup(&ctx);
890 
891 	return (0);
892 }
893 
894 static inline int
ena_netmap_rx_frames(struct ena_netmap_ctx * ctx)895 ena_netmap_rx_frames(struct ena_netmap_ctx *ctx)
896 {
897 	int rc = 0;
898 	int frames_counter = 0;
899 
900 	ctx->nt = ctx->ring->next_to_clean;
901 	ctx->nm_i = ctx->kring->nr_hwtail;
902 
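	/*
	 * Drain completed frames from the Rx completion queue into netmap
	 * slots. ENA_MAX_FRAMES bounds the loop so a misbehaving device
	 * cannot keep the driver spinning here indefinitely.
	 */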
903 	while ((rc = ena_netmap_rx_frame(ctx)) == ENA_NETMAP_MORE_FRAMES) {
904 		frames_counter++;
905 		/* ENA_NETMAP_MORE_FRAMES is not an error, just a signal to keep going. */
906 		rc = 0;
907 		if (frames_counter > ENA_MAX_FRAMES) {
908 			ena_log_nm(ctx->adapter->pdev, ERR,
909 			    "Driver is stuck in the Rx loop\n");
910 			break;
911 		}
912 	}
913 
914 	ctx->kring->nr_hwtail = ctx->nm_i;
915 	ctx->kring->nr_kflags &= ~NKR_PENDINTR;
916 	ctx->ring->next_to_clean = ctx->nt;
917 
918 	return (rc);
919 }
920 
921 static inline int
922 ena_netmap_rx_frame(struct ena_netmap_ctx *ctx)
923 {
924 	struct ena_com_rx_ctx ena_rx_ctx;
925 	enum ena_regs_reset_reason_types reset_reason;
926 	int rc, len = 0;
927 	uint16_t buf, nm;
928 
929 	ena_rx_ctx.ena_bufs = ctx->ring->ena_bufs;
930 	ena_rx_ctx.max_bufs = ctx->adapter->max_rx_sgl_size;
931 	bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
932 	    ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD);
933 
934 	rc = ena_com_rx_pkt(ctx->io_cq, ctx->io_sq, &ena_rx_ctx);
935 	if (unlikely(rc != 0)) {
936 		ena_log_nm(ctx->adapter->pdev, ERR,
937 		    "Failed to read pkt from the device with error: %d\n", rc);
938 		if (rc == ENA_COM_NO_SPACE) {
939 			counter_u64_add(ctx->ring->rx_stats.bad_desc_num, 1);
940 			reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
941 		} else if (rc == ENA_COM_FAULT) {
942 			reset_reason = ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED;
943 		} else {
944 			counter_u64_add(ctx->ring->rx_stats.bad_req_id, 1);
945 			reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
946 		}
947 		ena_trigger_reset(ctx->adapter, reset_reason);
948 		return (rc);
949 	}
950 	if (unlikely(ena_rx_ctx.descs == 0))
951 		return (ENA_NETMAP_NO_MORE_FRAMES);
952 
953 	ena_log_nm(ctx->adapter->pdev, DBG,
954 	    "Rx: q %d got packet from ena. descs #:"
955 	    " %d l3 proto %d l4 proto %d hash: %x\n",
956 	    ctx->ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
957 	    ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
958 
959 	for (buf = 0; buf < ena_rx_ctx.descs; buf++)
960 		if ((rc = ena_netmap_rx_load_desc(ctx, buf, &len)) != 0)
961 			break;
962 	/*
963 	 * ena_netmap_rx_load_desc doesn't know the total number of descriptors.
964 	 * It just sets the NS_MOREFRAG flag on every slot, so the flag is
965 	 * cleared here on the last slot.
966 	 */
967 	ctx->slots[nm_prev(ctx->nm_i, ctx->lim)].flags &= ~NS_MOREFRAG;
968 
969 	if (rc != 0) {
970 		goto rx_clear_desc;
971 	}
972 
973 	bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
974 	    ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD);
975 
976 	counter_enter();
977 	counter_u64_add_protected(ctx->ring->rx_stats.bytes, len);
978 	counter_u64_add_protected(ctx->adapter->hw_stats.rx_bytes, len);
979 	counter_u64_add_protected(ctx->ring->rx_stats.cnt, 1);
980 	counter_u64_add_protected(ctx->adapter->hw_stats.rx_packets, 1);
981 	counter_exit();
982 
983 	return (ENA_NETMAP_MORE_FRAMES);
984 
985 rx_clear_desc:
986 	nm = ctx->nm_i;
987 
988 	/* Remove failed packet from ring */
989 	while (buf--) {
990 		ctx->slots[nm].flags = 0;
991 		ctx->slots[nm].len = 0;
992 		nm = nm_prev(nm, ctx->lim);
993 	}
994 
995 	return (rc);
996 }
997 
998 static inline int
999 ena_netmap_rx_load_desc(struct ena_netmap_ctx *ctx, uint16_t buf, int *len)
1000 {
1001 	struct ena_rx_buffer *rx_info;
1002 	uint16_t req_id;
1003 
1004 	req_id = ctx->ring->ena_bufs[buf].req_id;
1005 	rx_info = &ctx->ring->rx_buffer_info[req_id];
1006 	bus_dmamap_sync(ctx->adapter->rx_buf_tag, rx_info->map,
1007 	    BUS_DMASYNC_POSTREAD);
1008 	netmap_unload_map(ctx->na, ctx->adapter->rx_buf_tag, rx_info->map);
1009 
1010 	ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0, ctx->adapter->ena_dev,
1011 	    "Rx idx is not 0.\n");
1012 
1013 	ctx->slots[ctx->nm_i].buf_idx = rx_info->netmap_buf_idx;
1014 	rx_info->netmap_buf_idx = 0;
1015 	/*
1016 	 * Set NS_MOREFRAG on all slots.
1017 	 * ena_netmap_rx_frame then clears it on the last one.
1018 	 */
1019 	ctx->slots[ctx->nm_i].flags |= NS_MOREFRAG | NS_BUF_CHANGED;
1020 	ctx->slots[ctx->nm_i].len = ctx->ring->ena_bufs[buf].len;
1021 	*len += ctx->slots[ctx->nm_i].len;
1022 	ctx->ring->free_rx_ids[ctx->nt] = req_id;
1023 	ena_log_nm(ctx->adapter->pdev, DBG,
1024 	    "rx_info %p, buf_idx %d, paddr %jx, nm: %d\n", rx_info,
1025 	    ctx->slots[ctx->nm_i].buf_idx, (uintmax_t)rx_info->ena_buf.paddr,
1026 	    ctx->nm_i);
1027 
1028 	ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
1029 	ctx->nt = ENA_RX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);
1030 
1031 	return (0);
1032 }
1033 
1034 static inline void
1035 ena_netmap_rx_cleanup(struct ena_netmap_ctx *ctx)
1036 {
1037 	int refill_required;
1038 
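	/*
	 * Refill as many buffers as the application has released: the
	 * distance from nr_hwcur to rhead, modulo the ring size, keeping one
	 * slot unused while the ring is not empty.
	 */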
1039 	refill_required = ctx->kring->rhead - ctx->kring->nr_hwcur;
1040 	if (ctx->kring->nr_hwcur != ctx->kring->nr_hwtail)
1041 		refill_required -= 1;
1042 
1043 	if (refill_required == 0)
1044 		return;
1045 	else if (refill_required < 0)
1046 		refill_required += ctx->kring->nkr_num_slots;
1047 
1048 	ena_refill_rx_bufs(ctx->ring, refill_required);
1049 }
1050 
1051 static inline void
1052 ena_netmap_fill_ctx(struct netmap_kring *kring, struct ena_netmap_ctx *ctx,
1053     uint16_t ena_qid)
1054 {
1055 	ctx->kring = kring;
1056 	ctx->na = kring->na;
1057 	ctx->adapter = if_getsoftc(ctx->na->ifp);
1058 	ctx->lim = kring->nkr_num_slots - 1;
1059 	ctx->io_cq = &ctx->adapter->ena_dev->io_cq_queues[ena_qid];
1060 	ctx->io_sq = &ctx->adapter->ena_dev->io_sq_queues[ena_qid];
1061 	ctx->slots = kring->ring->slot;
1062 }
1063 
1064 void
1065 ena_netmap_unload(struct ena_adapter *adapter, bus_dmamap_t map)
1066 {
1067 	struct netmap_adapter *na = NA(adapter->ifp);
1068 
1069 	netmap_unload_map(na, adapter->tx_buf_tag, map);
1070 }
1071 
1072 #endif /* DEV_NETMAP */
1073