xref: /freebsd/sys/dev/mana/mana_en.c (revision ce110ea12fcea71ae437d0a1d0549d3d32055b0e)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2021 Microsoft Corp.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/kernel.h>
37 #include <sys/kthread.h>
38 #include <sys/malloc.h>
39 #include <sys/mbuf.h>
40 #include <sys/smp.h>
41 #include <sys/socket.h>
42 #include <sys/sockio.h>
43 #include <sys/time.h>
44 #include <sys/eventhandler.h>
45 
46 #include <machine/bus.h>
47 #include <machine/resource.h>
48 #include <machine/in_cksum.h>
49 
50 #include <net/if.h>
51 #include <net/if_var.h>
52 #include <net/if_types.h>
53 #include <net/if_vlan_var.h>
54 #ifdef RSS
55 #include <net/rss_config.h>
56 #endif
57 
58 #include <netinet/in_systm.h>
59 #include <netinet/in.h>
60 #include <netinet/if_ether.h>
61 #include <netinet/ip.h>
62 #include <netinet/ip6.h>
63 #include <netinet/tcp.h>
64 #include <netinet/udp.h>
65 
66 #include "mana.h"
67 #include "mana_sysctl.h"
68 
69 static int mana_up(struct mana_port_context *apc);
70 static int mana_down(struct mana_port_context *apc);
71 
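/*
 * Fill the caller's buffer with the RSS (Toeplitz) hash key.  The key is
 * generated once with arc4random and then reused, so every port on this
 * host sees the same key across re-initializations.
 */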
72 static void
73 mana_rss_key_fill(void *k, size_t size)
74 {
75 	static bool rss_key_generated = false;
76 	static uint8_t rss_key[MANA_HASH_KEY_SIZE];
77 
78 	KASSERT(size <= MANA_HASH_KEY_SIZE,
79 	    ("Request more bytes than MANA RSS key can hold"));
80 
81 	if (!rss_key_generated) {
82 		arc4random_buf(rss_key, MANA_HASH_KEY_SIZE);
83 		rss_key_generated = true;
84 	}
85 	memcpy(k, rss_key, size);
86 }
87 
88 static int
89 mana_ifmedia_change(struct ifnet *ifp __unused)
90 {
91 	return EOPNOTSUPP;
92 }
93 
94 static void
95 mana_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
96 {
97 	struct mana_port_context *apc = if_getsoftc(ifp);
98 
99 	if (!apc) {
100 		if_printf(ifp, "Port not available\n");
101 		return;
102 	}
103 
104 	MANA_APC_LOCK_LOCK(apc);
105 
106 	ifmr->ifm_status = IFM_AVALID;
107 	ifmr->ifm_active = IFM_ETHER;
108 
109 	if (!apc->port_is_up) {
110 		MANA_APC_LOCK_UNLOCK(apc);
111 		mana_info(NULL, "Port %u link is down\n", apc->port_idx);
112 		return;
113 	}
114 
115 	ifmr->ifm_status |= IFM_ACTIVE;
116 	ifmr->ifm_active |= IFM_100G_DR | IFM_FDX;
117 
118 	MANA_APC_LOCK_UNLOCK(apc);
119 }
120 
121 static uint64_t
122 mana_get_counter(struct ifnet *ifp, ift_counter cnt)
123 {
124 	struct mana_port_context *apc = if_getsoftc(ifp);
125 	struct mana_port_stats *stats = &apc->port_stats;
126 
127 	switch (cnt) {
128 	case IFCOUNTER_IPACKETS:
129 		return (counter_u64_fetch(stats->rx_packets));
130 	case IFCOUNTER_OPACKETS:
131 		return (counter_u64_fetch(stats->tx_packets));
132 	case IFCOUNTER_IBYTES:
133 		return (counter_u64_fetch(stats->rx_bytes));
134 	case IFCOUNTER_OBYTES:
135 		return (counter_u64_fetch(stats->tx_bytes));
136 	case IFCOUNTER_IQDROPS:
137 		return (counter_u64_fetch(stats->rx_drops));
138 	case IFCOUNTER_OQDROPS:
139 		return (counter_u64_fetch(stats->tx_drops));
140 	default:
141 		return (if_get_counter_default(ifp, cnt));
142 	}
143 }
144 
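/*
 * Cancel any pending EQ cleanup task and, if one is already running,
 * wait for it to finish before returning.
 */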
145 static void
146 mana_drain_eq_task(struct gdma_queue *queue)
147 {
148 	if (!queue || !queue->eq.cleanup_tq)
149 		return;
150 
151 	while (taskqueue_cancel(queue->eq.cleanup_tq,
152 	    &queue->eq.cleanup_task, NULL)) {
153 		taskqueue_drain(queue->eq.cleanup_tq,
154 		    &queue->eq.cleanup_task);
155 	}
156 }
157 
158 static void
159 mana_qflush(struct ifnet *ifp)
160 {
161 	if_qflush(ifp);
162 }
163 
164 int
165 mana_restart(struct mana_port_context *apc)
166 {
167 	int rc = 0;
168 
169 	MANA_APC_LOCK_LOCK(apc);
170 	if (apc->port_is_up)
171 		 mana_down(apc);
172 
173 	rc = mana_up(apc);
174 	MANA_APC_LOCK_UNLOCK(apc);
175 
176 	return (rc);
177 }
178 
179 static int
180 mana_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
181 {
182 	struct mana_port_context *apc = if_getsoftc(ifp);
183 	struct ifrsskey *ifrk;
184 	struct ifrsshash *ifrh;
185 	struct ifreq *ifr;
186 	uint16_t new_mtu;
187 	int rc = 0;
188 
189 	switch (command) {
190 	case SIOCSIFMTU:
191 		ifr = (struct ifreq *)data;
192 		new_mtu = ifr->ifr_mtu;
193 		if (ifp->if_mtu == new_mtu)
194 			break;
195 		if ((new_mtu + 18 > MAX_FRAME_SIZE) ||
196 		    (new_mtu + 18 < MIN_FRAME_SIZE)) {
197 			if_printf(ifp, "Invalid MTU. new_mtu: %d, "
198 			    "max allowed: %d, min allowed: %d\n",
199 			    new_mtu, MAX_FRAME_SIZE - 18, MIN_FRAME_SIZE - 18);
200 			return EINVAL;
201 		}
202 		MANA_APC_LOCK_LOCK(apc);
203 		if (apc->port_is_up)
204 			mana_down(apc);
205 
206 		apc->frame_size = new_mtu + 18;
207 		if_setmtu(ifp, new_mtu);
208 		mana_dbg(NULL, "Set MTU to %d\n", new_mtu);
209 
210 		rc = mana_up(apc);
211 		MANA_APC_LOCK_UNLOCK(apc);
212 		break;
213 
214 	case SIOCSIFFLAGS:
215 		if (ifp->if_flags & IFF_UP) {
216 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
217 				MANA_APC_LOCK_LOCK(apc);
218 				if (!apc->port_is_up)
219 					rc = mana_up(apc);
220 				MANA_APC_LOCK_UNLOCK(apc);
221 			}
222 		} else {
223 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
224 				MANA_APC_LOCK_LOCK(apc);
225 				if (apc->port_is_up)
226 					mana_down(apc);
227 				MANA_APC_LOCK_UNLOCK(apc);
228 			}
229 		}
230 		break;
231 
232 	case SIOCSIFMEDIA:
233 	case SIOCGIFMEDIA:
234 	case SIOCGIFXMEDIA:
235 		ifr = (struct ifreq *)data;
236 		rc = ifmedia_ioctl(ifp, ifr, &apc->media, command);
237 		break;
238 
239 	case SIOCGIFRSSKEY:
240 		ifrk = (struct ifrsskey *)data;
241 		ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
242 		ifrk->ifrk_keylen = MANA_HASH_KEY_SIZE;
243 		memcpy(ifrk->ifrk_key, apc->hashkey, MANA_HASH_KEY_SIZE);
244 		break;
245 
246 	case SIOCGIFRSSHASH:
247 		ifrh = (struct ifrsshash *)data;
248 		ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
249 		ifrh->ifrh_types =
250 		    RSS_TYPE_TCP_IPV4 |
251 		    RSS_TYPE_UDP_IPV4 |
252 		    RSS_TYPE_TCP_IPV6 |
253 		    RSS_TYPE_UDP_IPV6;
254 		break;
255 
256 	default:
257 		rc = ether_ioctl(ifp, command, data);
258 		break;
259 	}
260 
261 	return (rc);
262 }
263 
264 static inline void
265 mana_alloc_counters(counter_u64_t *begin, int size)
266 {
267 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
268 
269 	for (; begin < end; ++begin)
270 		*begin = counter_u64_alloc(M_WAITOK);
271 }
272 
273 static inline void
274 mana_free_counters(counter_u64_t *begin, int size)
275 {
276 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
277 
278 	for (; begin < end; ++begin)
279 		counter_u64_free(*begin);
280 }
281 
282 static inline void
283 mana_reset_counters(counter_u64_t *begin, int size)
284 {
285 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
286 
287 	for (; begin < end; ++begin)
288 		counter_u64_zero(*begin);
289 }
290 
291 static bool
292 mana_can_tx(struct gdma_queue *wq)
293 {
294 	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
295 }
296 
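/*
 * DMA-map an outgoing mbuf chain and fill in the WQE scatter/gather list.
 * If the chain has more segments than MAX_MBUF_FRAGS, try m_collapse()
 * once before giving up with ENOBUFS.
 */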
297 static inline int
298 mana_tx_map_mbuf(struct mana_port_context *apc,
299     struct mana_send_buf_info *tx_info,
300     struct mbuf **m_head, struct mana_tx_package *tp,
301     struct mana_stats *tx_stats)
302 {
303 	struct gdma_dev *gd = apc->ac->gdma_dev;
304 	bus_dma_segment_t segs[MAX_MBUF_FRAGS];
305 	struct mbuf *m = *m_head;
306 	int err, nsegs, i;
307 
308 	err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, tx_info->dma_map,
309 	    m, segs, &nsegs, BUS_DMA_NOWAIT);
310 	if (err == EFBIG) {
311 		struct mbuf *m_new;
312 
313 		counter_u64_add(tx_stats->collapse, 1);
314 		m_new = m_collapse(m, M_NOWAIT, MAX_MBUF_FRAGS);
315 		if (unlikely(m_new == NULL)) {
316 			counter_u64_add(tx_stats->collapse_err, 1);
317 			return ENOBUFS;
318 		} else {
319 			*m_head = m = m_new;
320 		}
321 
322 		mana_warn(NULL,
323 		    "Too many segs in orig mbuf, m_collapse called\n");
324 
325 		err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag,
326 		    tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
327 	}
328 	if (!err) {
329 		for (i = 0; i < nsegs; i++) {
330 			tp->wqe_req.sgl[i].address = segs[i].ds_addr;
331 			tp->wqe_req.sgl[i].mem_key = gd->gpa_mkey;
332 			tp->wqe_req.sgl[i].size = segs[i].ds_len;
333 		}
334 		tp->wqe_req.num_sge = nsegs;
335 
336 		tx_info->mbuf = *m_head;
337 
338 		bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
339 		    BUS_DMASYNC_PREWRITE);
340 	}
341 
342 	return err;
343 }
344 
345 static inline void
346 mana_tx_unmap_mbuf(struct mana_port_context *apc,
347     struct mana_send_buf_info *tx_info)
348 {
349 	bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
350 	    BUS_DMASYNC_POSTWRITE);
351 	bus_dmamap_unload(apc->tx_buf_tag, tx_info->dma_map);
352 	if (tx_info->mbuf) {
353 		m_freem(tx_info->mbuf);
354 		tx_info->mbuf = NULL;
355 	}
356 }
357 
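/*
 * Prepare one receive buffer: optionally allocate a new mbuf (a jumbo
 * cluster sized to rxq->datasize, falling back to a regular cluster),
 * DMA-map it, and record the single-entry SGL used by the RX WQE.
 */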
358 static inline int
359 mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
360     struct mana_recv_buf_oob *rx_oob, bool alloc_mbuf)
361 {
362 	bus_dma_segment_t segs[1];
363 	struct mbuf *mbuf;
364 	int nsegs, err;
365 	uint32_t mlen;
366 
367 	if (alloc_mbuf) {
368 		mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize);
369 		if (unlikely(mbuf == NULL)) {
370 			mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
371 			if (unlikely(mbuf == NULL)) {
372 				return ENOMEM;
373 			}
374 			mlen = MCLBYTES;
375 		} else {
376 			mlen = rxq->datasize;
377 		}
378 
379 		mbuf->m_pkthdr.len = mbuf->m_len = mlen;
380 	} else {
381 		if (rx_oob->mbuf) {
382 			mbuf = rx_oob->mbuf;
383 			mlen = rx_oob->mbuf->m_pkthdr.len;
384 		} else {
385 			return ENOMEM;
386 		}
387 	}
388 
389 	err = bus_dmamap_load_mbuf_sg(apc->rx_buf_tag, rx_oob->dma_map,
390 	    mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
391 
392 	if (unlikely((err != 0) || (nsegs != 1))) {
393 		mana_warn(NULL, "Failed to map mbuf, error: %d, "
394 		    "nsegs: %d\n", err, nsegs);
395 		counter_u64_add(rxq->stats.dma_mapping_err, 1);
396 		goto error;
397 	}
398 
399 	bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
400 	    BUS_DMASYNC_PREREAD);
401 
402 	rx_oob->mbuf = mbuf;
403 	rx_oob->num_sge = 1;
404 	rx_oob->sgl[0].address = segs[0].ds_addr;
405 	rx_oob->sgl[0].size = mlen;
406 	rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
407 
408 	return 0;
409 
410 error:
411 	m_freem(mbuf);
412 	return EFAULT;
413 }
414 
415 static inline void
416 mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
417     struct mana_recv_buf_oob *rx_oob, bool free_mbuf)
418 {
419 	bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
420 	    BUS_DMASYNC_POSTREAD);
421 	bus_dmamap_unload(apc->rx_buf_tag, rx_oob->dma_map);
422 
423 	if (free_mbuf && rx_oob->mbuf) {
424 		m_freem(rx_oob->mbuf);
425 		rx_oob->mbuf = NULL;
426 	}
427 }
428 
429 
430 /* Use a couple of mbuf PH_loc spaces for the l3 and l4 protocol types */
431 #define MANA_L3_PROTO(_mbuf)	((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
432 #define MANA_L4_PROTO(_mbuf)	((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
433 
434 #define MANA_TXQ_FULL	(IFF_DRV_RUNNING | IFF_DRV_OACTIVE)
435 
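/*
 * Main transmit loop, called with the txq mutex held.  Mbufs are peeked
 * from the buf_ring, DMA-mapped, wrapped in a TX OOB describing the
 * checksum/TSO offloads, and posted to the send WQ.  When the SQ runs out
 * of space the IFF_DRV_OACTIVE flag is set and, if the queue stalls
 * frequently, traffic may be steered to an alternate txq (alt_txq_idx).
 */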
436 static void
437 mana_xmit(struct mana_txq *txq)
438 {
439 	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
440 	struct mana_send_buf_info *tx_info;
441 	struct ifnet *ndev = txq->ndev;
442 	struct mbuf *mbuf;
443 	struct mana_port_context *apc = if_getsoftc(ndev);
444 	struct mana_port_stats *port_stats = &apc->port_stats;
445 	struct gdma_dev *gd = apc->ac->gdma_dev;
446 	uint64_t packets, bytes;
447 	uint16_t next_to_use;
448 	struct mana_tx_package pkg = {};
449 	struct mana_stats *tx_stats;
450 	struct gdma_queue *gdma_sq;
451 	struct gdma_queue *gdma_eq;
452 	struct mana_cq *cq;
453 	int err, len;
454 
455 	gdma_sq = txq->gdma_sq;
456 	cq = &apc->tx_qp[txq->idx].tx_cq;
457 	gdma_eq = cq->gdma_cq->cq.parent;
458 	tx_stats = &txq->stats;
459 
460 	packets = 0;
461 	bytes = 0;
462 	next_to_use = txq->next_to_use;
463 
464 	while ((mbuf = drbr_peek(ndev, txq->txq_br)) != NULL) {
465 		if (!apc->port_is_up ||
466 		    (if_getdrvflags(ndev) & MANA_TXQ_FULL) != IFF_DRV_RUNNING) {
467 			drbr_putback(ndev, txq->txq_br, mbuf);
468 			break;
469 		}
470 
471 		if (!mana_can_tx(gdma_sq)) {
472 			/* SQ is full. Set the IFF_DRV_OACTIVE flag */
473 			if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, 0);
474 			counter_u64_add(tx_stats->stop, 1);
475 			uint64_t stops = counter_u64_fetch(tx_stats->stop);
476 			uint64_t wakeups = counter_u64_fetch(tx_stats->wakeup);
477 #define MANA_TXQ_STOP_THRESHOLD		50
478 			if (stops > MANA_TXQ_STOP_THRESHOLD && wakeups > 0 &&
479 			    stops > wakeups && txq->alt_txq_idx == txq->idx) {
480 				txq->alt_txq_idx =
481 				    (txq->idx + (stops / wakeups))
482 				    % apc->num_queues;
483 				counter_u64_add(tx_stats->alt_chg, 1);
484 			}
485 
486 			drbr_putback(ndev, txq->txq_br, mbuf);
487 
488 			taskqueue_enqueue(gdma_eq->eq.cleanup_tq,
489 			    &gdma_eq->eq.cleanup_task);
490 			break;
491 		}
492 
493 		tx_info = &txq->tx_buf_info[next_to_use];
494 
495 		memset(&pkg, 0, sizeof(struct mana_tx_package));
496 		pkg.wqe_req.sgl = pkg.sgl_array;
497 
498 		err = mana_tx_map_mbuf(apc, tx_info, &mbuf, &pkg, tx_stats);
499 		if (unlikely(err)) {
500 			mana_dbg(NULL,
501 			    "Failed to map tx mbuf, err %d\n", err);
502 
503 			counter_u64_add(tx_stats->dma_mapping_err, 1);
504 
505 			/* The mbuf is still there. Free it */
506 			m_freem(mbuf);
507 			/* Advance the drbr queue */
508 			drbr_advance(ndev, txq->txq_br);
509 			continue;
510 		}
511 
512 		pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
513 		pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
514 
515 		if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
516 			pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
517 			pkt_fmt = MANA_LONG_PKT_FMT;
518 		} else {
519 			pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
520 		}
521 
522 		pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
523 
524 		if (pkt_fmt == MANA_SHORT_PKT_FMT)
525 			pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
526 		else
527 			pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
528 
529 		pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
530 		pkg.wqe_req.flags = 0;
531 		pkg.wqe_req.client_data_unit = 0;
532 
533 		if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
534 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
535 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
536 			else
537 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
538 
539 			pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
540 			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
541 			pkg.tx_oob.s_oob.trans_off = mbuf->m_pkthdr.l3hlen;
542 
543 			pkg.wqe_req.client_data_unit = mbuf->m_pkthdr.tso_segsz;
544 			pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
545 		} else if (mbuf->m_pkthdr.csum_flags &
546 		    (CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP6_UDP | CSUM_IP6_TCP)) {
547 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP) {
548 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
549 				pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
550 			} else {
551 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
552 			}
553 
554 			if (MANA_L4_PROTO(mbuf) == IPPROTO_TCP) {
555 				pkg.tx_oob.s_oob.comp_tcp_csum = 1;
556 				pkg.tx_oob.s_oob.trans_off =
557 				    mbuf->m_pkthdr.l3hlen;
558 			} else {
559 				pkg.tx_oob.s_oob.comp_udp_csum = 1;
560 			}
561 		} else if (mbuf->m_pkthdr.csum_flags & CSUM_IP) {
562 			pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
563 			pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
564 		} else {
565 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
566 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
567 			else if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IPV6)
568 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
569 		}
570 
571 		len = mbuf->m_pkthdr.len;
572 
573 		err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
574 		    (struct gdma_posted_wqe_info *)&tx_info->wqe_inf);
575 		if (unlikely(err)) {
576 			/* Should not happen */
577 			if_printf(ndev, "Failed to post TX OOB: %d\n", err);
578 
579 			mana_tx_unmap_mbuf(apc, tx_info);
580 
581 			drbr_advance(ndev, txq->txq_br);
582 			continue;
583 		}
584 
585 		next_to_use =
586 		    (next_to_use + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
587 
588 		atomic_inc_return(&txq->pending_sends);
589 
590 		drbr_advance(ndev, txq->txq_br);
591 
592 		mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
593 
594 		packets++;
595 		bytes += len;
596 	}
597 
598 	counter_enter();
599 	counter_u64_add_protected(tx_stats->packets, packets);
600 	counter_u64_add_protected(port_stats->tx_packets, packets);
601 	counter_u64_add_protected(tx_stats->bytes, bytes);
602 	counter_u64_add_protected(port_stats->tx_bytes, bytes);
603 	counter_exit();
604 
605 	txq->next_to_use = next_to_use;
606 }
607 
608 static void
609 mana_xmit_taskfunc(void *arg, int pending)
610 {
611 	struct mana_txq *txq = (struct mana_txq *)arg;
612 	struct ifnet *ndev = txq->ndev;
613 	struct mana_port_context *apc = if_getsoftc(ndev);
614 
615 	while (!drbr_empty(ndev, txq->txq_br) && apc->port_is_up &&
616 	    (if_getdrvflags(ndev) & MANA_TXQ_FULL) == IFF_DRV_RUNNING) {
617 		mtx_lock(&txq->txq_mtx);
618 		mana_xmit(txq);
619 		mtx_unlock(&txq->txq_mtx);
620 	}
621 }
622 
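/*
 * Make sure at least 'len' bytes are contiguous at the front of the mbuf.
 * On failure the chain has already been freed by m_pullup() and the
 * enclosing function returns NULL.
 */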
623 #define PULLUP_HDR(m, len)				\
624 do {							\
625 	if (unlikely((m)->m_len < (len))) {		\
626 		(m) = m_pullup((m), (len));		\
627 		if ((m) == NULL)			\
628 			return (NULL);			\
629 	}						\
630 } while (0)
631 
632 /*
633  * If this function fails, the mbuf is freed.
634  */
635 static inline struct mbuf *
636 mana_tso_fixup(struct mbuf *mbuf)
637 {
638 	struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
639 	struct tcphdr *th;
640 	uint16_t etype;
641 	int ehlen;
642 
643 	if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
644 		etype = ntohs(eh->evl_proto);
645 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
646 	} else {
647 		etype = ntohs(eh->evl_encap_proto);
648 		ehlen = ETHER_HDR_LEN;
649 	}
650 
651 	if (etype == ETHERTYPE_IP) {
652 		struct ip *ip;
653 		int iphlen;
654 
655 		PULLUP_HDR(mbuf, ehlen + sizeof(*ip));
656 		ip = mtodo(mbuf, ehlen);
657 		iphlen = ip->ip_hl << 2;
658 		mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
659 
660 		PULLUP_HDR(mbuf, ehlen + iphlen + sizeof(*th));
661 		th = mtodo(mbuf, ehlen + iphlen);
662 
663 		ip->ip_len = 0;
664 		ip->ip_sum = 0;
665 		th->th_sum = in_pseudo(ip->ip_src.s_addr,
666 		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
667 	} else if (etype == ETHERTYPE_IPV6) {
668 		struct ip6_hdr *ip6;
669 
670 		PULLUP_HDR(mbuf, ehlen + sizeof(*ip6) + sizeof(*th));
671 		ip6 = mtodo(mbuf, ehlen);
672 		if (ip6->ip6_nxt != IPPROTO_TCP) {
673 			/* Something is really wrong, just return */
674 			mana_dbg(NULL, "TSO mbuf not TCP, freed.\n");
675 			m_freem(mbuf);
676 			return NULL;
677 		}
678 		mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
679 
680 		th = mtodo(mbuf, ehlen + sizeof(*ip6));
681 
682 		ip6->ip6_plen = 0;
683 		th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
684 	} else {
685 		/* CSUM_TSO is set but the packet is not IPv4 or IPv6. */
686 		mana_warn(NULL, "TSO mbuf is not an IP packet, freed.\n");
687 		m_freem(mbuf);
688 		return NULL;
689 	}
690 
691 	MANA_L3_PROTO(mbuf) = etype;
692 
693 	return (mbuf);
694 }
695 
696 /*
697  * If this function fails, the mbuf is freed.
698  */
699 static inline struct mbuf *
700 mana_mbuf_csum_check(struct mbuf *mbuf)
701 {
702 	struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
703 	struct mbuf *mbuf_next;
704 	uint16_t etype;
705 	int offset;
706 	int ehlen;
707 
708 	if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
709 		etype = ntohs(eh->evl_proto);
710 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
711 	} else {
712 		etype = ntohs(eh->evl_encap_proto);
713 		ehlen = ETHER_HDR_LEN;
714 	}
715 
716 	mbuf_next = m_getptr(mbuf, ehlen, &offset);
717 
718 	MANA_L4_PROTO(mbuf) = 0;
719 	if (etype == ETHERTYPE_IP) {
720 		const struct ip *ip;
721 		int iphlen;
722 
723 		ip = (struct ip *)(mtodo(mbuf_next, offset));
724 		iphlen = ip->ip_hl << 2;
725 		mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
726 
727 		MANA_L4_PROTO(mbuf) = ip->ip_p;
728 	} else if (etype == ETHERTYPE_IPV6) {
729 		const struct ip6_hdr *ip6;
730 
731 		ip6 = (struct ip6_hdr *)(mtodo(mbuf_next, offset));
732 		mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
733 
734 		MANA_L4_PROTO(mbuf) = ip6->ip6_nxt;
735 	} else {
736 		MANA_L4_PROTO(mbuf) = 0;
737 	}
738 
739 	MANA_L3_PROTO(mbuf) = etype;
740 
741 	return (mbuf);
742 }
743 
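/*
 * Transmit entry point (if_transmit).  Fix up the TSO/checksum metadata,
 * pick a txq from the RSS flow id (optionally redirected through
 * alt_txq_idx), enqueue the mbuf on that txq's buf_ring, and either
 * transmit inline if the ring was empty or hand off to the per-queue
 * enqueue taskqueue.
 */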
744 static int
745 mana_start_xmit(struct ifnet *ifp, struct mbuf *m)
746 {
747 	struct mana_port_context *apc = if_getsoftc(ifp);
748 	struct mana_txq *txq;
749 	int is_drbr_empty;
750 	uint16_t txq_id;
751 	int err;
752 
753 	if (unlikely((!apc->port_is_up) ||
754 	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
755 		return ENODEV;
756 
757 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
758 		m = mana_tso_fixup(m);
759 		if (unlikely(m == NULL)) {
760 			counter_enter();
761 			counter_u64_add_protected(apc->port_stats.tx_drops, 1);
762 			counter_exit();
763 			return EIO;
764 		}
765 	} else {
766 		m = mana_mbuf_csum_check(m);
767 		if (unlikely(m == NULL)) {
768 			counter_enter();
769 			counter_u64_add_protected(apc->port_stats.tx_drops, 1);
770 			counter_exit();
771 			return EIO;
772 		}
773 	}
774 
775 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
776 		uint32_t hash = m->m_pkthdr.flowid;
777 		txq_id = apc->indir_table[(hash) & MANA_INDIRECT_TABLE_MASK] %
778 		    apc->num_queues;
779 	} else {
780 		txq_id = m->m_pkthdr.flowid % apc->num_queues;
781 	}
782 
783 	if (apc->enable_tx_altq)
784 		txq_id = apc->tx_qp[txq_id].txq.alt_txq_idx;
785 
786 	txq = &apc->tx_qp[txq_id].txq;
787 
788 	is_drbr_empty = drbr_empty(ifp, txq->txq_br);
789 	err = drbr_enqueue(ifp, txq->txq_br, m);
790 	if (unlikely(err)) {
791 		mana_warn(NULL, "txq %u failed to enqueue: %d\n",
792 		    txq_id, err);
793 		taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
794 		return err;
795 	}
796 
797 	if (is_drbr_empty && mtx_trylock(&txq->txq_mtx)) {
798 		mana_xmit(txq);
799 		mtx_unlock(&txq->txq_mtx);
800 	} else {
801 		taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
802 	}
803 
804 	return 0;
805 }
806 
807 static void
808 mana_cleanup_port_context(struct mana_port_context *apc)
809 {
810 	bus_dma_tag_destroy(apc->tx_buf_tag);
811 	bus_dma_tag_destroy(apc->rx_buf_tag);
812 	apc->rx_buf_tag = NULL;
813 
814 	free(apc->rxqs, M_DEVBUF);
815 	apc->rxqs = NULL;
816 
817 	mana_free_counters((counter_u64_t *)&apc->port_stats,
818 	    sizeof(struct mana_port_stats));
819 }
820 
821 static int
822 mana_init_port_context(struct mana_port_context *apc)
823 {
824 	device_t dev = apc->ac->gdma_dev->gdma_context->dev;
825 	uint32_t tso_maxsize;
826 	int err;
827 
828 	tso_maxsize = MAX_MBUF_FRAGS * MANA_TSO_MAXSEG_SZ -
829 	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
830 
831 	/* Create DMA tag for tx bufs */
832 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
833 	    1, 0,			/* alignment, boundary	*/
834 	    BUS_SPACE_MAXADDR,		/* lowaddr		*/
835 	    BUS_SPACE_MAXADDR,		/* highaddr		*/
836 	    NULL, NULL,			/* filter, filterarg	*/
837 	    tso_maxsize,		/* maxsize		*/
838 	    MAX_MBUF_FRAGS,		/* nsegments		*/
839 	    tso_maxsize,		/* maxsegsize		*/
840 	    0,				/* flags		*/
841 	    NULL, NULL,			/* lockfunc, lockfuncarg*/
842 	    &apc->tx_buf_tag);
843 	if (unlikely(err)) {
844 		device_printf(dev, "Failed to create TX DMA tag\n");
845 		return err;
846 	}
847 
848 	/* Create DMA tag for rx bufs */
849 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
850 	    64, 0,			/* alignment, boundary	*/
851 	    BUS_SPACE_MAXADDR,		/* lowaddr		*/
852 	    BUS_SPACE_MAXADDR,		/* highaddr		*/
853 	    NULL, NULL,			/* filter, filterarg	*/
854 	    MJUMPAGESIZE,		/* maxsize		*/
855 	    1,				/* nsegments		*/
856 	    MJUMPAGESIZE,		/* maxsegsize		*/
857 	    0,				/* flags		*/
858 	    NULL, NULL,			/* lockfunc, lockfuncarg*/
859 	    &apc->rx_buf_tag);
860 	if (unlikely(err)) {
861 		device_printf(dev, "Failed to create RX DMA tag\n");
862 		return err;
863 	}
864 
865 	apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
866 	    M_DEVBUF, M_WAITOK | M_ZERO);
867 
868 	if (!apc->rxqs) {
869 		bus_dma_tag_destroy(apc->tx_buf_tag);
870 		bus_dma_tag_destroy(apc->rx_buf_tag);
871 		apc->rx_buf_tag = NULL;
872 		return ENOMEM;
873 	}
874 
875 	return 0;
876 }
877 
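/*
 * Send a management request to the device over the GDMA channel.  The
 * request is stamped with the MANA dev_id and a monotonically increasing
 * activity_id, and the response must echo both back or the call fails
 * with EPROTO.
 */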
878 static int
879 mana_send_request(struct mana_context *ac, void *in_buf,
880     uint32_t in_len, void *out_buf, uint32_t out_len)
881 {
882 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
883 	struct gdma_resp_hdr *resp = out_buf;
884 	struct gdma_req_hdr *req = in_buf;
885 	device_t dev = gc->dev;
886 	static atomic_t activity_id;
887 	int err;
888 
889 	req->dev_id = gc->mana.dev_id;
890 	req->activity_id = atomic_inc_return(&activity_id);
891 
892 	mana_dbg(NULL, "activity_id = %u\n", activity_id);
893 
894 	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
895 	    out_buf);
896 	if (err || resp->status) {
897 		device_printf(dev, "Failed to send mana message: %d, 0x%x\n",
898 			err, resp->status);
899 		return err ? err : EPROTO;
900 	}
901 
902 	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
903 	    req->activity_id != resp->activity_id) {
904 		device_printf(dev,
905 		    "Unexpected mana message response: %x,%x,%x,%x\n",
906 		    req->dev_id.as_uint32, resp->dev_id.as_uint32,
907 		    req->activity_id, resp->activity_id);
908 		return EPROTO;
909 	}
910 
911 	return 0;
912 }
913 
914 static int
915 mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
916     const enum mana_command_code expected_code,
917     const uint32_t min_size)
918 {
919 	if (resp_hdr->response.msg_type != expected_code)
920 		return EPROTO;
921 
922 	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
923 		return EPROTO;
924 
925 	if (resp_hdr->response.msg_size < min_size)
926 		return EPROTO;
927 
928 	return 0;
929 }
930 
931 static int
932 mana_query_device_cfg(struct mana_context *ac, uint32_t proto_major_ver,
933     uint32_t proto_minor_ver, uint32_t proto_micro_ver,
934     uint16_t *max_num_vports)
935 {
936 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
937 	struct mana_query_device_cfg_resp resp = {};
938 	struct mana_query_device_cfg_req req = {};
939 	device_t dev = gc->dev;
940 	int err = 0;
941 
942 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
943 	    sizeof(req), sizeof(resp));
944 	req.proto_major_ver = proto_major_ver;
945 	req.proto_minor_ver = proto_minor_ver;
946 	req.proto_micro_ver = proto_micro_ver;
947 
948 	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
949 	if (err) {
950 		device_printf(dev, "Failed to query config: %d", err);
951 		return err;
952 	}
953 
954 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
955 	    sizeof(resp));
956 	if (err || resp.hdr.status) {
957 		device_printf(dev, "Invalid query result: %d, 0x%x\n", err,
958 		    resp.hdr.status);
959 		if (!err)
960 			err = EPROTO;
961 		return err;
962 	}
963 
964 	*max_num_vports = resp.max_num_vports;
965 
966 	mana_dbg(NULL, "mana max_num_vports from device = %d\n",
967 	    *max_num_vports);
968 
969 	return 0;
970 }
971 
972 static int
973 mana_query_vport_cfg(struct mana_port_context *apc, uint32_t vport_index,
974     uint32_t *max_sq, uint32_t *max_rq, uint32_t *num_indir_entry)
975 {
976 	struct mana_query_vport_cfg_resp resp = {};
977 	struct mana_query_vport_cfg_req req = {};
978 	int err;
979 
980 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
981 	    sizeof(req), sizeof(resp));
982 
983 	req.vport_index = vport_index;
984 
985 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
986 	    sizeof(resp));
987 	if (err)
988 		return err;
989 
990 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
991 	    sizeof(resp));
992 	if (err)
993 		return err;
994 
995 	if (resp.hdr.status)
996 		return EPROTO;
997 
998 	*max_sq = resp.max_num_sq;
999 	*max_rq = resp.max_num_rq;
1000 	*num_indir_entry = resp.num_indirection_ent;
1001 
1002 	apc->port_handle = resp.vport;
1003 	memcpy(apc->mac_addr, resp.mac_addr, ETHER_ADDR_LEN);
1004 
1005 	return 0;
1006 }
1007 
1008 static int
1009 mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
1010     uint32_t doorbell_pg_id)
1011 {
1012 	struct mana_config_vport_resp resp = {};
1013 	struct mana_config_vport_req req = {};
1014 	int err;
1015 
1016 	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
1017 	    sizeof(req), sizeof(resp));
1018 	req.vport = apc->port_handle;
1019 	req.pdid = protection_dom_id;
1020 	req.doorbell_pageid = doorbell_pg_id;
1021 
1022 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1023 	    sizeof(resp));
1024 	if (err) {
1025 		if_printf(apc->ndev, "Failed to configure vPort: %d\n", err);
1026 		goto out;
1027 	}
1028 
1029 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1030 	    sizeof(resp));
1031 	if (err || resp.hdr.status) {
1032 		if_printf(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1033 		    err, resp.hdr.status);
1034 		if (!err)
1035 			err = EPROTO;
1036 
1037 		goto out;
1038 	}
1039 
1040 	apc->tx_shortform_allowed = resp.short_form_allowed;
1041 	apc->tx_vp_offset = resp.tx_vport_offset;
1042 out:
1043 	return err;
1044 }
1045 
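/*
 * Configure RX steering for the vPort.  The request carries the RSS hash
 * key and the RX object indirection table, which is appended immediately
 * after the fixed-size request header.
 */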
1046 static int
1047 mana_cfg_vport_steering(struct mana_port_context *apc,
1048     enum TRI_STATE rx,
1049     bool update_default_rxobj, bool update_key,
1050     bool update_tab)
1051 {
1052 	uint16_t num_entries = MANA_INDIRECT_TABLE_SIZE;
1053 	struct mana_cfg_rx_steer_req *req = NULL;
1054 	struct mana_cfg_rx_steer_resp resp = {};
1055 	struct ifnet *ndev = apc->ndev;
1056 	mana_handle_t *req_indir_tab;
1057 	uint32_t req_buf_size;
1058 	int err;
1059 
1060 	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
1061 	req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
1062 	if (!req)
1063 		return ENOMEM;
1064 
1065 	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1066 	    sizeof(resp));
1067 
1068 	req->vport = apc->port_handle;
1069 	req->num_indir_entries = num_entries;
1070 	req->indir_tab_offset = sizeof(*req);
1071 	req->rx_enable = rx;
1072 	req->rss_enable = apc->rss_state;
1073 	req->update_default_rxobj = update_default_rxobj;
1074 	req->update_hashkey = update_key;
1075 	req->update_indir_tab = update_tab;
1076 	req->default_rxobj = apc->default_rxobj;
1077 
1078 	if (update_key)
1079 		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1080 
1081 	if (update_tab) {
1082 		req_indir_tab = (mana_handle_t *)(req + 1);
1083 		memcpy(req_indir_tab, apc->rxobj_table,
1084 		       req->num_indir_entries * sizeof(mana_handle_t));
1085 	}
1086 
1087 	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1088 	    sizeof(resp));
1089 	if (err) {
1090 		if_printf(ndev, "Failed to configure vPort RX: %d\n", err);
1091 		goto out;
1092 	}
1093 
1094 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1095 	    sizeof(resp));
1096 	if (err) {
1097 		if_printf(ndev, "vPort RX configuration failed: %d\n", err);
1098 		goto out;
1099 	}
1100 
1101 	if (resp.hdr.status) {
1102 		if_printf(ndev, "vPort RX configuration failed: 0x%x\n",
1103 		    resp.hdr.status);
1104 		err = EPROTO;
1105 	}
1106 out:
1107 	free(req, M_DEVBUF);
1108 	return err;
1109 }
1110 
1111 static int
1112 mana_create_wq_obj(struct mana_port_context *apc,
1113     mana_handle_t vport,
1114     uint32_t wq_type, struct mana_obj_spec *wq_spec,
1115     struct mana_obj_spec *cq_spec,
1116     mana_handle_t *wq_obj)
1117 {
1118 	struct mana_create_wqobj_resp resp = {};
1119 	struct mana_create_wqobj_req req = {};
1120 	struct ifnet *ndev = apc->ndev;
1121 	int err;
1122 
1123 	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1124 	    sizeof(req), sizeof(resp));
1125 	req.vport = vport;
1126 	req.wq_type = wq_type;
1127 	req.wq_gdma_region = wq_spec->gdma_region;
1128 	req.cq_gdma_region = cq_spec->gdma_region;
1129 	req.wq_size = wq_spec->queue_size;
1130 	req.cq_size = cq_spec->queue_size;
1131 	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1132 	req.cq_parent_qid = cq_spec->attached_eq;
1133 
1134 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1135 	    sizeof(resp));
1136 	if (err) {
1137 		if_printf(ndev, "Failed to create WQ object: %d\n", err);
1138 		goto out;
1139 	}
1140 
1141 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1142 	    sizeof(resp));
1143 	if (err || resp.hdr.status) {
1144 		if_printf(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1145 		    resp.hdr.status);
1146 		if (!err)
1147 			err = EPROTO;
1148 		goto out;
1149 	}
1150 
1151 	if (resp.wq_obj == INVALID_MANA_HANDLE) {
1152 		if_printf(ndev, "Got an invalid WQ object handle\n");
1153 		err = EPROTO;
1154 		goto out;
1155 	}
1156 
1157 	*wq_obj = resp.wq_obj;
1158 	wq_spec->queue_index = resp.wq_id;
1159 	cq_spec->queue_index = resp.cq_id;
1160 
1161 	return 0;
1162 out:
1163 	return err;
1164 }
1165 
1166 static void
1167 mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
1168     mana_handle_t wq_obj)
1169 {
1170 	struct mana_destroy_wqobj_resp resp = {};
1171 	struct mana_destroy_wqobj_req req = {};
1172 	struct ifnet *ndev = apc->ndev;
1173 	int err;
1174 
1175 	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1176 	    sizeof(req), sizeof(resp));
1177 	req.wq_type = wq_type;
1178 	req.wq_obj_handle = wq_obj;
1179 
1180 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1181 	    sizeof(resp));
1182 	if (err) {
1183 		if_printf(ndev, "Failed to destroy WQ object: %d\n", err);
1184 		return;
1185 	}
1186 
1187 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1188 	    sizeof(resp));
1189 	if (err || resp.hdr.status)
1190 		if_printf(ndev, "Failed to destroy WQ object: %d, 0x%x\n",
1191 		    err, resp.hdr.status);
1192 }
1193 
1194 static void
1195 mana_init_cqe_poll_buf(struct gdma_comp *cqe_poll_buf)
1196 {
1197 	int i;
1198 
1199 	for (i = 0; i < CQE_POLLING_BUFFER; i++)
1200 		memset(&cqe_poll_buf[i], 0, sizeof(struct gdma_comp));
1201 }
1202 
1203 static void
1204 mana_destroy_eq(struct gdma_context *gc, struct mana_port_context *apc)
1205 {
1206 	struct gdma_queue *eq;
1207 	int i;
1208 
1209 	if (!apc->eqs)
1210 		return;
1211 
1212 	for (i = 0; i < apc->num_queues; i++) {
1213 		eq = apc->eqs[i].eq;
1214 		if (!eq)
1215 			continue;
1216 
1217 		mana_gd_destroy_queue(gc, eq);
1218 	}
1219 
1220 	free(apc->eqs, M_DEVBUF);
1221 	apc->eqs = NULL;
1222 }
1223 
1224 static int
1225 mana_create_eq(struct mana_port_context *apc)
1226 {
1227 	struct gdma_dev *gd = apc->ac->gdma_dev;
1228 	struct gdma_queue_spec spec = {};
1229 	int err;
1230 	int i;
1231 
1232 	apc->eqs = mallocarray(apc->num_queues, sizeof(struct mana_eq),
1233 	    M_DEVBUF, M_WAITOK | M_ZERO);
1234 	if (!apc->eqs)
1235 		return ENOMEM;
1236 
1237 	spec.type = GDMA_EQ;
1238 	spec.monitor_avl_buf = false;
1239 	spec.queue_size = EQ_SIZE;
1240 	spec.eq.callback = NULL;
1241 	spec.eq.context = apc->eqs;
1242 	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1243 	spec.eq.ndev = apc->ndev;
1244 
1245 	for (i = 0; i < apc->num_queues; i++) {
1246 		mana_init_cqe_poll_buf(apc->eqs[i].cqe_poll);
1247 
1248 		err = mana_gd_create_mana_eq(gd, &spec, &apc->eqs[i].eq);
1249 		if (err)
1250 			goto out;
1251 	}
1252 
1253 	return 0;
1254 out:
1255 	mana_destroy_eq(gd->gdma_context, apc);
1256 	return err;
1257 }
1258 
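/*
 * Advance the work queue tail by the given number of WQE basic units,
 * sanity-checking that the tail never passes the head.
 */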
1259 static int
1260 mana_move_wq_tail(struct gdma_queue *wq, uint32_t num_units)
1261 {
1262 	uint32_t used_space_old;
1263 	uint32_t used_space_new;
1264 
1265 	used_space_old = wq->head - wq->tail;
1266 	used_space_new = wq->head - (wq->tail + num_units);
1267 
1268 	if (used_space_new > used_space_old) {
1269 		mana_err(NULL,
1270 		    "WARNING: new used space %u greater than old one %u\n",
1271 		    used_space_new, used_space_old);
1272 		return ERANGE;
1273 	}
1274 
1275 	wq->tail += num_units;
1276 	return 0;
1277 }
1278 
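/*
 * Process TX completions: unmap and free the transmitted mbufs, advance
 * the SQ tail, and clear IFF_DRV_OACTIVE (waking the enqueue task) once
 * enough send queue space is available again.
 */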
1279 static void
1280 mana_poll_tx_cq(struct mana_cq *cq)
1281 {
1282 	struct gdma_comp *completions = cq->gdma_comp_buf;
1283 	struct gdma_posted_wqe_info *wqe_info;
1284 	struct mana_send_buf_info *tx_info;
1285 	unsigned int pkt_transmitted = 0;
1286 	unsigned int wqe_unit_cnt = 0;
1287 	struct mana_txq *txq = cq->txq;
1288 	struct mana_port_context *apc;
1289 	uint16_t next_to_complete;
1290 	struct ifnet *ndev;
1291 	int comp_read;
1292 	int txq_idx = txq->idx;
1293 	int i;
1294 	int sa_drop = 0;
1295 
1296 	struct gdma_queue *gdma_wq;
1297 	unsigned int avail_space;
1298 	bool txq_full = false;
1299 
1300 	ndev = txq->ndev;
1301 	apc = if_getsoftc(ndev);
1302 
1303 	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1304 	    CQE_POLLING_BUFFER);
1305 
1306 	next_to_complete = txq->next_to_complete;
1307 
1308 	for (i = 0; i < comp_read; i++) {
1309 		struct mana_tx_comp_oob *cqe_oob;
1310 
1311 		if (!completions[i].is_sq) {
1312 			mana_err(NULL, "WARNING: Not for SQ\n");
1313 			return;
1314 		}
1315 
1316 		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1317 		if (cqe_oob->cqe_hdr.client_type !=
1318 				 MANA_CQE_COMPLETION) {
1319 			mana_err(NULL,
1320 			    "WARNING: Invalid CQE client type %u\n",
1321 			    cqe_oob->cqe_hdr.client_type);
1322 			return;
1323 		}
1324 
1325 		switch (cqe_oob->cqe_hdr.cqe_type) {
1326 		case CQE_TX_OKAY:
1327 			break;
1328 
1329 		case CQE_TX_SA_DROP:
1330 		case CQE_TX_MTU_DROP:
1331 		case CQE_TX_INVALID_OOB:
1332 		case CQE_TX_INVALID_ETH_TYPE:
1333 		case CQE_TX_HDR_PROCESSING_ERROR:
1334 		case CQE_TX_VF_DISABLED:
1335 		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1336 		case CQE_TX_VPORT_DISABLED:
1337 		case CQE_TX_VLAN_TAGGING_VIOLATION:
1338 			sa_drop++;
1339 			mana_err(NULL,
1340 			    "TX: txq %d CQE error %d, ntc = %d, "
1341 			    "pending sends = %d: err ignored.\n",
1342 			    txq_idx, cqe_oob->cqe_hdr.cqe_type,
1343 			    next_to_complete, txq->pending_sends);
1344 			break;
1345 
1346 		default:
1347 			/* If the CQE type is unexpected, log an error,
1348 			 * and go through the error path.
1349 			 */
1350 			mana_err(NULL,
1351 			    "ERROR: TX: Unexpected CQE type %d: HW BUG?\n",
1352 			    cqe_oob->cqe_hdr.cqe_type);
1353 			return;
1354 		}
1355 		if (txq->gdma_txq_id != completions[i].wq_num) {
1356 			mana_dbg(NULL,
1357 			    "txq gdma id does not match completion wq num: "
1358 			    "%d != %d\n",
1359 			    txq->gdma_txq_id, completions[i].wq_num);
1360 			break;
1361 		}
1362 
1363 		tx_info = &txq->tx_buf_info[next_to_complete];
1364 		if (!tx_info->mbuf) {
1365 			mana_err(NULL,
1366 			    "WARNING: txq %d Empty mbuf on tx_info: %u, "
1367 			    "ntu = %u, pending_sends = %d, "
1368 			    "transmitted = %d, sa_drop = %d, i = %d, comp_read = %d\n",
1369 			    txq_idx, next_to_complete, txq->next_to_use,
1370 			    txq->pending_sends, pkt_transmitted, sa_drop,
1371 			    i, comp_read);
1372 			continue;
1373 		}
1374 
1375 		wqe_info = &tx_info->wqe_inf;
1376 		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1377 
1378 		mana_tx_unmap_mbuf(apc, tx_info);
1379 		mb();
1380 
1381 		next_to_complete =
1382 		    (next_to_complete + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
1383 
1384 		pkt_transmitted++;
1385 	}
1386 
1387 	txq->next_to_complete = next_to_complete;
1388 
1389 	if (wqe_unit_cnt == 0) {
1390 		mana_err(NULL,
1391 		    "WARNING: TX ring not proceeding!\n");
1392 		return;
1393 	}
1394 
1395 	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1396 
1397 	/* Ensure tail updated before checking q stop */
1398 	wmb();
1399 
1400 	gdma_wq = txq->gdma_sq;
1401 	avail_space = mana_gd_wq_avail_space(gdma_wq);
1402 
1403 
1404 	if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL) {
1405 		txq_full = true;
1406 	}
1407 
1408 	/* Ensure checking txq_full before apc->port_is_up. */
1409 	rmb();
1410 
1411 	if (txq_full && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1412 		/* Grab the txq lock and re-test */
1413 		mtx_lock(&txq->txq_mtx);
1414 		avail_space = mana_gd_wq_avail_space(gdma_wq);
1415 
1416 		if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL &&
1417 		    apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1418 			/* Clear the Q full flag */
1419 			if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING,
1420 			    IFF_DRV_OACTIVE);
1421 			counter_u64_add(txq->stats.wakeup, 1);
1422 			if (txq->alt_txq_idx != txq->idx) {
1423 				uint64_t stops = counter_u64_fetch(txq->stats.stop);
1424 				uint64_t wakeups = counter_u64_fetch(txq->stats.wakeup);
1425 				/* Reset alt_txq_idx back if it is not overloaded */
1426 				if (stops < wakeups) {
1427 					txq->alt_txq_idx = txq->idx;
1428 					counter_u64_add(txq->stats.alt_reset, 1);
1429 				}
1430 			}
1431 			rmb();
1432 			/* Schedule a tx enqueue task */
1433 			taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
1434 		}
1435 		mtx_unlock(&txq->txq_mtx);
1436 	}
1437 
1438 	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1439 		mana_err(NULL,
1440 		    "WARNING: TX %d pending_sends error: %d\n",
1441 		    txq->idx, txq->pending_sends);
1442 }
1443 
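/*
 * Re-post the receive WQE for the next buffer slot and ring the RQ
 * doorbell so the hardware can use that buffer again.
 */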
1444 static void
1445 mana_post_pkt_rxq(struct mana_rxq *rxq)
1446 {
1447 	struct mana_recv_buf_oob *recv_buf_oob;
1448 	uint32_t curr_index;
1449 	int err;
1450 
1451 	curr_index = rxq->buf_index++;
1452 	if (rxq->buf_index == rxq->num_rx_buf)
1453 		rxq->buf_index = 0;
1454 
1455 	recv_buf_oob = &rxq->rx_oobs[curr_index];
1456 
1457 	err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1458 	    &recv_buf_oob->wqe_inf);
1459 	if (err) {
1460 		mana_err(NULL, "WARNING: rxq %u post pkt err %d\n",
1461 		    rxq->rxq_idx, err);
1462 		return;
1463 	}
1464 
1465 	if (recv_buf_oob->wqe_inf.wqe_size_in_bu != 1) {
1466 		mana_err(NULL, "WARNING: rxq %u wqe_size_in_bu %u\n",
1467 		    rxq->rxq_idx, recv_buf_oob->wqe_inf.wqe_size_in_bu);
1468 	}
1469 }
1470 
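/*
 * Finish a received mbuf: set the packet header fields, translate the
 * CQE checksum and RSS hash information into mbuf metadata, then pass
 * the packet to LRO when possible or directly to if_input().
 */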
1471 static void
1472 mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe,
1473     struct mana_rxq *rxq)
1474 {
1475 	struct mana_stats *rx_stats = &rxq->stats;
1476 	struct ifnet *ndev = rxq->ndev;
1477 	uint32_t pkt_len = cqe->ppi[0].pkt_len;
1478 	uint16_t rxq_idx = rxq->rxq_idx;
1479 	struct mana_port_context *apc;
1480 	struct gdma_queue *eq;
1481 	bool do_lro = false;
1482 	bool do_if_input;
1483 
1484 	apc = if_getsoftc(ndev);
1485 	eq = apc->eqs[rxq_idx].eq;
1486 	eq->eq.work_done++;
1487 
1488 	if (!mbuf) {
1489 		return;
1490 	}
1491 
1492 	mbuf->m_flags |= M_PKTHDR;
1493 	mbuf->m_pkthdr.len = pkt_len;
1494 	mbuf->m_len = pkt_len;
1495 	mbuf->m_pkthdr.rcvif = ndev;
1496 
1497 	if ((ndev->if_capenable & IFCAP_RXCSUM ||
1498 	    ndev->if_capenable & IFCAP_RXCSUM_IPV6) &&
1499 	    (cqe->rx_iphdr_csum_succeed)) {
1500 		mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1501 		mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1502 		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) {
1503 			mbuf->m_pkthdr.csum_flags |=
1504 			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1505 			mbuf->m_pkthdr.csum_data = 0xffff;
1506 
1507 			if (cqe->rx_tcp_csum_succeed)
1508 				do_lro = true;
1509 		}
1510 	}
1511 
1512 	if (cqe->rx_hashtype != 0) {
1513 		mbuf->m_pkthdr.flowid = cqe->ppi[0].pkt_hash;
1514 
1515 		uint16_t hashtype = cqe->rx_hashtype;
1516 		if (hashtype & NDIS_HASH_IPV4_MASK) {
1517 			hashtype &= NDIS_HASH_IPV4_MASK;
1518 			switch (hashtype) {
1519 			case NDIS_HASH_TCP_IPV4:
1520 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
1521 				break;
1522 			case NDIS_HASH_UDP_IPV4:
1523 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
1524 				break;
1525 			default:
1526 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
1527 			}
1528 		} else if (hashtype & NDIS_HASH_IPV6_MASK) {
1529 			hashtype &= NDIS_HASH_IPV6_MASK;
1530 			switch (hashtype) {
1531 			case NDIS_HASH_TCP_IPV6:
1532 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
1533 				break;
1534 			case NDIS_HASH_TCP_IPV6_EX:
1535 				M_HASHTYPE_SET(mbuf,
1536 				    M_HASHTYPE_RSS_TCP_IPV6_EX);
1537 				break;
1538 			case NDIS_HASH_UDP_IPV6:
1539 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
1540 				break;
1541 			case NDIS_HASH_UDP_IPV6_EX:
1542 				M_HASHTYPE_SET(mbuf,
1543 				    M_HASHTYPE_RSS_UDP_IPV6_EX);
1544 				break;
1545 			default:
1546 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
1547 			}
1548 		} else {
1549 			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
1550 		}
1551 	} else {
1552 		mbuf->m_pkthdr.flowid = rxq_idx;
1553 		M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
1554 	}
1555 
1556 	do_if_input = true;
1557 	if ((ndev->if_capenable & IFCAP_LRO) && do_lro) {
1558 		if (rxq->lro.lro_cnt != 0 &&
1559 		    tcp_lro_rx(&rxq->lro, mbuf, 0) == 0)
1560 			do_if_input = false;
1561 	}
1562 	if (do_if_input) {
1563 		ndev->if_input(ndev, mbuf);
1564 	}
1565 
1566 	counter_enter();
1567 	counter_u64_add_protected(rx_stats->packets, 1);
1568 	counter_u64_add_protected(apc->port_stats.rx_packets, 1);
1569 	counter_u64_add_protected(rx_stats->bytes, pkt_len);
1570 	counter_u64_add_protected(apc->port_stats.rx_bytes, pkt_len);
1571 	counter_exit();
1572 }
1573 
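/*
 * Handle one RX CQE.  For CQE_RX_OKAY the current buffer is replaced by a
 * freshly mapped mbuf and the old one is sent up the stack; if the
 * replacement allocation fails the packet is dropped and the existing
 * mbuf is reloaded.  The RQ tail is then advanced and the WQE re-posted.
 */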
1574 static void
1575 mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1576     struct gdma_comp *cqe)
1577 {
1578 	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1579 	struct mana_recv_buf_oob *rxbuf_oob;
1580 	struct ifnet *ndev = rxq->ndev;
1581 	struct mana_port_context *apc;
1582 	struct mbuf *old_mbuf;
1583 	uint32_t curr, pktlen;
1584 	int err;
1585 
1586 	switch (oob->cqe_hdr.cqe_type) {
1587 	case CQE_RX_OKAY:
1588 		break;
1589 
1590 	case CQE_RX_TRUNCATED:
1591 		if_printf(ndev, "Dropped a truncated packet\n");
1592 		return;
1593 
1594 	case CQE_RX_COALESCED_4:
1595 		if_printf(ndev, "RX coalescing is unsupported\n");
1596 		return;
1597 
1598 	case CQE_RX_OBJECT_FENCE:
1599 		if_printf(ndev, "RX Fencing is unsupported\n");
1600 		return;
1601 
1602 	default:
1603 		if_printf(ndev, "Unknown RX CQE type = %d\n",
1604 		    oob->cqe_hdr.cqe_type);
1605 		return;
1606 	}
1607 
1608 	if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
1609 		return;
1610 
1611 	pktlen = oob->ppi[0].pkt_len;
1612 
1613 	if (pktlen == 0) {
1614 		/* Data packets should never have a packet length of zero */
1615 #if defined(__amd64__)
1616 		if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%lx\n",
1617 		    rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1618 #else
1619 		if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1620 		    rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1621 #endif
1622 		return;
1623 	}
1624 
1625 	curr = rxq->buf_index;
1626 	rxbuf_oob = &rxq->rx_oobs[curr];
1627 	if (rxbuf_oob->wqe_inf.wqe_size_in_bu != 1) {
1628 		mana_err(NULL, "WARNING: Rx incorrect completion "
1629 		    "WQE size %u\n",
1630 		    rxbuf_oob->wqe_inf.wqe_size_in_bu);
1631 	}
1632 
1633 	apc = if_getsoftc(ndev);
1634 
1635 	old_mbuf = rxbuf_oob->mbuf;
1636 
1637 	/* Unload DMA map for the old mbuf */
1638 	mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);
1639 
1640 	/* Load a new mbuf to replace the old one */
1641 	err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
1642 	if (err) {
1643 		mana_dbg(NULL,
1644 		    "failed to load rx mbuf, err = %d, packet dropped.\n",
1645 		    err);
1646 		counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
1647 		/*
1648 		 * Failed to load new mbuf, rxbuf_oob->mbuf is still
1649 		 * pointing to the old one. Drop the packet.
1650 		 */
1651 		old_mbuf = NULL;
1652 		/* Reload the existing mbuf */
1653 		mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
1654 	}
1655 
1656 	mana_rx_mbuf(old_mbuf, oob, rxq);
1657 
1658 	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1659 
1660 	mana_post_pkt_rxq(rxq);
1661 }
1662 
1663 static void
1664 mana_poll_rx_cq(struct mana_cq *cq)
1665 {
1666 	struct gdma_comp *comp = cq->gdma_comp_buf;
1667 	int comp_read, i;
1668 
1669 	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1670 	KASSERT(comp_read <= CQE_POLLING_BUFFER,
1671 	    ("comp_read %d greater than buf size %d",
1672 	    comp_read, CQE_POLLING_BUFFER));
1673 
1674 	for (i = 0; i < comp_read; i++) {
1675 		if (comp[i].is_sq == true) {
1676 			mana_err(NULL,
1677 			    "WARNING: CQE not for receive queue\n");
1678 			return;
1679 		}
1680 
1681 		/* verify recv cqe references the right rxq */
1682 		if (comp[i].wq_num != cq->rxq->gdma_id) {
1683 			mana_err(NULL,
1684 			    "WARNING: Received CQE %d not for "
1685 			    "this receive queue %d\n",
1686 			    comp[i].wq_num, cq->rxq->gdma_id);
1687 			return;
1688 		}
1689 
1690 		mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
1691 	}
1692 
1693 	tcp_lro_flush_all(&cq->rxq->lro);
1694 }
1695 
1696 static void
1697 mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1698 {
1699 	struct mana_cq *cq = context;
1700 
1701 	KASSERT(cq->gdma_cq == gdma_queue,
1702 	    ("cq do not match %p, %p", cq->gdma_cq, gdma_queue));
1703 
1704 	if (cq->type == MANA_CQ_TYPE_RX) {
1705 		mana_poll_rx_cq(cq);
1706 	} else {
1707 		mana_poll_tx_cq(cq);
1708 	}
1709 
1710 	mana_gd_arm_cq(gdma_queue);
1711 }
1712 
1713 static void
1714 mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1715 {
1716 	struct gdma_dev *gd = apc->ac->gdma_dev;
1717 
1718 	if (!cq->gdma_cq)
1719 		return;
1720 
1721 	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1722 }
1723 
1724 static void
1725 mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1726 {
1727 	struct gdma_dev *gd = apc->ac->gdma_dev;
1728 	struct mana_send_buf_info *txbuf_info;
1729 	uint32_t pending_sends;
1730 	int i;
1731 
1732 	if (!txq->gdma_sq)
1733 		return;
1734 
1735 	if ((pending_sends = atomic_read(&txq->pending_sends)) > 0) {
1736 		mana_err(NULL,
1737 		    "WARNING: txq pending sends not zero: %u\n",
1738 		    pending_sends);
1739 	}
1740 
1741 	if (txq->next_to_use != txq->next_to_complete) {
1742 		mana_err(NULL,
1743 		    "WARNING: txq buf not completed, "
1744 		    "next use %u, next complete %u\n",
1745 		    txq->next_to_use, txq->next_to_complete);
1746 	}
1747 
1748 	/* Flush buf ring. Grab txq mtx lock */
1749 	if (txq->txq_br) {
1750 		mtx_lock(&txq->txq_mtx);
1751 		drbr_flush(apc->ndev, txq->txq_br);
1752 		mtx_unlock(&txq->txq_mtx);
1753 		buf_ring_free(txq->txq_br, M_DEVBUF);
1754 	}
1755 
1756 	/* Drain taskqueue */
1757 	if (txq->enqueue_tq) {
1758 		while (taskqueue_cancel(txq->enqueue_tq,
1759 		    &txq->enqueue_task, NULL)) {
1760 			taskqueue_drain(txq->enqueue_tq,
1761 			    &txq->enqueue_task);
1762 		}
1763 
1764 		taskqueue_free(txq->enqueue_tq);
1765 	}
1766 
1767 	if (txq->tx_buf_info) {
1768 		/* Free all mbufs which are still in-flight */
1769 		for (i = 0; i < MAX_SEND_BUFFERS_PER_QUEUE; i++) {
1770 			txbuf_info = &txq->tx_buf_info[i];
1771 			if (txbuf_info->mbuf) {
1772 				mana_tx_unmap_mbuf(apc, txbuf_info);
1773 			}
1774 		}
1775 
1776 		free(txq->tx_buf_info, M_DEVBUF);
1777 	}
1778 
1779 	mana_free_counters((counter_u64_t *)&txq->stats,
1780 	    sizeof(txq->stats));
1781 
1782 	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1783 
1784 	mtx_destroy(&txq->txq_mtx);
1785 }
1786 
1787 static void
1788 mana_destroy_txq(struct mana_port_context *apc)
1789 {
1790 	int i;
1791 
1792 	if (!apc->tx_qp)
1793 		return;
1794 
1795 	for (i = 0; i < apc->num_queues; i++) {
1796 		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1797 
1798 		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1799 
1800 		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1801 	}
1802 
1803 	free(apc->tx_qp, M_DEVBUF);
1804 	apc->tx_qp = NULL;
1805 }
1806 
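/*
 * Create the per-queue TX resources: a GDMA send queue and its completion
 * queue, the hardware WQ object binding them to the vPort, plus the
 * software side (tx_buf_info array, buf_ring, mutex and enqueue
 * taskqueue) used by the transmit path.
 */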
1807 static int
1808 mana_create_txq(struct mana_port_context *apc, struct ifnet *net)
1809 {
1810 	struct gdma_dev *gd = apc->ac->gdma_dev;
1811 	struct mana_obj_spec wq_spec;
1812 	struct mana_obj_spec cq_spec;
1813 	struct gdma_queue_spec spec;
1814 	struct gdma_context *gc;
1815 	struct mana_txq *txq;
1816 	struct mana_cq *cq;
1817 	uint32_t txq_size;
1818 	uint32_t cq_size;
1819 	int err;
1820 	int i;
1821 
1822 	apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
1823 	    M_DEVBUF, M_WAITOK | M_ZERO);
1824 	if (!apc->tx_qp)
1825 		return ENOMEM;
1826 
1827 	/*  The minimum size of the WQE is 32 bytes, hence
1828 	 *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
1829 	 *  the SQ can store. This value is then used to size other queues
1830 	 *  to prevent overflow.
1831 	 */
1832 	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1833 	KASSERT(IS_ALIGNED(txq_size, PAGE_SIZE),
1834 	    ("txq size not page aligned"));
1835 
1836 	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1837 	cq_size = ALIGN(cq_size, PAGE_SIZE);
1838 
1839 	gc = gd->gdma_context;
1840 
1841 	for (i = 0; i < apc->num_queues; i++) {
1842 		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1843 
1844 		/* Create SQ */
1845 		txq = &apc->tx_qp[i].txq;
1846 
1847 		txq->ndev = net;
1848 		txq->vp_offset = apc->tx_vp_offset;
1849 		txq->idx = i;
1850 		txq->alt_txq_idx = i;
1851 
1852 		memset(&spec, 0, sizeof(spec));
1853 		spec.type = GDMA_SQ;
1854 		spec.monitor_avl_buf = true;
1855 		spec.queue_size = txq_size;
1856 		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1857 		if (err)
1858 			goto out;
1859 
1860 		/* Create SQ's CQ */
1861 		cq = &apc->tx_qp[i].tx_cq;
1862 		cq->gdma_comp_buf = apc->eqs[i].cqe_poll;
1863 		cq->type = MANA_CQ_TYPE_TX;
1864 
1865 		cq->txq = txq;
1866 
1867 		memset(&spec, 0, sizeof(spec));
1868 		spec.type = GDMA_CQ;
1869 		spec.monitor_avl_buf = false;
1870 		spec.queue_size = cq_size;
1871 		spec.cq.callback = mana_cq_handler;
1872 		spec.cq.parent_eq = apc->eqs[i].eq;
1873 		spec.cq.context = cq;
1874 		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1875 		if (err)
1876 			goto out;
1877 
1878 		memset(&wq_spec, 0, sizeof(wq_spec));
1879 		memset(&cq_spec, 0, sizeof(cq_spec));
1880 
1881 		wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
1882 		wq_spec.queue_size = txq->gdma_sq->queue_size;
1883 
1884 		cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1885 		cq_spec.queue_size = cq->gdma_cq->queue_size;
1886 		cq_spec.modr_ctx_id = 0;
1887 		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1888 
1889 		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
1890 		    &wq_spec, &cq_spec, &apc->tx_qp[i].tx_object);
1891 
1892 		if (err)
1893 			goto out;
1894 
1895 		txq->gdma_sq->id = wq_spec.queue_index;
1896 		cq->gdma_cq->id = cq_spec.queue_index;
1897 
1898 		txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1899 		cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1900 
1901 		txq->gdma_txq_id = txq->gdma_sq->id;
1902 
1903 		cq->gdma_id = cq->gdma_cq->id;
1904 
1905 		mana_dbg(NULL,
1906 		    "txq %d, txq gdma id %d, txq cq gdma id %d\n",
1907 		    i, txq->gdma_txq_id, cq->gdma_id);
1908 
1909 		if (cq->gdma_id >= gc->max_num_cqs) {
1910 			if_printf(net, "CQ id %u too large.\n", cq->gdma_id);
1911 			return EINVAL;
1912 		}
1913 
1914 		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1915 
1916 		/* Initialize tx specific data */
1917 		txq->tx_buf_info = malloc(MAX_SEND_BUFFERS_PER_QUEUE *
1918 		    sizeof(struct mana_send_buf_info),
1919 		    M_DEVBUF, M_WAITOK | M_ZERO);
1920 		if (unlikely(txq->tx_buf_info == NULL)) {
1921 			if_printf(net,
1922 			    "Failed to allocate tx buf info for SQ %u\n",
1923 			    txq->gdma_sq->id);
1924 			err = ENOMEM;
1925 			goto out;
1926 		}
1927 
1928 
1929 		snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
1930 		    "mana:tx(%d)", i);
1931 		mtx_init(&txq->txq_mtx, txq->txq_mtx_name, NULL, MTX_DEF);
1932 
1933 		txq->txq_br = buf_ring_alloc(4 * MAX_SEND_BUFFERS_PER_QUEUE,
1934 		    M_DEVBUF, M_WAITOK, &txq->txq_mtx);
1935 		if (unlikely(txq->txq_br == NULL)) {
1936 			if_printf(net,
1937 			    "Failed to allocate buf ring for SQ %u\n",
1938 			    txq->gdma_sq->id);
1939 			err = ENOMEM;
1940 			goto out;
1941 		}
1942 
1943 		/* Allocate taskqueue for deferred send */
1944 		TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
1945 		txq->enqueue_tq = taskqueue_create_fast("mana_tx_enque",
1946 		    M_NOWAIT, taskqueue_thread_enqueue, &txq->enqueue_tq);
1947 		if (unlikely(txq->enqueue_tq == NULL)) {
1948 			if_printf(net,
1949 			    "Unable to create tx %d enqueue task queue\n", i);
1950 			err = ENOMEM;
1951 			goto out;
1952 		}
1953 		taskqueue_start_threads(&txq->enqueue_tq, 1, PI_NET,
1954 		    "mana txq %d", i);
1955 
1956 		mana_alloc_counters((counter_u64_t *)&txq->stats,
1957 		    sizeof(txq->stats));
1958 
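		/* Arm the CQ to request notification of TX completions. */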
1959 		mana_gd_arm_cq(cq->gdma_cq);
1960 	}
1961 
1962 	return 0;
1963 out:
1964 	mana_destroy_txq(apc);
1965 	return err;
1966 }
1967 
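/*
 * Tear down one RX queue: destroy its hardware WQ object and CQ, free the
 * per-queue counters and LRO state, unload and unmap the receive mbufs,
 * destroy the GDMA RQ, and free the rxq structure itself.
 */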
1968 static void
1969 mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq,
1970     bool validate_state)
1971 {
1972 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1973 	struct mana_recv_buf_oob *rx_oob;
1974 	int i;
1975 
1976 	if (!rxq)
1977 		return;
1978 
1979 	if (validate_state) {
1980 		/*
1981 		 * XXX Cancel and drain cleanup task queue here.
1982 		 */
1983 		;
1984 	}
1985 
1986 	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
1987 
1988 	mana_deinit_cq(apc, &rxq->rx_cq);
1989 
1990 	mana_free_counters((counter_u64_t *)&rxq->stats,
1991 	    sizeof(rxq->stats));
1992 
1993 	/* Free LRO resources */
1994 	tcp_lro_free(&rxq->lro);
1995 
1996 	for (i = 0; i < rxq->num_rx_buf; i++) {
1997 		rx_oob = &rxq->rx_oobs[i];
1998 
1999 		if (rx_oob->mbuf)
2000 			mana_unload_rx_mbuf(apc, rxq, rx_oob, true);
2001 
2002 		bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2003 	}
2004 
2005 	if (rxq->gdma_rq)
2006 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
2007 
2008 	free(rxq, M_DEVBUF);
2009 }
2010 
2011 #define MANA_WQE_HEADER_SIZE 16
2012 #define MANA_WQE_SGE_SIZE 16
2013 
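/*
 * Prepare one receive WQE per RX buffer: create a DMA map, load an mbuf
 * into it and fill in the scatter/gather request.  The accumulated RQ and
 * CQ sizes needed to hold all the WQEs are returned to the caller.
 */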
2014 static int
2015 mana_alloc_rx_wqe(struct mana_port_context *apc,
2016     struct mana_rxq *rxq, uint32_t *rxq_size, uint32_t *cq_size)
2017 {
2018 	struct mana_recv_buf_oob *rx_oob;
2019 	uint32_t buf_idx;
2020 	int err;
2021 
2022 	if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) {
2023 		mana_err(NULL,
2024 		    "WARNING: Invalid rxq datasize %u\n", rxq->datasize);
2025 	}
2026 
2027 	*rxq_size = 0;
2028 	*cq_size = 0;
2029 
2030 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2031 		rx_oob = &rxq->rx_oobs[buf_idx];
2032 		memset(rx_oob, 0, sizeof(*rx_oob));
2033 
2034 		err = bus_dmamap_create(apc->rx_buf_tag, 0,
2035 		    &rx_oob->dma_map);
2036 		if (err) {
2037 			mana_err(NULL,
			    "Failed to create rx DMA map for buf %d\n",
2039 			    buf_idx);
2040 			return err;
2041 		}
2042 
2043 		err = mana_load_rx_mbuf(apc, rxq, rx_oob, true);
2044 		if (err) {
2045 			mana_err(NULL,
			    "Failed to load rx mbuf for buf %d\n",
2047 			    buf_idx);
2048 			bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2049 			return err;
2050 		}
2051 
2052 		rx_oob->wqe_req.sgl = rx_oob->sgl;
2053 		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2054 		rx_oob->wqe_req.inline_oob_size = 0;
2055 		rx_oob->wqe_req.inline_oob_data = NULL;
2056 		rx_oob->wqe_req.flags = 0;
2057 		rx_oob->wqe_req.client_data_unit = 0;
2058 
2059 		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2060 				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2061 		*cq_size += COMP_ENTRY_SIZE;
2062 	}
2063 
2064 	return 0;
2065 }
2066 
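/*
 * Post every prepared receive WQE to the GDMA RQ and ring the doorbell so
 * the hardware can start filling the receive buffers.
 */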
2067 static int
2068 mana_push_wqe(struct mana_rxq *rxq)
2069 {
2070 	struct mana_recv_buf_oob *rx_oob;
2071 	uint32_t buf_idx;
2072 	int err;
2073 
2074 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2075 		rx_oob = &rxq->rx_oobs[buf_idx];
2076 
2077 		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2078 		    &rx_oob->wqe_inf);
2079 		if (err)
2080 			return ENOSPC;
2081 	}
2082 
2083 	return 0;
2084 }
2085 
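/*
 * Create one RX queue: allocate the rxq and its receive buffer OOBs,
 * prepare the RX WQEs, optionally set up LRO, create the GDMA RQ and its
 * CQ, bind them into a hardware RX object on the vPort, then post the
 * WQEs and arm the CQ.
 */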
2086 static struct mana_rxq *
2087 mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
2088     struct mana_eq *eq, struct ifnet *ndev)
2089 {
2090 	struct gdma_dev *gd = apc->ac->gdma_dev;
2091 	struct mana_obj_spec wq_spec;
2092 	struct mana_obj_spec cq_spec;
2093 	struct gdma_queue_spec spec;
2094 	struct mana_cq *cq = NULL;
2095 	uint32_t cq_size, rq_size;
2096 	struct gdma_context *gc;
2097 	struct mana_rxq *rxq;
2098 	int err;
2099 
2100 	gc = gd->gdma_context;
2101 
2102 	rxq = malloc(sizeof(*rxq) +
2103 	    RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
2104 	    M_DEVBUF, M_WAITOK | M_ZERO);
2105 	if (!rxq)
2106 		return NULL;
2107 
2108 	rxq->ndev = ndev;
2109 	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
2110 	rxq->rxq_idx = rxq_idx;
2111 	/*
	 * The minimum size is MCLBYTES (2048) bytes for an mbuf cluster.
	 * For now we just allow a maximum size of MAX_FRAME_SIZE (4096).
2114 	 */
2115 	rxq->datasize = ALIGN(apc->frame_size, MCLBYTES);
2116 	if (rxq->datasize > MAX_FRAME_SIZE)
2117 		rxq->datasize = MAX_FRAME_SIZE;
2118 
2119 	mana_dbg(NULL, "Setting rxq %d datasize %d\n",
2120 	    rxq_idx, rxq->datasize);
2121 
2122 	rxq->rxobj = INVALID_MANA_HANDLE;
2123 
2124 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2125 	if (err)
2126 		goto out;
2127 
2128 	/* Create LRO for the RQ */
2129 	if (ndev->if_capenable & IFCAP_LRO) {
2130 		err = tcp_lro_init(&rxq->lro);
2131 		if (err) {
2132 			if_printf(ndev, "Failed to create LRO for rxq %d\n",
2133 			    rxq_idx);
2134 		} else {
2135 			rxq->lro.ifp = ndev;
2136 		}
2137 	}
2138 
2139 	mana_alloc_counters((counter_u64_t *)&rxq->stats,
2140 	    sizeof(rxq->stats));
2141 
2142 	rq_size = ALIGN(rq_size, PAGE_SIZE);
2143 	cq_size = ALIGN(cq_size, PAGE_SIZE);
2144 
2145 	/* Create RQ */
2146 	memset(&spec, 0, sizeof(spec));
2147 	spec.type = GDMA_RQ;
2148 	spec.monitor_avl_buf = true;
2149 	spec.queue_size = rq_size;
2150 	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2151 	if (err)
2152 		goto out;
2153 
2154 	/* Create RQ's CQ */
2155 	cq = &rxq->rx_cq;
2156 	cq->gdma_comp_buf = eq->cqe_poll;
2157 	cq->type = MANA_CQ_TYPE_RX;
2158 	cq->rxq = rxq;
2159 
2160 	memset(&spec, 0, sizeof(spec));
2161 	spec.type = GDMA_CQ;
2162 	spec.monitor_avl_buf = false;
2163 	spec.queue_size = cq_size;
2164 	spec.cq.callback = mana_cq_handler;
2165 	spec.cq.parent_eq = eq->eq;
2166 	spec.cq.context = cq;
2167 	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2168 	if (err)
2169 		goto out;
2170 
2171 	memset(&wq_spec, 0, sizeof(wq_spec));
2172 	memset(&cq_spec, 0, sizeof(cq_spec));
2173 	wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
2174 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
2175 
2176 	cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
2177 	cq_spec.queue_size = cq->gdma_cq->queue_size;
2178 	cq_spec.modr_ctx_id = 0;
2179 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2180 
2181 	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2182 	    &wq_spec, &cq_spec, &rxq->rxobj);
2183 	if (err)
2184 		goto out;
2185 
2186 	rxq->gdma_rq->id = wq_spec.queue_index;
2187 	cq->gdma_cq->id = cq_spec.queue_index;
2188 
2189 	rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
2190 	cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
2191 
2192 	rxq->gdma_id = rxq->gdma_rq->id;
2193 	cq->gdma_id = cq->gdma_cq->id;
2194 
2195 	err = mana_push_wqe(rxq);
2196 	if (err)
2197 		goto out;
2198 
	if (cq->gdma_id >= gc->max_num_cqs) {
		err = EINVAL;
		goto out;
	}
2201 
2202 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2203 
2204 	mana_gd_arm_cq(cq->gdma_cq);
2205 out:
2206 	if (!err)
2207 		return rxq;
2208 
2209 	if_printf(ndev, "Failed to create RXQ: err = %d\n", err);
2210 
2211 	mana_destroy_rxq(apc, rxq, false);
2212 
2213 	if (cq)
2214 		mana_deinit_cq(apc, cq);
2215 
2216 	return NULL;
2217 }
2218 
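/*
 * Create one RX queue per EQ.  The RX object of the first queue becomes
 * the default RX object for the port.
 */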
2219 static int
2220 mana_add_rx_queues(struct mana_port_context *apc, struct ifnet *ndev)
2221 {
2222 	struct mana_rxq *rxq;
2223 	int err = 0;
2224 	int i;
2225 
2226 	for (i = 0; i < apc->num_queues; i++) {
2227 		rxq = mana_create_rxq(apc, i, &apc->eqs[i], ndev);
2228 		if (!rxq) {
2229 			err = ENOMEM;
2230 			goto out;
2231 		}
2232 
2233 		apc->rxqs[i] = rxq;
2234 	}
2235 
2236 	apc->default_rxobj = apc->rxqs[0]->rxobj;
2237 out:
2238 	return err;
2239 }
2240 
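/*
 * Destroy all RX queues of the vPort, draining each RX CQ's parent EQ
 * task first, then destroy all TX queues.
 */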
2241 static void
2242 mana_destroy_vport(struct mana_port_context *apc)
2243 {
2244 	struct mana_rxq *rxq;
2245 	uint32_t rxq_idx;
2246 	struct mana_cq *rx_cq;
2247 	struct gdma_queue *cq, *eq;
2248 
2249 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2250 		rxq = apc->rxqs[rxq_idx];
2251 		if (!rxq)
2252 			continue;
2253 
2254 		rx_cq = &rxq->rx_cq;
2255 		if ((cq = rx_cq->gdma_cq) != NULL) {
2256 			eq = cq->cq.parent;
2257 			mana_drain_eq_task(eq);
2258 		}
2259 
2260 		mana_destroy_rxq(apc, rxq, true);
2261 		apc->rxqs[rxq_idx] = NULL;
2262 	}
2263 
2264 	mana_destroy_txq(apc);
2265 }
2266 
2267 static int
2268 mana_create_vport(struct mana_port_context *apc, struct ifnet *net)
2269 {
2270 	struct gdma_dev *gd = apc->ac->gdma_dev;
2271 	int err;
2272 
2273 	apc->default_rxobj = INVALID_MANA_HANDLE;
2274 
2275 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2276 	if (err)
2277 		return err;
2278 
2279 	return mana_create_txq(apc, net);
2280 }
2281 
2282 
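/*
 * Spread the RSS indirection table entries round-robin across the
 * configured queues.
 */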
static void
mana_rss_table_init(struct mana_port_context *apc)
2284 {
2285 	int i;
2286 
2287 	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
2288 		apc->indir_table[i] = i % apc->num_queues;
2289 }
2290 
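/*
 * Refresh the RX object table from the indirection table when requested,
 * then push the RSS and steering configuration to the vPort.
 */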
int
mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
    bool update_hash, bool update_tab)
2293 {
2294 	uint32_t queue_idx;
2295 	int i;
2296 
2297 	if (update_tab) {
2298 		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
2299 			queue_idx = apc->indir_table[i];
2300 			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2301 		}
2302 	}
2303 
2304 	return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2305 }
2306 
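/*
 * Query the vPort configuration and clamp the port's maximum and active
 * queue counts to what the hardware reports.
 */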
2307 static int
2308 mana_init_port(struct ifnet *ndev)
2309 {
2310 	struct mana_port_context *apc = if_getsoftc(ndev);
2311 	uint32_t max_txq, max_rxq, max_queues;
2312 	int port_idx = apc->port_idx;
2313 	uint32_t num_indirect_entries;
2314 	int err;
2315 
2316 	err = mana_init_port_context(apc);
2317 	if (err)
2318 		return err;
2319 
2320 	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2321 	    &num_indirect_entries);
2322 	if (err) {
2323 		if_printf(ndev, "Failed to query info for vPort 0\n");
2324 		goto reset_apc;
2325 	}
2326 
2327 	max_queues = min_t(uint32_t, max_txq, max_rxq);
2328 	if (apc->max_queues > max_queues)
2329 		apc->max_queues = max_queues;
2330 
2331 	if (apc->num_queues > apc->max_queues)
2332 		apc->num_queues = apc->max_queues;
2333 
2334 	return 0;
2335 
2336 reset_apc:
2337 	bus_dma_tag_destroy(apc->rx_buf_tag);
2338 	apc->rx_buf_tag = NULL;
2339 	free(apc->rxqs, M_DEVBUF);
2340 	apc->rxqs = NULL;
2341 	return err;
2342 }
2343 
2344 int
2345 mana_alloc_queues(struct ifnet *ndev)
2346 {
2347 	struct mana_port_context *apc = if_getsoftc(ndev);
2348 	struct gdma_dev *gd = apc->ac->gdma_dev;
2349 	int err;
2350 
2351 	err = mana_create_eq(apc);
2352 	if (err)
2353 		return err;
2354 
2355 	err = mana_create_vport(apc, ndev);
2356 	if (err)
2357 		goto destroy_eq;
2358 
2359 	err = mana_add_rx_queues(apc, ndev);
2360 	if (err)
2361 		goto destroy_vport;
2362 
2363 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2364 
2365 	mana_rss_table_init(apc);
2366 
2367 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2368 	if (err)
2369 		goto destroy_vport;
2370 
2371 	return 0;
2372 
2373 destroy_vport:
2374 	mana_destroy_vport(apc);
2375 destroy_eq:
2376 	mana_destroy_eq(gd->gdma_context, apc);
2377 	return err;
2378 }
2379 
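/*
 * Bring the port up: create the EQs and TX/RX queues, configure RSS, add
 * the per-queue sysctl nodes, then mark the port up and report link up to
 * the stack.
 */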
2380 static int
2381 mana_up(struct mana_port_context *apc)
2382 {
2383 	int err;
2384 
2385 	mana_dbg(NULL, "mana_up called\n");
2386 
2387 	err = mana_alloc_queues(apc->ndev);
2388 	if (err) {
		mana_err(NULL, "Failed to allocate mana queues: %d\n", err);
2390 		return err;
2391 	}
2392 
2393 	/* Add queue specific sysctl */
2394 	mana_sysctl_add_queues(apc);
2395 
2396 	apc->port_is_up = true;
2397 
2398 	/* Ensure port state updated before txq state */
2399 	wmb();
2400 
2401 	if_link_state_change(apc->ndev, LINK_STATE_UP);
2402 	if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2403 
2404 	return 0;
2405 }
2406 
2408 static void
2409 mana_init(void *arg)
2410 {
2411 	struct mana_port_context *apc = (struct mana_port_context *)arg;
2412 
2413 	MANA_APC_LOCK_LOCK(apc);
2414 	if (!apc->port_is_up) {
2415 		mana_up(apc);
2416 	}
2417 	MANA_APC_LOCK_UNLOCK(apc);
2418 }
2419 
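/*
 * Quiesce TX by draining the in-flight sends, disable RSS steering, then
 * tear down the vPort queues and the EQs.
 */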
2420 static int
2421 mana_dealloc_queues(struct ifnet *ndev)
2422 {
2423 	struct mana_port_context *apc = if_getsoftc(ndev);
2424 	struct mana_txq *txq;
2425 	int i, err;
2426 
2427 	if (apc->port_is_up)
2428 		return EINVAL;
2429 
	/*
	 * No packet can be transmitted now since apc->port_is_up is false.
	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
	 * a txq because it may not see apc->port_is_up being cleared to
	 * false in time, but it doesn't matter since mana_start_xmit()
	 * drops any new packets due to apc->port_is_up being false.
	 *
	 * Drain all the in-flight TX packets.
	 */
2438 	for (i = 0; i < apc->num_queues; i++) {
2439 		txq = &apc->tx_qp[i].txq;
2440 
2441 		struct mana_cq *tx_cq = &apc->tx_qp[i].tx_cq;
2442 		struct gdma_queue *eq = NULL;
2443 		if (tx_cq->gdma_cq)
2444 			eq = tx_cq->gdma_cq->cq.parent;
2445 		if (eq) {
2446 			/* Stop EQ interrupt */
2447 			eq->eq.do_not_ring_db = true;
2448 			/* Schedule a cleanup task */
2449 			taskqueue_enqueue(eq->eq.cleanup_tq,
2450 			    &eq->eq.cleanup_task);
2451 		}
2452 
2453 		while (atomic_read(&txq->pending_sends) > 0)
2454 			usleep_range(1000, 2000);
2455 	}
2456 
	/*
	 * At this point the queues can no longer be woken up, because
	 * mana_poll_tx_cq() is guaranteed not to be running anymore.
	 */
2460 
2461 	apc->rss_state = TRI_STATE_FALSE;
2462 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2463 	if (err) {
2464 		if_printf(ndev, "Failed to disable vPort: %d\n", err);
2465 		return err;
2466 	}
2467 
2468 	/* TODO: Implement RX fencing */
2469 	gdma_msleep(1000);
2470 
2471 	mana_destroy_vport(apc);
2472 
2473 	mana_destroy_eq(apc->ac->gdma_dev->gdma_context, apc);
2474 
2475 	return 0;
2476 }
2477 
2478 static int
2479 mana_down(struct mana_port_context *apc)
2480 {
2481 	int err = 0;
2482 
2483 	apc->port_st_save = apc->port_is_up;
2484 	apc->port_is_up = false;
2485 
2486 	/* Ensure port state updated before txq state */
2487 	wmb();
2488 
2489 	if (apc->port_st_save) {
2490 		if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE,
2491 		    IFF_DRV_RUNNING);
2492 		if_link_state_change(apc->ndev, LINK_STATE_DOWN);
2493 
2494 		mana_sysctl_free_queues(apc);
2495 
2496 		err = mana_dealloc_queues(apc->ndev);
2497 		if (err) {
2498 			if_printf(apc->ndev,
2499 			    "Failed to bring down mana interface: %d\n", err);
2500 		}
2501 	}
2502 
2503 	return err;
2504 }
2505 
2506 int
2507 mana_detach(struct ifnet *ndev)
2508 {
2509 	struct mana_port_context *apc = if_getsoftc(ndev);
2510 	int err;
2511 
2512 	ether_ifdetach(ndev);
2513 
2514 	if (!apc)
2515 		return 0;
2516 
2517 	MANA_APC_LOCK_LOCK(apc);
2518 	err = mana_down(apc);
2519 	MANA_APC_LOCK_UNLOCK(apc);
2520 
2521 	mana_cleanup_port_context(apc);
2522 
2523 	MANA_APC_LOCK_DESTROY(apc);
2524 
2525 	free(apc, M_DEVBUF);
2526 
2527 	return err;
2528 }
2529 
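/*
 * Create and attach the ifnet for one vPort: allocate the port context,
 * set up the interface methods, capabilities and TSO parameters, attach
 * ifmedia and ethernet, and register the per-port sysctl nodes.
 */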
2530 static int
2531 mana_probe_port(struct mana_context *ac, int port_idx,
2532     struct ifnet **ndev_storage)
2533 {
2534 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
2535 	struct mana_port_context *apc;
2536 	struct ifnet *ndev;
2537 	int err;
2538 
2539 	ndev = if_alloc_dev(IFT_ETHER, gc->dev);
2540 	if (!ndev) {
2541 		mana_err(NULL, "Failed to allocate ifnet struct\n");
2542 		return ENOMEM;
2543 	}
2544 
2545 	*ndev_storage = ndev;
2546 
2547 	apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
2548 	if (!apc) {
2549 		mana_err(NULL, "Failed to allocate port context\n");
2550 		err = ENOMEM;
2551 		goto free_net;
2552 	}
2553 
2554 	apc->ac = ac;
2555 	apc->ndev = ndev;
2556 	apc->max_queues = gc->max_num_queues;
2557 	apc->num_queues = min_t(unsigned int,
2558 	    gc->max_num_queues, MANA_MAX_NUM_QUEUES);
2559 	apc->port_handle = INVALID_MANA_HANDLE;
2560 	apc->port_idx = port_idx;
2561 	apc->frame_size = DEFAULT_FRAME_SIZE;
2562 
2563 	MANA_APC_LOCK_INIT(apc);
2564 
2565 	if_initname(ndev, device_get_name(gc->dev), port_idx);
	if_setdev(ndev, gc->dev);
2567 	if_setsoftc(ndev, apc);
2568 
2569 	if_setflags(ndev, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2570 	if_setinitfn(ndev, mana_init);
2571 	if_settransmitfn(ndev, mana_start_xmit);
2572 	if_setqflushfn(ndev, mana_qflush);
2573 	if_setioctlfn(ndev, mana_ioctl);
2574 	if_setgetcounterfn(ndev, mana_get_counter);
2575 
2576 	if_setmtu(ndev, ETHERMTU);
2577 	if_setbaudrate(ndev, IF_Gbps(100));
2578 
2579 	mana_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2580 
2581 	err = mana_init_port(ndev);
2582 	if (err)
2583 		goto reset_apc;
2584 
2585 	ndev->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
2586 	ndev->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
2587 	ndev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
2588 
2589 	ndev->if_capabilities |= IFCAP_LRO | IFCAP_LINKSTATE;
2590 
2591 	/* Enable all available capabilities by default. */
2592 	ndev->if_capenable = ndev->if_capabilities;
2593 
2594 	/* TSO parameters */
2595 	ndev->if_hw_tsomax = MAX_MBUF_FRAGS * MANA_TSO_MAXSEG_SZ -
2596 	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2597 	ndev->if_hw_tsomaxsegcount = MAX_MBUF_FRAGS;
2598 	ndev->if_hw_tsomaxsegsize = PAGE_SIZE;
2599 
2600 	ifmedia_init(&apc->media, IFM_IMASK,
2601 	    mana_ifmedia_change, mana_ifmedia_status);
2602 	ifmedia_add(&apc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2603 	ifmedia_set(&apc->media, IFM_ETHER | IFM_AUTO);
2604 
2605 	ether_ifattach(ndev, apc->mac_addr);
2606 
2607 	/* Initialize statistics */
2608 	mana_alloc_counters((counter_u64_t *)&apc->port_stats,
2609 	    sizeof(struct mana_port_stats));
2610 	mana_sysctl_add_port(apc);
2611 
2612 	/* Tell the stack that the interface is not active */
2613 	if_setdrvflagbits(ndev, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2614 
2615 	return 0;
2616 
2617 reset_apc:
2618 	free(apc, M_DEVBUF);
2619 free_net:
2620 	*ndev_storage = NULL;
2621 	if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2622 	if_free(ndev);
2623 	return err;
2624 }
2625 
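/*
 * Register the MANA device with the GDMA layer, query the device
 * configuration for the number of ports, and probe each port.
 */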
int
mana_probe(struct gdma_dev *gd)
2627 {
2628 	struct gdma_context *gc = gd->gdma_context;
2629 	device_t dev = gc->dev;
2630 	struct mana_context *ac;
2631 	int err;
2632 	int i;
2633 
2634 	device_printf(dev, "%s protocol version: %d.%d.%d\n", DEVICE_NAME,
2635 		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2636 
2637 	err = mana_gd_register_device(gd);
2638 	if (err)
2639 		return err;
2640 
2641 	ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
2642 	if (!ac)
2643 		return ENOMEM;
2644 
2645 	ac->gdma_dev = gd;
2646 	ac->num_ports = 1;
2647 	gd->driver_data = ac;
2648 
2649 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
2650 	    MANA_MICRO_VERSION, &ac->num_ports);
2651 	if (err)
2652 		goto out;
2653 
2654 	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2655 		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2656 
2657 	for (i = 0; i < ac->num_ports; i++) {
2658 		err = mana_probe_port(ac, i, &ac->ports[i]);
2659 		if (err) {
2660 			device_printf(dev,
2661 			    "Failed to probe mana port %d\n", i);
2662 			break;
2663 		}
2664 	}
2665 
2666 out:
2667 	if (err)
2668 		mana_remove(gd);
2669 
2670 	return err;
2671 }
2672 
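/*
 * Detach and free the ifnet of every port, then deregister from the GDMA
 * layer and free the adapter context.
 */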
2673 void
2674 mana_remove(struct gdma_dev *gd)
2675 {
2676 	struct gdma_context *gc = gd->gdma_context;
2677 	struct mana_context *ac = gd->driver_data;
2678 	device_t dev = gc->dev;
2679 	struct ifnet *ndev;
2680 	int i;
2681 
2682 	for (i = 0; i < ac->num_ports; i++) {
2683 		ndev = ac->ports[i];
2684 		if (!ndev) {
2685 			if (i == 0)
2686 				device_printf(dev, "No net device to remove\n");
2687 			goto out;
2688 		}
2689 
2690 		mana_detach(ndev);
2691 
2692 		if_free(ndev);
2693 	}
2694 out:
2695 	mana_gd_deregister_device(gd);
2696 	gd->driver_data = NULL;
2697 	gd->gdma_context = NULL;
2698 	free(ac, M_DEVBUF);
2699 }
2700