xref: /freebsd/sys/dev/mana/mana_en.c (revision a1452eec4768272056aa070db94ea7184ce1117c)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2021 Microsoft Corp.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/bus.h>
34 #include <sys/kernel.h>
35 #include <sys/kthread.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/smp.h>
39 #include <sys/socket.h>
40 #include <sys/sockio.h>
41 #include <sys/time.h>
42 #include <sys/eventhandler.h>
43 
44 #include <machine/bus.h>
45 #include <machine/resource.h>
46 #include <machine/in_cksum.h>
47 
48 #include <net/if.h>
49 #include <net/if_var.h>
50 #include <net/if_types.h>
51 #include <net/if_vlan_var.h>
52 #ifdef RSS
53 #include <net/rss_config.h>
54 #endif
55 
56 #include <netinet/in_systm.h>
57 #include <netinet/in.h>
58 #include <netinet/if_ether.h>
59 #include <netinet/ip.h>
60 #include <netinet/ip6.h>
61 #include <netinet/tcp.h>
62 #include <netinet/udp.h>
63 
64 #include "mana.h"
65 #include "mana_sysctl.h"
66 
67 static int mana_up(struct mana_port_context *apc);
68 static int mana_down(struct mana_port_context *apc);
69 
70 extern unsigned int mana_tx_req_size;
71 extern unsigned int mana_rx_req_size;
72 
73 static void
74 mana_rss_key_fill(void *k, size_t size)
75 {
76 	static bool rss_key_generated = false;
77 	static uint8_t rss_key[MANA_HASH_KEY_SIZE];
78 
79 	KASSERT(size <= MANA_HASH_KEY_SIZE,
80 	    ("Request more bytes than MANA RSS key can hold"));
81 
82 	if (!rss_key_generated) {
83 		arc4random_buf(rss_key, MANA_HASH_KEY_SIZE);
84 		rss_key_generated = true;
85 	}
86 	memcpy(k, rss_key, size);
87 }
88 
89 static int
90 mana_ifmedia_change(if_t ifp __unused)
91 {
92 	return EOPNOTSUPP;
93 }
94 
95 static void
96 mana_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
97 {
98 	struct mana_port_context *apc = if_getsoftc(ifp);
99 
100 	if (!apc) {
101 		if_printf(ifp, "Port not available\n");
102 		return;
103 	}
104 
105 	MANA_APC_LOCK_LOCK(apc);
106 
107 	ifmr->ifm_status = IFM_AVALID;
108 	ifmr->ifm_active = IFM_ETHER;
109 
110 	if (!apc->port_is_up) {
111 		MANA_APC_LOCK_UNLOCK(apc);
112 		mana_dbg(NULL, "Port %u link is down\n", apc->port_idx);
113 		return;
114 	}
115 
116 	ifmr->ifm_status |= IFM_ACTIVE;
117 	ifmr->ifm_active |= IFM_100G_DR | IFM_FDX;
118 
119 	MANA_APC_LOCK_UNLOCK(apc);
120 }
121 
122 static uint64_t
123 mana_get_counter(if_t ifp, ift_counter cnt)
124 {
125 	struct mana_port_context *apc = if_getsoftc(ifp);
126 	struct mana_port_stats *stats = &apc->port_stats;
127 
128 	switch (cnt) {
129 	case IFCOUNTER_IPACKETS:
130 		return (counter_u64_fetch(stats->rx_packets));
131 	case IFCOUNTER_OPACKETS:
132 		return (counter_u64_fetch(stats->tx_packets));
133 	case IFCOUNTER_IBYTES:
134 		return (counter_u64_fetch(stats->rx_bytes));
135 	case IFCOUNTER_OBYTES:
136 		return (counter_u64_fetch(stats->tx_bytes));
137 	case IFCOUNTER_IQDROPS:
138 		return (counter_u64_fetch(stats->rx_drops));
139 	case IFCOUNTER_OQDROPS:
140 		return (counter_u64_fetch(stats->tx_drops));
141 	default:
142 		return (if_get_counter_default(ifp, cnt));
143 	}
144 }
145 
146 static void
147 mana_qflush(if_t ifp)
148 {
149 	if_qflush(ifp);
150 }
151 
152 int
153 mana_restart(struct mana_port_context *apc)
154 {
155 	int rc = 0;
156 
157 	MANA_APC_LOCK_LOCK(apc);
158 	if (apc->port_is_up)
159 		 mana_down(apc);
160 
161 	rc = mana_up(apc);
162 	MANA_APC_LOCK_UNLOCK(apc);
163 
164 	return (rc);
165 }
166 
167 static int
168 mana_ioctl(if_t ifp, u_long command, caddr_t data)
169 {
170 	struct mana_port_context *apc = if_getsoftc(ifp);
171 	struct ifrsskey *ifrk;
172 	struct ifrsshash *ifrh;
173 	struct ifreq *ifr;
174 	uint16_t new_mtu;
175 	int rc = 0, mask;
176 
177 	switch (command) {
178 	case SIOCSIFMTU:
179 		ifr = (struct ifreq *)data;
180 		new_mtu = ifr->ifr_mtu;
181 		if (if_getmtu(ifp) == new_mtu)
182 			break;
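		/*
		 * The 18-byte adjustment below is the per-frame overhead
		 * added on top of the MTU (presumably the Ethernet header
		 * plus the trailing CRC); apc->frame_size is set to the
		 * same value further down.
		 */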
183 		if ((new_mtu + 18 > MAX_FRAME_SIZE) ||
184 		    (new_mtu + 18 < MIN_FRAME_SIZE)) {
185 			if_printf(ifp, "Invalid MTU. new_mtu: %d, "
186 			    "max allowed: %d, min allowed: %d\n",
187 			    new_mtu, MAX_FRAME_SIZE - 18, MIN_FRAME_SIZE - 18);
188 			return EINVAL;
189 		}
190 		MANA_APC_LOCK_LOCK(apc);
191 		if (apc->port_is_up)
192 			mana_down(apc);
193 
194 		apc->frame_size = new_mtu + 18;
195 		if_setmtu(ifp, new_mtu);
196 		mana_dbg(NULL, "Set MTU to %d\n", new_mtu);
197 
198 		rc = mana_up(apc);
199 		MANA_APC_LOCK_UNLOCK(apc);
200 		break;
201 
202 	case SIOCSIFFLAGS:
203 		if (if_getflags(ifp) & IFF_UP) {
204 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
205 				MANA_APC_LOCK_LOCK(apc);
206 				if (!apc->port_is_up)
207 					rc = mana_up(apc);
208 				MANA_APC_LOCK_UNLOCK(apc);
209 			}
210 		} else {
211 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
212 				MANA_APC_LOCK_LOCK(apc);
213 				if (apc->port_is_up)
214 					mana_down(apc);
215 				MANA_APC_LOCK_UNLOCK(apc);
216 			}
217 		}
218 		break;
219 
220 	case SIOCSIFCAP:
221 		MANA_APC_LOCK_LOCK(apc);
222 		ifr = (struct ifreq *)data;
223 		/*
224 		 * Fix up requested capabilities w/ supported capabilities,
225 		 * since the supported capabilities could have been changed.
226 		 */
227 		mask = (ifr->ifr_reqcap & if_getcapabilities(ifp)) ^
228 		    if_getcapenable(ifp);
229 
230 		if (mask & IFCAP_TXCSUM) {
231 			if_togglecapenable(ifp, IFCAP_TXCSUM);
232 			if_togglehwassist(ifp, (CSUM_TCP | CSUM_UDP | CSUM_IP));
233 
234 			if ((IFCAP_TSO4 & if_getcapenable(ifp)) &&
235 			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
236 				mask &= ~IFCAP_TSO4;
237 				if_setcapenablebit(ifp, 0, IFCAP_TSO4);
238 				if_sethwassistbits(ifp, 0, CSUM_IP_TSO);
239 				mana_warn(NULL,
240 				    "Also disabled tso4 due to -txcsum.\n");
241 			}
242 		}
243 
244 		if (mask & IFCAP_TXCSUM_IPV6) {
245 			if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
246 			if_togglehwassist(ifp, (CSUM_UDP_IPV6 | CSUM_TCP_IPV6));
247 
248 			if ((IFCAP_TSO6 & if_getcapenable(ifp)) &&
249 			    !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
250 				mask &= ~IFCAP_TSO6;
251 				if_setcapenablebit(ifp, 0, IFCAP_TSO6);
252 				if_sethwassistbits(ifp, 0, CSUM_IP6_TSO);
253 				mana_warn(ifp,
254 				    "Also disabled tso6 due to -txcsum6.\n");
255 			}
256 		}
257 
258 		if (mask & IFCAP_RXCSUM)
259 			if_togglecapenable(ifp, IFCAP_RXCSUM);
260 		/* We can't distinguish IPv6 from IPv4 packets on the RX path. */
261 		if (mask & IFCAP_RXCSUM_IPV6)
262 			if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
263 
264 		if (mask & IFCAP_LRO)
265 			if_togglecapenable(ifp, IFCAP_LRO);
266 
267 		if (mask & IFCAP_TSO4) {
268 			if (!(IFCAP_TSO4 & if_getcapenable(ifp)) &&
269 			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
270 				MANA_APC_LOCK_UNLOCK(apc);
271 				if_printf(ifp, "Enable txcsum first.\n");
272 				rc = EAGAIN;
273 				goto out;
274 			}
275 			if_togglecapenable(ifp, IFCAP_TSO4);
276 			if_togglehwassist(ifp, CSUM_IP_TSO);
277 		}
278 
279 		if (mask & IFCAP_TSO6) {
280 			if (!(IFCAP_TSO6 & if_getcapenable(ifp)) &&
281 			    !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
282 				MANA_APC_LOCK_UNLOCK(apc);
283 				if_printf(ifp, "Enable txcsum6 first.\n");
284 				rc = EAGAIN;
285 				goto out;
286 			}
287 			if_togglecapenable(ifp, IFCAP_TSO6);
288 			if_togglehwassist(ifp, CSUM_IP6_TSO);
289 		}
290 
291 		MANA_APC_LOCK_UNLOCK(apc);
292 out:
293 		break;
294 
295 	case SIOCSIFMEDIA:
296 	case SIOCGIFMEDIA:
297 	case SIOCGIFXMEDIA:
298 		ifr = (struct ifreq *)data;
299 		rc = ifmedia_ioctl(ifp, ifr, &apc->media, command);
300 		break;
301 
302 	case SIOCGIFRSSKEY:
303 		ifrk = (struct ifrsskey *)data;
304 		ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
305 		ifrk->ifrk_keylen = MANA_HASH_KEY_SIZE;
306 		memcpy(ifrk->ifrk_key, apc->hashkey, MANA_HASH_KEY_SIZE);
307 		break;
308 
309 	case SIOCGIFRSSHASH:
310 		ifrh = (struct ifrsshash *)data;
311 		ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
312 		ifrh->ifrh_types =
313 		    RSS_TYPE_TCP_IPV4 |
314 		    RSS_TYPE_UDP_IPV4 |
315 		    RSS_TYPE_TCP_IPV6 |
316 		    RSS_TYPE_UDP_IPV6;
317 		break;
318 
319 	default:
320 		rc = ether_ioctl(ifp, command, data);
321 		break;
322 	}
323 
324 	return (rc);
325 }
326 
327 static inline void
328 mana_alloc_counters(counter_u64_t *begin, int size)
329 {
330 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
331 
332 	for (; begin < end; ++begin)
333 		*begin = counter_u64_alloc(M_WAITOK);
334 }
335 
336 static inline void
337 mana_free_counters(counter_u64_t *begin, int size)
338 {
339 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
340 
341 	for (; begin < end; ++begin)
342 		counter_u64_free(*begin);
343 }
344 
345 static bool
346 mana_can_tx(struct gdma_queue *wq)
347 {
348 	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
349 }
350 
351 static inline int
352 mana_tx_map_mbuf(struct mana_port_context *apc,
353     struct mana_send_buf_info *tx_info,
354     struct mbuf **m_head, struct mana_tx_package *tp,
355     struct mana_stats *tx_stats)
356 {
357 	struct gdma_dev *gd = apc->ac->gdma_dev;
358 	bus_dma_segment_t segs[MAX_MBUF_FRAGS];
359 	struct mbuf *m = *m_head;
360 	int err, nsegs, i;
361 
362 	err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, tx_info->dma_map,
363 	    m, segs, &nsegs, BUS_DMA_NOWAIT);
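	/*
	 * EFBIG from bus_dmamap_load_mbuf_sg() means the mbuf chain has
	 * more segments than the TX DMA tag allows (MAX_MBUF_FRAGS);
	 * collapse the chain into fewer mbufs and retry the mapping once.
	 */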
364 	if (err == EFBIG) {
365 		struct mbuf *m_new;
366 
367 		counter_u64_add(tx_stats->collapse, 1);
368 		m_new = m_collapse(m, M_NOWAIT, MAX_MBUF_FRAGS);
369 		if (unlikely(m_new == NULL)) {
370 			counter_u64_add(tx_stats->collapse_err, 1);
371 			return ENOBUFS;
372 		} else {
373 			*m_head = m = m_new;
374 		}
375 
376 		mana_warn(NULL,
377 		    "Too many segs in orig mbuf, m_collapse called\n");
378 
379 		err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag,
380 		    tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
381 	}
382 	if (!err) {
383 		for (i = 0; i < nsegs; i++) {
384 			tp->wqe_req.sgl[i].address = segs[i].ds_addr;
385 			tp->wqe_req.sgl[i].mem_key = gd->gpa_mkey;
386 			tp->wqe_req.sgl[i].size = segs[i].ds_len;
387 		}
388 		tp->wqe_req.num_sge = nsegs;
389 
390 		tx_info->mbuf = *m_head;
391 
392 		bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
393 		    BUS_DMASYNC_PREWRITE);
394 	}
395 
396 	return err;
397 }
398 
399 static inline void
400 mana_tx_unmap_mbuf(struct mana_port_context *apc,
401     struct mana_send_buf_info *tx_info)
402 {
403 	bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
404 	    BUS_DMASYNC_POSTWRITE);
405 	bus_dmamap_unload(apc->tx_buf_tag, tx_info->dma_map);
406 	if (tx_info->mbuf) {
407 		m_freem(tx_info->mbuf);
408 		tx_info->mbuf = NULL;
409 	}
410 }
411 
412 static inline int
413 mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
414     struct mana_recv_buf_oob *rx_oob, bool alloc_mbuf)
415 {
416 	bus_dma_segment_t segs[1];
417 	struct mbuf *mbuf;
418 	int nsegs, err;
419 	uint32_t mlen;
420 
421 	if (alloc_mbuf) {
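		/*
		 * Prefer a cluster large enough for rxq->datasize; if none
		 * is available, fall back to a standard cluster (MCLBYTES)
		 * so the receive ring can still be replenished.
		 */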
422 		mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize);
423 		if (unlikely(mbuf == NULL)) {
424 			mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
425 			if (unlikely(mbuf == NULL)) {
426 				return ENOMEM;
427 			}
428 			mlen = MCLBYTES;
429 		} else {
430 			mlen = rxq->datasize;
431 		}
432 
433 		mbuf->m_pkthdr.len = mbuf->m_len = mlen;
434 	} else {
435 		if (rx_oob->mbuf) {
436 			mbuf = rx_oob->mbuf;
437 			mlen = rx_oob->mbuf->m_pkthdr.len;
438 		} else {
439 			return ENOMEM;
440 		}
441 	}
442 
443 	err = bus_dmamap_load_mbuf_sg(apc->rx_buf_tag, rx_oob->dma_map,
444 	    mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
445 
446 	if (unlikely((err != 0) || (nsegs != 1))) {
447 		mana_warn(NULL, "Failed to map mbuf, error: %d, "
448 		    "nsegs: %d\n", err, nsegs);
449 		counter_u64_add(rxq->stats.dma_mapping_err, 1);
450 		goto error;
451 	}
452 
453 	bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
454 	    BUS_DMASYNC_PREREAD);
455 
456 	rx_oob->mbuf = mbuf;
457 	rx_oob->num_sge = 1;
458 	rx_oob->sgl[0].address = segs[0].ds_addr;
459 	rx_oob->sgl[0].size = mlen;
460 	rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
461 
462 	return 0;
463 
464 error:
465 	m_freem(mbuf);
466 	return EFAULT;
467 }
468 
469 static inline void
470 mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
471     struct mana_recv_buf_oob *rx_oob, bool free_mbuf)
472 {
473 	bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
474 	    BUS_DMASYNC_POSTREAD);
475 	bus_dmamap_unload(apc->rx_buf_tag, rx_oob->dma_map);
476 
477 	if (free_mbuf && rx_oob->mbuf) {
478 		m_freem(rx_oob->mbuf);
479 		rx_oob->mbuf = NULL;
480 	}
481 }
482 
483 
484 /* Use a couple of mbuf PH_loc slots for the L3 and L4 protocol types */
485 #define MANA_L3_PROTO(_mbuf)	((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
486 #define MANA_L4_PROTO(_mbuf)	((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
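/*
 * These fields are filled in by mana_tso_fixup()/mana_mbuf_csum_check()
 * on the transmit path and consumed by mana_xmit() when it sets the
 * outer IPv4/IPv6 and TCP/UDP checksum-offload bits in the TX OOB.
 */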
487 
488 #define MANA_TXQ_FULL	(IFF_DRV_RUNNING | IFF_DRV_OACTIVE)
489 
490 static void
491 mana_xmit(struct mana_txq *txq)
492 {
493 	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
494 	struct mana_send_buf_info *tx_info;
495 	if_t ndev = txq->ndev;
496 	struct mbuf *mbuf;
497 	struct mana_port_context *apc = if_getsoftc(ndev);
498 	unsigned int tx_queue_size = apc->tx_queue_size;
499 	struct mana_port_stats *port_stats = &apc->port_stats;
500 	struct gdma_dev *gd = apc->ac->gdma_dev;
501 	uint64_t packets, bytes;
502 	uint16_t next_to_use;
503 	struct mana_tx_package pkg = {};
504 	struct mana_stats *tx_stats;
505 	struct gdma_queue *gdma_sq;
506 	struct mana_cq *cq;
507 	int err, len;
508 	bool is_tso;
509 
510 	gdma_sq = txq->gdma_sq;
511 	cq = &apc->tx_qp[txq->idx].tx_cq;
512 	tx_stats = &txq->stats;
513 
514 	packets = 0;
515 	bytes = 0;
516 	next_to_use = txq->next_to_use;
517 
518 	while ((mbuf = drbr_peek(ndev, txq->txq_br)) != NULL) {
519 		if (!apc->port_is_up ||
520 		    (if_getdrvflags(ndev) & MANA_TXQ_FULL) != IFF_DRV_RUNNING) {
521 			drbr_putback(ndev, txq->txq_br, mbuf);
522 			break;
523 		}
524 
525 		if (!mana_can_tx(gdma_sq)) {
526 			/* SQ is full. Set the IFF_DRV_OACTIVE flag */
527 			if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, 0);
528 			counter_u64_add(tx_stats->stop, 1);
529 			uint64_t stops = counter_u64_fetch(tx_stats->stop);
530 			uint64_t wakeups = counter_u64_fetch(tx_stats->wakeup);
531 #define MANA_TXQ_STOP_THRESHOLD		50
532 			if (stops > MANA_TXQ_STOP_THRESHOLD && wakeups > 0 &&
533 			    stops > wakeups && txq->alt_txq_idx == txq->idx) {
534 				txq->alt_txq_idx =
535 				    (txq->idx + (stops / wakeups))
536 				    % apc->num_queues;
537 				counter_u64_add(tx_stats->alt_chg, 1);
538 			}
539 
540 			drbr_putback(ndev, txq->txq_br, mbuf);
541 
542 			taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
543 			break;
544 		}
545 
546 		tx_info = &txq->tx_buf_info[next_to_use];
547 
548 		memset(&pkg, 0, sizeof(struct mana_tx_package));
549 		pkg.wqe_req.sgl = pkg.sgl_array;
550 
551 		err = mana_tx_map_mbuf(apc, tx_info, &mbuf, &pkg, tx_stats);
552 		if (unlikely(err)) {
553 			mana_dbg(NULL,
554 			    "Failed to map tx mbuf, err %d\n", err);
555 
556 			counter_u64_add(tx_stats->dma_mapping_err, 1);
557 
558 			/* The mbuf is still there. Free it */
559 			m_freem(mbuf);
560 			/* Advance the drbr queue */
561 			drbr_advance(ndev, txq->txq_br);
562 			continue;
563 		}
564 
565 		pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
566 		pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
567 
568 		if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
569 			pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
570 			pkt_fmt = MANA_LONG_PKT_FMT;
571 		} else {
572 			pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
573 		}
574 
575 		pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
576 
577 		if (pkt_fmt == MANA_SHORT_PKT_FMT)
578 			pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
579 		else
580 			pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
581 
582 		pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
583 		pkg.wqe_req.flags = 0;
584 		pkg.wqe_req.client_data_unit = 0;
585 
586 		is_tso = false;
587 		if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
588 			is_tso = true;
589 
590 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
591 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
592 			else
593 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
594 
595 			pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
596 			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
597 			pkg.tx_oob.s_oob.trans_off = mbuf->m_pkthdr.l3hlen;
598 
599 			pkg.wqe_req.client_data_unit = mbuf->m_pkthdr.tso_segsz;
600 			pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
601 		} else if (mbuf->m_pkthdr.csum_flags &
602 		    (CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP6_UDP | CSUM_IP6_TCP)) {
603 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP) {
604 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
605 				pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
606 			} else {
607 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
608 			}
609 
610 			if (MANA_L4_PROTO(mbuf) == IPPROTO_TCP) {
611 				pkg.tx_oob.s_oob.comp_tcp_csum = 1;
612 				pkg.tx_oob.s_oob.trans_off =
613 				    mbuf->m_pkthdr.l3hlen;
614 			} else {
615 				pkg.tx_oob.s_oob.comp_udp_csum = 1;
616 			}
617 		} else if (mbuf->m_pkthdr.csum_flags & CSUM_IP) {
618 			pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
619 			pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
620 		} else {
621 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
622 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
623 			else if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IPV6)
624 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
625 		}
626 
627 		len = mbuf->m_pkthdr.len;
628 
629 		err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
630 		    (struct gdma_posted_wqe_info *)&tx_info->wqe_inf);
631 		if (unlikely(err)) {
632 			/* Should not happen */
633 			if_printf(ndev, "Failed to post TX OOB: %d\n", err);
634 
635 			mana_tx_unmap_mbuf(apc, tx_info);
636 
637 			drbr_advance(ndev, txq->txq_br);
638 			continue;
639 		}
640 
641 		next_to_use =
642 		    (next_to_use + 1) % tx_queue_size;
643 
644 		(void)atomic_inc_return(&txq->pending_sends);
645 
646 		drbr_advance(ndev, txq->txq_br);
647 
648 		mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
649 
650 		packets++;
651 		bytes += len;
652 
653 		if (is_tso) {
654 			txq->tso_pkts++;
655 			txq->tso_bytes += len;
656 		}
657 	}
658 
659 	counter_enter();
660 	counter_u64_add_protected(tx_stats->packets, packets);
661 	counter_u64_add_protected(port_stats->tx_packets, packets);
662 	counter_u64_add_protected(tx_stats->bytes, bytes);
663 	counter_u64_add_protected(port_stats->tx_bytes, bytes);
664 	counter_exit();
665 
666 	txq->next_to_use = next_to_use;
667 }
668 
669 static void
670 mana_xmit_taskfunc(void *arg, int pending)
671 {
672 	struct mana_txq *txq = (struct mana_txq *)arg;
673 	if_t ndev = txq->ndev;
674 	struct mana_port_context *apc = if_getsoftc(ndev);
675 
676 	while (!drbr_empty(ndev, txq->txq_br) && apc->port_is_up &&
677 	    (if_getdrvflags(ndev) & MANA_TXQ_FULL) == IFF_DRV_RUNNING) {
678 		mtx_lock(&txq->txq_mtx);
679 		mana_xmit(txq);
680 		mtx_unlock(&txq->txq_mtx);
681 	}
682 }
683 
684 #define PULLUP_HDR(m, len)				\
685 do {							\
686 	if (unlikely((m)->m_len < (len))) {		\
687 		(m) = m_pullup((m), (len));		\
688 		if ((m) == NULL)			\
689 			return (NULL);			\
690 	}						\
691 } while (0)
692 
693 /*
694  * If this function fails, the mbuf is freed.
695  */
696 static inline struct mbuf *
697 mana_tso_fixup(struct mbuf *mbuf)
698 {
699 	struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
700 	struct tcphdr *th;
701 	uint16_t etype;
702 	int ehlen;
703 
704 	if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
705 		etype = ntohs(eh->evl_proto);
706 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
707 	} else {
708 		etype = ntohs(eh->evl_encap_proto);
709 		ehlen = ETHER_HDR_LEN;
710 	}
711 
712 	if (etype == ETHERTYPE_IP) {
713 		struct ip *ip;
714 		int iphlen;
715 
716 		PULLUP_HDR(mbuf, ehlen + sizeof(*ip));
717 		ip = mtodo(mbuf, ehlen);
718 		iphlen = ip->ip_hl << 2;
719 		mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
720 
721 		PULLUP_HDR(mbuf, ehlen + iphlen + sizeof(*th));
722 		th = mtodo(mbuf, ehlen + iphlen);
723 
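		/*
		 * For TSO the hardware rebuilds the IP length and checksum
		 * for every segment, so zero them here and seed the TCP
		 * checksum with only the pseudo-header sum.
		 */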
724 		ip->ip_len = 0;
725 		ip->ip_sum = 0;
726 		th->th_sum = in_pseudo(ip->ip_src.s_addr,
727 		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
728 	} else if (etype == ETHERTYPE_IPV6) {
729 		struct ip6_hdr *ip6;
730 
731 		PULLUP_HDR(mbuf, ehlen + sizeof(*ip6) + sizeof(*th));
732 		ip6 = mtodo(mbuf, ehlen);
733 		if (ip6->ip6_nxt != IPPROTO_TCP) {
734 			/* Something is really wrong; just return */
735 			mana_dbg(NULL, "TSO mbuf not TCP, freed.\n");
736 			m_freem(mbuf);
737 			return NULL;
738 		}
739 		mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
740 
741 		th = mtodo(mbuf, ehlen + sizeof(*ip6));
742 
743 		ip6->ip6_plen = 0;
744 		th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
745 	} else {
746 		/* CSUM_TSO is set but not IP protocol. */
747 		mana_warn(NULL, "TSO mbuf not right, freed.\n");
748 		m_freem(mbuf);
749 		return NULL;
750 	}
751 
752 	MANA_L3_PROTO(mbuf) = etype;
753 
754 	return (mbuf);
755 }
756 
757 /*
758  * If this function fails, the mbuf is freed.
759  */
760 static inline struct mbuf *
761 mana_mbuf_csum_check(struct mbuf *mbuf)
762 {
763 	struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
764 	struct mbuf *mbuf_next;
765 	uint16_t etype;
766 	int offset;
767 	int ehlen;
768 
769 	if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
770 		etype = ntohs(eh->evl_proto);
771 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
772 	} else {
773 		etype = ntohs(eh->evl_encap_proto);
774 		ehlen = ETHER_HDR_LEN;
775 	}
776 
777 	mbuf_next = m_getptr(mbuf, ehlen, &offset);
778 
779 	MANA_L4_PROTO(mbuf) = 0;
780 	if (etype == ETHERTYPE_IP) {
781 		const struct ip *ip;
782 		int iphlen;
783 
784 		ip = (struct ip *)(mtodo(mbuf_next, offset));
785 		iphlen = ip->ip_hl << 2;
786 		mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
787 
788 		MANA_L4_PROTO(mbuf) = ip->ip_p;
789 	} else if (etype == ETHERTYPE_IPV6) {
790 		const struct ip6_hdr *ip6;
791 
792 		ip6 = (struct ip6_hdr *)(mtodo(mbuf_next, offset));
793 		mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
794 
795 		MANA_L4_PROTO(mbuf) = ip6->ip6_nxt;
796 	} else {
797 		MANA_L4_PROTO(mbuf) = 0;
798 	}
799 
800 	MANA_L3_PROTO(mbuf) = etype;
801 
802 	return (mbuf);
803 }
804 
805 static int
806 mana_start_xmit(if_t ifp, struct mbuf *m)
807 {
808 	struct mana_port_context *apc = if_getsoftc(ifp);
809 	struct mana_txq *txq;
810 	int is_drbr_empty;
811 	uint16_t txq_id;
812 	int err;
813 
814 	if (unlikely((!apc->port_is_up) ||
815 	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
816 		return ENODEV;
817 
818 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
819 		m = mana_tso_fixup(m);
820 		if (unlikely(m == NULL)) {
821 			counter_enter();
822 			counter_u64_add_protected(apc->port_stats.tx_drops, 1);
823 			counter_exit();
824 			return EIO;
825 		}
826 	} else {
827 		m = mana_mbuf_csum_check(m);
828 		if (unlikely(m == NULL)) {
829 			counter_enter();
830 			counter_u64_add_protected(apc->port_stats.tx_drops, 1);
831 			counter_exit();
832 			return EIO;
833 		}
834 	}
835 
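	/*
	 * Select a TX queue: when the mbuf carries an RSS hash, use it to
	 * index the indirection table; otherwise fall back to a simple
	 * modulo on the flowid so traffic still spreads across queues.
	 */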
836 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
837 		uint32_t hash = m->m_pkthdr.flowid;
838 		txq_id = apc->indir_table[(hash) & MANA_INDIRECT_TABLE_MASK] %
839 		    apc->num_queues;
840 	} else {
841 		txq_id = m->m_pkthdr.flowid % apc->num_queues;
842 	}
843 
844 	if (apc->enable_tx_altq)
845 		txq_id = apc->tx_qp[txq_id].txq.alt_txq_idx;
846 
847 	txq = &apc->tx_qp[txq_id].txq;
848 
849 	is_drbr_empty = drbr_empty(ifp, txq->txq_br);
850 	err = drbr_enqueue(ifp, txq->txq_br, m);
851 	if (unlikely(err)) {
852 		mana_warn(NULL, "txq %u failed to enqueue: %d\n",
853 		    txq_id, err);
854 		taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
855 		return err;
856 	}
857 
858 	if (is_drbr_empty && mtx_trylock(&txq->txq_mtx)) {
859 		mana_xmit(txq);
860 		mtx_unlock(&txq->txq_mtx);
861 	} else {
862 		taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
863 	}
864 
865 	return 0;
866 }
867 
868 static void
869 mana_cleanup_port_context(struct mana_port_context *apc)
870 {
871 	bus_dma_tag_destroy(apc->tx_buf_tag);
872 	bus_dma_tag_destroy(apc->rx_buf_tag);
873 	apc->rx_buf_tag = NULL;
874 
875 	free(apc->rxqs, M_DEVBUF);
876 	apc->rxqs = NULL;
877 
878 	mana_free_counters((counter_u64_t *)&apc->port_stats,
879 	    sizeof(struct mana_port_stats));
880 }
881 
882 static int
883 mana_init_port_context(struct mana_port_context *apc)
884 {
885 	device_t dev = apc->ac->gdma_dev->gdma_context->dev;
886 	uint32_t tso_maxsize;
887 	int err;
888 
889 	tso_maxsize = MANA_TSO_MAX_SZ;
890 
891 	/* Create DMA tag for tx bufs */
892 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
893 	    1, 0,			/* alignment, boundary	*/
894 	    BUS_SPACE_MAXADDR,		/* lowaddr		*/
895 	    BUS_SPACE_MAXADDR,		/* highaddr		*/
896 	    NULL, NULL,			/* filter, filterarg	*/
897 	    tso_maxsize,		/* maxsize		*/
898 	    MAX_MBUF_FRAGS,		/* nsegments		*/
899 	    tso_maxsize,		/* maxsegsize		*/
900 	    0,				/* flags		*/
901 	    NULL, NULL,			/* lockfunc, lockfuncarg*/
902 	    &apc->tx_buf_tag);
903 	if (unlikely(err)) {
904 		device_printf(dev, "Failed to create TX DMA tag\n");
905 		return err;
906 	}
907 
908 	/* Create DMA tag for rx bufs */
909 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
910 	    64, 0,			/* alignment, boundary	*/
911 	    BUS_SPACE_MAXADDR,		/* lowaddr		*/
912 	    BUS_SPACE_MAXADDR,		/* highaddr		*/
913 	    NULL, NULL,			/* filter, filterarg	*/
914 	    MJUMPAGESIZE,		/* maxsize		*/
915 	    1,				/* nsegments		*/
916 	    MJUMPAGESIZE,		/* maxsegsize		*/
917 	    0,				/* flags		*/
918 	    NULL, NULL,			/* lockfunc, lockfuncarg*/
919 	    &apc->rx_buf_tag);
920 	if (unlikely(err)) {
921 		device_printf(dev, "Failed to create RX DMA tag\n");
922 		return err;
923 	}
924 
925 	apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
926 	    M_DEVBUF, M_WAITOK | M_ZERO);
927 
928 	return 0;
929 }
930 
931 static int
932 mana_send_request(struct mana_context *ac, void *in_buf,
933     uint32_t in_len, void *out_buf, uint32_t out_len)
934 {
935 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
936 	struct gdma_resp_hdr *resp = out_buf;
937 	struct gdma_req_hdr *req = in_buf;
938 	device_t dev = gc->dev;
939 	static atomic_t activity_id;
940 	int err;
941 
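	/*
	 * Tag the request with a unique activity id; the response must
	 * echo the same dev_id and activity_id or it is rejected below.
	 */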
942 	req->dev_id = gc->mana.dev_id;
943 	req->activity_id = atomic_inc_return(&activity_id);
944 
945 	mana_dbg(NULL, "activity_id = %u\n", activity_id);
946 
947 	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
948 	    out_buf);
949 	if (err || resp->status) {
950 		device_printf(dev, "Failed to send mana message: %d, 0x%x\n",
951 			err, resp->status);
952 		return err ? err : EPROTO;
953 	}
954 
955 	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
956 	    req->activity_id != resp->activity_id) {
957 		device_printf(dev,
958 		    "Unexpected mana message response: %x,%x,%x,%x\n",
959 		    req->dev_id.as_uint32, resp->dev_id.as_uint32,
960 		    req->activity_id, resp->activity_id);
961 		return EPROTO;
962 	}
963 
964 	return 0;
965 }
966 
967 static int
968 mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
969     const enum mana_command_code expected_code,
970     const uint32_t min_size)
971 {
972 	if (resp_hdr->response.msg_type != expected_code)
973 		return EPROTO;
974 
975 	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
976 		return EPROTO;
977 
978 	if (resp_hdr->response.msg_size < min_size)
979 		return EPROTO;
980 
981 	return 0;
982 }
983 
984 static int
985 mana_query_device_cfg(struct mana_context *ac, uint32_t proto_major_ver,
986     uint32_t proto_minor_ver, uint32_t proto_micro_ver,
987     uint16_t *max_num_vports)
988 {
989 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
990 	struct mana_query_device_cfg_resp resp = {};
991 	struct mana_query_device_cfg_req req = {};
992 	device_t dev = gc->dev;
993 	int err = 0;
994 
995 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
996 	    sizeof(req), sizeof(resp));
997 	req.proto_major_ver = proto_major_ver;
998 	req.proto_minor_ver = proto_minor_ver;
999 	req.proto_micro_ver = proto_micro_ver;
1000 
1001 	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
1002 	if (err) {
1003 		device_printf(dev, "Failed to query config: %d", err);
1004 		return err;
1005 	}
1006 
1007 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
1008 	    sizeof(resp));
1009 	if (err || resp.hdr.status) {
1010 		device_printf(dev, "Invalid query result: %d, 0x%x\n", err,
1011 		    resp.hdr.status);
1012 		if (!err)
1013 			err = EPROTO;
1014 		return err;
1015 	}
1016 
1017 	*max_num_vports = resp.max_num_vports;
1018 
1019 	mana_dbg(NULL, "mana max_num_vports from device = %d\n",
1020 	    *max_num_vports);
1021 
1022 	return 0;
1023 }
1024 
1025 static int
1026 mana_query_vport_cfg(struct mana_port_context *apc, uint32_t vport_index,
1027     uint32_t *max_sq, uint32_t *max_rq, uint32_t *num_indir_entry)
1028 {
1029 	struct mana_query_vport_cfg_resp resp = {};
1030 	struct mana_query_vport_cfg_req req = {};
1031 	int err;
1032 
1033 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
1034 	    sizeof(req), sizeof(resp));
1035 
1036 	req.vport_index = vport_index;
1037 
1038 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1039 	    sizeof(resp));
1040 	if (err)
1041 		return err;
1042 
1043 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
1044 	    sizeof(resp));
1045 	if (err)
1046 		return err;
1047 
1048 	if (resp.hdr.status)
1049 		return EPROTO;
1050 
1051 	*max_sq = resp.max_num_sq;
1052 	*max_rq = resp.max_num_rq;
1053 	*num_indir_entry = resp.num_indirection_ent;
1054 
1055 	apc->port_handle = resp.vport;
1056 	memcpy(apc->mac_addr, resp.mac_addr, ETHER_ADDR_LEN);
1057 
1058 	return 0;
1059 }
1060 
1061 void
1062 mana_uncfg_vport(struct mana_port_context *apc)
1063 {
1064 	apc->vport_use_count--;
1065 	if (apc->vport_use_count < 0) {
1066 		mana_err(NULL,
1067 		    "WARNING: vport_use_count less than 0: %d\n",
1068 		    apc->vport_use_count);
1069 	}
1070 }
1071 
1072 int
1073 mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
1074     uint32_t doorbell_pg_id)
1075 {
1076 	struct mana_config_vport_resp resp = {};
1077 	struct mana_config_vport_req req = {};
1078 	int err;
1079 
1080 	/* This function is used to program the Ethernet port in the hardware
1081 	 * table. It can be called from the Ethernet driver or the RDMA driver.
1082 	 *
1083 	 * For Ethernet usage, the hardware supports only one active user on a
1084 	 * physical port. The driver checks on the port usage before programming
1085 	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
1086 	 * device to kernel NET layer (Ethernet driver).
1087 	 *
1088 	 * Because the RDMA driver doesn't know in advance which QP type the
1089 	 * user will create, it exposes the device with all its ports. The user
1090 	 * may not be able to create a RAW QP on a port if this port is already
1091 	 * in use by the Ethernet driver from the kernel.
1092 	 *
1093 	 * This physical port limitation only applies to the RAW QP. For RC QP,
1094 	 * the hardware doesn't have this limitation. The user can create RC
1095 	 * QPs on a physical port up to the hardware limits independent of the
1096 	 * Ethernet usage on the same port.
1097 	 */
1098 	if (apc->vport_use_count > 0) {
1099 		return EBUSY;
1100 	}
1101 	apc->vport_use_count++;
1102 
1103 	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
1104 	    sizeof(req), sizeof(resp));
1105 	req.vport = apc->port_handle;
1106 	req.pdid = protection_dom_id;
1107 	req.doorbell_pageid = doorbell_pg_id;
1108 
1109 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1110 	    sizeof(resp));
1111 	if (err) {
1112 		if_printf(apc->ndev, "Failed to configure vPort: %d\n", err);
1113 		goto out;
1114 	}
1115 
1116 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1117 	    sizeof(resp));
1118 	if (err || resp.hdr.status) {
1119 		if_printf(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1120 		    err, resp.hdr.status);
1121 		if (!err)
1122 			err = EPROTO;
1123 
1124 		goto out;
1125 	}
1126 
1127 	apc->tx_shortform_allowed = resp.short_form_allowed;
1128 	apc->tx_vp_offset = resp.tx_vport_offset;
1129 
1130 	if_printf(apc->ndev, "Configured vPort %ju PD %u DB %u\n",
1131 	    apc->port_handle, protection_dom_id, doorbell_pg_id);
1132 
1133 out:
1134 	if (err)
1135 		mana_uncfg_vport(apc);
1136 
1137 	return err;
1138 }
1139 
1140 static int
1141 mana_cfg_vport_steering(struct mana_port_context *apc,
1142     enum TRI_STATE rx,
1143     bool update_default_rxobj, bool update_key,
1144     bool update_tab)
1145 {
1146 	uint16_t num_entries = MANA_INDIRECT_TABLE_SIZE;
1147 	struct mana_cfg_rx_steer_req *req = NULL;
1148 	struct mana_cfg_rx_steer_resp resp = {};
1149 	if_t ndev = apc->ndev;
1150 	mana_handle_t *req_indir_tab;
1151 	uint32_t req_buf_size;
1152 	int err;
1153 
1154 	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
1155 	req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
1156 
1157 	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1158 	    sizeof(resp));
1159 
1160 	req->vport = apc->port_handle;
1161 	req->num_indir_entries = num_entries;
1162 	req->indir_tab_offset = sizeof(*req);
1163 	req->rx_enable = rx;
1164 	req->rss_enable = apc->rss_state;
1165 	req->update_default_rxobj = update_default_rxobj;
1166 	req->update_hashkey = update_key;
1167 	req->update_indir_tab = update_tab;
1168 	req->default_rxobj = apc->default_rxobj;
1169 
1170 	if (update_key)
1171 		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1172 
1173 	if (update_tab) {
1174 		req_indir_tab = (mana_handle_t *)(req + 1);
1175 		memcpy(req_indir_tab, apc->rxobj_table,
1176 		       req->num_indir_entries * sizeof(mana_handle_t));
1177 	}
1178 
1179 	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1180 	    sizeof(resp));
1181 	if (err) {
1182 		if_printf(ndev, "Failed to configure vPort RX: %d\n", err);
1183 		goto out;
1184 	}
1185 
1186 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1187 	    sizeof(resp));
1188 	if (err) {
1189 		if_printf(ndev, "vPort RX configuration failed: %d\n", err);
1190 		goto out;
1191 	}
1192 
1193 	if (resp.hdr.status) {
1194 		if_printf(ndev, "vPort RX configuration failed: 0x%x\n",
1195 		    resp.hdr.status);
1196 		err = EPROTO;
1197 	}
1198 
1199 	if_printf(ndev, "Configured steering vPort %ju entries %u\n",
1200 	    apc->port_handle, num_entries);
1201 
1202 out:
1203 	free(req, M_DEVBUF);
1204 	return err;
1205 }
1206 
1207 int
1208 mana_create_wq_obj(struct mana_port_context *apc,
1209     mana_handle_t vport,
1210     uint32_t wq_type, struct mana_obj_spec *wq_spec,
1211     struct mana_obj_spec *cq_spec,
1212     mana_handle_t *wq_obj)
1213 {
1214 	struct mana_create_wqobj_resp resp = {};
1215 	struct mana_create_wqobj_req req = {};
1216 	if_t ndev = apc->ndev;
1217 	int err;
1218 
1219 	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1220 	    sizeof(req), sizeof(resp));
1221 	req.vport = vport;
1222 	req.wq_type = wq_type;
1223 	req.wq_gdma_region = wq_spec->gdma_region;
1224 	req.cq_gdma_region = cq_spec->gdma_region;
1225 	req.wq_size = wq_spec->queue_size;
1226 	req.cq_size = cq_spec->queue_size;
1227 	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1228 	req.cq_parent_qid = cq_spec->attached_eq;
1229 
1230 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1231 	    sizeof(resp));
1232 	if (err) {
1233 		if_printf(ndev, "Failed to create WQ object: %d\n", err);
1234 		goto out;
1235 	}
1236 
1237 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1238 	    sizeof(resp));
1239 	if (err || resp.hdr.status) {
1240 		if_printf(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1241 		    resp.hdr.status);
1242 		if (!err)
1243 			err = EPROTO;
1244 		goto out;
1245 	}
1246 
1247 	if (resp.wq_obj == INVALID_MANA_HANDLE) {
1248 		if_printf(ndev, "Got an invalid WQ object handle\n");
1249 		err = EPROTO;
1250 		goto out;
1251 	}
1252 
1253 	*wq_obj = resp.wq_obj;
1254 	wq_spec->queue_index = resp.wq_id;
1255 	cq_spec->queue_index = resp.cq_id;
1256 
1257 	return 0;
1258 out:
1259 	return err;
1260 }
1261 
1262 void
1263 mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
1264     mana_handle_t wq_obj)
1265 {
1266 	struct mana_destroy_wqobj_resp resp = {};
1267 	struct mana_destroy_wqobj_req req = {};
1268 	if_t ndev = apc->ndev;
1269 	int err;
1270 
1271 	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1272 	    sizeof(req), sizeof(resp));
1273 	req.wq_type = wq_type;
1274 	req.wq_obj_handle = wq_obj;
1275 
1276 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1277 	    sizeof(resp));
1278 	if (err) {
1279 		if_printf(ndev, "Failed to destroy WQ object: %d\n", err);
1280 		return;
1281 	}
1282 
1283 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1284 	    sizeof(resp));
1285 	if (err || resp.hdr.status)
1286 		if_printf(ndev, "Failed to destroy WQ object: %d, 0x%x\n",
1287 		    err, resp.hdr.status);
1288 }
1289 
1290 static void
1291 mana_destroy_eq(struct mana_context *ac)
1292 {
1293 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
1294 	struct gdma_queue *eq;
1295 	int i;
1296 
1297 	if (!ac->eqs)
1298 		return;
1299 
1300 	for (i = 0; i < gc->max_num_queues; i++) {
1301 		eq = ac->eqs[i].eq;
1302 		if (!eq)
1303 			continue;
1304 
1305 		mana_gd_destroy_queue(gc, eq);
1306 	}
1307 
1308 	free(ac->eqs, M_DEVBUF);
1309 	ac->eqs = NULL;
1310 }
1311 
1312 static int
1313 mana_create_eq(struct mana_context *ac)
1314 {
1315 	struct gdma_dev *gd = ac->gdma_dev;
1316 	struct gdma_context *gc = gd->gdma_context;
1317 	struct gdma_queue_spec spec = {};
1318 	int err;
1319 	int i;
1320 
1321 	ac->eqs = mallocarray(gc->max_num_queues, sizeof(struct mana_eq),
1322 	    M_DEVBUF, M_WAITOK | M_ZERO);
1323 
1324 	spec.type = GDMA_EQ;
1325 	spec.monitor_avl_buf = false;
1326 	spec.queue_size = EQ_SIZE;
1327 	spec.eq.callback = NULL;
1328 	spec.eq.context = ac->eqs;
1329 	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1330 
1331 	for (i = 0; i < gc->max_num_queues; i++) {
1332 		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
1333 		if (err)
1334 			goto out;
1335 	}
1336 
1337 	return 0;
1338 out:
1339 	mana_destroy_eq(ac);
1340 	return err;
1341 }
1342 
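/*
 * Post a fence request on the RQ and wait (up to 10 seconds) for the
 * matching CQE_RX_OBJECT_FENCE completion, which indicates that all
 * previously posted receive WQEs on this queue have completed.
 */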
1343 static int
1344 mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1345 {
1346 	struct mana_fence_rq_resp resp = {};
1347 	struct mana_fence_rq_req req = {};
1348 	int err;
1349 
1350 	init_completion(&rxq->fence_event);
1351 
1352 	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
1353 	    sizeof(req), sizeof(resp));
1354 	req.wq_obj_handle = rxq->rxobj;
1355 
1356 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1357 	    sizeof(resp));
1358 	if (err) {
1359 		if_printf(apc->ndev, "Failed to fence RQ %u: %d\n",
1360 		    rxq->rxq_idx, err);
1361 		return err;
1362 	}
1363 
1364 	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
1365 	if (err || resp.hdr.status) {
1366 		if_printf(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1367 		    rxq->rxq_idx, err, resp.hdr.status);
1368 		if (!err)
1369 			err = EPROTO;
1370 
1371 		return err;
1372 	}
1373 
1374 	if (wait_for_completion_timeout(&rxq->fence_event, 10 * hz)) {
1375 		if_printf(apc->ndev, "Failed to fence RQ %u: timed out\n",
1376 		    rxq->rxq_idx);
1377 		return ETIMEDOUT;
1378 	}
1379 
1380 	return 0;
1381 }
1382 
1383 static void
1384 mana_fence_rqs(struct mana_port_context *apc)
1385 {
1386 	unsigned int rxq_idx;
1387 	struct mana_rxq *rxq;
1388 	int err;
1389 
1390 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1391 		rxq = apc->rxqs[rxq_idx];
1392 		err = mana_fence_rq(apc, rxq);
1393 
1394 		/* In case of any error, use sleep instead. */
1395 		if (err)
1396 			gdma_msleep(100);
1397 	}
1398 }
1399 
1400 static int
1401 mana_move_wq_tail(struct gdma_queue *wq, uint32_t num_units)
1402 {
1403 	uint32_t used_space_old;
1404 	uint32_t used_space_new;
1405 
1406 	used_space_old = wq->head - wq->tail;
1407 	used_space_new = wq->head - (wq->tail + num_units);
1408 
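	/*
	 * With unsigned arithmetic the used space can only shrink as the
	 * tail advances; if it appears to grow, the tail would move past
	 * the head, which indicates an accounting error.
	 */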
1409 	if (used_space_new > used_space_old) {
1410 		mana_err(NULL,
1411 		    "WARNING: new used space %u greater than old one %u\n",
1412 		    used_space_new, used_space_old);
1413 		return ERANGE;
1414 	}
1415 
1416 	wq->tail += num_units;
1417 	return 0;
1418 }
1419 
1420 static void
1421 mana_poll_tx_cq(struct mana_cq *cq)
1422 {
1423 	struct gdma_comp *completions = cq->gdma_comp_buf;
1424 	struct gdma_posted_wqe_info *wqe_info;
1425 	struct mana_send_buf_info *tx_info;
1426 	unsigned int pkt_transmitted = 0;
1427 	unsigned int wqe_unit_cnt = 0;
1428 	struct mana_txq *txq = cq->txq;
1429 	struct mana_port_context *apc;
1430 	unsigned int tx_queue_size;
1431 	uint16_t next_to_complete;
1432 	if_t ndev;
1433 	int comp_read;
1434 	int txq_idx = txq->idx;
1435 	int i;
1436 	int sa_drop = 0;
1437 
1438 	struct gdma_queue *gdma_wq;
1439 	unsigned int avail_space;
1440 	bool txq_full = false;
1441 
1442 	ndev = txq->ndev;
1443 	apc = if_getsoftc(ndev);
1444 	tx_queue_size = apc->tx_queue_size;
1445 
1446 	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1447 	    CQE_POLLING_BUFFER);
1448 
1449 	if (comp_read < 1)
1450 		return;
1451 
1452 	next_to_complete = txq->next_to_complete;
1453 
1454 	for (i = 0; i < comp_read; i++) {
1455 		struct mana_tx_comp_oob *cqe_oob;
1456 
1457 		if (!completions[i].is_sq) {
1458 			mana_err(NULL, "WARNING: Not for SQ\n");
1459 			return;
1460 		}
1461 
1462 		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1463 		if (cqe_oob->cqe_hdr.client_type !=
1464 				 MANA_CQE_COMPLETION) {
1465 			mana_err(NULL,
1466 			    "WARNING: Invalid CQE client type %u\n",
1467 			    cqe_oob->cqe_hdr.client_type);
1468 			return;
1469 		}
1470 
1471 		switch (cqe_oob->cqe_hdr.cqe_type) {
1472 		case CQE_TX_OKAY:
1473 			break;
1474 
1475 		case CQE_TX_SA_DROP:
1476 		case CQE_TX_MTU_DROP:
1477 		case CQE_TX_INVALID_OOB:
1478 		case CQE_TX_INVALID_ETH_TYPE:
1479 		case CQE_TX_HDR_PROCESSING_ERROR:
1480 		case CQE_TX_VF_DISABLED:
1481 		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1482 		case CQE_TX_VPORT_DISABLED:
1483 		case CQE_TX_VLAN_TAGGING_VIOLATION:
1484 			sa_drop++;
1485 			mana_dbg(NULL,
1486 			    "TX: txq %d CQE error %d, ntc = %d, "
1487 			    "pending sends = %d: err ignored.\n",
1488 			    txq_idx, cqe_oob->cqe_hdr.cqe_type,
1489 			    next_to_complete, txq->pending_sends);
1490 			counter_u64_add(txq->stats.cqe_err, 1);
1491 			break;
1492 
1493 		default:
1494 			/* If the CQE type is unknown, log a debug msg,
1495 			 * and still free the mbuf, etc.
1496 			 */
1497 			mana_dbg(NULL,
1498 			    "ERROR: TX: Unknown CQE type %d\n",
1499 			    cqe_oob->cqe_hdr.cqe_type);
1500 			counter_u64_add(txq->stats.cqe_unknown_type, 1);
1501 			break;
1502 		}
1503 		if (txq->gdma_txq_id != completions[i].wq_num) {
1504 			mana_dbg(NULL,
1505 			    "txq gdma id does not match completion wq num: "
1506 			    "%d != %d\n",
1507 			    txq->gdma_txq_id, completions[i].wq_num);
1508 			break;
1509 		}
1510 
1511 		tx_info = &txq->tx_buf_info[next_to_complete];
1512 		if (!tx_info->mbuf) {
1513 			mana_err(NULL,
1514 			    "WARNING: txq %d Empty mbuf on tx_info: %u, "
1515 			    "ntu = %u, pending_sends = %d, "
1516 			    "transmitted = %d, sa_drop = %d, i = %d, comp_read = %d\n",
1517 			    txq_idx, next_to_complete, txq->next_to_use,
1518 			    txq->pending_sends, pkt_transmitted, sa_drop,
1519 			    i, comp_read);
1520 			break;
1521 		}
1522 
1523 		wqe_info = &tx_info->wqe_inf;
1524 		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1525 
1526 		mana_tx_unmap_mbuf(apc, tx_info);
1527 		mb();
1528 
1529 		next_to_complete =
1530 		    (next_to_complete + 1) % tx_queue_size;
1531 
1532 		pkt_transmitted++;
1533 	}
1534 
1535 	txq->next_to_complete = next_to_complete;
1536 
1537 	if (wqe_unit_cnt == 0) {
1538 		mana_err(NULL,
1539 		    "WARNING: TX ring not proceeding!\n");
1540 		return;
1541 	}
1542 
1543 	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1544 
1545 	/* Ensure tail updated before checking q stop */
1546 	wmb();
1547 
1548 	gdma_wq = txq->gdma_sq;
1549 	avail_space = mana_gd_wq_avail_space(gdma_wq);
1550 
1551 
1552 	if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL) {
1553 		txq_full = true;
1554 	}
1555 
1556 	/* Ensure checking txq_full before apc->port_is_up. */
1557 	rmb();
1558 
1559 	if (txq_full && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1560 		/* Grab the txq lock and re-test */
1561 		mtx_lock(&txq->txq_mtx);
1562 		avail_space = mana_gd_wq_avail_space(gdma_wq);
1563 
1564 		if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL &&
1565 		    apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1566 			/* Clear the Q full flag */
1567 			if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING,
1568 			    IFF_DRV_OACTIVE);
1569 			counter_u64_add(txq->stats.wakeup, 1);
1570 			if (txq->alt_txq_idx != txq->idx) {
1571 				uint64_t stops = counter_u64_fetch(txq->stats.stop);
1572 				uint64_t wakeups = counter_u64_fetch(txq->stats.wakeup);
1573 				/* Reset alt_txq_idx back if it is not overloaded */
1574 				if (stops < wakeups) {
1575 					txq->alt_txq_idx = txq->idx;
1576 					counter_u64_add(txq->stats.alt_reset, 1);
1577 				}
1578 			}
1579 			rmb();
1580 			/* Schedule a tx enqueue task */
1581 			taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
1582 		}
1583 		mtx_unlock(&txq->txq_mtx);
1584 	}
1585 
1586 	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1587 		mana_err(NULL,
1588 		    "WARNING: TX %d pending_sends error: %d\n",
1589 		    txq->idx, txq->pending_sends);
1590 
1591 	cq->work_done = pkt_transmitted;
1592 }
1593 
1594 static void
1595 mana_post_pkt_rxq(struct mana_rxq *rxq)
1596 {
1597 	struct mana_recv_buf_oob *recv_buf_oob;
1598 	uint32_t curr_index;
1599 	int err;
1600 
1601 	curr_index = rxq->buf_index++;
1602 	if (rxq->buf_index == rxq->num_rx_buf)
1603 		rxq->buf_index = 0;
1604 
1605 	recv_buf_oob = &rxq->rx_oobs[curr_index];
1606 
1607 	err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1608 	    &recv_buf_oob->wqe_inf);
1609 	if (err) {
1610 		mana_err(NULL, "WARNING: rxq %u post pkt err %d\n",
1611 		    rxq->rxq_idx, err);
1612 		return;
1613 	}
1614 
1615 	if (recv_buf_oob->wqe_inf.wqe_size_in_bu != 1) {
1616 		mana_err(NULL, "WARNING: rxq %u wqe_size_in_bu %u\n",
1617 		    rxq->rxq_idx, recv_buf_oob->wqe_inf.wqe_size_in_bu);
1618 	}
1619 }
1620 
1621 static void
1622 mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe,
1623     struct mana_rxq *rxq)
1624 {
1625 	struct mana_stats *rx_stats = &rxq->stats;
1626 	if_t ndev = rxq->ndev;
1627 	uint32_t pkt_len = cqe->ppi[0].pkt_len;
1628 	uint16_t rxq_idx = rxq->rxq_idx;
1629 	struct mana_port_context *apc;
1630 	bool do_lro = false;
1631 	bool do_if_input;
1632 
1633 	apc = if_getsoftc(ndev);
1634 	rxq->rx_cq.work_done++;
1635 
1636 	if (!mbuf) {
1637 		return;
1638 	}
1639 
1640 	mbuf->m_flags |= M_PKTHDR;
1641 	mbuf->m_pkthdr.len = pkt_len;
1642 	mbuf->m_len = pkt_len;
1643 	mbuf->m_pkthdr.rcvif = ndev;
1644 
1645 	if ((if_getcapenable(ndev) & IFCAP_RXCSUM ||
1646 	    if_getcapenable(ndev) & IFCAP_RXCSUM_IPV6) &&
1647 	    (cqe->rx_iphdr_csum_succeed)) {
1648 		mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1649 		mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1650 		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) {
1651 			mbuf->m_pkthdr.csum_flags |=
1652 			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1653 			mbuf->m_pkthdr.csum_data = 0xffff;
1654 
1655 			if (cqe->rx_tcp_csum_succeed)
1656 				do_lro = true;
1657 		}
1658 	}
1659 
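	/*
	 * Propagate the hardware-computed RSS hash so the stack keeps
	 * per-flow affinity: translate the NDIS hash type from the CQE
	 * into the matching mbuf M_HASHTYPE_RSS_* value, or mark the
	 * hash as opaque/none when no usable type is reported.
	 */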
1660 	if (cqe->rx_hashtype != 0) {
1661 		mbuf->m_pkthdr.flowid = cqe->ppi[0].pkt_hash;
1662 
1663 		uint16_t hashtype = cqe->rx_hashtype;
1664 		if (hashtype & NDIS_HASH_IPV4_MASK) {
1665 			hashtype &= NDIS_HASH_IPV4_MASK;
1666 			switch (hashtype) {
1667 			case NDIS_HASH_TCP_IPV4:
1668 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
1669 				break;
1670 			case NDIS_HASH_UDP_IPV4:
1671 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
1672 				break;
1673 			default:
1674 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
1675 			}
1676 		} else if (hashtype & NDIS_HASH_IPV6_MASK) {
1677 			hashtype &= NDIS_HASH_IPV6_MASK;
1678 			switch (hashtype) {
1679 			case NDIS_HASH_TCP_IPV6:
1680 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
1681 				break;
1682 			case NDIS_HASH_TCP_IPV6_EX:
1683 				M_HASHTYPE_SET(mbuf,
1684 				    M_HASHTYPE_RSS_TCP_IPV6_EX);
1685 				break;
1686 			case NDIS_HASH_UDP_IPV6:
1687 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
1688 				break;
1689 			case NDIS_HASH_UDP_IPV6_EX:
1690 				M_HASHTYPE_SET(mbuf,
1691 				    M_HASHTYPE_RSS_UDP_IPV6_EX);
1692 				break;
1693 			default:
1694 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
1695 			}
1696 		} else {
1697 			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
1698 		}
1699 	} else {
1700 		mbuf->m_pkthdr.flowid = rxq_idx;
1701 		M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
1702 	}
1703 
1704 	do_if_input = true;
1705 	if ((if_getcapenable(ndev) & IFCAP_LRO) && do_lro) {
1706 		rxq->lro_tried++;
1707 		if (rxq->lro.lro_cnt != 0 &&
1708 		    tcp_lro_rx(&rxq->lro, mbuf, 0) == 0)
1709 			do_if_input = false;
1710 		else
1711 			rxq->lro_failed++;
1712 	}
1713 	if (do_if_input) {
1714 		if_input(ndev, mbuf);
1715 	}
1716 
1717 	counter_enter();
1718 	counter_u64_add_protected(rx_stats->packets, 1);
1719 	counter_u64_add_protected(apc->port_stats.rx_packets, 1);
1720 	counter_u64_add_protected(rx_stats->bytes, pkt_len);
1721 	counter_u64_add_protected(apc->port_stats.rx_bytes, pkt_len);
1722 	counter_exit();
1723 }
1724 
1725 static void
1726 mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1727     struct gdma_comp *cqe)
1728 {
1729 	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1730 	struct mana_recv_buf_oob *rxbuf_oob;
1731 	if_t ndev = rxq->ndev;
1732 	struct mana_port_context *apc;
1733 	struct mbuf *old_mbuf;
1734 	uint32_t curr, pktlen;
1735 	int err;
1736 
1737 	switch (oob->cqe_hdr.cqe_type) {
1738 	case CQE_RX_OKAY:
1739 		break;
1740 
1741 	case CQE_RX_TRUNCATED:
1742 		apc = if_getsoftc(ndev);
1743 		counter_u64_add(apc->port_stats.rx_drops, 1);
1744 		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1745 		if_printf(ndev, "Dropped a truncated packet\n");
1746 		goto drop;
1747 
1748 	case CQE_RX_COALESCED_4:
1749 		if_printf(ndev, "RX coalescing is unsupported\n");
1750 		return;
1751 
1752 	case CQE_RX_OBJECT_FENCE:
1753 		complete(&rxq->fence_event);
1754 		return;
1755 
1756 	default:
1757 		if_printf(ndev, "Unknown RX CQE type = %d\n",
1758 		    oob->cqe_hdr.cqe_type);
1759 		return;
1760 	}
1761 
1762 	if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
1763 		return;
1764 
1765 	pktlen = oob->ppi[0].pkt_len;
1766 
1767 	if (pktlen == 0) {
1768 		/* Data packets should never have a packet length of zero */
1769 		if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%jx\n",
1770 		    rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1771 		return;
1772 	}
1773 
1774 	curr = rxq->buf_index;
1775 	rxbuf_oob = &rxq->rx_oobs[curr];
1776 	if (rxbuf_oob->wqe_inf.wqe_size_in_bu != 1) {
1777 		mana_err(NULL, "WARNING: RX completion with incorrect "
1778 		    "WQE size %u\n",
1779 		    rxbuf_oob->wqe_inf.wqe_size_in_bu);
1780 	}
1781 
1782 	apc = if_getsoftc(ndev);
1783 
1784 	old_mbuf = rxbuf_oob->mbuf;
1785 
1786 	/* Unload DMA map for the old mbuf */
1787 	mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);
1788 
1789 	/* Load a new mbuf to replace the old one */
1790 	err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
1791 	if (err) {
1792 		mana_dbg(NULL,
1793 		    "failed to load rx mbuf, err = %d, packet dropped.\n",
1794 		    err);
1795 		counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
1796 		/*
1797 		 * Failed to load new mbuf, rxbuf_oob->mbuf is still
1798 		 * pointing to the old one. Drop the packet.
1799 		 */
1800 		 old_mbuf = NULL;
1801 		 /* Reload the existing mbuf */
1802 		 mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
1803 	}
1804 
1805 	mana_rx_mbuf(old_mbuf, oob, rxq);
1806 
1807 drop:
1808 	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1809 
1810 	mana_post_pkt_rxq(rxq);
1811 }
1812 
1813 static void
1814 mana_poll_rx_cq(struct mana_cq *cq)
1815 {
1816 	struct gdma_comp *comp = cq->gdma_comp_buf;
1817 	int comp_read, i;
1818 
1819 	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1820 	KASSERT(comp_read <= CQE_POLLING_BUFFER,
1821 	    ("comp_read %d greater than buf size %d",
1822 	    comp_read, CQE_POLLING_BUFFER));
1823 
1824 	for (i = 0; i < comp_read; i++) {
1825 		if (comp[i].is_sq == true) {
1826 			mana_err(NULL,
1827 			    "WARNING: CQE not for receive queue\n");
1828 			return;
1829 		}
1830 
1831 		/* verify recv cqe references the right rxq */
1832 		if (comp[i].wq_num != cq->rxq->gdma_id) {
1833 			mana_err(NULL,
1834 			    "WARNING: Received CQE %d not for "
1835 			    "this receive queue %d\n",
1836 			    comp[i].wq_num, cq->rxq->gdma_id);
1837 			return;
1838 		}
1839 
1840 		mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
1841 	}
1842 
1843 	if (comp_read > 0) {
1844 		struct gdma_context *gc =
1845 		    cq->rxq->gdma_rq->gdma_dev->gdma_context;
1846 
1847 		mana_gd_wq_ring_doorbell(gc, cq->rxq->gdma_rq);
1848 	}
1849 
1850 	tcp_lro_flush_all(&cq->rxq->lro);
1851 }
1852 
1853 static void
1854 mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1855 {
1856 	struct mana_cq *cq = context;
1857 	uint8_t arm_bit;
1858 
1859 	KASSERT(cq->gdma_cq == gdma_queue,
1860 	    ("cq do not match %p, %p", cq->gdma_cq, gdma_queue));
1861 
1862 	if (cq->type == MANA_CQ_TYPE_RX) {
1863 		mana_poll_rx_cq(cq);
1864 	} else {
1865 		mana_poll_tx_cq(cq);
1866 	}
1867 
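	/*
	 * Re-arm the CQ only if the completed work stayed under the
	 * budget (and ringing the doorbell is allowed); otherwise leave
	 * it un-armed so the polling task keeps draining it.
	 */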
1868 	if (cq->work_done < cq->budget && cq->do_not_ring_db == false)
1869 		arm_bit = SET_ARM_BIT;
1870 	else
1871 		arm_bit = 0;
1872 
1873 	mana_gd_ring_cq(gdma_queue, arm_bit);
1874 }
1875 
1876 #define MANA_POLL_BUDGET	256
1877 #define MANA_RX_BUDGET		8
1878 #define MANA_TX_BUDGET		8
1879 
1880 static void
1881 mana_poll(void *arg, int pending)
1882 {
1883 	struct mana_cq *cq = arg;
1884 	int i;
1885 
1886 	cq->work_done = 0;
1887 	if (cq->type == MANA_CQ_TYPE_RX) {
1888 		cq->budget = MANA_RX_BUDGET;
1889 	} else {
1890 		cq->budget = MANA_TX_BUDGET;
1891 	}
1892 
1893 	for (i = 0; i < MANA_POLL_BUDGET; i++) {
1894 		/*
1895 		 * If this is the last loop, set the budget big enough
1896 		 * so it will arm the CQ anyway.
1897 		 */
1898 		if (i == (MANA_POLL_BUDGET - 1))
1899 			cq->budget = CQE_POLLING_BUFFER + 1;
1900 
1901 		mana_cq_handler(cq, cq->gdma_cq);
1902 
1903 		if (cq->work_done < cq->budget)
1904 			break;
1905 
1906 		cq->work_done = 0;
1907 	}
1908 }
1909 
1910 static void
1911 mana_schedule_task(void *arg, struct gdma_queue *gdma_queue)
1912 {
1913 	struct mana_cq *cq = arg;
1914 
1915 	taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
1916 }
1917 
1918 static void
1919 mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1920 {
1921 	struct gdma_dev *gd = apc->ac->gdma_dev;
1922 
1923 	if (!cq->gdma_cq)
1924 		return;
1925 
1926 	/* Drain cleanup taskqueue */
1927 	if (cq->cleanup_tq) {
1928 		while (taskqueue_cancel(cq->cleanup_tq,
1929 		    &cq->cleanup_task, NULL)) {
1930 			taskqueue_drain(cq->cleanup_tq,
1931 			    &cq->cleanup_task);
1932 		}
1933 
1934 		taskqueue_free(cq->cleanup_tq);
1935 	}
1936 
1937 	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1938 }
1939 
1940 static void
1941 mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1942 {
1943 	struct gdma_dev *gd = apc->ac->gdma_dev;
1944 	struct mana_send_buf_info *txbuf_info;
1945 	uint32_t pending_sends;
1946 	int i;
1947 
1948 	if (!txq->gdma_sq)
1949 		return;
1950 
1951 	if ((pending_sends = atomic_read(&txq->pending_sends)) > 0) {
1952 		mana_err(NULL,
1953 		    "WARNING: txq pending sends not zero: %u\n",
1954 		    pending_sends);
1955 	}
1956 
1957 	if (txq->next_to_use != txq->next_to_complete) {
1958 		mana_err(NULL,
1959 		    "WARNING: txq buf not completed, "
1960 		    "next use %u, next complete %u\n",
1961 		    txq->next_to_use, txq->next_to_complete);
1962 	}
1963 
1964 	/* Flush buf ring. Grab txq mtx lock */
1965 	if (txq->txq_br) {
1966 		mtx_lock(&txq->txq_mtx);
1967 		drbr_flush(apc->ndev, txq->txq_br);
1968 		mtx_unlock(&txq->txq_mtx);
1969 		buf_ring_free(txq->txq_br, M_DEVBUF);
1970 	}
1971 
1972 	/* Drain taskqueue */
1973 	if (txq->enqueue_tq) {
1974 		while (taskqueue_cancel(txq->enqueue_tq,
1975 		    &txq->enqueue_task, NULL)) {
1976 			taskqueue_drain(txq->enqueue_tq,
1977 			    &txq->enqueue_task);
1978 		}
1979 
1980 		taskqueue_free(txq->enqueue_tq);
1981 	}
1982 
1983 	if (txq->tx_buf_info) {
1984 		/* Free all mbufs which are still in-flight */
1985 		for (i = 0; i < apc->tx_queue_size; i++) {
1986 			txbuf_info = &txq->tx_buf_info[i];
1987 			if (txbuf_info->mbuf) {
1988 				mana_tx_unmap_mbuf(apc, txbuf_info);
1989 			}
1990 		}
1991 
1992 		free(txq->tx_buf_info, M_DEVBUF);
1993 	}
1994 
1995 	mana_free_counters((counter_u64_t *)&txq->stats,
1996 	    sizeof(txq->stats));
1997 
1998 	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1999 
2000 	mtx_destroy(&txq->txq_mtx);
2001 }
2002 
2003 static void
2004 mana_destroy_txq(struct mana_port_context *apc)
2005 {
2006 	int i;
2007 
2008 	if (!apc->tx_qp)
2009 		return;
2010 
2011 	for (i = 0; i < apc->num_queues; i++) {
2012 		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
2013 
2014 		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
2015 
2016 		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
2017 	}
2018 
2019 	free(apc->tx_qp, M_DEVBUF);
2020 	apc->tx_qp = NULL;
2021 }
2022 
2023 static int
2024 mana_create_txq(struct mana_port_context *apc, if_t net)
2025 {
2026 	struct mana_context *ac = apc->ac;
2027 	struct gdma_dev *gd = ac->gdma_dev;
2028 	struct mana_obj_spec wq_spec;
2029 	struct mana_obj_spec cq_spec;
2030 	struct gdma_queue_spec spec;
2031 	struct gdma_context *gc;
2032 	struct mana_txq *txq;
2033 	struct mana_cq *cq;
2034 	uint32_t txq_size;
2035 	uint32_t cq_size;
2036 	int err;
2037 	int i;
2038 
2039 	apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
2040 	    M_DEVBUF, M_WAITOK | M_ZERO);
2041 
2042 	/*  The minimum size of a WQE is 32 bytes, hence
2043 	 *  apc->tx_queue_size represents the maximum number of WQEs
2044 	 *  the SQ can store. This value is then used to size the other
2045 	 *  queues to prevent overflow.
2046 	 *  Also note that txq_size is always page aligned: the minimum
2047 	 *  value of apc->tx_queue_size is 128, which makes txq_size
2048 	 *  128 * 32 = 4096, and all larger allowed values of
2049 	 *  apc->tx_queue_size are powers of two.
2050 	 */
2051 	txq_size = apc->tx_queue_size * 32;
2052 	KASSERT(IS_ALIGNED(txq_size, PAGE_SIZE),
2053 	    ("txq size not page aligned"));
2054 
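	/*
	 * Size the completion queue with one COMP_ENTRY_SIZE entry per
	 * possible WQE so it cannot overflow, then round up to a page.
	 */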
2055 	cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;
2056 	cq_size = ALIGN(cq_size, PAGE_SIZE);
2057 
2058 	gc = gd->gdma_context;
2059 
2060 	for (i = 0; i < apc->num_queues; i++) {
2061 		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
2062 
2063 		/* Create SQ */
2064 		txq = &apc->tx_qp[i].txq;
2065 
2066 		txq->ndev = net;
2067 		txq->vp_offset = apc->tx_vp_offset;
2068 		txq->idx = i;
2069 		txq->alt_txq_idx = i;
2070 
2071 		memset(&spec, 0, sizeof(spec));
2072 		spec.type = GDMA_SQ;
2073 		spec.monitor_avl_buf = true;
2074 		spec.queue_size = txq_size;
2075 		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
2076 		if (err)
2077 			goto out;
2078 
2079 		/* Create SQ's CQ */
2080 		cq = &apc->tx_qp[i].tx_cq;
2081 		cq->type = MANA_CQ_TYPE_TX;
2082 
2083 		cq->txq = txq;
2084 
2085 		memset(&spec, 0, sizeof(spec));
2086 		spec.type = GDMA_CQ;
2087 		spec.monitor_avl_buf = false;
2088 		spec.queue_size = cq_size;
2089 		spec.cq.callback = mana_schedule_task;
2090 		spec.cq.parent_eq = ac->eqs[i].eq;
2091 		spec.cq.context = cq;
2092 		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2093 		if (err)
2094 			goto out;
2095 
2096 		memset(&wq_spec, 0, sizeof(wq_spec));
2097 		memset(&cq_spec, 0, sizeof(cq_spec));
2098 
2099 		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
2100 		wq_spec.queue_size = txq->gdma_sq->queue_size;
2101 
2102 		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2103 		cq_spec.queue_size = cq->gdma_cq->queue_size;
2104 		cq_spec.modr_ctx_id = 0;
2105 		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2106 
2107 		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
2108 		    &wq_spec, &cq_spec, &apc->tx_qp[i].tx_object);
2109 
2110 		if (err)
2111 			goto out;
2112 
2113 		txq->gdma_sq->id = wq_spec.queue_index;
2114 		cq->gdma_cq->id = cq_spec.queue_index;
2115 
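		/*
		 * The DMA regions are now owned by the WQ object created
		 * above; invalidate the local handles so queue teardown
		 * does not try to release them a second time.
		 */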
2116 		txq->gdma_sq->mem_info.dma_region_handle =
2117 		    GDMA_INVALID_DMA_REGION;
2118 		cq->gdma_cq->mem_info.dma_region_handle =
2119 		    GDMA_INVALID_DMA_REGION;
2120 
2121 		txq->gdma_txq_id = txq->gdma_sq->id;
2122 
2123 		cq->gdma_id = cq->gdma_cq->id;
2124 
2125 		mana_dbg(NULL,
2126 		    "txq %d, txq gdma id %d, txq cq gdma id %d\n",
2127 		    i, txq->gdma_txq_id, cq->gdma_id);
2128 
2129 		if (cq->gdma_id >= gc->max_num_cqs) {
2130 			if_printf(net, "CQ id %u too large.\n", cq->gdma_id);
2131 			err = EINVAL;
2132 			goto out;
2133 		}
2134 
2135 		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2136 
2137 		/* Initialize tx specific data */
2138 		txq->tx_buf_info = malloc(apc->tx_queue_size *
2139 		    sizeof(struct mana_send_buf_info),
2140 		    M_DEVBUF, M_WAITOK | M_ZERO);
2141 
2142 		snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
2143 		    "mana:tx(%d)", i);
2144 		mtx_init(&txq->txq_mtx, txq->txq_mtx_name, NULL, MTX_DEF);
2145 
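		/*
		 * The drbr ring is sized to several times the SQ depth
		 * so the stack can keep queueing mbufs while send WQEs
		 * are still completing.
		 */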
2146 		txq->txq_br = buf_ring_alloc(4 * apc->tx_queue_size,
2147 		    M_DEVBUF, M_WAITOK, &txq->txq_mtx);
2148 
2149 		/* Allocate taskqueue for deferred send */
2150 		TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
2151 		txq->enqueue_tq = taskqueue_create_fast("mana_tx_enque",
2152 		    M_NOWAIT, taskqueue_thread_enqueue, &txq->enqueue_tq);
2153 		if (unlikely(txq->enqueue_tq == NULL)) {
2154 			if_printf(net,
2155 			    "Unable to create tx %d enqueue task queue\n", i);
2156 			err = ENOMEM;
2157 			goto out;
2158 		}
2159 		taskqueue_start_threads(&txq->enqueue_tq, 1, PI_NET,
2160 		    "mana txq p%u-tx%d", apc->port_idx, i);
2161 
2162 		mana_alloc_counters((counter_u64_t *)&txq->stats,
2163 		    sizeof(txq->stats));
2164 
2165 		/* Allocate and start the cleanup task on CQ */
2166 		cq->do_not_ring_db = false;
2167 
2168 		NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
2169 		cq->cleanup_tq =
2170 		    taskqueue_create_fast("mana tx cq cleanup",
2171 		    M_WAITOK, taskqueue_thread_enqueue,
2172 		    &cq->cleanup_tq);
2173 
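		/*
		 * Spread the cleanup threads over the CPUs in round-robin
		 * order; pin a thread to its CPU only when
		 * apc->bind_cleanup_thread_cpu is enabled.
		 */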
2174 		if (apc->last_tx_cq_bind_cpu < 0)
2175 			apc->last_tx_cq_bind_cpu = CPU_FIRST();
2176 		cq->cpu = apc->last_tx_cq_bind_cpu;
2177 		apc->last_tx_cq_bind_cpu = CPU_NEXT(apc->last_tx_cq_bind_cpu);
2178 
2179 		if (apc->bind_cleanup_thread_cpu) {
2180 			cpuset_t cpu_mask;
2181 			CPU_SETOF(cq->cpu, &cpu_mask);
2182 			taskqueue_start_threads_cpuset(&cq->cleanup_tq,
2183 			    1, PI_NET, &cpu_mask,
2184 			    "mana cq p%u-tx%u-cpu%d",
2185 			    apc->port_idx, txq->idx, cq->cpu);
2186 		} else {
2187 			taskqueue_start_threads(&cq->cleanup_tq, 1,
2188 			    PI_NET, "mana cq p%u-tx%u",
2189 			    apc->port_idx, txq->idx);
2190 		}
2191 
2192 		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2193 	}
2194 
2195 	return 0;
2196 out:
2197 	mana_destroy_txq(apc);
2198 	return err;
2199 }
2200 
2201 static void
2202 mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq,
2203     bool validate_state)
2204 {
2205 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2206 	struct mana_recv_buf_oob *rx_oob;
2207 	int i;
2208 
2209 	if (!rxq)
2210 		return;
2211 
2212 	if (validate_state) {
2213 		/*
2214 		 * XXX Cancel and drain cleanup task queue here.
2215 		 */
2216 		;
2217 	}
2218 
2219 	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2220 
2221 	mana_deinit_cq(apc, &rxq->rx_cq);
2222 
2223 	mana_free_counters((counter_u64_t *)&rxq->stats,
2224 	    sizeof(rxq->stats));
2225 
2226 	/* Free LRO resources */
2227 	tcp_lro_free(&rxq->lro);
2228 
2229 	for (i = 0; i < rxq->num_rx_buf; i++) {
2230 		rx_oob = &rxq->rx_oobs[i];
2231 
2232 		if (rx_oob->mbuf)
2233 			mana_unload_rx_mbuf(apc, rxq, rx_oob, true);
2234 
2235 		bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2236 	}
2237 
2238 	if (rxq->gdma_rq)
2239 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
2240 
2241 	free(rxq, M_DEVBUF);
2242 }
2243 
2244 #define MANA_WQE_HEADER_SIZE 16
2245 #define MANA_WQE_SGE_SIZE 16
2246 
2247 static int
2248 mana_alloc_rx_wqe(struct mana_port_context *apc,
2249     struct mana_rxq *rxq, uint32_t *rxq_size, uint32_t *cq_size)
2250 {
2251 	struct mana_recv_buf_oob *rx_oob;
2252 	uint32_t buf_idx;
2253 	int err;
2254 
2255 	if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) {
2256 		mana_err(NULL,
2257 		    "WARNING: Invalid rxq datasize %u\n", rxq->datasize);
2258 	}
2259 
2260 	*rxq_size = 0;
2261 	*cq_size = 0;
2262 
2263 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2264 		rx_oob = &rxq->rx_oobs[buf_idx];
2265 		memset(rx_oob, 0, sizeof(*rx_oob));
2266 
2267 		err = bus_dmamap_create(apc->rx_buf_tag, 0,
2268 		    &rx_oob->dma_map);
2269 		if (err) {
2270 			mana_err(NULL,
2271 			    "Failed to  create rx DMA map for buf %d\n",
2272 			    buf_idx);
2273 			return err;
2274 		}
2275 
2276 		err = mana_load_rx_mbuf(apc, rxq, rx_oob, true);
2277 		if (err) {
2278 			mana_err(NULL,
2279 			    "Failed to  create rx DMA map for buf %d\n",
2280 			    buf_idx);
2281 			bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2282 			return err;
2283 		}
2284 
2285 		rx_oob->wqe_req.sgl = rx_oob->sgl;
2286 		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2287 		rx_oob->wqe_req.inline_oob_size = 0;
2288 		rx_oob->wqe_req.inline_oob_data = NULL;
2289 		rx_oob->wqe_req.flags = 0;
2290 		rx_oob->wqe_req.client_data_unit = 0;
2291 
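		/*
		 * Each receive WQE takes the 16-byte header plus one
		 * 16-byte SGE per segment, rounded up to the 32-byte
		 * basic unit; each WQE also needs one completion entry.
		 */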
2292 		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2293 				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2294 		*cq_size += COMP_ENTRY_SIZE;
2295 	}
2296 
2297 	return 0;
2298 }
2299 
2300 static int
2301 mana_push_wqe(struct mana_rxq *rxq)
2302 {
2303 	struct mana_recv_buf_oob *rx_oob;
2304 	uint32_t buf_idx;
2305 	int err;
2306 
2307 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2308 		rx_oob = &rxq->rx_oobs[buf_idx];
2309 
2310 		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2311 		    &rx_oob->wqe_inf);
2312 		if (err)
2313 			return ENOSPC;
2314 	}
2315 
2316 	return 0;
2317 }
2318 
2319 static struct mana_rxq *
2320 mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
2321     struct mana_eq *eq, if_t ndev)
2322 {
2323 	struct gdma_dev *gd = apc->ac->gdma_dev;
2324 	struct mana_obj_spec wq_spec;
2325 	struct mana_obj_spec cq_spec;
2326 	struct gdma_queue_spec spec;
2327 	struct mana_cq *cq = NULL;
2328 	uint32_t cq_size, rq_size;
2329 	struct gdma_context *gc;
2330 	struct mana_rxq *rxq;
2331 	int err;
2332 
2333 	gc = gd->gdma_context;
2334 
2335 	rxq = malloc(sizeof(*rxq) +
2336 	    apc->rx_queue_size * sizeof(struct mana_recv_buf_oob),
2337 	    M_DEVBUF, M_WAITOK | M_ZERO);
2338 	rxq->ndev = ndev;
2339 	rxq->num_rx_buf = apc->rx_queue_size;
2340 	rxq->rxq_idx = rxq_idx;
2341 	/*
2342 	 * The datasize is apc->frame_size rounded up to a multiple of
2343 	 * MCLBYTES (2048), capped at MAX_FRAME_SIZE (4096).
2344 	 */
2345 	rxq->datasize = ALIGN(apc->frame_size, MCLBYTES);
2346 	if (rxq->datasize > MAX_FRAME_SIZE)
2347 		rxq->datasize = MAX_FRAME_SIZE;
2348 
2349 	mana_dbg(NULL, "Setting rxq %d datasize %d\n",
2350 	    rxq_idx, rxq->datasize);
2351 
2352 	rxq->rxobj = INVALID_MANA_HANDLE;
2353 
2354 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2355 	if (err)
2356 		goto out;
2357 
2358 	/* Create LRO for the RQ */
2359 	if (if_getcapenable(ndev) & IFCAP_LRO) {
2360 		err = tcp_lro_init(&rxq->lro);
2361 		if (err) {
2362 			if_printf(ndev, "Failed to create LRO for rxq %d\n",
2363 			    rxq_idx);
2364 		} else {
2365 			rxq->lro.ifp = ndev;
2366 		}
2367 	}
2368 
2369 	mana_alloc_counters((counter_u64_t *)&rxq->stats,
2370 	    sizeof(rxq->stats));
2371 
2372 	rq_size = ALIGN(rq_size, PAGE_SIZE);
2373 	cq_size = ALIGN(cq_size, PAGE_SIZE);
2374 
2375 	/* Create RQ */
2376 	memset(&spec, 0, sizeof(spec));
2377 	spec.type = GDMA_RQ;
2378 	spec.monitor_avl_buf = true;
2379 	spec.queue_size = rq_size;
2380 	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2381 	if (err)
2382 		goto out;
2383 
2384 	/* Create RQ's CQ */
2385 	cq = &rxq->rx_cq;
2386 	cq->type = MANA_CQ_TYPE_RX;
2387 	cq->rxq = rxq;
2388 
2389 	memset(&spec, 0, sizeof(spec));
2390 	spec.type = GDMA_CQ;
2391 	spec.monitor_avl_buf = false;
2392 	spec.queue_size = cq_size;
2393 	spec.cq.callback = mana_schedule_task;
2394 	spec.cq.parent_eq = eq->eq;
2395 	spec.cq.context = cq;
2396 	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2397 	if (err)
2398 		goto out;
2399 
2400 	memset(&wq_spec, 0, sizeof(wq_spec));
2401 	memset(&cq_spec, 0, sizeof(cq_spec));
2402 	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2403 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
2404 
2405 	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2406 	cq_spec.queue_size = cq->gdma_cq->queue_size;
2407 	cq_spec.modr_ctx_id = 0;
2408 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2409 
2410 	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2411 	    &wq_spec, &cq_spec, &rxq->rxobj);
2412 	if (err)
2413 		goto out;
2414 
2415 	rxq->gdma_rq->id = wq_spec.queue_index;
2416 	cq->gdma_cq->id = cq_spec.queue_index;
2417 
2418 	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2419 	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2420 
2421 	rxq->gdma_id = rxq->gdma_rq->id;
2422 	cq->gdma_id = cq->gdma_cq->id;
2423 
2424 	err = mana_push_wqe(rxq);
2425 	if (err)
2426 		goto out;
2427 
2428 	if (cq->gdma_id >= gc->max_num_cqs) {
2429 		err = EINVAL;
2430 		goto out;
2431 	}
2432 
2433 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2434 
2435 	/* Allocate and start the cleanup task on CQ */
2436 	cq->do_not_ring_db = false;
2437 
2438 	NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
2439 	cq->cleanup_tq =
2440 	    taskqueue_create_fast("mana rx cq cleanup",
2441 	    M_WAITOK, taskqueue_thread_enqueue,
2442 	    &cq->cleanup_tq);
2443 
2444 	if (apc->last_rx_cq_bind_cpu < 0)
2445 		apc->last_rx_cq_bind_cpu = CPU_FIRST();
2446 	cq->cpu = apc->last_rx_cq_bind_cpu;
2447 	apc->last_rx_cq_bind_cpu = CPU_NEXT(apc->last_rx_cq_bind_cpu);
2448 
2449 	if (apc->bind_cleanup_thread_cpu) {
2450 		cpuset_t cpu_mask;
2451 		CPU_SETOF(cq->cpu, &cpu_mask);
2452 		taskqueue_start_threads_cpuset(&cq->cleanup_tq,
2453 		    1, PI_NET, &cpu_mask,
2454 		    "mana cq p%u-rx%u-cpu%d",
2455 		    apc->port_idx, rxq->rxq_idx, cq->cpu);
2456 	} else {
2457 		taskqueue_start_threads(&cq->cleanup_tq, 1,
2458 		    PI_NET, "mana cq p%u-rx%u",
2459 		    apc->port_idx, rxq->rxq_idx);
2460 	}
2461 
2462 	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2463 out:
2464 	if (!err)
2465 		return rxq;
2466 
2467 	if_printf(ndev, "Failed to create RXQ: err = %d\n", err);
2468 
2469 	mana_destroy_rxq(apc, rxq, false);
2470 
2471 	if (cq)
2472 		mana_deinit_cq(apc, cq);
2473 
2474 	return NULL;
2475 }
2476 
2477 static int
2478 mana_add_rx_queues(struct mana_port_context *apc, if_t ndev)
2479 {
2480 	struct mana_context *ac = apc->ac;
2481 	struct mana_rxq *rxq;
2482 	int err = 0;
2483 	int i;
2484 
2485 	for (i = 0; i < apc->num_queues; i++) {
2486 		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2487 		if (!rxq) {
2488 			err = ENOMEM;
2489 			goto out;
2490 		}
2491 
2492 		apc->rxqs[i] = rxq;
2493 	}
2494 
2495 	apc->default_rxobj = apc->rxqs[0]->rxobj;
2496 out:
2497 	return err;
2498 }
2499 
2500 static void
2501 mana_destroy_vport(struct mana_port_context *apc)
2502 {
2503 	struct mana_rxq *rxq;
2504 	uint32_t rxq_idx;
2505 
2506 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2507 		rxq = apc->rxqs[rxq_idx];
2508 		if (!rxq)
2509 			continue;
2510 
2511 		mana_destroy_rxq(apc, rxq, true);
2512 		apc->rxqs[rxq_idx] = NULL;
2513 	}
2514 
2515 	mana_destroy_txq(apc);
2516 
2517 	mana_uncfg_vport(apc);
2518 }
2519 
2520 static int
2521 mana_create_vport(struct mana_port_context *apc, if_t net)
2522 {
2523 	struct gdma_dev *gd = apc->ac->gdma_dev;
2524 	int err;
2525 
2526 	apc->default_rxobj = INVALID_MANA_HANDLE;
2527 
2528 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2529 	if (err)
2530 		return err;
2531 
2532 	return mana_create_txq(apc, net);
2533 }
2534 
2536 static void mana_rss_table_init(struct mana_port_context *apc)
2537 {
2538 	int i;
2539 
2540 	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
2541 		apc->indir_table[i] = i % apc->num_queues;
2542 }
2543 
2544 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2545 		    bool update_hash, bool update_tab)
2546 {
2547 	uint32_t queue_idx;
2548 	int err;
2549 	int i;
2550 
2551 	if (update_tab) {
2552 		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
2553 			queue_idx = apc->indir_table[i];
2554 			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2555 		}
2556 	}
2557 
2558 	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2559 	if (err)
2560 		return err;
2561 
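	/*
	 * Fence the RQs so outstanding work posted under the previous
	 * steering settings is flushed before we return.
	 */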
2562 	mana_fence_rqs(apc);
2563 
2564 	return 0;
2565 }
2566 
2567 static int
2568 mana_init_port(if_t ndev)
2569 {
2570 	struct mana_port_context *apc = if_getsoftc(ndev);
2571 	uint32_t max_txq, max_rxq, max_queues;
2572 	int port_idx = apc->port_idx;
2573 	uint32_t num_indirect_entries;
2574 	int err;
2575 
2576 	err = mana_init_port_context(apc);
2577 	if (err)
2578 		return err;
2579 
2580 	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2581 	    &num_indirect_entries);
2582 	if (err) {
2583 		if_printf(ndev, "Failed to query info for vPort %d\n",
2584 		    port_idx);
2585 		goto reset_apc;
2586 	}
2587 
2588 	max_queues = min_t(uint32_t, max_txq, max_rxq);
2589 	if (apc->max_queues > max_queues)
2590 		apc->max_queues = max_queues;
2591 
2592 	if (apc->num_queues > apc->max_queues)
2593 		apc->num_queues = apc->max_queues;
2594 
2595 	return 0;
2596 
2597 reset_apc:
2598 	bus_dma_tag_destroy(apc->rx_buf_tag);
2599 	apc->rx_buf_tag = NULL;
2600 	free(apc->rxqs, M_DEVBUF);
2601 	apc->rxqs = NULL;
2602 	return err;
2603 }
2604 
2605 int
2606 mana_alloc_queues(if_t ndev)
2607 {
2608 	struct mana_port_context *apc = if_getsoftc(ndev);
2609 	int err;
2610 
2611 	err = mana_create_vport(apc, ndev);
2612 	if (err)
2613 		return err;
2614 
2615 	err = mana_add_rx_queues(apc, ndev);
2616 	if (err)
2617 		goto destroy_vport;
2618 
2619 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2620 
2621 	mana_rss_table_init(apc);
2622 
2623 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2624 	if (err)
2625 		goto destroy_vport;
2626 
2627 	return 0;
2628 
2629 destroy_vport:
2630 	mana_destroy_vport(apc);
2631 	return err;
2632 }
2633 
2634 static int
2635 mana_up(struct mana_port_context *apc)
2636 {
2637 	int err;
2638 
2639 	mana_dbg(NULL, "mana_up called\n");
2640 
2641 	err = mana_alloc_queues(apc->ndev);
2642 	if (err) {
2643 		mana_err(NULL, "Faile alloc mana queues: %d\n", err);
2644 		return err;
2645 	}
2646 
2647 	/* Add queue specific sysctl */
2648 	mana_sysctl_add_queues(apc);
2649 
2650 	apc->port_is_up = true;
2651 
2652 	/* Ensure port state updated before txq state */
2653 	wmb();
2654 
2655 	if_link_state_change(apc->ndev, LINK_STATE_UP);
2656 	if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2657 
2658 	return 0;
2659 }
2660 
2662 static void
2663 mana_init(void *arg)
2664 {
2665 	struct mana_port_context *apc = (struct mana_port_context *)arg;
2666 
2667 	MANA_APC_LOCK_LOCK(apc);
2668 	if (!apc->port_is_up) {
2669 		mana_up(apc);
2670 	}
2671 	MANA_APC_LOCK_UNLOCK(apc);
2672 }
2673 
2674 static int
2675 mana_dealloc_queues(if_t ndev)
2676 {
2677 	struct mana_port_context *apc = if_getsoftc(ndev);
2678 	struct mana_txq *txq;
2679 	int i, err;
2680 
2681 	if (apc->port_is_up)
2682 		return EINVAL;
2683 
2684 	/* No packet can be transmitted now since apc->port_is_up is false.
2685 	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
2686 	 * a txq because it may not see apc->port_is_up being cleared to
2687 	 * false in time, but that does not matter since mana_start_xmit()
2688 	 * drops any new packets while apc->port_is_up is false.
2689 	 *
2690 	 * Drain all the in-flight TX packets
2691 	 */
2692 	for (i = 0; i < apc->num_queues; i++) {
2693 		txq = &apc->tx_qp[i].txq;
2694 
2695 		struct mana_cq *tx_cq = &apc->tx_qp[i].tx_cq;
2696 		struct mana_cq *rx_cq = &(apc->rxqs[i]->rx_cq);
2697 
2698 		tx_cq->do_not_ring_db = true;
2699 		rx_cq->do_not_ring_db = true;
2700 
2701 		/* Schedule a cleanup task */
2702 		taskqueue_enqueue(tx_cq->cleanup_tq, &tx_cq->cleanup_task);
2703 
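		/*
		 * The cleanup task reaps the outstanding TX completions,
		 * so wait here until pending_sends drops to zero.
		 */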
2704 		while (atomic_read(&txq->pending_sends) > 0)
2705 			usleep_range(1000, 2000);
2706 	}
2707 
2708 	/* At this point the queues can no longer be woken up, because
2709 	 * mana_poll_tx_cq() is guaranteed not to be running anymore.
2710 	 */
2711 
2712 	apc->rss_state = TRI_STATE_FALSE;
2713 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2714 	if (err) {
2715 		if_printf(ndev, "Failed to disable vPort: %d\n", err);
2716 		return err;
2717 	}
2718 
2719 	mana_destroy_vport(apc);
2720 
2721 	return 0;
2722 }
2723 
2724 static int
2725 mana_down(struct mana_port_context *apc)
2726 {
2727 	int err = 0;
2728 
2729 	apc->port_st_save = apc->port_is_up;
2730 	apc->port_is_up = false;
2731 
2732 	/* Ensure port state updated before txq state */
2733 	wmb();
2734 
2735 	if (apc->port_st_save) {
2736 		if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE,
2737 		    IFF_DRV_RUNNING);
2738 		if_link_state_change(apc->ndev, LINK_STATE_DOWN);
2739 
2740 		mana_sysctl_free_queues(apc);
2741 
2742 		err = mana_dealloc_queues(apc->ndev);
2743 		if (err) {
2744 			if_printf(apc->ndev,
2745 			    "Failed to bring down mana interface: %d\n", err);
2746 		}
2747 	}
2748 
2749 	return err;
2750 }
2751 
2752 int
2753 mana_detach(if_t ndev)
2754 {
2755 	struct mana_port_context *apc = if_getsoftc(ndev);
2756 	int err;
2757 
2758 	ether_ifdetach(ndev);
2759 
2760 	if (!apc)
2761 		return 0;
2762 
2763 	MANA_APC_LOCK_LOCK(apc);
2764 	err = mana_down(apc);
2765 	MANA_APC_LOCK_UNLOCK(apc);
2766 
2767 	mana_cleanup_port_context(apc);
2768 
2769 	MANA_APC_LOCK_DESTROY(apc);
2770 
2771 	free(apc, M_DEVBUF);
2772 
2773 	return err;
2774 }
2775 
2776 static unsigned int
2777 mana_get_tx_queue_size(int port_idx, unsigned int request_size)
2778 {
2779 	unsigned int new_size;
2780 
2781 	if (request_size == 0)
2782 		/* Uninitialized */
2783 		new_size = DEF_SEND_BUFFERS_PER_QUEUE;
2784 	else
2785 		new_size = roundup_pow_of_two(request_size);
2786 
2787 	if (new_size < MIN_SEND_BUFFERS_PER_QUEUE ||
2788 	    new_size > MAX_SEND_BUFFERS_PER_QUEUE) {
2789 		mana_info(NULL, "mana port %d: requested tx buffer "
2790 		    "size %u out of allowable range (%u - %u), "
2791 		    "setting to default\n",
2792 		    port_idx, request_size,
2793 		    MIN_SEND_BUFFERS_PER_QUEUE,
2794 		    MAX_SEND_BUFFERS_PER_QUEUE);
2795 		new_size = DEF_SEND_BUFFERS_PER_QUEUE;
2796 	}
2797 	mana_info(NULL, "mana port %d: tx buffer size %u "
2798 	    "(%u requested)\n",
2799 	    port_idx, new_size, request_size);
2800 
2801 	return (new_size);
2802 }
2803 
2804 static unsigned int
2805 mana_get_rx_queue_size(int port_idx, unsigned int request_size)
2806 {
2807 	unsigned int new_size;
2808 
2809 	if (request_size == 0)
2810 		/* Uninitialized */
2811 		new_size = DEF_RX_BUFFERS_PER_QUEUE;
2812 	else
2813 		new_size = roundup_pow_of_two(request_size);
2814 
2815 	if (new_size < MIN_RX_BUFFERS_PER_QUEUE ||
2816 	    new_size > MAX_RX_BUFFERS_PER_QUEUE) {
2817 		mana_info(NULL, "mana port %d: requested rx buffer "
2818 		    "size %u out of allowable range (%u - %u), "
2819 		    "setting to default\n",
2820 		    port_idx, request_size,
2821 		    MIN_RX_BUFFERS_PER_QUEUE,
2822 		    MAX_RX_BUFFERS_PER_QUEUE);
2823 		new_size = DEF_RX_BUFFERS_PER_QUEUE;
2824 	}
2825 	mana_info(NULL, "mana port %d: rx buffer size %u "
2826 	    "(%u requested)\n",
2827 	    port_idx, new_size, request_size);
2828 
2829 	return (new_size);
2830 }
2831 
2832 static int
2833 mana_probe_port(struct mana_context *ac, int port_idx,
2834     if_t *ndev_storage)
2835 {
2836 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
2837 	struct mana_port_context *apc;
2838 	uint32_t hwassist;
2839 	if_t ndev;
2840 	int err;
2841 
2842 	ndev = if_alloc_dev(IFT_ETHER, gc->dev);
2843 	*ndev_storage = ndev;
2844 
2845 	apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
2846 	apc->ac = ac;
2847 	apc->ndev = ndev;
2848 	apc->max_queues = gc->max_num_queues;
2849 	apc->num_queues = min_t(unsigned int,
2850 	    gc->max_num_queues, MANA_MAX_NUM_QUEUES);
2851 	apc->tx_queue_size = mana_get_tx_queue_size(port_idx,
2852 	    mana_tx_req_size);
2853 	apc->rx_queue_size = mana_get_rx_queue_size(port_idx,
2854 	    mana_rx_req_size);
2855 	apc->port_handle = INVALID_MANA_HANDLE;
2856 	apc->port_idx = port_idx;
2857 	apc->frame_size = DEFAULT_FRAME_SIZE;
2858 	apc->last_tx_cq_bind_cpu = -1;
2859 	apc->last_rx_cq_bind_cpu = -1;
2860 	apc->vport_use_count = 0;
2861 
2862 	MANA_APC_LOCK_INIT(apc);
2863 
2864 	if_initname(ndev, device_get_name(gc->dev), port_idx);
2865 	if_setdev(ndev, gc->dev);
2866 	if_setsoftc(ndev, apc);
2867 
2868 	if_setflags(ndev, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2869 	if_setinitfn(ndev, mana_init);
2870 	if_settransmitfn(ndev, mana_start_xmit);
2871 	if_setqflushfn(ndev, mana_qflush);
2872 	if_setioctlfn(ndev, mana_ioctl);
2873 	if_setgetcounterfn(ndev, mana_get_counter);
2874 
2875 	if_setmtu(ndev, ETHERMTU);
2876 	if_setbaudrate(ndev, IF_Gbps(100));
2877 
2878 	mana_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2879 
2880 	err = mana_init_port(ndev);
2881 	if (err)
2882 		goto reset_apc;
2883 
2884 	if_setcapabilitiesbit(ndev,
2885 	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
2886 	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
2887 	    IFCAP_TSO4 | IFCAP_TSO6 |
2888 	    IFCAP_LRO | IFCAP_LINKSTATE, 0);
2889 
2890 	/* Enable all available capabilities by default. */
2891 	if_setcapenable(ndev, if_getcapabilities(ndev));
2892 
2893 	/* TSO parameters */
2894 	if_sethwtsomax(ndev, MANA_TSO_MAX_SZ -
2895 	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2896 	if_sethwtsomaxsegcount(ndev, MAX_MBUF_FRAGS);
2897 	if_sethwtsomaxsegsize(ndev, PAGE_SIZE);
2898 
2899 	hwassist = 0;
2900 	if (if_getcapenable(ndev) & (IFCAP_TSO4 | IFCAP_TSO6))
2901 		hwassist |= CSUM_TSO;
2902 	if (if_getcapenable(ndev) & IFCAP_TXCSUM)
2903 		hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2904 	if (if_getcapenable(ndev) & IFCAP_TXCSUM_IPV6)
2905 		hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2906 	mana_dbg(NULL, "set hwassist 0x%x\n", hwassist);
2907 	if_sethwassist(ndev, hwassist);
2908 
2909 	ifmedia_init(&apc->media, IFM_IMASK,
2910 	    mana_ifmedia_change, mana_ifmedia_status);
2911 	ifmedia_add(&apc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2912 	ifmedia_set(&apc->media, IFM_ETHER | IFM_AUTO);
2913 
2914 	ether_ifattach(ndev, apc->mac_addr);
2915 
2916 	/* Initialize statistics */
2917 	mana_alloc_counters((counter_u64_t *)&apc->port_stats,
2918 	    sizeof(struct mana_port_stats));
2919 	mana_sysctl_add_port(apc);
2920 
2921 	/* Tell the stack that the interface is not active */
2922 	if_setdrvflagbits(ndev, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2923 
2924 	return 0;
2925 
2926 reset_apc:
2927 	free(apc, M_DEVBUF);
2928 	*ndev_storage = NULL;
2929 	if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2930 	if_free(ndev);
2931 	return err;
2932 }
2933 
2934 int mana_probe(struct gdma_dev *gd)
2935 {
2936 	struct gdma_context *gc = gd->gdma_context;
2937 	device_t dev = gc->dev;
2938 	struct mana_context *ac;
2939 	int err;
2940 	int i;
2941 
2942 	device_printf(dev, "%s protocol version: %d.%d.%d\n", DEVICE_NAME,
2943 		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2944 
2945 	err = mana_gd_register_device(gd);
2946 	if (err)
2947 		return err;
2948 
2949 	ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
2950 	ac->gdma_dev = gd;
2951 	ac->num_ports = 1;
2952 	gd->driver_data = ac;
2953 
2954 	err = mana_create_eq(ac);
2955 	if (err)
2956 		goto out;
2957 
2958 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
2959 	    MANA_MICRO_VERSION, &ac->num_ports);
2960 	if (err)
2961 		goto out;
2962 
2963 	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2964 		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2965 
2966 	for (i = 0; i < ac->num_ports; i++) {
2967 		err = mana_probe_port(ac, i, &ac->ports[i]);
2968 		if (err) {
2969 			device_printf(dev,
2970 			    "Failed to probe mana port %d\n", i);
2971 			break;
2972 		}
2973 	}
2974 
2975 out:
2976 	if (err)
2977 		mana_remove(gd);
2978 
2979 	return err;
2980 }
2981 
2982 void
2983 mana_remove(struct gdma_dev *gd)
2984 {
2985 	struct gdma_context *gc = gd->gdma_context;
2986 	struct mana_context *ac = gd->driver_data;
2987 	device_t dev = gc->dev;
2988 	if_t ndev;
2989 	int i;
2990 
2991 	for (i = 0; i < ac->num_ports; i++) {
2992 		ndev = ac->ports[i];
2993 		if (!ndev) {
2994 			if (i == 0)
2995 				device_printf(dev, "No net device to remove\n");
2996 			goto out;
2997 		}
2998 
2999 		mana_detach(ndev);
3000 
3001 		if_free(ndev);
3002 	}
3003 
3004 	mana_destroy_eq(ac);
3005 
3006 out:
3007 	mana_gd_deregister_device(gd);
3008 	gd->driver_data = NULL;
3009 	gd->gdma_context = NULL;
3010 	free(ac, M_DEVBUF);
3011 }
3012