xref: /freebsd/sys/dev/mana/mana_en.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2021 Microsoft Corp.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 #include <sys/cdefs.h>
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/bus.h>
34 #include <sys/kernel.h>
35 #include <sys/kthread.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/smp.h>
39 #include <sys/socket.h>
40 #include <sys/sockio.h>
41 #include <sys/time.h>
42 #include <sys/eventhandler.h>
43 
44 #include <machine/bus.h>
45 #include <machine/resource.h>
46 #include <machine/in_cksum.h>
47 
48 #include <net/if.h>
49 #include <net/if_var.h>
50 #include <net/if_types.h>
51 #include <net/if_vlan_var.h>
52 #ifdef RSS
53 #include <net/rss_config.h>
54 #endif
55 
56 #include <netinet/in_systm.h>
57 #include <netinet/in.h>
58 #include <netinet/if_ether.h>
59 #include <netinet/ip.h>
60 #include <netinet/ip6.h>
61 #include <netinet/tcp.h>
62 #include <netinet/udp.h>
63 
64 #include "mana.h"
65 #include "mana_sysctl.h"
66 
67 static int mana_up(struct mana_port_context *apc);
68 static int mana_down(struct mana_port_context *apc);
69 
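/*
 * Fill the caller's buffer with the RSS hash key. The key is generated
 * once with arc4random_buf() and cached, so all callers (and ports) get
 * the same key.
 */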
70 static void
71 mana_rss_key_fill(void *k, size_t size)
72 {
73 	static bool rss_key_generated = false;
74 	static uint8_t rss_key[MANA_HASH_KEY_SIZE];
75 
76 	KASSERT(size <= MANA_HASH_KEY_SIZE,
77 	    ("Request more bytes than MANA RSS key can hold"));
78 
79 	if (!rss_key_generated) {
80 		arc4random_buf(rss_key, MANA_HASH_KEY_SIZE);
81 		rss_key_generated = true;
82 	}
83 	memcpy(k, rss_key, size);
84 }
85 
86 static int
87 mana_ifmedia_change(if_t ifp __unused)
88 {
89 	return EOPNOTSUPP;
90 }
91 
92 static void
93 mana_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
94 {
95 	struct mana_port_context *apc = if_getsoftc(ifp);
96 
97 	if (!apc) {
98 		if_printf(ifp, "Port not available\n");
99 		return;
100 	}
101 
102 	MANA_APC_LOCK_LOCK(apc);
103 
104 	ifmr->ifm_status = IFM_AVALID;
105 	ifmr->ifm_active = IFM_ETHER;
106 
107 	if (!apc->port_is_up) {
108 		MANA_APC_LOCK_UNLOCK(apc);
109 		mana_dbg(NULL, "Port %u link is down\n", apc->port_idx);
110 		return;
111 	}
112 
113 	ifmr->ifm_status |= IFM_ACTIVE;
114 	ifmr->ifm_active |= IFM_100G_DR | IFM_FDX;
115 
116 	MANA_APC_LOCK_UNLOCK(apc);
117 }
118 
119 static uint64_t
120 mana_get_counter(if_t ifp, ift_counter cnt)
121 {
122 	struct mana_port_context *apc = if_getsoftc(ifp);
123 	struct mana_port_stats *stats = &apc->port_stats;
124 
125 	switch (cnt) {
126 	case IFCOUNTER_IPACKETS:
127 		return (counter_u64_fetch(stats->rx_packets));
128 	case IFCOUNTER_OPACKETS:
129 		return (counter_u64_fetch(stats->tx_packets));
130 	case IFCOUNTER_IBYTES:
131 		return (counter_u64_fetch(stats->rx_bytes));
132 	case IFCOUNTER_OBYTES:
133 		return (counter_u64_fetch(stats->tx_bytes));
134 	case IFCOUNTER_IQDROPS:
135 		return (counter_u64_fetch(stats->rx_drops));
136 	case IFCOUNTER_OQDROPS:
137 		return (counter_u64_fetch(stats->tx_drops));
138 	default:
139 		return (if_get_counter_default(ifp, cnt));
140 	}
141 }
142 
143 static void
144 mana_qflush(if_t ifp)
145 {
146 	if_qflush(ifp);
147 }
148 
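/*
 * Restart the port: bring it down if it is currently up, then bring it
 * back up, all under the APC lock.
 */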
149 int
150 mana_restart(struct mana_port_context *apc)
151 {
152 	int rc = 0;
153 
154 	MANA_APC_LOCK_LOCK(apc);
155 	if (apc->port_is_up)
156 		 mana_down(apc);
157 
158 	rc = mana_up(apc);
159 	MANA_APC_LOCK_UNLOCK(apc);
160 
161 	return (rc);
162 }
163 
164 static int
165 mana_ioctl(if_t ifp, u_long command, caddr_t data)
166 {
167 	struct mana_port_context *apc = if_getsoftc(ifp);
168 	struct ifrsskey *ifrk;
169 	struct ifrsshash *ifrh;
170 	struct ifreq *ifr;
171 	uint16_t new_mtu;
172 	int rc = 0;
173 
174 	switch (command) {
175 	case SIOCSIFMTU:
176 		ifr = (struct ifreq *)data;
177 		new_mtu = ifr->ifr_mtu;
178 		if (if_getmtu(ifp) == new_mtu)
179 			break;
180 		if ((new_mtu + 18 > MAX_FRAME_SIZE) ||
181 		    (new_mtu + 18 < MIN_FRAME_SIZE)) {
182 			if_printf(ifp, "Invalid MTU. new_mtu: %d, "
183 			    "max allowed: %d, min allowed: %d\n",
184 			    new_mtu, MAX_FRAME_SIZE - 18, MIN_FRAME_SIZE - 18);
185 			return EINVAL;
186 		}
187 		MANA_APC_LOCK_LOCK(apc);
188 		if (apc->port_is_up)
189 			mana_down(apc);
190 
191 		apc->frame_size = new_mtu + 18;
192 		if_setmtu(ifp, new_mtu);
193 		mana_dbg(NULL, "Set MTU to %d\n", new_mtu);
194 
195 		rc = mana_up(apc);
196 		MANA_APC_LOCK_UNLOCK(apc);
197 		break;
198 
199 	case SIOCSIFFLAGS:
200 		if (if_getflags(ifp) & IFF_UP) {
201 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
202 				MANA_APC_LOCK_LOCK(apc);
203 				if (!apc->port_is_up)
204 					rc = mana_up(apc);
205 				MANA_APC_LOCK_UNLOCK(apc);
206 			}
207 		} else {
208 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
209 				MANA_APC_LOCK_LOCK(apc);
210 				if (apc->port_is_up)
211 					mana_down(apc);
212 				MANA_APC_LOCK_UNLOCK(apc);
213 			}
214 		}
215 		break;
216 
217 	case SIOCSIFMEDIA:
218 	case SIOCGIFMEDIA:
219 	case SIOCGIFXMEDIA:
220 		ifr = (struct ifreq *)data;
221 		rc = ifmedia_ioctl(ifp, ifr, &apc->media, command);
222 		break;
223 
224 	case SIOCGIFRSSKEY:
225 		ifrk = (struct ifrsskey *)data;
226 		ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
227 		ifrk->ifrk_keylen = MANA_HASH_KEY_SIZE;
228 		memcpy(ifrk->ifrk_key, apc->hashkey, MANA_HASH_KEY_SIZE);
229 		break;
230 
231 	case SIOCGIFRSSHASH:
232 		ifrh = (struct ifrsshash *)data;
233 		ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
234 		ifrh->ifrh_types =
235 		    RSS_TYPE_TCP_IPV4 |
236 		    RSS_TYPE_UDP_IPV4 |
237 		    RSS_TYPE_TCP_IPV6 |
238 		    RSS_TYPE_UDP_IPV6;
239 		break;
240 
241 	default:
242 		rc = ether_ioctl(ifp, command, data);
243 		break;
244 	}
245 
246 	return (rc);
247 }
248 
249 static inline void
250 mana_alloc_counters(counter_u64_t *begin, int size)
251 {
252 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
253 
254 	for (; begin < end; ++begin)
255 		*begin = counter_u64_alloc(M_WAITOK);
256 }
257 
258 static inline void
259 mana_free_counters(counter_u64_t *begin, int size)
260 {
261 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
262 
263 	for (; begin < end; ++begin)
264 		counter_u64_free(*begin);
265 }
266 
267 static bool
268 mana_can_tx(struct gdma_queue *wq)
269 {
270 	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
271 }
272 
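/*
 * DMA-map the mbuf chain and fill in the scatter/gather list of the TX
 * work request. If the chain has too many fragments, m_collapse() it
 * and retry the mapping once.
 */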
273 static inline int
274 mana_tx_map_mbuf(struct mana_port_context *apc,
275     struct mana_send_buf_info *tx_info,
276     struct mbuf **m_head, struct mana_tx_package *tp,
277     struct mana_stats *tx_stats)
278 {
279 	struct gdma_dev *gd = apc->ac->gdma_dev;
280 	bus_dma_segment_t segs[MAX_MBUF_FRAGS];
281 	struct mbuf *m = *m_head;
282 	int err, nsegs, i;
283 
284 	err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, tx_info->dma_map,
285 	    m, segs, &nsegs, BUS_DMA_NOWAIT);
286 	if (err == EFBIG) {
287 		struct mbuf *m_new;
288 
289 		counter_u64_add(tx_stats->collapse, 1);
290 		m_new = m_collapse(m, M_NOWAIT, MAX_MBUF_FRAGS);
291 		if (unlikely(m_new == NULL)) {
292 			counter_u64_add(tx_stats->collapse_err, 1);
293 			return ENOBUFS;
294 		} else {
295 			*m_head = m = m_new;
296 		}
297 
298 		mana_warn(NULL,
299 		    "Too many segs in orig mbuf, m_collapse called\n");
300 
301 		err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag,
302 		    tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
303 	}
304 	if (!err) {
305 		for (i = 0; i < nsegs; i++) {
306 			tp->wqe_req.sgl[i].address = segs[i].ds_addr;
307 			tp->wqe_req.sgl[i].mem_key = gd->gpa_mkey;
308 			tp->wqe_req.sgl[i].size = segs[i].ds_len;
309 		}
310 		tp->wqe_req.num_sge = nsegs;
311 
312 		tx_info->mbuf = *m_head;
313 
314 		bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
315 		    BUS_DMASYNC_PREWRITE);
316 	}
317 
318 	return err;
319 }
320 
321 static inline void
322 mana_tx_unmap_mbuf(struct mana_port_context *apc,
323     struct mana_send_buf_info *tx_info)
324 {
325 	bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
326 	    BUS_DMASYNC_POSTWRITE);
327 	bus_dmamap_unload(apc->tx_buf_tag, tx_info->dma_map);
328 	if (tx_info->mbuf) {
329 		m_freem(tx_info->mbuf);
330 		tx_info->mbuf = NULL;
331 	}
332 }
333 
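/*
 * Allocate (if requested) and DMA-map an RX mbuf, then fill in the
 * single scatter/gather entry of the receive OOB. The mbuf is freed if
 * the DMA mapping fails.
 */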
334 static inline int
335 mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
336     struct mana_recv_buf_oob *rx_oob, bool alloc_mbuf)
337 {
338 	bus_dma_segment_t segs[1];
339 	struct mbuf *mbuf;
340 	int nsegs, err;
341 	uint32_t mlen;
342 
343 	if (alloc_mbuf) {
344 		mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize);
345 		if (unlikely(mbuf == NULL)) {
346 			mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
347 			if (unlikely(mbuf == NULL)) {
348 				return ENOMEM;
349 			}
350 			mlen = MCLBYTES;
351 		} else {
352 			mlen = rxq->datasize;
353 		}
354 
355 		mbuf->m_pkthdr.len = mbuf->m_len = mlen;
356 	} else {
357 		if (rx_oob->mbuf) {
358 			mbuf = rx_oob->mbuf;
359 			mlen = rx_oob->mbuf->m_pkthdr.len;
360 		} else {
361 			return ENOMEM;
362 		}
363 	}
364 
365 	err = bus_dmamap_load_mbuf_sg(apc->rx_buf_tag, rx_oob->dma_map,
366 	    mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
367 
368 	if (unlikely((err != 0) || (nsegs != 1))) {
369 		mana_warn(NULL, "Failed to map mbuf, error: %d, "
370 		    "nsegs: %d\n", err, nsegs);
371 		counter_u64_add(rxq->stats.dma_mapping_err, 1);
372 		goto error;
373 	}
374 
375 	bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
376 	    BUS_DMASYNC_PREREAD);
377 
378 	rx_oob->mbuf = mbuf;
379 	rx_oob->num_sge = 1;
380 	rx_oob->sgl[0].address = segs[0].ds_addr;
381 	rx_oob->sgl[0].size = mlen;
382 	rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
383 
384 	return 0;
385 
386 error:
387 	m_freem(mbuf);
388 	return EFAULT;
389 }
390 
391 static inline void
392 mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
393     struct mana_recv_buf_oob *rx_oob, bool free_mbuf)
394 {
395 	bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
396 	    BUS_DMASYNC_POSTREAD);
397 	bus_dmamap_unload(apc->rx_buf_tag, rx_oob->dma_map);
398 
399 	if (free_mbuf && rx_oob->mbuf) {
400 		m_freem(rx_oob->mbuf);
401 		rx_oob->mbuf = NULL;
402 	}
403 }
404 
405 
406 /* Use a couple of mbuf PH_loc spaces for the L3 and L4 protocol types */
407 #define MANA_L3_PROTO(_mbuf)	((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
408 #define MANA_L4_PROTO(_mbuf)	((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
409 
410 #define MANA_TXQ_FULL	(IFF_DRV_RUNNING | IFF_DRV_OACTIVE)
411 
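/*
 * Drain the TX queue's buf_ring: map each mbuf, build the TX OOB
 * (including the checksum/TSO offload fields) and post the work request
 * to the hardware SQ. Called with the txq mutex held. If the SQ runs
 * out of space, IFF_DRV_OACTIVE is set and the completion path clears
 * it once the SQ has room again.
 */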
412 static void
413 mana_xmit(struct mana_txq *txq)
414 {
415 	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
416 	struct mana_send_buf_info *tx_info;
417 	if_t ndev = txq->ndev;
418 	struct mbuf *mbuf;
419 	struct mana_port_context *apc = if_getsoftc(ndev);
420 	struct mana_port_stats *port_stats = &apc->port_stats;
421 	struct gdma_dev *gd = apc->ac->gdma_dev;
422 	uint64_t packets, bytes;
423 	uint16_t next_to_use;
424 	struct mana_tx_package pkg = {};
425 	struct mana_stats *tx_stats;
426 	struct gdma_queue *gdma_sq;
427 	struct mana_cq *cq;
428 	int err, len;
429 
430 	gdma_sq = txq->gdma_sq;
431 	cq = &apc->tx_qp[txq->idx].tx_cq;
432 	tx_stats = &txq->stats;
433 
434 	packets = 0;
435 	bytes = 0;
436 	next_to_use = txq->next_to_use;
437 
438 	while ((mbuf = drbr_peek(ndev, txq->txq_br)) != NULL) {
439 		if (!apc->port_is_up ||
440 		    (if_getdrvflags(ndev) & MANA_TXQ_FULL) != IFF_DRV_RUNNING) {
441 			drbr_putback(ndev, txq->txq_br, mbuf);
442 			break;
443 		}
444 
445 		if (!mana_can_tx(gdma_sq)) {
446 			/* SQ is full. Set the IFF_DRV_OACTIVE flag */
447 			if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, 0);
448 			counter_u64_add(tx_stats->stop, 1);
449 			uint64_t stops = counter_u64_fetch(tx_stats->stop);
450 			uint64_t wakeups = counter_u64_fetch(tx_stats->wakeup);
451 #define MANA_TXQ_STOP_THRESHOLD		50
452 			if (stops > MANA_TXQ_STOP_THRESHOLD && wakeups > 0 &&
453 			    stops > wakeups && txq->alt_txq_idx == txq->idx) {
454 				txq->alt_txq_idx =
455 				    (txq->idx + (stops / wakeups))
456 				    % apc->num_queues;
457 				counter_u64_add(tx_stats->alt_chg, 1);
458 			}
459 
460 			drbr_putback(ndev, txq->txq_br, mbuf);
461 
462 			taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
463 			break;
464 		}
465 
466 		tx_info = &txq->tx_buf_info[next_to_use];
467 
468 		memset(&pkg, 0, sizeof(struct mana_tx_package));
469 		pkg.wqe_req.sgl = pkg.sgl_array;
470 
471 		err = mana_tx_map_mbuf(apc, tx_info, &mbuf, &pkg, tx_stats);
472 		if (unlikely(err)) {
473 			mana_dbg(NULL,
474 			    "Failed to map tx mbuf, err %d\n", err);
475 
476 			counter_u64_add(tx_stats->dma_mapping_err, 1);
477 
478 			/* The mbuf is still there. Free it */
479 			m_freem(mbuf);
480 			/* Advance the drbr queue */
481 			drbr_advance(ndev, txq->txq_br);
482 			continue;
483 		}
484 
485 		pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
486 		pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
487 
488 		if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
489 			pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
490 			pkt_fmt = MANA_LONG_PKT_FMT;
491 		} else {
492 			pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
493 		}
494 
495 		pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
496 
497 		if (pkt_fmt == MANA_SHORT_PKT_FMT)
498 			pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
499 		else
500 			pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
501 
502 		pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
503 		pkg.wqe_req.flags = 0;
504 		pkg.wqe_req.client_data_unit = 0;
505 
506 		if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
507 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
508 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
509 			else
510 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
511 
512 			pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
513 			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
514 			pkg.tx_oob.s_oob.trans_off = mbuf->m_pkthdr.l3hlen;
515 
516 			pkg.wqe_req.client_data_unit = mbuf->m_pkthdr.tso_segsz;
517 			pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
518 		} else if (mbuf->m_pkthdr.csum_flags &
519 		    (CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP6_UDP | CSUM_IP6_TCP)) {
520 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP) {
521 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
522 				pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
523 			} else {
524 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
525 			}
526 
527 			if (MANA_L4_PROTO(mbuf) == IPPROTO_TCP) {
528 				pkg.tx_oob.s_oob.comp_tcp_csum = 1;
529 				pkg.tx_oob.s_oob.trans_off =
530 				    mbuf->m_pkthdr.l3hlen;
531 			} else {
532 				pkg.tx_oob.s_oob.comp_udp_csum = 1;
533 			}
534 		} else if (mbuf->m_pkthdr.csum_flags & CSUM_IP) {
535 			pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
536 			pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
537 		} else {
538 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
539 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
540 			else if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IPV6)
541 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
542 		}
543 
544 		len = mbuf->m_pkthdr.len;
545 
546 		err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
547 		    (struct gdma_posted_wqe_info *)&tx_info->wqe_inf);
548 		if (unlikely(err)) {
549 			/* Should not happen */
550 			if_printf(ndev, "Failed to post TX OOB: %d\n", err);
551 
552 			mana_tx_unmap_mbuf(apc, tx_info);
553 
554 			drbr_advance(ndev, txq->txq_br);
555 			continue;
556 		}
557 
558 		next_to_use =
559 		    (next_to_use + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
560 
561 		(void)atomic_inc_return(&txq->pending_sends);
562 
563 		drbr_advance(ndev, txq->txq_br);
564 
565 		mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
566 
567 		packets++;
568 		bytes += len;
569 	}
570 
571 	counter_enter();
572 	counter_u64_add_protected(tx_stats->packets, packets);
573 	counter_u64_add_protected(port_stats->tx_packets, packets);
574 	counter_u64_add_protected(tx_stats->bytes, bytes);
575 	counter_u64_add_protected(port_stats->tx_bytes, bytes);
576 	counter_exit();
577 
578 	txq->next_to_use = next_to_use;
579 }
580 
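/* Taskqueue handler that drains the txq's buf_ring under the txq mutex. */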
581 static void
582 mana_xmit_taskfunc(void *arg, int pending)
583 {
584 	struct mana_txq *txq = (struct mana_txq *)arg;
585 	if_t ndev = txq->ndev;
586 	struct mana_port_context *apc = if_getsoftc(ndev);
587 
588 	while (!drbr_empty(ndev, txq->txq_br) && apc->port_is_up &&
589 	    (if_getdrvflags(ndev) & MANA_TXQ_FULL) == IFF_DRV_RUNNING) {
590 		mtx_lock(&txq->txq_mtx);
591 		mana_xmit(txq);
592 		mtx_unlock(&txq->txq_mtx);
593 	}
594 }
595 
596 #define PULLUP_HDR(m, len)				\
597 do {							\
598 	if (unlikely((m)->m_len < (len))) {		\
599 		(m) = m_pullup((m), (len));		\
600 		if ((m) == NULL)			\
601 			return (NULL);			\
602 	}						\
603 } while (0)
604 
605 /*
606  * If this function fails, the mbuf is freed.
607  */
608 static inline struct mbuf *
609 mana_tso_fixup(struct mbuf *mbuf)
610 {
611 	struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
612 	struct tcphdr *th;
613 	uint16_t etype;
614 	int ehlen;
615 
616 	if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
617 		etype = ntohs(eh->evl_proto);
618 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
619 	} else {
620 		etype = ntohs(eh->evl_encap_proto);
621 		ehlen = ETHER_HDR_LEN;
622 	}
623 
624 	if (etype == ETHERTYPE_IP) {
625 		struct ip *ip;
626 		int iphlen;
627 
628 		PULLUP_HDR(mbuf, ehlen + sizeof(*ip));
629 		ip = mtodo(mbuf, ehlen);
630 		iphlen = ip->ip_hl << 2;
631 		mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
632 
633 		PULLUP_HDR(mbuf, ehlen + iphlen + sizeof(*th));
634 		th = mtodo(mbuf, ehlen + iphlen);
635 
636 		ip->ip_len = 0;
637 		ip->ip_sum = 0;
638 		th->th_sum = in_pseudo(ip->ip_src.s_addr,
639 		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
640 	} else if (etype == ETHERTYPE_IPV6) {
641 		struct ip6_hdr *ip6;
642 
643 		PULLUP_HDR(mbuf, ehlen + sizeof(*ip6) + sizeof(*th));
644 		ip6 = mtodo(mbuf, ehlen);
645 		if (ip6->ip6_nxt != IPPROTO_TCP) {
646 			/* Something is really wrong; just return */
647 			mana_dbg(NULL, "TSO mbuf not TCP, freed.\n");
648 			m_freem(mbuf);
649 			return NULL;
650 		}
651 		mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
652 
653 		th = mtodo(mbuf, ehlen + sizeof(*ip6));
654 
655 		ip6->ip6_plen = 0;
656 		th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
657 	} else {
658 		/* CSUM_TSO is set, but the protocol is not IP. */
659 		mana_warn(NULL, "TSO mbuf not right, freed.\n");
660 		m_freem(mbuf);
661 		return NULL;
662 	}
663 
664 	MANA_L3_PROTO(mbuf) = etype;
665 
666 	return (mbuf);
667 }
668 
669 /*
670  * If this function fails, the mbuf is freed.
671  */
672 static inline struct mbuf *
673 mana_mbuf_csum_check(struct mbuf *mbuf)
674 {
675 	struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
676 	struct mbuf *mbuf_next;
677 	uint16_t etype;
678 	int offset;
679 	int ehlen;
680 
681 	if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
682 		etype = ntohs(eh->evl_proto);
683 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
684 	} else {
685 		etype = ntohs(eh->evl_encap_proto);
686 		ehlen = ETHER_HDR_LEN;
687 	}
688 
689 	mbuf_next = m_getptr(mbuf, ehlen, &offset);
690 
691 	MANA_L4_PROTO(mbuf) = 0;
692 	if (etype == ETHERTYPE_IP) {
693 		const struct ip *ip;
694 		int iphlen;
695 
696 		ip = (struct ip *)(mtodo(mbuf_next, offset));
697 		iphlen = ip->ip_hl << 2;
698 		mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
699 
700 		MANA_L4_PROTO(mbuf) = ip->ip_p;
701 	} else if (etype == ETHERTYPE_IPV6) {
702 		const struct ip6_hdr *ip6;
703 
704 		ip6 = (struct ip6_hdr *)(mtodo(mbuf_next, offset));
705 		mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
706 
707 		MANA_L4_PROTO(mbuf) = ip6->ip6_nxt;
708 	} else {
709 		MANA_L4_PROTO(mbuf) = 0;
710 	}
711 
712 	MANA_L3_PROTO(mbuf) = etype;
713 
714 	return (mbuf);
715 }
716 
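/*
 * if_transmit handler: fix up the mbuf for offloads, select a TX queue
 * (using the RSS hash when available), enqueue the mbuf on that queue's
 * buf_ring, and either transmit directly or defer to the queue's
 * enqueue taskqueue.
 */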
717 static int
718 mana_start_xmit(if_t ifp, struct mbuf *m)
719 {
720 	struct mana_port_context *apc = if_getsoftc(ifp);
721 	struct mana_txq *txq;
722 	int is_drbr_empty;
723 	uint16_t txq_id;
724 	int err;
725 
726 	if (unlikely((!apc->port_is_up) ||
727 	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
728 		return ENODEV;
729 
730 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
731 		m = mana_tso_fixup(m);
732 		if (unlikely(m == NULL)) {
733 			counter_enter();
734 			counter_u64_add_protected(apc->port_stats.tx_drops, 1);
735 			counter_exit();
736 			return EIO;
737 		}
738 	} else {
739 		m = mana_mbuf_csum_check(m);
740 		if (unlikely(m == NULL)) {
741 			counter_enter();
742 			counter_u64_add_protected(apc->port_stats.tx_drops, 1);
743 			counter_exit();
744 			return EIO;
745 		}
746 	}
747 
748 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
749 		uint32_t hash = m->m_pkthdr.flowid;
750 		txq_id = apc->indir_table[(hash) & MANA_INDIRECT_TABLE_MASK] %
751 		    apc->num_queues;
752 	} else {
753 		txq_id = m->m_pkthdr.flowid % apc->num_queues;
754 	}
755 
756 	if (apc->enable_tx_altq)
757 		txq_id = apc->tx_qp[txq_id].txq.alt_txq_idx;
758 
759 	txq = &apc->tx_qp[txq_id].txq;
760 
761 	is_drbr_empty = drbr_empty(ifp, txq->txq_br);
762 	err = drbr_enqueue(ifp, txq->txq_br, m);
763 	if (unlikely(err)) {
764 		mana_warn(NULL, "txq %u failed to enqueue: %d\n",
765 		    txq_id, err);
766 		taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
767 		return err;
768 	}
769 
770 	if (is_drbr_empty && mtx_trylock(&txq->txq_mtx)) {
771 		mana_xmit(txq);
772 		mtx_unlock(&txq->txq_mtx);
773 	} else {
774 		taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
775 	}
776 
777 	return 0;
778 }
779 
780 static void
781 mana_cleanup_port_context(struct mana_port_context *apc)
782 {
783 	bus_dma_tag_destroy(apc->tx_buf_tag);
784 	bus_dma_tag_destroy(apc->rx_buf_tag);
785 	apc->rx_buf_tag = NULL;
786 
787 	free(apc->rxqs, M_DEVBUF);
788 	apc->rxqs = NULL;
789 
790 	mana_free_counters((counter_u64_t *)&apc->port_stats,
791 	    sizeof(struct mana_port_stats));
792 }
793 
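/* Create the TX/RX DMA tags and the per-port array of RX queue pointers. */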
794 static int
795 mana_init_port_context(struct mana_port_context *apc)
796 {
797 	device_t dev = apc->ac->gdma_dev->gdma_context->dev;
798 	uint32_t tso_maxsize;
799 	int err;
800 
801 	tso_maxsize = MANA_TSO_MAX_SZ;
802 
803 	/* Create DMA tag for tx bufs */
804 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
805 	    1, 0,			/* alignment, boundary	*/
806 	    BUS_SPACE_MAXADDR,		/* lowaddr		*/
807 	    BUS_SPACE_MAXADDR,		/* highaddr		*/
808 	    NULL, NULL,			/* filter, filterarg	*/
809 	    tso_maxsize,		/* maxsize		*/
810 	    MAX_MBUF_FRAGS,		/* nsegments		*/
811 	    tso_maxsize,		/* maxsegsize		*/
812 	    0,				/* flags		*/
813 	    NULL, NULL,			/* lockfunc, lockfuncarg*/
814 	    &apc->tx_buf_tag);
815 	if (unlikely(err)) {
816 		device_printf(dev, "Failed to create TX DMA tag\n");
817 		return err;
818 	}
819 
820 	/* Create DMA tag for rx bufs */
821 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
822 	    64, 0,			/* alignment, boundary	*/
823 	    BUS_SPACE_MAXADDR,		/* lowaddr		*/
824 	    BUS_SPACE_MAXADDR,		/* highaddr		*/
825 	    NULL, NULL,			/* filter, filterarg	*/
826 	    MJUMPAGESIZE,		/* maxsize		*/
827 	    1,				/* nsegments		*/
828 	    MJUMPAGESIZE,		/* maxsegsize		*/
829 	    0,				/* flags		*/
830 	    NULL, NULL,			/* lockfunc, lockfuncarg*/
831 	    &apc->rx_buf_tag);
832 	if (unlikely(err)) {
833 		device_printf(dev, "Failed to create RX DMA tag\n");
834 		return err;
835 	}
836 
837 	apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
838 	    M_DEVBUF, M_WAITOK | M_ZERO);
839 
840 	if (!apc->rxqs) {
841 		bus_dma_tag_destroy(apc->tx_buf_tag);
842 		bus_dma_tag_destroy(apc->rx_buf_tag);
843 		apc->rx_buf_tag = NULL;
844 		return ENOMEM;
845 	}
846 
847 	return 0;
848 }
849 
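/*
 * Send a MANA management request over the GDMA channel and verify that
 * the response matches the request (device id and activity id).
 */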
850 static int
851 mana_send_request(struct mana_context *ac, void *in_buf,
852     uint32_t in_len, void *out_buf, uint32_t out_len)
853 {
854 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
855 	struct gdma_resp_hdr *resp = out_buf;
856 	struct gdma_req_hdr *req = in_buf;
857 	device_t dev = gc->dev;
858 	static atomic_t activity_id;
859 	int err;
860 
861 	req->dev_id = gc->mana.dev_id;
862 	req->activity_id = atomic_inc_return(&activity_id);
863 
864 	mana_dbg(NULL, "activity_id = %u\n", activity_id);
865 
866 	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
867 	    out_buf);
868 	if (err || resp->status) {
869 		device_printf(dev, "Failed to send mana message: %d, 0x%x\n",
870 			err, resp->status);
871 		return err ? err : EPROTO;
872 	}
873 
874 	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
875 	    req->activity_id != resp->activity_id) {
876 		device_printf(dev,
877 		    "Unexpected mana message response: %x,%x,%x,%x\n",
878 		    req->dev_id.as_uint32, resp->dev_id.as_uint32,
879 		    req->activity_id, resp->activity_id);
880 		return EPROTO;
881 	}
882 
883 	return 0;
884 }
885 
886 static int
887 mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
888     const enum mana_command_code expected_code,
889     const uint32_t min_size)
890 {
891 	if (resp_hdr->response.msg_type != expected_code)
892 		return EPROTO;
893 
894 	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
895 		return EPROTO;
896 
897 	if (resp_hdr->response.msg_size < min_size)
898 		return EPROTO;
899 
900 	return 0;
901 }
902 
903 static int
904 mana_query_device_cfg(struct mana_context *ac, uint32_t proto_major_ver,
905     uint32_t proto_minor_ver, uint32_t proto_micro_ver,
906     uint16_t *max_num_vports)
907 {
908 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
909 	struct mana_query_device_cfg_resp resp = {};
910 	struct mana_query_device_cfg_req req = {};
911 	device_t dev = gc->dev;
912 	int err = 0;
913 
914 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
915 	    sizeof(req), sizeof(resp));
916 	req.proto_major_ver = proto_major_ver;
917 	req.proto_minor_ver = proto_minor_ver;
918 	req.proto_micro_ver = proto_micro_ver;
919 
920 	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
921 	if (err) {
922 		device_printf(dev, "Failed to query config: %d", err);
923 		return err;
924 	}
925 
926 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
927 	    sizeof(resp));
928 	if (err || resp.hdr.status) {
929 		device_printf(dev, "Invalid query result: %d, 0x%x\n", err,
930 		    resp.hdr.status);
931 		if (!err)
932 			err = EPROTO;
933 		return err;
934 	}
935 
936 	*max_num_vports = resp.max_num_vports;
937 
938 	mana_dbg(NULL, "mana max_num_vports from device = %d\n",
939 	    *max_num_vports);
940 
941 	return 0;
942 }
943 
944 static int
945 mana_query_vport_cfg(struct mana_port_context *apc, uint32_t vport_index,
946     uint32_t *max_sq, uint32_t *max_rq, uint32_t *num_indir_entry)
947 {
948 	struct mana_query_vport_cfg_resp resp = {};
949 	struct mana_query_vport_cfg_req req = {};
950 	int err;
951 
952 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
953 	    sizeof(req), sizeof(resp));
954 
955 	req.vport_index = vport_index;
956 
957 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
958 	    sizeof(resp));
959 	if (err)
960 		return err;
961 
962 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
963 	    sizeof(resp));
964 	if (err)
965 		return err;
966 
967 	if (resp.hdr.status)
968 		return EPROTO;
969 
970 	*max_sq = resp.max_num_sq;
971 	*max_rq = resp.max_num_rq;
972 	*num_indir_entry = resp.num_indirection_ent;
973 
974 	apc->port_handle = resp.vport;
975 	memcpy(apc->mac_addr, resp.mac_addr, ETHER_ADDR_LEN);
976 
977 	return 0;
978 }
979 
980 void
981 mana_uncfg_vport(struct mana_port_context *apc)
982 {
983 	apc->vport_use_count--;
984 	if (apc->vport_use_count < 0) {
985 		mana_err(NULL,
986 		    "WARNING: vport_use_count less than 0: %u\n",
987 		    apc->vport_use_count);
988 	}
989 }
990 
991 int
992 mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
993     uint32_t doorbell_pg_id)
994 {
995 	struct mana_config_vport_resp resp = {};
996 	struct mana_config_vport_req req = {};
997 	int err;
998 
999 	/* This function is used to program the Ethernet port in the hardware
1000 	 * table. It can be called from the Ethernet driver or the RDMA driver.
1001 	 *
1002 	 * For Ethernet usage, the hardware supports only one active user on a
1003 	 * physical port. The driver checks the port usage before programming
1004 	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
1005 	 * device to the kernel NET layer (Ethernet driver).
1006 	 *
1007 	 * Because the RDMA driver doesn't know in advance which QP type the
1008 	 * user will create, it exposes the device with all its ports. The user
1009 	 * may not be able to create a RAW QP on a port if this port is already
1010 	 * in use by the Ethernet driver in the kernel.
1011 	 *
1012 	 * This physical port limitation only applies to the RAW QP. For RC QP,
1013 	 * the hardware doesn't have this limitation. The user can create RC
1014 	 * QPs on a physical port up to the hardware limits independent of the
1015 	 * Ethernet usage on the same port.
1016 	 */
1017 	if (apc->vport_use_count > 0) {
1018 		return EBUSY;
1019 	}
1020 	apc->vport_use_count++;
1021 
1022 	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
1023 	    sizeof(req), sizeof(resp));
1024 	req.vport = apc->port_handle;
1025 	req.pdid = protection_dom_id;
1026 	req.doorbell_pageid = doorbell_pg_id;
1027 
1028 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1029 	    sizeof(resp));
1030 	if (err) {
1031 		if_printf(apc->ndev, "Failed to configure vPort: %d\n", err);
1032 		goto out;
1033 	}
1034 
1035 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1036 	    sizeof(resp));
1037 	if (err || resp.hdr.status) {
1038 		if_printf(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1039 		    err, resp.hdr.status);
1040 		if (!err)
1041 			err = EPROTO;
1042 
1043 		goto out;
1044 	}
1045 
1046 	apc->tx_shortform_allowed = resp.short_form_allowed;
1047 	apc->tx_vp_offset = resp.tx_vport_offset;
1048 
1049 	if_printf(apc->ndev, "Configured vPort %ju PD %u DB %u\n",
1050 	    apc->port_handle, protection_dom_id, doorbell_pg_id);
1051 
1052 out:
1053 	if (err)
1054 		mana_uncfg_vport(apc);
1055 
1056 	return err;
1057 }
1058 
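/*
 * Configure RX steering for the vPort: the default RX object, the RSS
 * hash key and the indirection table, depending on the update_* flags.
 */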
1059 static int
1060 mana_cfg_vport_steering(struct mana_port_context *apc,
1061     enum TRI_STATE rx,
1062     bool update_default_rxobj, bool update_key,
1063     bool update_tab)
1064 {
1065 	uint16_t num_entries = MANA_INDIRECT_TABLE_SIZE;
1066 	struct mana_cfg_rx_steer_req *req = NULL;
1067 	struct mana_cfg_rx_steer_resp resp = {};
1068 	if_t ndev = apc->ndev;
1069 	mana_handle_t *req_indir_tab;
1070 	uint32_t req_buf_size;
1071 	int err;
1072 
1073 	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
1074 	req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
1075 	if (!req)
1076 		return ENOMEM;
1077 
1078 	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1079 	    sizeof(resp));
1080 
1081 	req->vport = apc->port_handle;
1082 	req->num_indir_entries = num_entries;
1083 	req->indir_tab_offset = sizeof(*req);
1084 	req->rx_enable = rx;
1085 	req->rss_enable = apc->rss_state;
1086 	req->update_default_rxobj = update_default_rxobj;
1087 	req->update_hashkey = update_key;
1088 	req->update_indir_tab = update_tab;
1089 	req->default_rxobj = apc->default_rxobj;
1090 
1091 	if (update_key)
1092 		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1093 
1094 	if (update_tab) {
1095 		req_indir_tab = (mana_handle_t *)(req + 1);
1096 		memcpy(req_indir_tab, apc->rxobj_table,
1097 		       req->num_indir_entries * sizeof(mana_handle_t));
1098 	}
1099 
1100 	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1101 	    sizeof(resp));
1102 	if (err) {
1103 		if_printf(ndev, "Failed to configure vPort RX: %d\n", err);
1104 		goto out;
1105 	}
1106 
1107 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1108 	    sizeof(resp));
1109 	if (err) {
1110 		if_printf(ndev, "vPort RX configuration failed: %d\n", err);
1111 		goto out;
1112 	}
1113 
1114 	if (resp.hdr.status) {
1115 		if_printf(ndev, "vPort RX configuration failed: 0x%x\n",
1116 		    resp.hdr.status);
1117 		err = EPROTO;
1118 	}
1119 
1120 	if_printf(ndev, "Configured steering vPort %ju entries %u\n",
1121 	    apc->port_handle, num_entries);
1122 
1123 out:
1124 	free(req, M_DEVBUF);
1125 	return err;
1126 }
1127 
1128 int
1129 mana_create_wq_obj(struct mana_port_context *apc,
1130     mana_handle_t vport,
1131     uint32_t wq_type, struct mana_obj_spec *wq_spec,
1132     struct mana_obj_spec *cq_spec,
1133     mana_handle_t *wq_obj)
1134 {
1135 	struct mana_create_wqobj_resp resp = {};
1136 	struct mana_create_wqobj_req req = {};
1137 	if_t ndev = apc->ndev;
1138 	int err;
1139 
1140 	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1141 	    sizeof(req), sizeof(resp));
1142 	req.vport = vport;
1143 	req.wq_type = wq_type;
1144 	req.wq_gdma_region = wq_spec->gdma_region;
1145 	req.cq_gdma_region = cq_spec->gdma_region;
1146 	req.wq_size = wq_spec->queue_size;
1147 	req.cq_size = cq_spec->queue_size;
1148 	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1149 	req.cq_parent_qid = cq_spec->attached_eq;
1150 
1151 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1152 	    sizeof(resp));
1153 	if (err) {
1154 		if_printf(ndev, "Failed to create WQ object: %d\n", err);
1155 		goto out;
1156 	}
1157 
1158 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1159 	    sizeof(resp));
1160 	if (err || resp.hdr.status) {
1161 		if_printf(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1162 		    resp.hdr.status);
1163 		if (!err)
1164 			err = EPROTO;
1165 		goto out;
1166 	}
1167 
1168 	if (resp.wq_obj == INVALID_MANA_HANDLE) {
1169 		if_printf(ndev, "Got an invalid WQ object handle\n");
1170 		err = EPROTO;
1171 		goto out;
1172 	}
1173 
1174 	*wq_obj = resp.wq_obj;
1175 	wq_spec->queue_index = resp.wq_id;
1176 	cq_spec->queue_index = resp.cq_id;
1177 
1178 	return 0;
1179 out:
1180 	return err;
1181 }
1182 
1183 void
1184 mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
1185     mana_handle_t wq_obj)
1186 {
1187 	struct mana_destroy_wqobj_resp resp = {};
1188 	struct mana_destroy_wqobj_req req = {};
1189 	if_t ndev = apc->ndev;
1190 	int err;
1191 
1192 	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1193 	    sizeof(req), sizeof(resp));
1194 	req.wq_type = wq_type;
1195 	req.wq_obj_handle = wq_obj;
1196 
1197 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1198 	    sizeof(resp));
1199 	if (err) {
1200 		if_printf(ndev, "Failed to destroy WQ object: %d\n", err);
1201 		return;
1202 	}
1203 
1204 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1205 	    sizeof(resp));
1206 	if (err || resp.hdr.status)
1207 		if_printf(ndev, "Failed to destroy WQ object: %d, 0x%x\n",
1208 		    err, resp.hdr.status);
1209 }
1210 
1211 static void
1212 mana_destroy_eq(struct mana_context *ac)
1213 {
1214 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
1215 	struct gdma_queue *eq;
1216 	int i;
1217 
1218 	if (!ac->eqs)
1219 		return;
1220 
1221 	for (i = 0; i < gc->max_num_queues; i++) {
1222 		eq = ac->eqs[i].eq;
1223 		if (!eq)
1224 			continue;
1225 
1226 		mana_gd_destroy_queue(gc, eq);
1227 	}
1228 
1229 	free(ac->eqs, M_DEVBUF);
1230 	ac->eqs = NULL;
1231 }
1232 
1233 static int
1234 mana_create_eq(struct mana_context *ac)
1235 {
1236 	struct gdma_dev *gd = ac->gdma_dev;
1237 	struct gdma_context *gc = gd->gdma_context;
1238 	struct gdma_queue_spec spec = {};
1239 	int err;
1240 	int i;
1241 
1242 	ac->eqs = mallocarray(gc->max_num_queues, sizeof(struct mana_eq),
1243 	    M_DEVBUF, M_WAITOK | M_ZERO);
1244 	if (!ac->eqs)
1245 		return ENOMEM;
1246 
1247 	spec.type = GDMA_EQ;
1248 	spec.monitor_avl_buf = false;
1249 	spec.queue_size = EQ_SIZE;
1250 	spec.eq.callback = NULL;
1251 	spec.eq.context = ac->eqs;
1252 	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1253 
1254 	for (i = 0; i < gc->max_num_queues; i++) {
1255 		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
1256 		if (err)
1257 			goto out;
1258 	}
1259 
1260 	return 0;
1261 out:
1262 	mana_destroy_eq(ac);
1263 	return err;
1264 }
1265 
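/*
 * Post a fence request (MANA_FENCE_RQ) for the RQ and wait up to 10
 * seconds for the matching CQE_RX_OBJECT_FENCE completion to arrive on
 * the RQ's completion queue.
 */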
1266 static int
1267 mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1268 {
1269 	struct mana_fence_rq_resp resp = {};
1270 	struct mana_fence_rq_req req = {};
1271 	int err;
1272 
1273 	init_completion(&rxq->fence_event);
1274 
1275 	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
1276 	    sizeof(req), sizeof(resp));
1277 	req.wq_obj_handle = rxq->rxobj;
1278 
1279 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1280 	    sizeof(resp));
1281 	if (err) {
1282 		if_printf(apc->ndev, "Failed to fence RQ %u: %d\n",
1283 		    rxq->rxq_idx, err);
1284 		return err;
1285 	}
1286 
1287 	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
1288 	if (err || resp.hdr.status) {
1289 		if_printf(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1290 		    rxq->rxq_idx, err, resp.hdr.status);
1291 		if (!err)
1292 			err = EPROTO;
1293 
1294 		return err;
1295 	}
1296 
1297 	if (wait_for_completion_timeout(&rxq->fence_event, 10 * hz)) {
1298 		if_printf(apc->ndev, "Failed to fence RQ %u: timed out\n",
1299 		    rxq->rxq_idx);
1300 		return ETIMEDOUT;
1301 	}
1302 
1303 	return 0;
1304 }
1305 
1306 static void
1307 mana_fence_rqs(struct mana_port_context *apc)
1308 {
1309 	unsigned int rxq_idx;
1310 	struct mana_rxq *rxq;
1311 	int err;
1312 
1313 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1314 		rxq = apc->rxqs[rxq_idx];
1315 		err = mana_fence_rq(apc, rxq);
1316 
1317 		/* If the fence failed, fall back to a short sleep. */
1318 		if (err)
1319 			gdma_msleep(100);
1320 	}
1321 }
1322 
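/*
 * Advance the work queue tail by num_units once the corresponding
 * completions have been processed, checking that the tail does not
 * move past the head.
 */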
1323 static int
1324 mana_move_wq_tail(struct gdma_queue *wq, uint32_t num_units)
1325 {
1326 	uint32_t used_space_old;
1327 	uint32_t used_space_new;
1328 
1329 	used_space_old = wq->head - wq->tail;
1330 	used_space_new = wq->head - (wq->tail + num_units);
1331 
1332 	if (used_space_new > used_space_old) {
1333 		mana_err(NULL,
1334 		    "WARNING: new used space %u greater than old one %u\n",
1335 		    used_space_new, used_space_old);
1336 		return ERANGE;
1337 	}
1338 
1339 	wq->tail += num_units;
1340 	return 0;
1341 }
1342 
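/*
 * Process TX completions: unmap and free the transmitted mbufs, advance
 * the SQ tail, and clear IFF_DRV_OACTIVE (rescheduling the enqueue task)
 * once the SQ has room for another maximum-sized WQE.
 */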
1343 static void
1344 mana_poll_tx_cq(struct mana_cq *cq)
1345 {
1346 	struct gdma_comp *completions = cq->gdma_comp_buf;
1347 	struct gdma_posted_wqe_info *wqe_info;
1348 	struct mana_send_buf_info *tx_info;
1349 	unsigned int pkt_transmitted = 0;
1350 	unsigned int wqe_unit_cnt = 0;
1351 	struct mana_txq *txq = cq->txq;
1352 	struct mana_port_context *apc;
1353 	uint16_t next_to_complete;
1354 	if_t ndev;
1355 	int comp_read;
1356 	int txq_idx = txq->idx;
1357 	int i;
1358 	int sa_drop = 0;
1359 
1360 	struct gdma_queue *gdma_wq;
1361 	unsigned int avail_space;
1362 	bool txq_full = false;
1363 
1364 	ndev = txq->ndev;
1365 	apc = if_getsoftc(ndev);
1366 
1367 	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1368 	    CQE_POLLING_BUFFER);
1369 
1370 	if (comp_read < 1)
1371 		return;
1372 
1373 	next_to_complete = txq->next_to_complete;
1374 
1375 	for (i = 0; i < comp_read; i++) {
1376 		struct mana_tx_comp_oob *cqe_oob;
1377 
1378 		if (!completions[i].is_sq) {
1379 			mana_err(NULL, "WARNING: Not for SQ\n");
1380 			return;
1381 		}
1382 
1383 		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1384 		if (cqe_oob->cqe_hdr.client_type !=
1385 				 MANA_CQE_COMPLETION) {
1386 			mana_err(NULL,
1387 			    "WARNING: Invalid CQE client type %u\n",
1388 			    cqe_oob->cqe_hdr.client_type);
1389 			return;
1390 		}
1391 
1392 		switch (cqe_oob->cqe_hdr.cqe_type) {
1393 		case CQE_TX_OKAY:
1394 			break;
1395 
1396 		case CQE_TX_SA_DROP:
1397 		case CQE_TX_MTU_DROP:
1398 		case CQE_TX_INVALID_OOB:
1399 		case CQE_TX_INVALID_ETH_TYPE:
1400 		case CQE_TX_HDR_PROCESSING_ERROR:
1401 		case CQE_TX_VF_DISABLED:
1402 		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1403 		case CQE_TX_VPORT_DISABLED:
1404 		case CQE_TX_VLAN_TAGGING_VIOLATION:
1405 			sa_drop++;
1406 			mana_err(NULL,
1407 			    "TX: txq %d CQE error %d, ntc = %d, "
1408 			    "pending sends = %d: err ignored.\n",
1409 			    txq_idx, cqe_oob->cqe_hdr.cqe_type,
1410 			    next_to_complete, txq->pending_sends);
1411 			break;
1412 
1413 		default:
1414 			/* If the CQE type is unexpected, log an error,
1415 			 * and go through the error path.
1416 			 */
1417 			mana_err(NULL,
1418 			    "ERROR: TX: Unexpected CQE type %d: HW BUG?\n",
1419 			    cqe_oob->cqe_hdr.cqe_type);
1420 			return;
1421 		}
1422 		if (txq->gdma_txq_id != completions[i].wq_num) {
1423 			mana_dbg(NULL,
1424 			    "txq gdma id does not match completion wq num: "
1425 			    "%d != %d\n",
1426 			    txq->gdma_txq_id, completions[i].wq_num);
1427 			break;
1428 		}
1429 
1430 		tx_info = &txq->tx_buf_info[next_to_complete];
1431 		if (!tx_info->mbuf) {
1432 			mana_err(NULL,
1433 			    "WARNING: txq %d Empty mbuf on tx_info: %u, "
1434 			    "ntu = %u, pending_sends = %d, "
1435 			    "transmitted = %d, sa_drop = %d, i = %d, comp_read = %d\n",
1436 			    txq_idx, next_to_complete, txq->next_to_use,
1437 			    txq->pending_sends, pkt_transmitted, sa_drop,
1438 			    i, comp_read);
1439 			break;
1440 		}
1441 
1442 		wqe_info = &tx_info->wqe_inf;
1443 		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1444 
1445 		mana_tx_unmap_mbuf(apc, tx_info);
1446 		mb();
1447 
1448 		next_to_complete =
1449 		    (next_to_complete + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
1450 
1451 		pkt_transmitted++;
1452 	}
1453 
1454 	txq->next_to_complete = next_to_complete;
1455 
1456 	if (wqe_unit_cnt == 0) {
1457 		mana_err(NULL,
1458 		    "WARNING: TX ring not proceeding!\n");
1459 		return;
1460 	}
1461 
1462 	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1463 
1464 	/* Ensure tail updated before checking q stop */
1465 	wmb();
1466 
1467 	gdma_wq = txq->gdma_sq;
1468 	avail_space = mana_gd_wq_avail_space(gdma_wq);
1469 
1471 	if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL) {
1472 		txq_full = true;
1473 	}
1474 
1475 	/* Ensure checking txq_full before apc->port_is_up. */
1476 	rmb();
1477 
1478 	if (txq_full && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1479 		/* Grab the txq lock and re-test */
1480 		mtx_lock(&txq->txq_mtx);
1481 		avail_space = mana_gd_wq_avail_space(gdma_wq);
1482 
1483 		if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL &&
1484 		    apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1485 			/* Clear the Q full flag */
1486 			if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING,
1487 			    IFF_DRV_OACTIVE);
1488 			counter_u64_add(txq->stats.wakeup, 1);
1489 			if (txq->alt_txq_idx != txq->idx) {
1490 				uint64_t stops = counter_u64_fetch(txq->stats.stop);
1491 				uint64_t wakeups = counter_u64_fetch(txq->stats.wakeup);
1492 				/* Reset alt_txq_idx back if it is not overloaded */
1493 				if (stops < wakeups) {
1494 					txq->alt_txq_idx = txq->idx;
1495 					counter_u64_add(txq->stats.alt_reset, 1);
1496 				}
1497 			}
1498 			rmb();
1499 			/* Schedule a tx enqueue task */
1500 			taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
1501 		}
1502 		mtx_unlock(&txq->txq_mtx);
1503 	}
1504 
1505 	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1506 		mana_err(NULL,
1507 		    "WARNING: TX %d pending_sends error: %d\n",
1508 		    txq->idx, txq->pending_sends);
1509 
1510 	cq->work_done = pkt_transmitted;
1511 }
1512 
1513 static void
1514 mana_post_pkt_rxq(struct mana_rxq *rxq)
1515 {
1516 	struct mana_recv_buf_oob *recv_buf_oob;
1517 	uint32_t curr_index;
1518 	int err;
1519 
1520 	curr_index = rxq->buf_index++;
1521 	if (rxq->buf_index == rxq->num_rx_buf)
1522 		rxq->buf_index = 0;
1523 
1524 	recv_buf_oob = &rxq->rx_oobs[curr_index];
1525 
1526 	err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1527 	    &recv_buf_oob->wqe_inf);
1528 	if (err) {
1529 		mana_err(NULL, "WARNING: rxq %u post pkt err %d\n",
1530 		    rxq->rxq_idx, err);
1531 		return;
1532 	}
1533 
1534 	if (recv_buf_oob->wqe_inf.wqe_size_in_bu != 1) {
1535 		mana_err(NULL, "WARNING: rxq %u wqe_size_in_bu %u\n",
1536 		    rxq->rxq_idx, recv_buf_oob->wqe_inf.wqe_size_in_bu);
1537 	}
1538 }
1539 
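/*
 * Deliver a received mbuf to the network stack: set up the packet
 * header, the checksum results and the RSS hash type from the
 * completion OOB, hand the mbuf to LRO or if_input(), and update the
 * RX counters.
 */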
1540 static void
1541 mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe,
1542     struct mana_rxq *rxq)
1543 {
1544 	struct mana_stats *rx_stats = &rxq->stats;
1545 	if_t ndev = rxq->ndev;
1546 	uint32_t pkt_len = cqe->ppi[0].pkt_len;
1547 	uint16_t rxq_idx = rxq->rxq_idx;
1548 	struct mana_port_context *apc;
1549 	bool do_lro = false;
1550 	bool do_if_input;
1551 
1552 	apc = if_getsoftc(ndev);
1553 	rxq->rx_cq.work_done++;
1554 
1555 	if (!mbuf) {
1556 		return;
1557 	}
1558 
1559 	mbuf->m_flags |= M_PKTHDR;
1560 	mbuf->m_pkthdr.len = pkt_len;
1561 	mbuf->m_len = pkt_len;
1562 	mbuf->m_pkthdr.rcvif = ndev;
1563 
1564 	if ((if_getcapenable(ndev) & IFCAP_RXCSUM ||
1565 	    if_getcapenable(ndev) & IFCAP_RXCSUM_IPV6) &&
1566 	    (cqe->rx_iphdr_csum_succeed)) {
1567 		mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1568 		mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1569 		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) {
1570 			mbuf->m_pkthdr.csum_flags |=
1571 			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1572 			mbuf->m_pkthdr.csum_data = 0xffff;
1573 
1574 			if (cqe->rx_tcp_csum_succeed)
1575 				do_lro = true;
1576 		}
1577 	}
1578 
1579 	if (cqe->rx_hashtype != 0) {
1580 		mbuf->m_pkthdr.flowid = cqe->ppi[0].pkt_hash;
1581 
1582 		uint16_t hashtype = cqe->rx_hashtype;
1583 		if (hashtype & NDIS_HASH_IPV4_MASK) {
1584 			hashtype &= NDIS_HASH_IPV4_MASK;
1585 			switch (hashtype) {
1586 			case NDIS_HASH_TCP_IPV4:
1587 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
1588 				break;
1589 			case NDIS_HASH_UDP_IPV4:
1590 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
1591 				break;
1592 			default:
1593 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
1594 			}
1595 		} else if (hashtype & NDIS_HASH_IPV6_MASK) {
1596 			hashtype &= NDIS_HASH_IPV6_MASK;
1597 			switch (hashtype) {
1598 			case NDIS_HASH_TCP_IPV6:
1599 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
1600 				break;
1601 			case NDIS_HASH_TCP_IPV6_EX:
1602 				M_HASHTYPE_SET(mbuf,
1603 				    M_HASHTYPE_RSS_TCP_IPV6_EX);
1604 				break;
1605 			case NDIS_HASH_UDP_IPV6:
1606 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
1607 				break;
1608 			case NDIS_HASH_UDP_IPV6_EX:
1609 				M_HASHTYPE_SET(mbuf,
1610 				    M_HASHTYPE_RSS_UDP_IPV6_EX);
1611 				break;
1612 			default:
1613 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
1614 			}
1615 		} else {
1616 			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
1617 		}
1618 	} else {
1619 		mbuf->m_pkthdr.flowid = rxq_idx;
1620 		M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
1621 	}
1622 
1623 	do_if_input = true;
1624 	if ((if_getcapenable(ndev) & IFCAP_LRO) && do_lro) {
1625 		if (rxq->lro.lro_cnt != 0 &&
1626 		    tcp_lro_rx(&rxq->lro, mbuf, 0) == 0)
1627 			do_if_input = false;
1628 	}
1629 	if (do_if_input) {
1630 		if_input(ndev, mbuf);
1631 	}
1632 
1633 	counter_enter();
1634 	counter_u64_add_protected(rx_stats->packets, 1);
1635 	counter_u64_add_protected(apc->port_stats.rx_packets, 1);
1636 	counter_u64_add_protected(rx_stats->bytes, pkt_len);
1637 	counter_u64_add_protected(apc->port_stats.rx_bytes, pkt_len);
1638 	counter_exit();
1639 }
1640 
1641 static void
1642 mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1643     struct gdma_comp *cqe)
1644 {
1645 	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1646 	struct mana_recv_buf_oob *rxbuf_oob;
1647 	if_t ndev = rxq->ndev;
1648 	struct mana_port_context *apc;
1649 	struct mbuf *old_mbuf;
1650 	uint32_t curr, pktlen;
1651 	int err;
1652 
1653 	switch (oob->cqe_hdr.cqe_type) {
1654 	case CQE_RX_OKAY:
1655 		break;
1656 
1657 	case CQE_RX_TRUNCATED:
1658 		apc = if_getsoftc(ndev);
1659 		counter_u64_add(apc->port_stats.rx_drops, 1);
1660 		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1661 		if_printf(ndev, "Dropped a truncated packet\n");
1662 		goto drop;
1663 
1664 	case CQE_RX_COALESCED_4:
1665 		if_printf(ndev, "RX coalescing is unsupported\n");
1666 		return;
1667 
1668 	case CQE_RX_OBJECT_FENCE:
1669 		complete(&rxq->fence_event);
1670 		return;
1671 
1672 	default:
1673 		if_printf(ndev, "Unknown RX CQE type = %d\n",
1674 		    oob->cqe_hdr.cqe_type);
1675 		return;
1676 	}
1677 
1678 	if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
1679 		return;
1680 
1681 	pktlen = oob->ppi[0].pkt_len;
1682 
1683 	if (pktlen == 0) {
1684 		/* Data packets should never have a packet length of zero. */
1685 		if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%jx\n",
1686 		    rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1687 		return;
1688 	}
1689 
1690 	curr = rxq->buf_index;
1691 	rxbuf_oob = &rxq->rx_oobs[curr];
1692 	if (rxbuf_oob->wqe_inf.wqe_size_in_bu != 1) {
1693 		mana_err(NULL, "WARNING: RX completion has incorrect "
1694 		    "WQE size %u\n",
1695 		    rxbuf_oob->wqe_inf.wqe_size_in_bu);
1696 	}
1697 
1698 	apc = if_getsoftc(ndev);
1699 
1700 	old_mbuf = rxbuf_oob->mbuf;
1701 
1702 	/* Unload DMA map for the old mbuf */
1703 	mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);
1704 
1705 	/* Load a new mbuf to replace the old one */
1706 	err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
1707 	if (err) {
1708 		mana_dbg(NULL,
1709 		    "failed to load rx mbuf, err = %d, packet dropped.\n",
1710 		    err);
1711 		counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
1712 		/*
1713 		 * Failed to load new mbuf, rxbuf_oob->mbuf is still
1714 		 * pointing to the old one. Drop the packet.
1715 		 */
1716 		 old_mbuf = NULL;
1717 		 /* Reload the existing mbuf */
1718 		 mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
1719 	}
1720 
1721 	mana_rx_mbuf(old_mbuf, oob, rxq);
1722 
1723 drop:
1724 	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1725 
1726 	mana_post_pkt_rxq(rxq);
1727 }
1728 
1729 static void
1730 mana_poll_rx_cq(struct mana_cq *cq)
1731 {
1732 	struct gdma_comp *comp = cq->gdma_comp_buf;
1733 	int comp_read, i;
1734 
1735 	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1736 	KASSERT(comp_read <= CQE_POLLING_BUFFER,
1737 	    ("comp_read %d greater than buf size %d",
1738 	    comp_read, CQE_POLLING_BUFFER));
1739 
1740 	for (i = 0; i < comp_read; i++) {
1741 		if (comp[i].is_sq == true) {
1742 			mana_err(NULL,
1743 			    "WARNING: CQE not for receive queue\n");
1744 			return;
1745 		}
1746 
1747 		/* verify recv cqe references the right rxq */
1748 		if (comp[i].wq_num != cq->rxq->gdma_id) {
1749 			mana_err(NULL,
1750 			    "WARNING: Received CQE %d not for "
1751 			    "this receive queue %d\n",
1752 			    comp[i].wq_num, cq->rxq->gdma_id);
1753 			return;
1754 		}
1755 
1756 		mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
1757 	}
1758 
1759 	if (comp_read > 0) {
1760 		struct gdma_context *gc =
1761 		    cq->rxq->gdma_rq->gdma_dev->gdma_context;
1762 
1763 		mana_gd_wq_ring_doorbell(gc, cq->rxq->gdma_rq);
1764 	}
1765 
1766 	tcp_lro_flush_all(&cq->rxq->lro);
1767 }
1768 
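/*
 * Handle completions on one CQ: poll the RX or TX completions, then
 * re-arm the CQ unless the budget was exhausted or ringing the doorbell
 * is suppressed.
 */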
1769 static void
1770 mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1771 {
1772 	struct mana_cq *cq = context;
1773 	uint8_t arm_bit;
1774 
1775 	KASSERT(cq->gdma_cq == gdma_queue,
1776 	    ("cq do not match %p, %p", cq->gdma_cq, gdma_queue));
1777 
1778 	if (cq->type == MANA_CQ_TYPE_RX) {
1779 		mana_poll_rx_cq(cq);
1780 	} else {
1781 		mana_poll_tx_cq(cq);
1782 	}
1783 
1784 	if (cq->work_done < cq->budget && cq->do_not_ring_db == false)
1785 		arm_bit = SET_ARM_BIT;
1786 	else
1787 		arm_bit = 0;
1788 
1789 	mana_gd_ring_cq(gdma_queue, arm_bit);
1790 }
1791 
1792 #define MANA_POLL_BUDGET	8
1793 #define MANA_RX_BUDGET		256
1794 #define MANA_TX_BUDGET		MAX_SEND_BUFFERS_PER_QUEUE
1795 
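/*
 * Cleanup taskqueue handler: call mana_cq_handler() repeatedly until it
 * runs out of work or MANA_POLL_BUDGET iterations have been done. The
 * last iteration uses an oversized budget so that the CQ gets re-armed.
 */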
1796 static void
1797 mana_poll(void *arg, int pending)
1798 {
1799 	struct mana_cq *cq = arg;
1800 	int i;
1801 
1802 	cq->work_done = 0;
1803 	if (cq->type == MANA_CQ_TYPE_RX) {
1804 		cq->budget = MANA_RX_BUDGET;
1805 	} else {
1806 		cq->budget = MANA_TX_BUDGET;
1807 	}
1808 
1809 	for (i = 0; i < MANA_POLL_BUDGET; i++) {
1810 		/*
1811 		 * If this is the last loop, set the budget big enough
1812 		 * so it will arm the CQ anyway.
1813 		 */
1814 		if (i == (MANA_POLL_BUDGET - 1))
1815 			cq->budget = CQE_POLLING_BUFFER + 1;
1816 
1817 		mana_cq_handler(cq, cq->gdma_cq);
1818 
1819 		if (cq->work_done < cq->budget)
1820 			break;
1821 
1822 		cq->work_done = 0;
1823 	}
1824 }
1825 
1826 static void
1827 mana_schedule_task(void *arg, struct gdma_queue *gdma_queue)
1828 {
1829 	struct mana_cq *cq = arg;
1830 
1831 	taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
1832 }
1833 
1834 static void
1835 mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1836 {
1837 	struct gdma_dev *gd = apc->ac->gdma_dev;
1838 
1839 	if (!cq->gdma_cq)
1840 		return;
1841 
1842 	/* Drain cleanup taskqueue */
1843 	if (cq->cleanup_tq) {
1844 		while (taskqueue_cancel(cq->cleanup_tq,
1845 		    &cq->cleanup_task, NULL)) {
1846 			taskqueue_drain(cq->cleanup_tq,
1847 			    &cq->cleanup_task);
1848 		}
1849 
1850 		taskqueue_free(cq->cleanup_tq);
1851 	}
1852 
1853 	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1854 }
1855 
1856 static void
1857 mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1858 {
1859 	struct gdma_dev *gd = apc->ac->gdma_dev;
1860 	struct mana_send_buf_info *txbuf_info;
1861 	uint32_t pending_sends;
1862 	int i;
1863 
1864 	if (!txq->gdma_sq)
1865 		return;
1866 
1867 	if ((pending_sends = atomic_read(&txq->pending_sends)) > 0) {
1868 		mana_err(NULL,
1869 		    "WARNING: txq pending sends not zero: %u\n",
1870 		    pending_sends);
1871 	}
1872 
1873 	if (txq->next_to_use != txq->next_to_complete) {
1874 		mana_err(NULL,
1875 		    "WARNING: txq buf not completed, "
1876 		    "next use %u, next complete %u\n",
1877 		    txq->next_to_use, txq->next_to_complete);
1878 	}
1879 
1880 	/* Flush buf ring. Grab txq mtx lock */
1881 	if (txq->txq_br) {
1882 		mtx_lock(&txq->txq_mtx);
1883 		drbr_flush(apc->ndev, txq->txq_br);
1884 		mtx_unlock(&txq->txq_mtx);
1885 		buf_ring_free(txq->txq_br, M_DEVBUF);
1886 	}
1887 
1888 	/* Drain taskqueue */
1889 	if (txq->enqueue_tq) {
1890 		while (taskqueue_cancel(txq->enqueue_tq,
1891 		    &txq->enqueue_task, NULL)) {
1892 			taskqueue_drain(txq->enqueue_tq,
1893 			    &txq->enqueue_task);
1894 		}
1895 
1896 		taskqueue_free(txq->enqueue_tq);
1897 	}
1898 
1899 	if (txq->tx_buf_info) {
1900 		/* Free all mbufs which are still in-flight */
1901 		for (i = 0; i < MAX_SEND_BUFFERS_PER_QUEUE; i++) {
1902 			txbuf_info = &txq->tx_buf_info[i];
1903 			if (txbuf_info->mbuf) {
1904 				mana_tx_unmap_mbuf(apc, txbuf_info);
1905 			}
1906 		}
1907 
1908 		free(txq->tx_buf_info, M_DEVBUF);
1909 	}
1910 
1911 	mana_free_counters((counter_u64_t *)&txq->stats,
1912 	    sizeof(txq->stats));
1913 
1914 	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1915 
1916 	mtx_destroy(&txq->txq_mtx);
1917 }
1918 
1919 static void
1920 mana_destroy_txq(struct mana_port_context *apc)
1921 {
1922 	int i;
1923 
1924 	if (!apc->tx_qp)
1925 		return;
1926 
1927 	for (i = 0; i < apc->num_queues; i++) {
1928 		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1929 
1930 		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1931 
1932 		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1933 	}
1934 
1935 	free(apc->tx_qp, M_DEVBUF);
1936 	apc->tx_qp = NULL;
1937 }
1938 
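/*
 * For each TX queue, create the GDMA SQ and its completion queue,
 * create the hardware WQ object that binds them to the vPort, and
 * register the CQ in the GDMA context's cq_table.
 */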
1939 static int
1940 mana_create_txq(struct mana_port_context *apc, if_t net)
1941 {
1942 	struct mana_context *ac = apc->ac;
1943 	struct gdma_dev *gd = ac->gdma_dev;
1944 	struct mana_obj_spec wq_spec;
1945 	struct mana_obj_spec cq_spec;
1946 	struct gdma_queue_spec spec;
1947 	struct gdma_context *gc;
1948 	struct mana_txq *txq;
1949 	struct mana_cq *cq;
1950 	uint32_t txq_size;
1951 	uint32_t cq_size;
1952 	int err;
1953 	int i;
1954 
1955 	apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
1956 	    M_DEVBUF, M_WAITOK | M_ZERO);
1957 	if (!apc->tx_qp)
1958 		return ENOMEM;
1959 
1960 	/*
1961 	 * The minimum WQE size is 32 bytes, so MAX_SEND_BUFFERS_PER_QUEUE
1962 	 * is the maximum number of WQEs the SQ can hold. This value is
1963 	 * then used to size the other queues to prevent overflow.
1964 	 */
1965 	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1966 	KASSERT(IS_ALIGNED(txq_size, PAGE_SIZE),
1967 	    ("txq size not page aligned"));
1968 
1969 	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1970 	cq_size = ALIGN(cq_size, PAGE_SIZE);
1971 
1972 	gc = gd->gdma_context;
1973 
1974 	for (i = 0; i < apc->num_queues; i++) {
1975 		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1976 
1977 		/* Create SQ */
1978 		txq = &apc->tx_qp[i].txq;
1979 
1980 		txq->ndev = net;
1981 		txq->vp_offset = apc->tx_vp_offset;
1982 		txq->idx = i;
1983 		txq->alt_txq_idx = i;
1984 
1985 		memset(&spec, 0, sizeof(spec));
1986 		spec.type = GDMA_SQ;
1987 		spec.monitor_avl_buf = true;
1988 		spec.queue_size = txq_size;
1989 		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1990 		if (err)
1991 			goto out;
1992 
1993 		/* Create SQ's CQ */
1994 		cq = &apc->tx_qp[i].tx_cq;
1995 		cq->type = MANA_CQ_TYPE_TX;
1996 
1997 		cq->txq = txq;
1998 
1999 		memset(&spec, 0, sizeof(spec));
2000 		spec.type = GDMA_CQ;
2001 		spec.monitor_avl_buf = false;
2002 		spec.queue_size = cq_size;
2003 		spec.cq.callback = mana_schedule_task;
2004 		spec.cq.parent_eq = ac->eqs[i].eq;
2005 		spec.cq.context = cq;
2006 		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2007 		if (err)
2008 			goto out;
2009 
2010 		memset(&wq_spec, 0, sizeof(wq_spec));
2011 		memset(&cq_spec, 0, sizeof(cq_spec));
2012 
2013 		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
2014 		wq_spec.queue_size = txq->gdma_sq->queue_size;
2015 
2016 		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2017 		cq_spec.queue_size = cq->gdma_cq->queue_size;
2018 		cq_spec.modr_ctx_id = 0;
2019 		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2020 
2021 		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
2022 		    &wq_spec, &cq_spec, &apc->tx_qp[i].tx_object);
2023 
2024 		if (err)
2025 			goto out;
2026 
2027 		txq->gdma_sq->id = wq_spec.queue_index;
2028 		cq->gdma_cq->id = cq_spec.queue_index;
2029 
2030 		txq->gdma_sq->mem_info.dma_region_handle =
2031 		    GDMA_INVALID_DMA_REGION;
2032 		cq->gdma_cq->mem_info.dma_region_handle =
2033 		    GDMA_INVALID_DMA_REGION;
2034 
2035 		txq->gdma_txq_id = txq->gdma_sq->id;
2036 
2037 		cq->gdma_id = cq->gdma_cq->id;
2038 
2039 		mana_dbg(NULL,
2040 		    "txq %d, txq gdma id %d, txq cq gdma id %d\n",
2041 		    i, txq->gdma_txq_id, cq->gdma_id);
2042 
2043 		if (cq->gdma_id >= gc->max_num_cqs) {
2044 			if_printf(net, "CQ id %u too large.\n", cq->gdma_id);
2045 			err = EINVAL;
2046 			goto out;
2047 		}
2048 
2049 		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2050 
2051 		/* Initialize tx specific data */
2052 		txq->tx_buf_info = malloc(MAX_SEND_BUFFERS_PER_QUEUE *
2053 		    sizeof(struct mana_send_buf_info),
2054 		    M_DEVBUF, M_WAITOK | M_ZERO);
2055 		if (unlikely(txq->tx_buf_info == NULL)) {
2056 			if_printf(net,
2057 			    "Failed to allocate tx buf info for SQ %u\n",
2058 			    txq->gdma_sq->id);
2059 			err = ENOMEM;
2060 			goto out;
2061 		}
2062 
2063 
2064 		snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
2065 		    "mana:tx(%d)", i);
2066 		mtx_init(&txq->txq_mtx, txq->txq_mtx_name, NULL, MTX_DEF);
2067 
2068 		txq->txq_br = buf_ring_alloc(4 * MAX_SEND_BUFFERS_PER_QUEUE,
2069 		    M_DEVBUF, M_WAITOK, &txq->txq_mtx);
2070 		if (unlikely(txq->txq_br == NULL)) {
2071 			if_printf(net,
2072 			    "Failed to allocate buf ring for SQ %u\n",
2073 			    txq->gdma_sq->id);
2074 			err = ENOMEM;
2075 			goto out;
2076 		}
2077 
2078 		/* Allocate taskqueue for deferred send */
2079 		TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
2080 		txq->enqueue_tq = taskqueue_create_fast("mana_tx_enque",
2081 		    M_NOWAIT, taskqueue_thread_enqueue, &txq->enqueue_tq);
2082 		if (unlikely(txq->enqueue_tq == NULL)) {
2083 			if_printf(net,
2084 			    "Unable to create tx %d enqueue task queue\n", i);
2085 			err = ENOMEM;
2086 			goto out;
2087 		}
2088 		taskqueue_start_threads(&txq->enqueue_tq, 1, PI_NET,
2089 		    "mana txq p%u-tx%d", apc->port_idx, i);
2090 
2091 		mana_alloc_counters((counter_u64_t *)&txq->stats,
2092 		    sizeof(txq->stats));
2093 
2094 		/* Allocate and start the cleanup task on CQ */
2095 		cq->do_not_ring_db = false;
2096 
2097 		NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
2098 		cq->cleanup_tq =
2099 		    taskqueue_create_fast("mana tx cq cleanup",
2100 		    M_WAITOK, taskqueue_thread_enqueue,
2101 		    &cq->cleanup_tq);
2102 
2103 		if (apc->last_tx_cq_bind_cpu < 0)
2104 			apc->last_tx_cq_bind_cpu = CPU_FIRST();
2105 		cq->cpu = apc->last_tx_cq_bind_cpu;
2106 		apc->last_tx_cq_bind_cpu = CPU_NEXT(apc->last_tx_cq_bind_cpu);
2107 
2108 		if (apc->bind_cleanup_thread_cpu) {
2109 			cpuset_t cpu_mask;
2110 			CPU_SETOF(cq->cpu, &cpu_mask);
2111 			taskqueue_start_threads_cpuset(&cq->cleanup_tq,
2112 			    1, PI_NET, &cpu_mask,
2113 			    "mana cq p%u-tx%u-cpu%d",
2114 			    apc->port_idx, txq->idx, cq->cpu);
2115 		} else {
2116 			taskqueue_start_threads(&cq->cleanup_tq, 1,
2117 			    PI_NET, "mana cq p%u-tx%u",
2118 			    apc->port_idx, txq->idx);
2119 		}
2120 
2121 		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2122 	}
2123 
2124 	return 0;
2125 out:
2126 	mana_destroy_txq(apc);
2127 	return err;
2128 }
2129 
2130 static void
2131 mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq,
2132     bool validate_state)
2133 {
2134 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2135 	struct mana_recv_buf_oob *rx_oob;
2136 	int i;
2137 
2138 	if (!rxq)
2139 		return;
2140 
2141 	if (validate_state) {
2142 		/*
2143 		 * XXX Cancel and drain cleanup task queue here.
2144 		 */
2145 		;
2146 	}
2147 
2148 	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2149 
2150 	mana_deinit_cq(apc, &rxq->rx_cq);
2151 
2152 	mana_free_counters((counter_u64_t *)&rxq->stats,
2153 	    sizeof(rxq->stats));
2154 
2155 	/* Free LRO resources */
2156 	tcp_lro_free(&rxq->lro);
2157 
2158 	for (i = 0; i < rxq->num_rx_buf; i++) {
2159 		rx_oob = &rxq->rx_oobs[i];
2160 
2161 		if (rx_oob->mbuf)
2162 			mana_unload_rx_mbuf(apc, rxq, rx_oob, true);
2163 
2164 		bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2165 	}
2166 
2167 	if (rxq->gdma_rq)
2168 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
2169 
2170 	free(rxq, M_DEVBUF);
2171 }
2172 
2173 #define MANA_WQE_HEADER_SIZE 16
2174 #define MANA_WQE_SGE_SIZE 16
2175 
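/*
 * Prepare one receive WQE per RX buffer: create a DMA map, load an mbuf
 * into it, and fill in the wqe_req that mana_push_wqe() posts later.
 * On return, *rxq_size and *cq_size hold the queue sizes (before page
 * alignment) needed to hold all of the WQEs and their completions.
 */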
2176 static int
2177 mana_alloc_rx_wqe(struct mana_port_context *apc,
2178     struct mana_rxq *rxq, uint32_t *rxq_size, uint32_t *cq_size)
2179 {
2180 	struct mana_recv_buf_oob *rx_oob;
2181 	uint32_t buf_idx;
2182 	int err;
2183 
2184 	if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) {
2185 		mana_err(NULL,
2186 		    "WARNING: Invalid rxq datasize %u\n", rxq->datasize);
2187 	}
2188 
2189 	*rxq_size = 0;
2190 	*cq_size = 0;
2191 
2192 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2193 		rx_oob = &rxq->rx_oobs[buf_idx];
2194 		memset(rx_oob, 0, sizeof(*rx_oob));
2195 
2196 		err = bus_dmamap_create(apc->rx_buf_tag, 0,
2197 		    &rx_oob->dma_map);
2198 		if (err) {
2199 			mana_err(NULL,
2200 			    "Failed to create rx DMA map for buf %d\n",
2201 			    buf_idx);
2202 			return err;
2203 		}
2204 
2205 		err = mana_load_rx_mbuf(apc, rxq, rx_oob, true);
2206 		if (err) {
2207 			mana_err(NULL,
2208 			    "Failed to load rx mbuf for buf %d\n",
2209 			    buf_idx);
2210 			bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2211 			return err;
2212 		}
2213 
2214 		rx_oob->wqe_req.sgl = rx_oob->sgl;
2215 		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2216 		rx_oob->wqe_req.inline_oob_size = 0;
2217 		rx_oob->wqe_req.inline_oob_data = NULL;
2218 		rx_oob->wqe_req.flags = 0;
2219 		rx_oob->wqe_req.client_data_unit = 0;
2220 
2221 		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2222 				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2223 		*cq_size += COMP_ENTRY_SIZE;
2224 	}
2225 
2226 	return 0;
2227 }
2228 
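/*
 * Post every prepared receive WQE to the GDMA RQ, ringing the doorbell so
 * the buffers become available to the hardware.
 */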
2229 static int
2230 mana_push_wqe(struct mana_rxq *rxq)
2231 {
2232 	struct mana_recv_buf_oob *rx_oob;
2233 	uint32_t buf_idx;
2234 	int err;
2235 
2236 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2237 		rx_oob = &rxq->rx_oobs[buf_idx];
2238 
2239 		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2240 		    &rx_oob->wqe_inf);
2241 		if (err)
2242 			return ENOSPC;
2243 	}
2244 
2245 	return 0;
2246 }
2247 
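/*
 * Create a single RX queue and its completion queue: allocate the rxq and
 * its receive buffers, optionally set up LRO, create the GDMA RQ/CQ pair,
 * bind them to a hardware WQ object on the vPort, post the receive WQEs,
 * register the CQ in gc->cq_table, start the CQ cleanup taskqueue
 * (optionally pinned to a CPU), and arm the CQ.  Returns NULL on failure
 * after undoing any partial setup.
 */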
2248 static struct mana_rxq *
2249 mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
2250     struct mana_eq *eq, if_t ndev)
2251 {
2252 	struct gdma_dev *gd = apc->ac->gdma_dev;
2253 	struct mana_obj_spec wq_spec;
2254 	struct mana_obj_spec cq_spec;
2255 	struct gdma_queue_spec spec;
2256 	struct mana_cq *cq = NULL;
2257 	uint32_t cq_size, rq_size;
2258 	struct gdma_context *gc;
2259 	struct mana_rxq *rxq;
2260 	int err;
2261 
2262 	gc = gd->gdma_context;
2263 
2264 	rxq = malloc(sizeof(*rxq) +
2265 	    RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
2266 	    M_DEVBUF, M_WAITOK | M_ZERO);
2267 	if (!rxq)
2268 		return NULL;
2269 
2270 	rxq->ndev = ndev;
2271 	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
2272 	rxq->rxq_idx = rxq_idx;
2273 	/*
2274 	 * The receive buffer size is at least MCLBYTES (2048), one mbuf
2275 	 * cluster, and is capped at MAX_FRAME_SIZE (4096) below.
2276 	 */
2277 	rxq->datasize = ALIGN(apc->frame_size, MCLBYTES);
2278 	if (rxq->datasize > MAX_FRAME_SIZE)
2279 		rxq->datasize = MAX_FRAME_SIZE;
2280 
2281 	mana_dbg(NULL, "Setting rxq %d datasize %d\n",
2282 	    rxq_idx, rxq->datasize);
2283 
2284 	rxq->rxobj = INVALID_MANA_HANDLE;
2285 
2286 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2287 	if (err)
2288 		goto out;
2289 
2290 	/* Create LRO for the RQ */
2291 	if (if_getcapenable(ndev) & IFCAP_LRO) {
2292 		err = tcp_lro_init(&rxq->lro);
2293 		if (err) {
2294 			if_printf(ndev, "Failed to create LRO for rxq %d\n",
2295 			    rxq_idx);
2296 		} else {
2297 			rxq->lro.ifp = ndev;
2298 		}
2299 	}
2300 
2301 	mana_alloc_counters((counter_u64_t *)&rxq->stats,
2302 	    sizeof(rxq->stats));
2303 
2304 	rq_size = ALIGN(rq_size, PAGE_SIZE);
2305 	cq_size = ALIGN(cq_size, PAGE_SIZE);
2306 
2307 	/* Create RQ */
2308 	memset(&spec, 0, sizeof(spec));
2309 	spec.type = GDMA_RQ;
2310 	spec.monitor_avl_buf = true;
2311 	spec.queue_size = rq_size;
2312 	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2313 	if (err)
2314 		goto out;
2315 
2316 	/* Create RQ's CQ */
2317 	cq = &rxq->rx_cq;
2318 	cq->type = MANA_CQ_TYPE_RX;
2319 	cq->rxq = rxq;
2320 
2321 	memset(&spec, 0, sizeof(spec));
2322 	spec.type = GDMA_CQ;
2323 	spec.monitor_avl_buf = false;
2324 	spec.queue_size = cq_size;
2325 	spec.cq.callback = mana_schedule_task;
2326 	spec.cq.parent_eq = eq->eq;
2327 	spec.cq.context = cq;
2328 	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2329 	if (err)
2330 		goto out;
2331 
2332 	memset(&wq_spec, 0, sizeof(wq_spec));
2333 	memset(&cq_spec, 0, sizeof(cq_spec));
2334 	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2335 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
2336 
2337 	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2338 	cq_spec.queue_size = cq->gdma_cq->queue_size;
2339 	cq_spec.modr_ctx_id = 0;
2340 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2341 
2342 	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2343 	    &wq_spec, &cq_spec, &rxq->rxobj);
2344 	if (err)
2345 		goto out;
2346 
2347 	rxq->gdma_rq->id = wq_spec.queue_index;
2348 	cq->gdma_cq->id = cq_spec.queue_index;
2349 
2350 	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2351 	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2352 
2353 	rxq->gdma_id = rxq->gdma_rq->id;
2354 	cq->gdma_id = cq->gdma_cq->id;
2355 
2356 	err = mana_push_wqe(rxq);
2357 	if (err)
2358 		goto out;
2359 
2360 	if (cq->gdma_id >= gc->max_num_cqs) {
2361 		err = EINVAL;
2362 		goto out;
2363 	}
2364 
2365 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2366 
2367 	/* Allocate and start the cleanup task on CQ */
2368 	cq->do_not_ring_db = false;
2369 
2370 	NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
2371 	cq->cleanup_tq =
2372 	    taskqueue_create_fast("mana rx cq cleanup",
2373 	    M_WAITOK, taskqueue_thread_enqueue,
2374 	    &cq->cleanup_tq);
2375 
2376 	if (apc->last_rx_cq_bind_cpu < 0)
2377 		apc->last_rx_cq_bind_cpu = CPU_FIRST();
2378 	cq->cpu = apc->last_rx_cq_bind_cpu;
2379 	apc->last_rx_cq_bind_cpu = CPU_NEXT(apc->last_rx_cq_bind_cpu);
2380 
2381 	if (apc->bind_cleanup_thread_cpu) {
2382 		cpuset_t cpu_mask;
2383 		CPU_SETOF(cq->cpu, &cpu_mask);
2384 		taskqueue_start_threads_cpuset(&cq->cleanup_tq,
2385 		    1, PI_NET, &cpu_mask,
2386 		    "mana cq p%u-rx%u-cpu%d",
2387 		    apc->port_idx, rxq->rxq_idx, cq->cpu);
2388 	} else {
2389 		taskqueue_start_threads(&cq->cleanup_tq, 1,
2390 		    PI_NET, "mana cq p%u-rx%u",
2391 		    apc->port_idx, rxq->rxq_idx);
2392 	}
2393 
2394 	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2395 out:
2396 	if (!err)
2397 		return rxq;
2398 
2399 	if_printf(ndev, "Failed to create RXQ: err = %d\n", err);
2400 
2401 	mana_destroy_rxq(apc, rxq, false);
2402 
2403 	if (cq)
2404 		mana_deinit_cq(apc, cq);
2405 
2406 	return NULL;
2407 }
2408 
2409 static int
2410 mana_add_rx_queues(struct mana_port_context *apc, if_t ndev)
2411 {
2412 	struct mana_context *ac = apc->ac;
2413 	struct mana_rxq *rxq;
2414 	int err = 0;
2415 	int i;
2416 
2417 	for (i = 0; i < apc->num_queues; i++) {
2418 		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2419 		if (!rxq) {
2420 			err = ENOMEM;
2421 			goto out;
2422 		}
2423 
2424 		apc->rxqs[i] = rxq;
2425 	}
2426 
2427 	apc->default_rxobj = apc->rxqs[0]->rxobj;
2428 out:
2429 	return err;
2430 }
2431 
2432 static void
2433 mana_destroy_vport(struct mana_port_context *apc)
2434 {
2435 	struct mana_rxq *rxq;
2436 	uint32_t rxq_idx;
2437 
2438 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2439 		rxq = apc->rxqs[rxq_idx];
2440 		if (!rxq)
2441 			continue;
2442 
2443 		mana_destroy_rxq(apc, rxq, true);
2444 		apc->rxqs[rxq_idx] = NULL;
2445 	}
2446 
2447 	mana_destroy_txq(apc);
2448 
2449 	mana_uncfg_vport(apc);
2450 }
2451 
2452 static int
2453 mana_create_vport(struct mana_port_context *apc, if_t net)
2454 {
2455 	struct gdma_dev *gd = apc->ac->gdma_dev;
2456 	int err;
2457 
2458 	apc->default_rxobj = INVALID_MANA_HANDLE;
2459 
2460 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2461 	if (err)
2462 		return err;
2463 
2464 	return mana_create_txq(apc, net);
2465 }
2466 
2467 
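/* Fill the RSS indirection table round-robin across the RX queues. */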
2468 static void mana_rss_table_init(struct mana_port_context *apc)
2469 {
2470 	int i;
2471 
2472 	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
2473 		apc->indir_table[i] = i % apc->num_queues;
2474 }
2475 
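/*
 * Push the RSS configuration to the vPort: optionally refresh the
 * rxobj_table from the indirection table, program the hardware steering
 * (hash key and/or table as requested), then fence the RQs via
 * mana_fence_rqs() before returning.
 */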
2476 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2477 		    bool update_hash, bool update_tab)
2478 {
2479 	uint32_t queue_idx;
2480 	int err;
2481 	int i;
2482 
2483 	if (update_tab) {
2484 		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
2485 			queue_idx = apc->indir_table[i];
2486 			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2487 		}
2488 	}
2489 
2490 	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2491 	if (err)
2492 		return err;
2493 
2494 	mana_fence_rqs(apc);
2495 
2496 	return 0;
2497 }
2498 
2499 static int
2500 mana_init_port(if_t ndev)
2501 {
2502 	struct mana_port_context *apc = if_getsoftc(ndev);
2503 	uint32_t max_txq, max_rxq, max_queues;
2504 	int port_idx = apc->port_idx;
2505 	uint32_t num_indirect_entries;
2506 	int err;
2507 
2508 	err = mana_init_port_context(apc);
2509 	if (err)
2510 		return err;
2511 
2512 	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2513 	    &num_indirect_entries);
2514 	if (err) {
2515 		if_printf(ndev, "Failed to query info for vPort %d\n",
2516 		    port_idx);
2517 		goto reset_apc;
2518 	}
2519 
2520 	max_queues = min_t(uint32_t, max_txq, max_rxq);
2521 	if (apc->max_queues > max_queues)
2522 		apc->max_queues = max_queues;
2523 
2524 	if (apc->num_queues > apc->max_queues)
2525 		apc->num_queues = apc->max_queues;
2526 
2527 	return 0;
2528 
2529 reset_apc:
2530 	bus_dma_tag_destroy(apc->rx_buf_tag);
2531 	apc->rx_buf_tag = NULL;
2532 	free(apc->rxqs, M_DEVBUF);
2533 	apc->rxqs = NULL;
2534 	return err;
2535 }
2536 
2537 int
2538 mana_alloc_queues(if_t ndev)
2539 {
2540 	struct mana_port_context *apc = if_getsoftc(ndev);
2541 	int err;
2542 
2543 	err = mana_create_vport(apc, ndev);
2544 	if (err)
2545 		return err;
2546 
2547 	err = mana_add_rx_queues(apc, ndev);
2548 	if (err)
2549 		goto destroy_vport;
2550 
2551 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2552 
2553 	mana_rss_table_init(apc);
2554 
2555 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2556 	if (err)
2557 		goto destroy_vport;
2558 
2559 	return 0;
2560 
2561 destroy_vport:
2562 	mana_destroy_vport(apc);
2563 	return err;
2564 }
2565 
2566 static int
2567 mana_up(struct mana_port_context *apc)
2568 {
2569 	int err;
2570 
2571 	mana_dbg(NULL, "mana_up called\n");
2572 
2573 	err = mana_alloc_queues(apc->ndev);
2574 	if (err) {
2575 		mana_err(NULL, "Failed to allocate mana queues: %d\n", err);
2576 		return err;
2577 	}
2578 
2579 	/* Add queue specific sysctl */
2580 	mana_sysctl_add_queues(apc);
2581 
2582 	apc->port_is_up = true;
2583 
2584 	/* Ensure port state updated before txq state */
2585 	wmb();
2586 
2587 	if_link_state_change(apc->ndev, LINK_STATE_UP);
2588 	if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2589 
2590 	return 0;
2591 }
2592 
2593 
2594 static void
2595 mana_init(void *arg)
2596 {
2597 	struct mana_port_context *apc = (struct mana_port_context *)arg;
2598 
2599 	MANA_APC_LOCK_LOCK(apc);
2600 	if (!apc->port_is_up) {
2601 		mana_up(apc);
2602 	}
2603 	MANA_APC_LOCK_UNLOCK(apc);
2604 }
2605 
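/*
 * Tear down all queues of a port that has already been marked down.
 * For each TX queue: stop its TX and RX CQs from ringing the doorbell,
 * kick the TX cleanup task once more, and wait for pending_sends to
 * drain to zero.  Then disable RSS steering on the vPort and destroy it.
 */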
2606 static int
2607 mana_dealloc_queues(if_t ndev)
2608 {
2609 	struct mana_port_context *apc = if_getsoftc(ndev);
2610 	struct mana_txq *txq;
2611 	int i, err;
2612 
2613 	if (apc->port_is_up)
2614 		return EINVAL;
2615 
2616 	/* No packet can be transmitted now since apc->port_is_up is false.
2617 	 * There is still a small window in which mana_poll_tx_cq() could
2618 	 * re-enable a txq, because it may not yet have observed
2619 	 * apc->port_is_up being cleared, but that is harmless: with
2620 	 * apc->port_is_up false, mana_start_xmit() drops any new packets.
2621 	 *
2622 	 * Drain all the in-flight TX packets.
2623 	 */
2624 	for (i = 0; i < apc->num_queues; i++) {
2625 		txq = &apc->tx_qp[i].txq;
2626 
2627 		struct mana_cq *tx_cq = &apc->tx_qp[i].tx_cq;
2628 		struct mana_cq *rx_cq = &(apc->rxqs[i]->rx_cq);
2629 
2630 		tx_cq->do_not_ring_db = true;
2631 		rx_cq->do_not_ring_db = true;
2632 
2633 		/* Schedule a cleanup task */
2634 		taskqueue_enqueue(tx_cq->cleanup_tq, &tx_cq->cleanup_task);
2635 
2636 		while (atomic_read(&txq->pending_sends) > 0)
2637 			usleep_range(1000, 2000);
2638 	}
2639 
2640 	/* The queues can no longer be woken up at this point, because
2641 	 * mana_poll_tx_cq() is guaranteed not to be running anymore.
2642 	 */
2643 
2644 	apc->rss_state = TRI_STATE_FALSE;
2645 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2646 	if (err) {
2647 		if_printf(ndev, "Failed to disable vPort: %d\n", err);
2648 		return err;
2649 	}
2650 
2651 	mana_destroy_vport(apc);
2652 
2653 	return 0;
2654 }
2655 
2656 static int
2657 mana_down(struct mana_port_context *apc)
2658 {
2659 	int err = 0;
2660 
2661 	apc->port_st_save = apc->port_is_up;
2662 	apc->port_is_up = false;
2663 
2664 	/* Ensure port state updated before txq state */
2665 	wmb();
2666 
2667 	if (apc->port_st_save) {
2668 		if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE,
2669 		    IFF_DRV_RUNNING);
2670 		if_link_state_change(apc->ndev, LINK_STATE_DOWN);
2671 
2672 		mana_sysctl_free_queues(apc);
2673 
2674 		err = mana_dealloc_queues(apc->ndev);
2675 		if (err) {
2676 			if_printf(apc->ndev,
2677 			    "Failed to bring down mana interface: %d\n", err);
2678 		}
2679 	}
2680 
2681 	return err;
2682 }
2683 
2684 int
2685 mana_detach(if_t ndev)
2686 {
2687 	struct mana_port_context *apc = if_getsoftc(ndev);
2688 	int err;
2689 
2690 	ether_ifdetach(ndev);
2691 
2692 	if (!apc)
2693 		return 0;
2694 
2695 	MANA_APC_LOCK_LOCK(apc);
2696 	err = mana_down(apc);
2697 	MANA_APC_LOCK_UNLOCK(apc);
2698 
2699 	mana_cleanup_port_context(apc);
2700 
2701 	MANA_APC_LOCK_DESTROY(apc);
2702 
2703 	free(apc, M_DEVBUF);
2704 
2705 	return err;
2706 }
2707 
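/*
 * Create and attach the ifnet for one vPort: allocate the port context,
 * set default queue counts and the RSS hash key, query the vPort
 * configuration, advertise checksum/TSO/LRO capabilities and TSO limits,
 * and attach the interface in the inactive (OACTIVE) state.
 */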
2708 static int
2709 mana_probe_port(struct mana_context *ac, int port_idx,
2710     if_t *ndev_storage)
2711 {
2712 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
2713 	struct mana_port_context *apc;
2714 	uint32_t hwassist;
2715 	if_t ndev;
2716 	int err;
2717 
2718 	ndev = if_alloc_dev(IFT_ETHER, gc->dev);
2719 	if (!ndev) {
2720 		mana_err(NULL, "Failed to allocate ifnet struct\n");
2721 		return ENOMEM;
2722 	}
2723 
2724 	*ndev_storage = ndev;
2725 
2726 	apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
2727 	if (!apc) {
2728 		mana_err(NULL, "Failed to allocate port context\n");
2729 		err = ENOMEM;
2730 		goto free_net;
2731 	}
2732 
2733 	apc->ac = ac;
2734 	apc->ndev = ndev;
2735 	apc->max_queues = gc->max_num_queues;
2736 	apc->num_queues = min_t(unsigned int,
2737 	    gc->max_num_queues, MANA_MAX_NUM_QUEUES);
2738 	apc->port_handle = INVALID_MANA_HANDLE;
2739 	apc->port_idx = port_idx;
2740 	apc->frame_size = DEFAULT_FRAME_SIZE;
2741 	apc->last_tx_cq_bind_cpu = -1;
2742 	apc->last_rx_cq_bind_cpu = -1;
2743 	apc->vport_use_count = 0;
2744 
2745 	MANA_APC_LOCK_INIT(apc);
2746 
2747 	if_initname(ndev, device_get_name(gc->dev), port_idx);
2748 	if_setdev(ndev, gc->dev);
2749 	if_setsoftc(ndev, apc);
2750 
2751 	if_setflags(ndev, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2752 	if_setinitfn(ndev, mana_init);
2753 	if_settransmitfn(ndev, mana_start_xmit);
2754 	if_setqflushfn(ndev, mana_qflush);
2755 	if_setioctlfn(ndev, mana_ioctl);
2756 	if_setgetcounterfn(ndev, mana_get_counter);
2757 
2758 	if_setmtu(ndev, ETHERMTU);
2759 	if_setbaudrate(ndev, IF_Gbps(100));
2760 
2761 	mana_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2762 
2763 	err = mana_init_port(ndev);
2764 	if (err)
2765 		goto reset_apc;
2766 
2767 	if_setcapabilitiesbit(ndev,
2768 	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
2769 	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
2770 	    IFCAP_TSO4 | IFCAP_TSO6 |
2771 	    IFCAP_LRO | IFCAP_LINKSTATE, 0);
2772 
2773 	/* Enable all available capabilities by default. */
2774 	if_setcapenable(ndev, if_getcapabilities(ndev));
2775 
2776 	/* TSO parameters */
2777 	if_sethwtsomax(ndev, MANA_TSO_MAX_SZ -
2778 	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2779 	if_sethwtsomaxsegcount(ndev, MAX_MBUF_FRAGS);
2780 	if_sethwtsomaxsegsize(ndev, PAGE_SIZE);
2781 
2782 	hwassist = 0;
2783 	if (if_getcapenable(ndev) & (IFCAP_TSO4 | IFCAP_TSO6))
2784 		hwassist |= CSUM_TSO;
2785 	if (if_getcapenable(ndev) & IFCAP_TXCSUM)
2786 		hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2787 	if (if_getcapenable(ndev) & IFCAP_TXCSUM_IPV6)
2788 		hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2789 	mana_dbg(NULL, "set hwassist 0x%x\n", hwassist);
2790 	if_sethwassist(ndev, hwassist);
2791 
2792 	ifmedia_init(&apc->media, IFM_IMASK,
2793 	    mana_ifmedia_change, mana_ifmedia_status);
2794 	ifmedia_add(&apc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2795 	ifmedia_set(&apc->media, IFM_ETHER | IFM_AUTO);
2796 
2797 	ether_ifattach(ndev, apc->mac_addr);
2798 
2799 	/* Initialize statistics */
2800 	mana_alloc_counters((counter_u64_t *)&apc->port_stats,
2801 	    sizeof(struct mana_port_stats));
2802 	mana_sysctl_add_port(apc);
2803 
2804 	/* Tell the stack that the interface is not active */
2805 	if_setdrvflagbits(ndev, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2806 
2807 	return 0;
2808 
2809 reset_apc:
2810 	free(apc, M_DEVBUF);
2811 free_net:
2812 	*ndev_storage = NULL;
2813 	if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2814 	if_free(ndev);
2815 	return err;
2816 }
2817 
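/*
 * Top-level probe for the MANA device: register with the GDMA layer,
 * allocate the mana_context, create the EQs, query the device
 * configuration for the number of ports, and probe each port.  Any
 * failure unwinds through mana_remove().
 */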
2818 int mana_probe(struct gdma_dev *gd)
2819 {
2820 	struct gdma_context *gc = gd->gdma_context;
2821 	device_t dev = gc->dev;
2822 	struct mana_context *ac;
2823 	int err;
2824 	int i;
2825 
2826 	device_printf(dev, "%s protocol version: %d.%d.%d\n", DEVICE_NAME,
2827 		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2828 
2829 	err = mana_gd_register_device(gd);
2830 	if (err)
2831 		return err;
2832 
2833 	ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
2834 	if (!ac)
2835 		return ENOMEM;
2836 
2837 	ac->gdma_dev = gd;
2838 	ac->num_ports = 1;
2839 	gd->driver_data = ac;
2840 
2841 	err = mana_create_eq(ac);
2842 	if (err)
2843 		goto out;
2844 
2845 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
2846 	    MANA_MICRO_VERSION, &ac->num_ports);
2847 	if (err)
2848 		goto out;
2849 
2850 	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2851 		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2852 
2853 	for (i = 0; i < ac->num_ports; i++) {
2854 		err = mana_probe_port(ac, i, &ac->ports[i]);
2855 		if (err) {
2856 			device_printf(dev,
2857 			    "Failed to probe mana port %d\n", i);
2858 			break;
2859 		}
2860 	}
2861 
2862 out:
2863 	if (err)
2864 		mana_remove(gd);
2865 
2866 	return err;
2867 }
2868 
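/*
 * Detach and free every port's ifnet, destroy the EQs, and deregister
 * from the GDMA layer.  Also used as the error-unwind path of
 * mana_probe().
 */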
2869 void
2870 mana_remove(struct gdma_dev *gd)
2871 {
2872 	struct gdma_context *gc = gd->gdma_context;
2873 	struct mana_context *ac = gd->driver_data;
2874 	device_t dev = gc->dev;
2875 	if_t ndev;
2876 	int i;
2877 
2878 	for (i = 0; i < ac->num_ports; i++) {
2879 		ndev = ac->ports[i];
2880 		if (!ndev) {
2881 			if (i == 0)
2882 				device_printf(dev, "No net device to remove\n");
2883 			goto out;
2884 		}
2885 
2886 		mana_detach(ndev);
2887 
2888 		if_free(ndev);
2889 	}
2890 
2891 	mana_destroy_eq(ac);
2892 
2893 out:
2894 	mana_gd_deregister_device(gd);
2895 	gd->driver_data = NULL;
2896 	gd->gdma_context = NULL;
2897 	free(ac, M_DEVBUF);
2898 }
2899