xref: /freebsd/sys/dev/mana/mana_en.c (revision 38a52bd3b5cac3da6f7f6eef3dd050e6aa08ebb3)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2021 Microsoft Corp.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/kernel.h>
37 #include <sys/kthread.h>
38 #include <sys/malloc.h>
39 #include <sys/mbuf.h>
40 #include <sys/smp.h>
41 #include <sys/socket.h>
42 #include <sys/sockio.h>
43 #include <sys/time.h>
44 #include <sys/eventhandler.h>
45 
46 #include <machine/bus.h>
47 #include <machine/resource.h>
48 #include <machine/in_cksum.h>
49 
50 #include <net/if.h>
51 #include <net/if_var.h>
52 #include <net/if_types.h>
53 #include <net/if_vlan_var.h>
54 #ifdef RSS
55 #include <net/rss_config.h>
56 #endif
57 
58 #include <netinet/in_systm.h>
59 #include <netinet/in.h>
60 #include <netinet/if_ether.h>
61 #include <netinet/ip.h>
62 #include <netinet/ip6.h>
63 #include <netinet/tcp.h>
64 #include <netinet/udp.h>
65 
66 #include "mana.h"
67 #include "mana_sysctl.h"
68 
69 static int mana_up(struct mana_port_context *apc);
70 static int mana_down(struct mana_port_context *apc);
71 
72 static void
73 mana_rss_key_fill(void *k, size_t size)
74 {
75 	static bool rss_key_generated = false;
76 	static uint8_t rss_key[MANA_HASH_KEY_SIZE];
77 
78 	KASSERT(size <= MANA_HASH_KEY_SIZE,
79 	    ("Request more buytes than MANA RSS key can hold"));
80 
81 	if (!rss_key_generated) {
82 		arc4random_buf(rss_key, MANA_HASH_KEY_SIZE);
83 		rss_key_generated = true;
84 	}
85 	memcpy(k, rss_key, size);
86 }
87 
88 static int
89 mana_ifmedia_change(struct ifnet *ifp __unused)
90 {
91 	return EOPNOTSUPP;
92 }
93 
94 static void
95 mana_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
96 {
97 	struct mana_port_context *apc = if_getsoftc(ifp);
98 
99 	if (!apc) {
100 		if_printf(ifp, "Port not available\n");
101 		return;
102 	}
103 
104 	MANA_APC_LOCK_LOCK(apc);
105 
106 	ifmr->ifm_status = IFM_AVALID;
107 	ifmr->ifm_active = IFM_ETHER;
108 
109 	if (!apc->port_is_up) {
110 		MANA_APC_LOCK_UNLOCK(apc);
111 		mana_dbg(NULL, "Port %u link is down\n", apc->port_idx);
112 		return;
113 	}
114 
115 	ifmr->ifm_status |= IFM_ACTIVE;
116 	ifmr->ifm_active |= IFM_100G_DR | IFM_FDX;
117 
118 	MANA_APC_LOCK_UNLOCK(apc);
119 }
120 
121 static uint64_t
122 mana_get_counter(struct ifnet *ifp, ift_counter cnt)
123 {
124 	struct mana_port_context *apc = if_getsoftc(ifp);
125 	struct mana_port_stats *stats = &apc->port_stats;
126 
127 	switch (cnt) {
128 	case IFCOUNTER_IPACKETS:
129 		return (counter_u64_fetch(stats->rx_packets));
130 	case IFCOUNTER_OPACKETS:
131 		return (counter_u64_fetch(stats->tx_packets));
132 	case IFCOUNTER_IBYTES:
133 		return (counter_u64_fetch(stats->rx_bytes));
134 	case IFCOUNTER_OBYTES:
135 		return (counter_u64_fetch(stats->tx_bytes));
136 	case IFCOUNTER_IQDROPS:
137 		return (counter_u64_fetch(stats->rx_drops));
138 	case IFCOUNTER_OQDROPS:
139 		return (counter_u64_fetch(stats->tx_drops));
140 	default:
141 		return (if_get_counter_default(ifp, cnt));
142 	}
143 }
144 
145 static void
146 mana_qflush(struct ifnet *ifp)
147 {
148 	if_qflush(ifp);
149 }
150 
151 int
152 mana_restart(struct mana_port_context *apc)
153 {
154 	int rc = 0;
155 
156 	MANA_APC_LOCK_LOCK(apc);
157 	if (apc->port_is_up)
158 		 mana_down(apc);
159 
160 	rc = mana_up(apc);
161 	MANA_APC_LOCK_UNLOCK(apc);
162 
163 	return (rc);
164 }
165 
166 static int
167 mana_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
168 {
169 	struct mana_port_context *apc = if_getsoftc(ifp);
170 	struct ifrsskey *ifrk;
171 	struct ifrsshash *ifrh;
172 	struct ifreq *ifr;
173 	uint16_t new_mtu;
174 	int rc = 0;
175 
176 	switch (command) {
177 	case SIOCSIFMTU:
178 		ifr = (struct ifreq *)data;
179 		new_mtu = ifr->ifr_mtu;
180 		if (ifp->if_mtu == new_mtu)
181 			break;
182 		if ((new_mtu + 18 > MAX_FRAME_SIZE) ||
183 		    (new_mtu + 18 < MIN_FRAME_SIZE)) {
184 			if_printf(ifp, "Invalid MTU. new_mtu: %d, "
185 			    "max allowed: %d, min allowed: %d\n",
186 			    new_mtu, MAX_FRAME_SIZE - 18, MIN_FRAME_SIZE - 18);
187 			return EINVAL;
188 		}
189 		MANA_APC_LOCK_LOCK(apc);
190 		if (apc->port_is_up)
191 			mana_down(apc);
192 
193 		apc->frame_size = new_mtu + 18;
194 		if_setmtu(ifp, new_mtu);
195 		mana_dbg(NULL, "Set MTU to %d\n", new_mtu);
196 
197 		rc = mana_up(apc);
198 		MANA_APC_LOCK_UNLOCK(apc);
199 		break;
200 
201 	case SIOCSIFFLAGS:
202 		if (ifp->if_flags & IFF_UP) {
203 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
204 				MANA_APC_LOCK_LOCK(apc);
205 				if (!apc->port_is_up)
206 					rc = mana_up(apc);
207 				MANA_APC_LOCK_UNLOCK(apc);
208 			}
209 		} else {
210 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
211 				MANA_APC_LOCK_LOCK(apc);
212 				if (apc->port_is_up)
213 					mana_down(apc);
214 				MANA_APC_LOCK_UNLOCK(apc);
215 			}
216 		}
217 		break;
218 
219 	case SIOCSIFMEDIA:
220 	case SIOCGIFMEDIA:
221 	case SIOCGIFXMEDIA:
222 		ifr = (struct ifreq *)data;
223 		rc = ifmedia_ioctl(ifp, ifr, &apc->media, command);
224 		break;
225 
226 	case SIOCGIFRSSKEY:
227 		ifrk = (struct ifrsskey *)data;
228 		ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
229 		ifrk->ifrk_keylen = MANA_HASH_KEY_SIZE;
230 		memcpy(ifrk->ifrk_key, apc->hashkey, MANA_HASH_KEY_SIZE);
231 		break;
232 
233 	case SIOCGIFRSSHASH:
234 		ifrh = (struct ifrsshash *)data;
235 		ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
236 		ifrh->ifrh_types =
237 		    RSS_TYPE_TCP_IPV4 |
238 		    RSS_TYPE_UDP_IPV4 |
239 		    RSS_TYPE_TCP_IPV6 |
240 		    RSS_TYPE_UDP_IPV6;
241 		break;
242 
243 	default:
244 		rc = ether_ioctl(ifp, command, data);
245 		break;
246 	}
247 
248 	return (rc);
249 }
250 
251 static inline void
252 mana_alloc_counters(counter_u64_t *begin, int size)
253 {
254 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
255 
256 	for (; begin < end; ++begin)
257 		*begin = counter_u64_alloc(M_WAITOK);
258 }
259 
260 static inline void
261 mana_free_counters(counter_u64_t *begin, int size)
262 {
263 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
264 
265 	for (; begin < end; ++begin)
266 		counter_u64_free(*begin);
267 }
268 
269 static bool
270 mana_can_tx(struct gdma_queue *wq)
271 {
272 	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
273 }
274 
275 static inline int
276 mana_tx_map_mbuf(struct mana_port_context *apc,
277     struct mana_send_buf_info *tx_info,
278     struct mbuf **m_head, struct mana_tx_package *tp,
279     struct mana_stats *tx_stats)
280 {
281 	struct gdma_dev *gd = apc->ac->gdma_dev;
282 	bus_dma_segment_t segs[MAX_MBUF_FRAGS];
283 	struct mbuf *m = *m_head;
284 	int err, nsegs, i;
285 
286 	err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, tx_info->dma_map,
287 	    m, segs, &nsegs, BUS_DMA_NOWAIT);
288 	if (err == EFBIG) {
289 		struct mbuf *m_new;
290 
291 		counter_u64_add(tx_stats->collapse, 1);
292 		m_new = m_collapse(m, M_NOWAIT, MAX_MBUF_FRAGS);
293 		if (unlikely(m_new == NULL)) {
294 			counter_u64_add(tx_stats->collapse_err, 1);
295 			return ENOBUFS;
296 		} else {
297 			*m_head = m = m_new;
298 		}
299 
300 		mana_warn(NULL,
301 		    "Too many segs in orig mbuf, m_collapse called\n");
302 
303 		err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag,
304 		    tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
305 	}
306 	if (!err) {
307 		for (i = 0; i < nsegs; i++) {
308 			tp->wqe_req.sgl[i].address = segs[i].ds_addr;
309 			tp->wqe_req.sgl[i].mem_key = gd->gpa_mkey;
310 			tp->wqe_req.sgl[i].size = segs[i].ds_len;
311 		}
312 		tp->wqe_req.num_sge = nsegs;
313 
314 		tx_info->mbuf = *m_head;
315 
316 		bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
317 		    BUS_DMASYNC_PREWRITE);
318 	}
319 
320 	return err;
321 }
322 
323 static inline void
324 mana_tx_unmap_mbuf(struct mana_port_context *apc,
325     struct mana_send_buf_info *tx_info)
326 {
327 	bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
328 	    BUS_DMASYNC_POSTWRITE);
329 	bus_dmamap_unload(apc->tx_buf_tag, tx_info->dma_map);
330 	if (tx_info->mbuf) {
331 		m_freem(tx_info->mbuf);
332 		tx_info->mbuf = NULL;
333 	}
334 }
335 
336 static inline int
337 mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
338     struct mana_recv_buf_oob *rx_oob, bool alloc_mbuf)
339 {
340 	bus_dma_segment_t segs[1];
341 	struct mbuf *mbuf;
342 	int nsegs, err;
343 	uint32_t mlen;
344 
345 	if (alloc_mbuf) {
346 		mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize);
347 		if (unlikely(mbuf == NULL)) {
348 			mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
349 			if (unlikely(mbuf == NULL)) {
350 				return ENOMEM;
351 			}
352 			mlen = MCLBYTES;
353 		} else {
354 			mlen = rxq->datasize;
355 		}
356 
357 		mbuf->m_pkthdr.len = mbuf->m_len = mlen;
358 	} else {
359 		if (rx_oob->mbuf) {
360 			mbuf = rx_oob->mbuf;
361 			mlen = rx_oob->mbuf->m_pkthdr.len;
362 		} else {
363 			return ENOMEM;
364 		}
365 	}
366 
367 	err = bus_dmamap_load_mbuf_sg(apc->rx_buf_tag, rx_oob->dma_map,
368 	    mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
369 
370 	if (unlikely((err != 0) || (nsegs != 1))) {
371 		mana_warn(NULL, "Failed to map mbuf, error: %d, "
372 		    "nsegs: %d\n", err, nsegs);
373 		counter_u64_add(rxq->stats.dma_mapping_err, 1);
374 		goto error;
375 	}
376 
377 	bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
378 	    BUS_DMASYNC_PREREAD);
379 
380 	rx_oob->mbuf = mbuf;
381 	rx_oob->num_sge = 1;
382 	rx_oob->sgl[0].address = segs[0].ds_addr;
383 	rx_oob->sgl[0].size = mlen;
384 	rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
385 
386 	return 0;
387 
388 error:
389 	m_freem(mbuf);
390 	return EFAULT;
391 }
392 
393 static inline void
394 mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
395     struct mana_recv_buf_oob *rx_oob, bool free_mbuf)
396 {
397 	bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
398 	    BUS_DMASYNC_POSTREAD);
399 	bus_dmamap_unload(apc->rx_buf_tag, rx_oob->dma_map);
400 
401 	if (free_mbuf && rx_oob->mbuf) {
402 		m_freem(rx_oob->mbuf);
403 		rx_oob->mbuf = NULL;
404 	}
405 }
406 
407 
408 /* Use couple mbuf PH_loc spaces for l3 and l4 protocal type */
409 #define MANA_L3_PROTO(_mbuf)	((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
410 #define MANA_L4_PROTO(_mbuf)	((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
411 
412 #define MANA_TXQ_FULL	(IFF_DRV_RUNNING | IFF_DRV_OACTIVE)
413 
414 static void
415 mana_xmit(struct mana_txq *txq)
416 {
417 	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
418 	struct mana_send_buf_info *tx_info;
419 	struct ifnet *ndev = txq->ndev;
420 	struct mbuf *mbuf;
421 	struct mana_port_context *apc = if_getsoftc(ndev);
422 	struct mana_port_stats *port_stats = &apc->port_stats;
423 	struct gdma_dev *gd = apc->ac->gdma_dev;
424 	uint64_t packets, bytes;
425 	uint16_t next_to_use;
426 	struct mana_tx_package pkg = {};
427 	struct mana_stats *tx_stats;
428 	struct gdma_queue *gdma_sq;
429 	struct mana_cq *cq;
430 	int err, len;
431 
432 	gdma_sq = txq->gdma_sq;
433 	cq = &apc->tx_qp[txq->idx].tx_cq;
434 	tx_stats = &txq->stats;
435 
436 	packets = 0;
437 	bytes = 0;
438 	next_to_use = txq->next_to_use;
439 
440 	while ((mbuf = drbr_peek(ndev, txq->txq_br)) != NULL) {
441 		if (!apc->port_is_up ||
442 		    (if_getdrvflags(ndev) & MANA_TXQ_FULL) != IFF_DRV_RUNNING) {
443 			drbr_putback(ndev, txq->txq_br, mbuf);
444 			break;
445 		}
446 
447 		if (!mana_can_tx(gdma_sq)) {
448 			/* SQ is full. Set the IFF_DRV_OACTIVE flag */
449 			if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, 0);
450 			counter_u64_add(tx_stats->stop, 1);
451 			uint64_t stops = counter_u64_fetch(tx_stats->stop);
452 			uint64_t wakeups = counter_u64_fetch(tx_stats->wakeup);
453 #define MANA_TXQ_STOP_THRESHOLD		50
454 			if (stops > MANA_TXQ_STOP_THRESHOLD && wakeups > 0 &&
455 			    stops > wakeups && txq->alt_txq_idx == txq->idx) {
456 				txq->alt_txq_idx =
457 				    (txq->idx + (stops / wakeups))
458 				    % apc->num_queues;
459 				counter_u64_add(tx_stats->alt_chg, 1);
460 			}
461 
462 			drbr_putback(ndev, txq->txq_br, mbuf);
463 
464 			taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
465 			break;
466 		}
467 
468 		tx_info = &txq->tx_buf_info[next_to_use];
469 
470 		memset(&pkg, 0, sizeof(struct mana_tx_package));
471 		pkg.wqe_req.sgl = pkg.sgl_array;
472 
473 		err = mana_tx_map_mbuf(apc, tx_info, &mbuf, &pkg, tx_stats);
474 		if (unlikely(err)) {
475 			mana_dbg(NULL,
476 			    "Failed to map tx mbuf, err %d\n", err);
477 
478 			counter_u64_add(tx_stats->dma_mapping_err, 1);
479 
480 			/* The mbuf is still there. Free it */
481 			m_freem(mbuf);
482 			/* Advance the drbr queue */
483 			drbr_advance(ndev, txq->txq_br);
484 			continue;
485 		}
486 
487 		pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
488 		pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
489 
490 		if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
491 			pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
492 			pkt_fmt = MANA_LONG_PKT_FMT;
493 		} else {
494 			pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
495 		}
496 
497 		pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
498 
499 		if (pkt_fmt == MANA_SHORT_PKT_FMT)
500 			pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
501 		else
502 			pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
503 
504 		pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
505 		pkg.wqe_req.flags = 0;
506 		pkg.wqe_req.client_data_unit = 0;
507 
508 		if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
509 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
510 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
511 			else
512 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
513 
514 			pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
515 			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
516 			pkg.tx_oob.s_oob.trans_off = mbuf->m_pkthdr.l3hlen;
517 
518 			pkg.wqe_req.client_data_unit = mbuf->m_pkthdr.tso_segsz;
519 			pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
520 		} else if (mbuf->m_pkthdr.csum_flags &
521 		    (CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP6_UDP | CSUM_IP6_TCP)) {
522 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP) {
523 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
524 				pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
525 			} else {
526 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
527 			}
528 
529 			if (MANA_L4_PROTO(mbuf) == IPPROTO_TCP) {
530 				pkg.tx_oob.s_oob.comp_tcp_csum = 1;
531 				pkg.tx_oob.s_oob.trans_off =
532 				    mbuf->m_pkthdr.l3hlen;
533 			} else {
534 				pkg.tx_oob.s_oob.comp_udp_csum = 1;
535 			}
536 		} else if (mbuf->m_pkthdr.csum_flags & CSUM_IP) {
537 			pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
538 			pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
539 		} else {
540 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
541 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
542 			else if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IPV6)
543 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
544 		}
545 
546 		len = mbuf->m_pkthdr.len;
547 
548 		err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
549 		    (struct gdma_posted_wqe_info *)&tx_info->wqe_inf);
550 		if (unlikely(err)) {
551 			/* Should not happen */
552 			if_printf(ndev, "Failed to post TX OOB: %d\n", err);
553 
554 			mana_tx_unmap_mbuf(apc, tx_info);
555 
556 			drbr_advance(ndev, txq->txq_br);
557 			continue;
558 		}
559 
560 		next_to_use =
561 		    (next_to_use + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
562 
563 		(void)atomic_inc_return(&txq->pending_sends);
564 
565 		drbr_advance(ndev, txq->txq_br);
566 
567 		mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
568 
569 		packets++;
570 		bytes += len;
571 	}
572 
573 	counter_enter();
574 	counter_u64_add_protected(tx_stats->packets, packets);
575 	counter_u64_add_protected(port_stats->tx_packets, packets);
576 	counter_u64_add_protected(tx_stats->bytes, bytes);
577 	counter_u64_add_protected(port_stats->tx_bytes, bytes);
578 	counter_exit();
579 
580 	txq->next_to_use = next_to_use;
581 }
582 
583 static void
584 mana_xmit_taskfunc(void *arg, int pending)
585 {
586 	struct mana_txq *txq = (struct mana_txq *)arg;
587 	struct ifnet *ndev = txq->ndev;
588 	struct mana_port_context *apc = if_getsoftc(ndev);
589 
590 	while (!drbr_empty(ndev, txq->txq_br) && apc->port_is_up &&
591 	    (if_getdrvflags(ndev) & MANA_TXQ_FULL) == IFF_DRV_RUNNING) {
592 		mtx_lock(&txq->txq_mtx);
593 		mana_xmit(txq);
594 		mtx_unlock(&txq->txq_mtx);
595 	}
596 }
597 
598 #define PULLUP_HDR(m, len)				\
599 do {							\
600 	if (unlikely((m)->m_len < (len))) {		\
601 		(m) = m_pullup((m), (len));		\
602 		if ((m) == NULL)			\
603 			return (NULL);			\
604 	}						\
605 } while (0)
606 
607 /*
608  * If this function failed, the mbuf would be freed.
609  */
610 static inline struct mbuf *
611 mana_tso_fixup(struct mbuf *mbuf)
612 {
613 	struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
614 	struct tcphdr *th;
615 	uint16_t etype;
616 	int ehlen;
617 
618 	if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
619 		etype = ntohs(eh->evl_proto);
620 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
621 	} else {
622 		etype = ntohs(eh->evl_encap_proto);
623 		ehlen = ETHER_HDR_LEN;
624 	}
625 
626 	if (etype == ETHERTYPE_IP) {
627 		struct ip *ip;
628 		int iphlen;
629 
630 		PULLUP_HDR(mbuf, ehlen + sizeof(*ip));
631 		ip = mtodo(mbuf, ehlen);
632 		iphlen = ip->ip_hl << 2;
633 		mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
634 
635 		PULLUP_HDR(mbuf, ehlen + iphlen + sizeof(*th));
636 		th = mtodo(mbuf, ehlen + iphlen);
637 
638 		ip->ip_len = 0;
639 		ip->ip_sum = 0;
640 		th->th_sum = in_pseudo(ip->ip_src.s_addr,
641 		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
642 	} else if (etype == ETHERTYPE_IPV6) {
643 		struct ip6_hdr *ip6;
644 
645 		PULLUP_HDR(mbuf, ehlen + sizeof(*ip6) + sizeof(*th));
646 		ip6 = mtodo(mbuf, ehlen);
647 		if (ip6->ip6_nxt != IPPROTO_TCP) {
648 			/* Realy something wrong, just return */
649 			mana_dbg(NULL, "TSO mbuf not TCP, freed.\n");
650 			m_freem(mbuf);
651 			return NULL;
652 		}
653 		mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
654 
655 		th = mtodo(mbuf, ehlen + sizeof(*ip6));
656 
657 		ip6->ip6_plen = 0;
658 		th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
659 	} else {
660 		/* CSUM_TSO is set but not IP protocol. */
661 		mana_warn(NULL, "TSO mbuf not right, freed.\n");
662 		m_freem(mbuf);
663 		return NULL;
664 	}
665 
666 	MANA_L3_PROTO(mbuf) = etype;
667 
668 	return (mbuf);
669 }
670 
671 /*
672  * If this function failed, the mbuf would be freed.
673  */
674 static inline struct mbuf *
675 mana_mbuf_csum_check(struct mbuf *mbuf)
676 {
677 	struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
678 	struct mbuf *mbuf_next;
679 	uint16_t etype;
680 	int offset;
681 	int ehlen;
682 
683 	if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
684 		etype = ntohs(eh->evl_proto);
685 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
686 	} else {
687 		etype = ntohs(eh->evl_encap_proto);
688 		ehlen = ETHER_HDR_LEN;
689 	}
690 
691 	mbuf_next = m_getptr(mbuf, ehlen, &offset);
692 
693 	MANA_L4_PROTO(mbuf) = 0;
694 	if (etype == ETHERTYPE_IP) {
695 		const struct ip *ip;
696 		int iphlen;
697 
698 		ip = (struct ip *)(mtodo(mbuf_next, offset));
699 		iphlen = ip->ip_hl << 2;
700 		mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
701 
702 		MANA_L4_PROTO(mbuf) = ip->ip_p;
703 	} else if (etype == ETHERTYPE_IPV6) {
704 		const struct ip6_hdr *ip6;
705 
706 		ip6 = (struct ip6_hdr *)(mtodo(mbuf_next, offset));
707 		mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
708 
709 		MANA_L4_PROTO(mbuf) = ip6->ip6_nxt;
710 	} else {
711 		MANA_L4_PROTO(mbuf) = 0;
712 	}
713 
714 	MANA_L3_PROTO(mbuf) = etype;
715 
716 	return (mbuf);
717 }
718 
719 static int
720 mana_start_xmit(struct ifnet *ifp, struct mbuf *m)
721 {
722 	struct mana_port_context *apc = if_getsoftc(ifp);
723 	struct mana_txq *txq;
724 	int is_drbr_empty;
725 	uint16_t txq_id;
726 	int err;
727 
728 	if (unlikely((!apc->port_is_up) ||
729 	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
730 		return ENODEV;
731 
732 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
733 		m = mana_tso_fixup(m);
734 		if (unlikely(m == NULL)) {
735 			counter_enter();
736 			counter_u64_add_protected(apc->port_stats.tx_drops, 1);
737 			counter_exit();
738 			return EIO;
739 		}
740 	} else {
741 		m = mana_mbuf_csum_check(m);
742 		if (unlikely(m == NULL)) {
743 			counter_enter();
744 			counter_u64_add_protected(apc->port_stats.tx_drops, 1);
745 			counter_exit();
746 			return EIO;
747 		}
748 	}
749 
750 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
751 		uint32_t hash = m->m_pkthdr.flowid;
752 		txq_id = apc->indir_table[(hash) & MANA_INDIRECT_TABLE_MASK] %
753 		    apc->num_queues;
754 	} else {
755 		txq_id = m->m_pkthdr.flowid % apc->num_queues;
756 	}
757 
758 	if (apc->enable_tx_altq)
759 		txq_id = apc->tx_qp[txq_id].txq.alt_txq_idx;
760 
761 	txq = &apc->tx_qp[txq_id].txq;
762 
763 	is_drbr_empty = drbr_empty(ifp, txq->txq_br);
764 	err = drbr_enqueue(ifp, txq->txq_br, m);
765 	if (unlikely(err)) {
766 		mana_warn(NULL, "txq %u failed to enqueue: %d\n",
767 		    txq_id, err);
768 		taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
769 		return err;
770 	}
771 
772 	if (is_drbr_empty && mtx_trylock(&txq->txq_mtx)) {
773 		mana_xmit(txq);
774 		mtx_unlock(&txq->txq_mtx);
775 	} else {
776 		taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
777 	}
778 
779 	return 0;
780 }
781 
782 static void
783 mana_cleanup_port_context(struct mana_port_context *apc)
784 {
785 	bus_dma_tag_destroy(apc->tx_buf_tag);
786 	bus_dma_tag_destroy(apc->rx_buf_tag);
787 	apc->rx_buf_tag = NULL;
788 
789 	free(apc->rxqs, M_DEVBUF);
790 	apc->rxqs = NULL;
791 
792 	mana_free_counters((counter_u64_t *)&apc->port_stats,
793 	    sizeof(struct mana_port_stats));
794 }
795 
796 static int
797 mana_init_port_context(struct mana_port_context *apc)
798 {
799 	device_t dev = apc->ac->gdma_dev->gdma_context->dev;
800 	uint32_t tso_maxsize;
801 	int err;
802 
803 	tso_maxsize = MAX_MBUF_FRAGS * MANA_TSO_MAXSEG_SZ -
804 	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
805 
806 	/* Create DMA tag for tx bufs */
807 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
808 	    1, 0,			/* alignment, boundary	*/
809 	    BUS_SPACE_MAXADDR,		/* lowaddr		*/
810 	    BUS_SPACE_MAXADDR,		/* highaddr		*/
811 	    NULL, NULL,			/* filter, filterarg	*/
812 	    tso_maxsize,		/* maxsize		*/
813 	    MAX_MBUF_FRAGS,		/* nsegments		*/
814 	    tso_maxsize,		/* maxsegsize		*/
815 	    0,				/* flags		*/
816 	    NULL, NULL,			/* lockfunc, lockfuncarg*/
817 	    &apc->tx_buf_tag);
818 	if (unlikely(err)) {
819 		device_printf(dev, "Feiled to create TX DMA tag\n");
820 		return err;
821 	}
822 
823 	/* Create DMA tag for rx bufs */
824 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
825 	    64, 0,			/* alignment, boundary	*/
826 	    BUS_SPACE_MAXADDR,		/* lowaddr		*/
827 	    BUS_SPACE_MAXADDR,		/* highaddr		*/
828 	    NULL, NULL,			/* filter, filterarg	*/
829 	    MJUMPAGESIZE,		/* maxsize		*/
830 	    1,				/* nsegments		*/
831 	    MJUMPAGESIZE,		/* maxsegsize		*/
832 	    0,				/* flags		*/
833 	    NULL, NULL,			/* lockfunc, lockfuncarg*/
834 	    &apc->rx_buf_tag);
835 	if (unlikely(err)) {
836 		device_printf(dev, "Feiled to create RX DMA tag\n");
837 		return err;
838 	}
839 
840 	apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
841 	    M_DEVBUF, M_WAITOK | M_ZERO);
842 
843 	if (!apc->rxqs) {
844 		bus_dma_tag_destroy(apc->tx_buf_tag);
845 		bus_dma_tag_destroy(apc->rx_buf_tag);
846 		apc->rx_buf_tag = NULL;
847 		return ENOMEM;
848 	}
849 
850 	return 0;
851 }
852 
853 static int
854 mana_send_request(struct mana_context *ac, void *in_buf,
855     uint32_t in_len, void *out_buf, uint32_t out_len)
856 {
857 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
858 	struct gdma_resp_hdr *resp = out_buf;
859 	struct gdma_req_hdr *req = in_buf;
860 	device_t dev = gc->dev;
861 	static atomic_t activity_id;
862 	int err;
863 
864 	req->dev_id = gc->mana.dev_id;
865 	req->activity_id = atomic_inc_return(&activity_id);
866 
867 	mana_dbg(NULL, "activity_id  = %u\n", activity_id);
868 
869 	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
870 	    out_buf);
871 	if (err || resp->status) {
872 		device_printf(dev, "Failed to send mana message: %d, 0x%x\n",
873 			err, resp->status);
874 		return err ? err : EPROTO;
875 	}
876 
877 	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
878 	    req->activity_id != resp->activity_id) {
879 		device_printf(dev,
880 		    "Unexpected mana message response: %x,%x,%x,%x\n",
881 		    req->dev_id.as_uint32, resp->dev_id.as_uint32,
882 		    req->activity_id, resp->activity_id);
883 		return EPROTO;
884 	}
885 
886 	return 0;
887 }
888 
889 static int
890 mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
891     const enum mana_command_code expected_code,
892     const uint32_t min_size)
893 {
894 	if (resp_hdr->response.msg_type != expected_code)
895 		return EPROTO;
896 
897 	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
898 		return EPROTO;
899 
900 	if (resp_hdr->response.msg_size < min_size)
901 		return EPROTO;
902 
903 	return 0;
904 }
905 
906 static int
907 mana_query_device_cfg(struct mana_context *ac, uint32_t proto_major_ver,
908     uint32_t proto_minor_ver, uint32_t proto_micro_ver,
909     uint16_t *max_num_vports)
910 {
911 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
912 	struct mana_query_device_cfg_resp resp = {};
913 	struct mana_query_device_cfg_req req = {};
914 	device_t dev = gc->dev;
915 	int err = 0;
916 
917 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
918 	    sizeof(req), sizeof(resp));
919 	req.proto_major_ver = proto_major_ver;
920 	req.proto_minor_ver = proto_minor_ver;
921 	req.proto_micro_ver = proto_micro_ver;
922 
923 	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
924 	if (err) {
925 		device_printf(dev, "Failed to query config: %d", err);
926 		return err;
927 	}
928 
929 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
930 	    sizeof(resp));
931 	if (err || resp.hdr.status) {
932 		device_printf(dev, "Invalid query result: %d, 0x%x\n", err,
933 		    resp.hdr.status);
934 		if (!err)
935 			err = EPROTO;
936 		return err;
937 	}
938 
939 	*max_num_vports = resp.max_num_vports;
940 
941 	mana_dbg(NULL, "mana max_num_vports from device = %d\n",
942 	    *max_num_vports);
943 
944 	return 0;
945 }
946 
947 static int
948 mana_query_vport_cfg(struct mana_port_context *apc, uint32_t vport_index,
949     uint32_t *max_sq, uint32_t *max_rq, uint32_t *num_indir_entry)
950 {
951 	struct mana_query_vport_cfg_resp resp = {};
952 	struct mana_query_vport_cfg_req req = {};
953 	int err;
954 
955 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
956 	    sizeof(req), sizeof(resp));
957 
958 	req.vport_index = vport_index;
959 
960 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
961 	    sizeof(resp));
962 	if (err)
963 		return err;
964 
965 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
966 	    sizeof(resp));
967 	if (err)
968 		return err;
969 
970 	if (resp.hdr.status)
971 		return EPROTO;
972 
973 	*max_sq = resp.max_num_sq;
974 	*max_rq = resp.max_num_rq;
975 	*num_indir_entry = resp.num_indirection_ent;
976 
977 	apc->port_handle = resp.vport;
978 	memcpy(apc->mac_addr, resp.mac_addr, ETHER_ADDR_LEN);
979 
980 	return 0;
981 }
982 
983 void
984 mana_uncfg_vport(struct mana_port_context *apc)
985 {
986 	MANA_APC_LOCK_LOCK(apc);
987 	apc->vport_use_count--;
988 	if (apc->vport_use_count < 0) {
989 		mana_err(NULL,
990 		    "WARNING: vport_use_count less than 0: %u\n",
991 		    apc->vport_use_count);
992 	}
993 	MANA_APC_LOCK_UNLOCK(apc);
994 }
995 
996 int
997 mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
998     uint32_t doorbell_pg_id)
999 {
1000 	struct mana_config_vport_resp resp = {};
1001 	struct mana_config_vport_req req = {};
1002 	int err;
1003 
1004 	/* This function is used to program the Ethernet port in the hardware
1005 	 * table. It can be called from the Ethernet driver or the RDMA driver.
1006 	 *
1007 	 * For Ethernet usage, the hardware supports only one active user on a
1008 	 * physical port. The driver checks on the port usage before programming
1009 	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
1010 	 * device to kernel NET layer (Ethernet driver).
1011 	 *
1012 	 * Because the RDMA driver doesn't know in advance which QP type the
1013 	 * user will create, it exposes the device with all its ports. The user
1014 	 * may not be able to create RAW QP on a port if this port is already
1015 	 * in used by the Ethernet driver from the kernel.
1016 	 *
1017 	 * This physical port limitation only applies to the RAW QP. For RC QP,
1018 	 * the hardware doesn't have this limitation. The user can create RC
1019 	 * QPs on a physical port up to the hardware limits independent of the
1020 	 * Ethernet usage on the same port.
1021 	 */
1022 	MANA_APC_LOCK_LOCK(apc);
1023 	if (apc->vport_use_count > 0) {
1024 		MANA_APC_LOCK_UNLOCK(apc);
1025 		return EBUSY;
1026 	}
1027 	apc->vport_use_count++;
1028 	MANA_APC_LOCK_UNLOCK(apc);
1029 
1030 	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
1031 	    sizeof(req), sizeof(resp));
1032 	req.vport = apc->port_handle;
1033 	req.pdid = protection_dom_id;
1034 	req.doorbell_pageid = doorbell_pg_id;
1035 
1036 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1037 	    sizeof(resp));
1038 	if (err) {
1039 		if_printf(apc->ndev, "Failed to configure vPort: %d\n", err);
1040 		goto out;
1041 	}
1042 
1043 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1044 	    sizeof(resp));
1045 	if (err || resp.hdr.status) {
1046 		if_printf(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1047 		    err, resp.hdr.status);
1048 		if (!err)
1049 			err = EPROTO;
1050 
1051 		goto out;
1052 	}
1053 
1054 	apc->tx_shortform_allowed = resp.short_form_allowed;
1055 	apc->tx_vp_offset = resp.tx_vport_offset;
1056 
1057 #if defined(__amd64__)
1058 	if_printf(apc->ndev, "Configured vPort %lu PD %u DB %u\n",
1059 	    apc->port_handle, protection_dom_id, doorbell_pg_id);
1060 #endif
1061 
1062 out:
1063 	if (err)
1064 		mana_uncfg_vport(apc);
1065 
1066 	return err;
1067 }
1068 
1069 static int
1070 mana_cfg_vport_steering(struct mana_port_context *apc,
1071     enum TRI_STATE rx,
1072     bool update_default_rxobj, bool update_key,
1073     bool update_tab)
1074 {
1075 	uint16_t num_entries = MANA_INDIRECT_TABLE_SIZE;
1076 	struct mana_cfg_rx_steer_req *req = NULL;
1077 	struct mana_cfg_rx_steer_resp resp = {};
1078 	struct ifnet *ndev = apc->ndev;
1079 	mana_handle_t *req_indir_tab;
1080 	uint32_t req_buf_size;
1081 	int err;
1082 
1083 	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
1084 	req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
1085 	if (!req)
1086 		return ENOMEM;
1087 
1088 	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1089 	    sizeof(resp));
1090 
1091 	req->vport = apc->port_handle;
1092 	req->num_indir_entries = num_entries;
1093 	req->indir_tab_offset = sizeof(*req);
1094 	req->rx_enable = rx;
1095 	req->rss_enable = apc->rss_state;
1096 	req->update_default_rxobj = update_default_rxobj;
1097 	req->update_hashkey = update_key;
1098 	req->update_indir_tab = update_tab;
1099 	req->default_rxobj = apc->default_rxobj;
1100 
1101 	if (update_key)
1102 		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1103 
1104 	if (update_tab) {
1105 		req_indir_tab = (mana_handle_t *)(req + 1);
1106 		memcpy(req_indir_tab, apc->rxobj_table,
1107 		       req->num_indir_entries * sizeof(mana_handle_t));
1108 	}
1109 
1110 	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1111 	    sizeof(resp));
1112 	if (err) {
1113 		if_printf(ndev, "Failed to configure vPort RX: %d\n", err);
1114 		goto out;
1115 	}
1116 
1117 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1118 	    sizeof(resp));
1119 	if (err) {
1120 		if_printf(ndev, "vPort RX configuration failed: %d\n", err);
1121 		goto out;
1122 	}
1123 
1124 	if (resp.hdr.status) {
1125 		if_printf(ndev, "vPort RX configuration failed: 0x%x\n",
1126 		    resp.hdr.status);
1127 		err = EPROTO;
1128 	}
1129 
1130 #if defined(__amd64__)
1131 	if_printf(ndev, "Configured steering vPort %lu entries %u\n",
1132 	    apc->port_handle, num_entries);
1133 #endif
1134 
1135 out:
1136 	free(req, M_DEVBUF);
1137 	return err;
1138 }
1139 
1140 int
1141 mana_create_wq_obj(struct mana_port_context *apc,
1142     mana_handle_t vport,
1143     uint32_t wq_type, struct mana_obj_spec *wq_spec,
1144     struct mana_obj_spec *cq_spec,
1145     mana_handle_t *wq_obj)
1146 {
1147 	struct mana_create_wqobj_resp resp = {};
1148 	struct mana_create_wqobj_req req = {};
1149 	struct ifnet *ndev = apc->ndev;
1150 	int err;
1151 
1152 	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1153 	    sizeof(req), sizeof(resp));
1154 	req.vport = vport;
1155 	req.wq_type = wq_type;
1156 	req.wq_gdma_region = wq_spec->gdma_region;
1157 	req.cq_gdma_region = cq_spec->gdma_region;
1158 	req.wq_size = wq_spec->queue_size;
1159 	req.cq_size = cq_spec->queue_size;
1160 	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1161 	req.cq_parent_qid = cq_spec->attached_eq;
1162 
1163 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1164 	    sizeof(resp));
1165 	if (err) {
1166 		if_printf(ndev, "Failed to create WQ object: %d\n", err);
1167 		goto out;
1168 	}
1169 
1170 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1171 	    sizeof(resp));
1172 	if (err || resp.hdr.status) {
1173 		if_printf(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1174 		    resp.hdr.status);
1175 		if (!err)
1176 			err = EPROTO;
1177 		goto out;
1178 	}
1179 
1180 	if (resp.wq_obj == INVALID_MANA_HANDLE) {
1181 		if_printf(ndev, "Got an invalid WQ object handle\n");
1182 		err = EPROTO;
1183 		goto out;
1184 	}
1185 
1186 	*wq_obj = resp.wq_obj;
1187 	wq_spec->queue_index = resp.wq_id;
1188 	cq_spec->queue_index = resp.cq_id;
1189 
1190 	return 0;
1191 out:
1192 	return err;
1193 }
1194 
1195 void
1196 mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
1197     mana_handle_t wq_obj)
1198 {
1199 	struct mana_destroy_wqobj_resp resp = {};
1200 	struct mana_destroy_wqobj_req req = {};
1201 	struct ifnet *ndev = apc->ndev;
1202 	int err;
1203 
1204 	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1205 	    sizeof(req), sizeof(resp));
1206 	req.wq_type = wq_type;
1207 	req.wq_obj_handle = wq_obj;
1208 
1209 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1210 	    sizeof(resp));
1211 	if (err) {
1212 		if_printf(ndev, "Failed to destroy WQ object: %d\n", err);
1213 		return;
1214 	}
1215 
1216 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1217 	    sizeof(resp));
1218 	if (err || resp.hdr.status)
1219 		if_printf(ndev, "Failed to destroy WQ object: %d, 0x%x\n",
1220 		    err, resp.hdr.status);
1221 }
1222 
1223 static void
1224 mana_destroy_eq(struct mana_context *ac)
1225 {
1226 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
1227 	struct gdma_queue *eq;
1228 	int i;
1229 
1230 	if (!ac->eqs)
1231 		return;
1232 
1233 	for (i = 0; i < gc->max_num_queues; i++) {
1234 		eq = ac->eqs[i].eq;
1235 		if (!eq)
1236 			continue;
1237 
1238 		mana_gd_destroy_queue(gc, eq);
1239 	}
1240 
1241 	free(ac->eqs, M_DEVBUF);
1242 	ac->eqs = NULL;
1243 }
1244 
1245 static int
1246 mana_create_eq(struct mana_context *ac)
1247 {
1248 	struct gdma_dev *gd = ac->gdma_dev;
1249 	struct gdma_context *gc = gd->gdma_context;
1250 	struct gdma_queue_spec spec = {};
1251 	int err;
1252 	int i;
1253 
1254 	ac->eqs = mallocarray(gc->max_num_queues, sizeof(struct mana_eq),
1255 	    M_DEVBUF, M_WAITOK | M_ZERO);
1256 	if (!ac->eqs)
1257 		return ENOMEM;
1258 
1259 	spec.type = GDMA_EQ;
1260 	spec.monitor_avl_buf = false;
1261 	spec.queue_size = EQ_SIZE;
1262 	spec.eq.callback = NULL;
1263 	spec.eq.context = ac->eqs;
1264 	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1265 
1266 	for (i = 0; i < gc->max_num_queues; i++) {
1267 		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
1268 		if (err)
1269 			goto out;
1270 	}
1271 
1272 	return 0;
1273 out:
1274 	mana_destroy_eq(ac);
1275 	return err;
1276 }
1277 
1278 static int
1279 mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1280 {
1281 	struct mana_fence_rq_resp resp = {};
1282 	struct mana_fence_rq_req req = {};
1283 	int err;
1284 
1285 	init_completion(&rxq->fence_event);
1286 
1287 	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
1288 	    sizeof(req), sizeof(resp));
1289 	req.wq_obj_handle = rxq->rxobj;
1290 
1291 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1292 	    sizeof(resp));
1293 	if (err) {
1294 		if_printf(apc->ndev, "Failed to fence RQ %u: %d\n",
1295 		    rxq->rxq_idx, err);
1296 		return err;
1297 	}
1298 
1299 	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
1300 	if (err || resp.hdr.status) {
1301 		if_printf(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1302 		    rxq->rxq_idx, err, resp.hdr.status);
1303 		if (!err)
1304 			err = EPROTO;
1305 
1306 		return err;
1307 	}
1308 
1309 	if (wait_for_completion_timeout(&rxq->fence_event, 10 * hz)) {
1310 		if_printf(apc->ndev, "Failed to fence RQ %u: timed out\n",
1311 		    rxq->rxq_idx);
1312 		return ETIMEDOUT;
1313         }
1314 
1315 	return 0;
1316 }
1317 
1318 static void
1319 mana_fence_rqs(struct mana_port_context *apc)
1320 {
1321 	unsigned int rxq_idx;
1322 	struct mana_rxq *rxq;
1323 	int err;
1324 
1325 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1326 		rxq = apc->rxqs[rxq_idx];
1327 		err = mana_fence_rq(apc, rxq);
1328 
1329 		/* In case of any error, use sleep instead. */
1330 		if (err)
1331 			gdma_msleep(100);
1332 	}
1333 }
1334 
1335 static int
1336 mana_move_wq_tail(struct gdma_queue *wq, uint32_t num_units)
1337 {
1338 	uint32_t used_space_old;
1339 	uint32_t used_space_new;
1340 
1341 	used_space_old = wq->head - wq->tail;
1342 	used_space_new = wq->head - (wq->tail + num_units);
1343 
1344 	if (used_space_new > used_space_old) {
1345 		mana_err(NULL,
1346 		    "WARNING: new used space %u greater than old one %u\n",
1347 		    used_space_new, used_space_old);
1348 		return ERANGE;
1349 	}
1350 
1351 	wq->tail += num_units;
1352 	return 0;
1353 }
1354 
1355 static void
1356 mana_poll_tx_cq(struct mana_cq *cq)
1357 {
1358 	struct gdma_comp *completions = cq->gdma_comp_buf;
1359 	struct gdma_posted_wqe_info *wqe_info;
1360 	struct mana_send_buf_info *tx_info;
1361 	unsigned int pkt_transmitted = 0;
1362 	unsigned int wqe_unit_cnt = 0;
1363 	struct mana_txq *txq = cq->txq;
1364 	struct mana_port_context *apc;
1365 	uint16_t next_to_complete;
1366 	struct ifnet *ndev;
1367 	int comp_read;
1368 	int txq_idx = txq->idx;;
1369 	int i;
1370 	int sa_drop = 0;
1371 
1372 	struct gdma_queue *gdma_wq;
1373 	unsigned int avail_space;
1374 	bool txq_full = false;
1375 
1376 	ndev = txq->ndev;
1377 	apc = if_getsoftc(ndev);
1378 
1379 	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1380 	    CQE_POLLING_BUFFER);
1381 
1382 	if (comp_read < 1)
1383 		return;
1384 
1385 	next_to_complete = txq->next_to_complete;
1386 
1387 	for (i = 0; i < comp_read; i++) {
1388 		struct mana_tx_comp_oob *cqe_oob;
1389 
1390 		if (!completions[i].is_sq) {
1391 			mana_err(NULL, "WARNING: Not for SQ\n");
1392 			return;
1393 		}
1394 
1395 		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1396 		if (cqe_oob->cqe_hdr.client_type !=
1397 				 MANA_CQE_COMPLETION) {
1398 			mana_err(NULL,
1399 			    "WARNING: Invalid CQE client type %u\n",
1400 			    cqe_oob->cqe_hdr.client_type);
1401 			return;
1402 		}
1403 
1404 		switch (cqe_oob->cqe_hdr.cqe_type) {
1405 		case CQE_TX_OKAY:
1406 			break;
1407 
1408 		case CQE_TX_SA_DROP:
1409 		case CQE_TX_MTU_DROP:
1410 		case CQE_TX_INVALID_OOB:
1411 		case CQE_TX_INVALID_ETH_TYPE:
1412 		case CQE_TX_HDR_PROCESSING_ERROR:
1413 		case CQE_TX_VF_DISABLED:
1414 		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1415 		case CQE_TX_VPORT_DISABLED:
1416 		case CQE_TX_VLAN_TAGGING_VIOLATION:
1417 			sa_drop ++;
1418 			mana_err(NULL,
1419 			    "TX: txq %d CQE error %d, ntc = %d, "
1420 			    "pending sends = %d: err ignored.\n",
1421 			    txq_idx, cqe_oob->cqe_hdr.cqe_type,
1422 			    next_to_complete, txq->pending_sends);
1423 			break;
1424 
1425 		default:
1426 			/* If the CQE type is unexpected, log an error,
1427 			 * and go through the error path.
1428 			 */
1429 			mana_err(NULL,
1430 			    "ERROR: TX: Unexpected CQE type %d: HW BUG?\n",
1431 			    cqe_oob->cqe_hdr.cqe_type);
1432 			return;
1433 		}
1434 		if (txq->gdma_txq_id != completions[i].wq_num) {
1435 			mana_dbg(NULL,
1436 			    "txq gdma id not match completion wq num: "
1437 			    "%d != %d\n",
1438 			    txq->gdma_txq_id, completions[i].wq_num);
1439 			break;
1440 		}
1441 
1442 		tx_info = &txq->tx_buf_info[next_to_complete];
1443 		if (!tx_info->mbuf) {
1444 			mana_err(NULL,
1445 			    "WARNING: txq %d Empty mbuf on tx_info: %u, "
1446 			    "ntu = %u, pending_sends = %d, "
1447 			    "transmitted = %d, sa_drop = %d, i = %d, comp_read = %d\n",
1448 			    txq_idx, next_to_complete, txq->next_to_use,
1449 			    txq->pending_sends, pkt_transmitted, sa_drop,
1450 			    i, comp_read);
1451 			break;
1452 		}
1453 
1454 		wqe_info = &tx_info->wqe_inf;
1455 		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1456 
1457 		mana_tx_unmap_mbuf(apc, tx_info);
1458 		mb();
1459 
1460 		next_to_complete =
1461 		    (next_to_complete + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
1462 
1463 		pkt_transmitted++;
1464 	}
1465 
1466 	txq->next_to_complete = next_to_complete;
1467 
1468 	if (wqe_unit_cnt == 0) {
1469 		mana_err(NULL,
1470 		    "WARNING: TX ring not proceeding!\n");
1471 		return;
1472 	}
1473 
1474 	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1475 
1476 	/* Ensure tail updated before checking q stop */
1477 	wmb();
1478 
1479 	gdma_wq = txq->gdma_sq;
1480 	avail_space = mana_gd_wq_avail_space(gdma_wq);
1481 
1482 
1483 	if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL) {
1484 		txq_full = true;
1485 	}
1486 
1487 	/* Ensure checking txq_full before apc->port_is_up. */
1488 	rmb();
1489 
1490 	if (txq_full && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1491 		/* Grab the txq lock and re-test */
1492 		mtx_lock(&txq->txq_mtx);
1493 		avail_space = mana_gd_wq_avail_space(gdma_wq);
1494 
1495 		if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL &&
1496 		    apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1497 			/* Clear the Q full flag */
1498 			if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING,
1499 			    IFF_DRV_OACTIVE);
1500 			counter_u64_add(txq->stats.wakeup, 1);
1501 			if (txq->alt_txq_idx != txq->idx) {
1502 				uint64_t stops = counter_u64_fetch(txq->stats.stop);
1503 				uint64_t wakeups = counter_u64_fetch(txq->stats.wakeup);
1504 				/* Reset alt_txq_idx back if it is not overloaded */
1505 				if (stops < wakeups) {
1506 					txq->alt_txq_idx = txq->idx;
1507 					counter_u64_add(txq->stats.alt_reset, 1);
1508 				}
1509 			}
1510 			rmb();
1511 			/* Schedule a tx enqueue task */
1512 			taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
1513 		}
1514 		mtx_unlock(&txq->txq_mtx);
1515 	}
1516 
1517 	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1518 		mana_err(NULL,
1519 		    "WARNING: TX %d pending_sends error: %d\n",
1520 		    txq->idx, txq->pending_sends);
1521 
1522 	cq->work_done = pkt_transmitted;
1523 }
1524 
1525 static void
1526 mana_post_pkt_rxq(struct mana_rxq *rxq)
1527 {
1528 	struct mana_recv_buf_oob *recv_buf_oob;
1529 	uint32_t curr_index;
1530 	int err;
1531 
1532 	curr_index = rxq->buf_index++;
1533 	if (rxq->buf_index == rxq->num_rx_buf)
1534 		rxq->buf_index = 0;
1535 
1536 	recv_buf_oob = &rxq->rx_oobs[curr_index];
1537 
1538 	err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1539 	    &recv_buf_oob->wqe_inf);
1540 	if (err) {
1541 		mana_err(NULL, "WARNING: rxq %u post pkt err %d\n",
1542 		    rxq->rxq_idx, err);
1543 		return;
1544 	}
1545 
1546 	if (recv_buf_oob->wqe_inf.wqe_size_in_bu != 1) {
1547 		mana_err(NULL, "WARNING: rxq %u wqe_size_in_bu %u\n",
1548 		    rxq->rxq_idx, recv_buf_oob->wqe_inf.wqe_size_in_bu);
1549 	}
1550 }
1551 
1552 static void
1553 mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe,
1554     struct mana_rxq *rxq)
1555 {
1556 	struct mana_stats *rx_stats = &rxq->stats;
1557 	struct ifnet *ndev = rxq->ndev;
1558 	uint32_t pkt_len = cqe->ppi[0].pkt_len;
1559 	uint16_t rxq_idx = rxq->rxq_idx;
1560 	struct mana_port_context *apc;
1561 	bool do_lro = false;
1562 	bool do_if_input;
1563 
1564 	apc = if_getsoftc(ndev);
1565 	rxq->rx_cq.work_done++;
1566 
1567 	if (!mbuf) {
1568 		return;
1569 	}
1570 
1571 	mbuf->m_flags |= M_PKTHDR;
1572 	mbuf->m_pkthdr.len = pkt_len;
1573 	mbuf->m_len = pkt_len;
1574 	mbuf->m_pkthdr.rcvif = ndev;
1575 
1576 	if ((ndev->if_capenable & IFCAP_RXCSUM ||
1577 	    ndev->if_capenable & IFCAP_RXCSUM_IPV6) &&
1578 	    (cqe->rx_iphdr_csum_succeed)) {
1579 		mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1580 		mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1581 		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) {
1582 			mbuf->m_pkthdr.csum_flags |=
1583 			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1584 			mbuf->m_pkthdr.csum_data = 0xffff;
1585 
1586 			if (cqe->rx_tcp_csum_succeed)
1587 				do_lro = true;
1588 		}
1589 	}
1590 
1591 	if (cqe->rx_hashtype != 0) {
1592 		mbuf->m_pkthdr.flowid = cqe->ppi[0].pkt_hash;
1593 
1594 		uint16_t hashtype = cqe->rx_hashtype;
1595 		if (hashtype & NDIS_HASH_IPV4_MASK) {
1596 			hashtype &= NDIS_HASH_IPV4_MASK;
1597 			switch (hashtype) {
1598 			case NDIS_HASH_TCP_IPV4:
1599 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
1600 				break;
1601 			case NDIS_HASH_UDP_IPV4:
1602 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
1603 				break;
1604 			default:
1605 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
1606 			}
1607 		} else if (hashtype & NDIS_HASH_IPV6_MASK) {
1608 			hashtype &= NDIS_HASH_IPV6_MASK;
1609 			switch (hashtype) {
1610 			case NDIS_HASH_TCP_IPV6:
1611 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
1612 				break;
1613 			case NDIS_HASH_TCP_IPV6_EX:
1614 				M_HASHTYPE_SET(mbuf,
1615 				    M_HASHTYPE_RSS_TCP_IPV6_EX);
1616 				break;
1617 			case NDIS_HASH_UDP_IPV6:
1618 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
1619 				break;
1620 			case NDIS_HASH_UDP_IPV6_EX:
1621 				M_HASHTYPE_SET(mbuf,
1622 				    M_HASHTYPE_RSS_UDP_IPV6_EX);
1623 				break;
1624 			default:
1625 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
1626 			}
1627 		} else {
1628 			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
1629 		}
1630 	} else {
1631 		mbuf->m_pkthdr.flowid = rxq_idx;
1632 		M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
1633 	}
1634 
1635 	do_if_input = true;
1636 	if ((ndev->if_capenable & IFCAP_LRO) && do_lro) {
1637 		if (rxq->lro.lro_cnt != 0 &&
1638 		    tcp_lro_rx(&rxq->lro, mbuf, 0) == 0)
1639 			do_if_input = false;
1640 	}
1641 	if (do_if_input) {
1642 		ndev->if_input(ndev, mbuf);
1643 	}
1644 
1645 	counter_enter();
1646 	counter_u64_add_protected(rx_stats->packets, 1);
1647 	counter_u64_add_protected(apc->port_stats.rx_packets, 1);
1648 	counter_u64_add_protected(rx_stats->bytes, pkt_len);
1649 	counter_u64_add_protected(apc->port_stats.rx_bytes, pkt_len);
1650 	counter_exit();
1651 }
1652 
1653 static void
1654 mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1655     struct gdma_comp *cqe)
1656 {
1657 	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1658 	struct mana_recv_buf_oob *rxbuf_oob;
1659 	struct ifnet *ndev = rxq->ndev;
1660 	struct mana_port_context *apc;
1661 	struct mbuf *old_mbuf;
1662 	uint32_t curr, pktlen;
1663 	int err;
1664 
1665 	switch (oob->cqe_hdr.cqe_type) {
1666 	case CQE_RX_OKAY:
1667 		break;
1668 
1669 	case CQE_RX_TRUNCATED:
1670 		apc = if_getsoftc(ndev);
1671 		counter_u64_add(apc->port_stats.rx_drops, 1);
1672 		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1673 		if_printf(ndev, "Dropped a truncated packet\n");
1674 		goto drop;
1675 
1676 	case CQE_RX_COALESCED_4:
1677 		if_printf(ndev, "RX coalescing is unsupported\n");
1678 		return;
1679 
1680 	case CQE_RX_OBJECT_FENCE:
1681 		complete(&rxq->fence_event);
1682 		return;
1683 
1684 	default:
1685 		if_printf(ndev, "Unknown RX CQE type = %d\n",
1686 		    oob->cqe_hdr.cqe_type);
1687 		return;
1688 	}
1689 
1690 	if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
1691 		return;
1692 
1693 	pktlen = oob->ppi[0].pkt_len;
1694 
1695 	if (pktlen == 0) {
1696 		/* data packets should never have packetlength of zero */
1697 #if defined(__amd64__)
1698 		if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%lx\n",
1699 		    rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1700 #else
1701 		if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1702 		    rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1703 #endif
1704 		return;
1705 	}
1706 
1707 	curr = rxq->buf_index;
1708 	rxbuf_oob = &rxq->rx_oobs[curr];
1709 	if (rxbuf_oob->wqe_inf.wqe_size_in_bu != 1) {
1710 		mana_err(NULL, "WARNING: Rx Incorrect complete "
1711 		    "WQE size %u\n",
1712 		    rxbuf_oob->wqe_inf.wqe_size_in_bu);
1713 	}
1714 
1715 	apc = if_getsoftc(ndev);
1716 
1717 	old_mbuf = rxbuf_oob->mbuf;
1718 
1719 	/* Unload DMA map for the old mbuf */
1720 	mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);
1721 
1722 	/* Load a new mbuf to replace the old one */
1723 	err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
1724 	if (err) {
1725 		mana_dbg(NULL,
1726 		    "failed to load rx mbuf, err = %d, packet dropped.\n",
1727 		    err);
1728 		counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
1729 		/*
1730 		 * Failed to load new mbuf, rxbuf_oob->mbuf is still
1731 		 * pointing to the old one. Drop the packet.
1732 		 */
1733 		 old_mbuf = NULL;
1734 		 /* Reload the existing mbuf */
1735 		 mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
1736 	}
1737 
1738 	mana_rx_mbuf(old_mbuf, oob, rxq);
1739 
1740 drop:
1741 	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1742 
1743 	mana_post_pkt_rxq(rxq);
1744 }
1745 
1746 static void
1747 mana_poll_rx_cq(struct mana_cq *cq)
1748 {
1749 	struct gdma_comp *comp = cq->gdma_comp_buf;
1750 	int comp_read, i;
1751 
1752 	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1753 	KASSERT(comp_read <= CQE_POLLING_BUFFER,
1754 	    ("comp_read %d great than buf size %d",
1755 	    comp_read, CQE_POLLING_BUFFER));
1756 
1757 	for (i = 0; i < comp_read; i++) {
1758 		if (comp[i].is_sq == true) {
1759 			mana_err(NULL,
1760 			    "WARNING: CQE not for receive queue\n");
1761 			return;
1762 		}
1763 
1764 		/* verify recv cqe references the right rxq */
1765 		if (comp[i].wq_num != cq->rxq->gdma_id) {
1766 			mana_err(NULL,
1767 			    "WARNING: Received CQE %d  not for "
1768 			    "this receive queue %d\n",
1769 			    comp[i].wq_num,  cq->rxq->gdma_id);
1770 			return;
1771 		}
1772 
1773 		mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
1774 	}
1775 
1776 	tcp_lro_flush_all(&cq->rxq->lro);
1777 }
1778 
1779 static void
1780 mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1781 {
1782 	struct mana_cq *cq = context;
1783 	uint8_t arm_bit;
1784 
1785 	KASSERT(cq->gdma_cq == gdma_queue,
1786 	    ("cq do not match %p, %p", cq->gdma_cq, gdma_queue));
1787 
1788 	if (cq->type == MANA_CQ_TYPE_RX) {
1789 		mana_poll_rx_cq(cq);
1790 	} else {
1791 		mana_poll_tx_cq(cq);
1792 	}
1793 
1794 	if (cq->work_done < cq->budget && cq->do_not_ring_db == false)
1795 		arm_bit = SET_ARM_BIT;
1796 	else
1797 		arm_bit = 0;
1798 
1799 	mana_gd_ring_cq(gdma_queue, arm_bit);
1800 }
1801 
1802 #define MANA_POLL_BUDGET	8
1803 #define MANA_RX_BUDGET		256
1804 #define MANA_TX_BUDGET		MAX_SEND_BUFFERS_PER_QUEUE
1805 
1806 static void
1807 mana_poll(void *arg, int pending)
1808 {
1809 	struct mana_cq *cq = arg;
1810 	int i;
1811 
1812 	cq->work_done = 0;
1813 	if (cq->type == MANA_CQ_TYPE_RX) {
1814 		cq->budget = MANA_RX_BUDGET;
1815 	} else {
1816 		cq->budget = MANA_TX_BUDGET;
1817 	}
1818 
1819 	for (i = 0; i < MANA_POLL_BUDGET; i++) {
1820 		/*
1821 		 * If this is the last loop, set the budget big enough
1822 		 * so it will arm the CQ any way.
1823 		 */
1824 		if (i == (MANA_POLL_BUDGET - 1))
1825 			cq->budget = CQE_POLLING_BUFFER + 1;
1826 
1827 		mana_cq_handler(cq, cq->gdma_cq);
1828 
1829 		if (cq->work_done < cq->budget)
1830 			break;
1831 
1832 		cq->work_done = 0;
1833 	}
1834 }
1835 
1836 static void
1837 mana_schedule_task(void *arg, struct gdma_queue *gdma_queue)
1838 {
1839 	struct mana_cq *cq = arg;
1840 
1841 	taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
1842 }
1843 
1844 static void
1845 mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1846 {
1847 	struct gdma_dev *gd = apc->ac->gdma_dev;
1848 
1849 	if (!cq->gdma_cq)
1850 		return;
1851 
1852 	/* Drain cleanup taskqueue */
1853 	if (cq->cleanup_tq) {
1854 		while (taskqueue_cancel(cq->cleanup_tq,
1855 		    &cq->cleanup_task, NULL)) {
1856 			taskqueue_drain(cq->cleanup_tq,
1857 			    &cq->cleanup_task);
1858 		}
1859 
1860 		taskqueue_free(cq->cleanup_tq);
1861 	}
1862 
1863 	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1864 }
1865 
1866 static void
1867 mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1868 {
1869 	struct gdma_dev *gd = apc->ac->gdma_dev;
1870 	struct mana_send_buf_info *txbuf_info;
1871 	uint32_t pending_sends;
1872 	int i;
1873 
1874 	if (!txq->gdma_sq)
1875 		return;
1876 
1877 	if ((pending_sends = atomic_read(&txq->pending_sends)) > 0) {
1878 		mana_err(NULL,
1879 		    "WARNING: txq pending sends not zero: %u\n",
1880 		    pending_sends);
1881 	}
1882 
1883 	if (txq->next_to_use != txq->next_to_complete) {
1884 		mana_err(NULL,
1885 		    "WARNING: txq buf not completed, "
1886 		    "next use %u, next complete %u\n",
1887 		    txq->next_to_use, txq->next_to_complete);
1888 	}
1889 
1890 	/* Flush buf ring. Grab txq mtx lock */
1891 	if (txq->txq_br) {
1892 		mtx_lock(&txq->txq_mtx);
1893 		drbr_flush(apc->ndev, txq->txq_br);
1894 		mtx_unlock(&txq->txq_mtx);
1895 		buf_ring_free(txq->txq_br, M_DEVBUF);
1896 	}
1897 
1898 	/* Drain taskqueue */
1899 	if (txq->enqueue_tq) {
1900 		while (taskqueue_cancel(txq->enqueue_tq,
1901 		    &txq->enqueue_task, NULL)) {
1902 			taskqueue_drain(txq->enqueue_tq,
1903 			    &txq->enqueue_task);
1904 		}
1905 
1906 		taskqueue_free(txq->enqueue_tq);
1907 	}
1908 
1909 	if (txq->tx_buf_info) {
1910 		/* Free all mbufs which are still in-flight */
1911 		for (i = 0; i < MAX_SEND_BUFFERS_PER_QUEUE; i++) {
1912 			txbuf_info = &txq->tx_buf_info[i];
1913 			if (txbuf_info->mbuf) {
1914 				mana_tx_unmap_mbuf(apc, txbuf_info);
1915 			}
1916 		}
1917 
1918 		free(txq->tx_buf_info, M_DEVBUF);
1919 	}
1920 
1921 	mana_free_counters((counter_u64_t *)&txq->stats,
1922 	    sizeof(txq->stats));
1923 
1924 	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1925 
1926 	mtx_destroy(&txq->txq_mtx);
1927 }
1928 
1929 static void
1930 mana_destroy_txq(struct mana_port_context *apc)
1931 {
1932 	int i;
1933 
1934 	if (!apc->tx_qp)
1935 		return;
1936 
1937 	for (i = 0; i < apc->num_queues; i++) {
1938 		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1939 
1940 		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1941 
1942 		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1943 	}
1944 
1945 	free(apc->tx_qp, M_DEVBUF);
1946 	apc->tx_qp = NULL;
1947 }
1948 
1949 static int
1950 mana_create_txq(struct mana_port_context *apc, struct ifnet *net)
1951 {
1952 	struct mana_context *ac = apc->ac;
1953 	struct gdma_dev *gd = ac->gdma_dev;
1954 	struct mana_obj_spec wq_spec;
1955 	struct mana_obj_spec cq_spec;
1956 	struct gdma_queue_spec spec;
1957 	struct gdma_context *gc;
1958 	struct mana_txq *txq;
1959 	struct mana_cq *cq;
1960 	uint32_t txq_size;
1961 	uint32_t cq_size;
1962 	int err;
1963 	int i;
1964 
1965 	apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
1966 	    M_DEVBUF, M_WAITOK | M_ZERO);
1967 	if (!apc->tx_qp)
1968 		return ENOMEM;
1969 
1970 	/*  The minimum size of the WQE is 32 bytes, hence
1971 	 *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
1972 	 *  the SQ can store. This value is then used to size other queues
1973 	 *  to prevent overflow.
1974 	 */
1975 	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1976 	KASSERT(IS_ALIGNED(txq_size, PAGE_SIZE),
1977 	    ("txq size not page aligned"));
1978 
1979 	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1980 	cq_size = ALIGN(cq_size, PAGE_SIZE);
1981 
1982 	gc = gd->gdma_context;
1983 
1984 	for (i = 0; i < apc->num_queues; i++) {
1985 		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1986 
1987 		/* Create SQ */
1988 		txq = &apc->tx_qp[i].txq;
1989 
1990 		txq->ndev = net;
1991 		txq->vp_offset = apc->tx_vp_offset;
1992 		txq->idx = i;
1993 		txq->alt_txq_idx = i;
1994 
1995 		memset(&spec, 0, sizeof(spec));
1996 		spec.type = GDMA_SQ;
1997 		spec.monitor_avl_buf = true;
1998 		spec.queue_size = txq_size;
1999 		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
2000 		if (err)
2001 			goto out;
2002 
2003 		/* Create SQ's CQ */
2004 		cq = &apc->tx_qp[i].tx_cq;
2005 		cq->type = MANA_CQ_TYPE_TX;
2006 
2007 		cq->txq = txq;
2008 
2009 		memset(&spec, 0, sizeof(spec));
2010 		spec.type = GDMA_CQ;
2011 		spec.monitor_avl_buf = false;
2012 		spec.queue_size = cq_size;
2013 		spec.cq.callback = mana_schedule_task;
2014 		spec.cq.parent_eq = ac->eqs[i].eq;
2015 		spec.cq.context = cq;
2016 		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2017 		if (err)
2018 			goto out;
2019 
2020 		memset(&wq_spec, 0, sizeof(wq_spec));
2021 		memset(&cq_spec, 0, sizeof(cq_spec));
2022 
2023 		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
2024 		wq_spec.queue_size = txq->gdma_sq->queue_size;
2025 
2026 		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2027 		cq_spec.queue_size = cq->gdma_cq->queue_size;
2028 		cq_spec.modr_ctx_id = 0;
2029 		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2030 
2031 		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
2032 		    &wq_spec, &cq_spec, &apc->tx_qp[i].tx_object);
2033 
2034 		if (err)
2035 			goto out;
2036 
2037 		txq->gdma_sq->id = wq_spec.queue_index;
2038 		cq->gdma_cq->id = cq_spec.queue_index;
2039 
2040 		txq->gdma_sq->mem_info.dma_region_handle =
2041 		    GDMA_INVALID_DMA_REGION;
2042 		cq->gdma_cq->mem_info.dma_region_handle =
2043 		    GDMA_INVALID_DMA_REGION;
2044 
2045 		txq->gdma_txq_id = txq->gdma_sq->id;
2046 
2047 		cq->gdma_id = cq->gdma_cq->id;
2048 
2049 		mana_dbg(NULL,
2050 		    "txq %d, txq gdma id %d, txq cq gdma id %d\n",
2051 		    i, txq->gdma_txq_id, cq->gdma_id);;
2052 
2053 		if (cq->gdma_id >= gc->max_num_cqs) {
2054 			if_printf(net, "CQ id %u too large.\n", cq->gdma_id);
2055 			err = EINVAL;
2056 			goto out;
2057 		}
2058 
2059 		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2060 
2061 		/* Initialize tx specific data */
2062 		txq->tx_buf_info = malloc(MAX_SEND_BUFFERS_PER_QUEUE *
2063 		    sizeof(struct mana_send_buf_info),
2064 		    M_DEVBUF, M_WAITOK | M_ZERO);
2065 		if (unlikely(txq->tx_buf_info == NULL)) {
2066 			if_printf(net,
2067 			    "Failed to allocate tx buf info for SQ %u\n",
2068 			    txq->gdma_sq->id);
2069 			err = ENOMEM;
2070 			goto out;
2071 		}
2072 
2073 
2074 		snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
2075 		    "mana:tx(%d)", i);
2076 		mtx_init(&txq->txq_mtx, txq->txq_mtx_name, NULL, MTX_DEF);
2077 
2078 		txq->txq_br = buf_ring_alloc(4 * MAX_SEND_BUFFERS_PER_QUEUE,
2079 		    M_DEVBUF, M_WAITOK, &txq->txq_mtx);
2080 		if (unlikely(txq->txq_br == NULL)) {
2081 			if_printf(net,
2082 			    "Failed to allocate buf ring for SQ %u\n",
2083 			    txq->gdma_sq->id);
2084 			err = ENOMEM;
2085 			goto out;
2086 		}
2087 
2088 		/* Allocate taskqueue for deferred send */
2089 		TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
2090 		txq->enqueue_tq = taskqueue_create_fast("mana_tx_enque",
2091 		    M_NOWAIT, taskqueue_thread_enqueue, &txq->enqueue_tq);
2092 		if (unlikely(txq->enqueue_tq == NULL)) {
2093 			if_printf(net,
2094 			    "Unable to create tx %d enqueue task queue\n", i);
2095 			err = ENOMEM;
2096 			goto out;
2097 		}
2098 		taskqueue_start_threads(&txq->enqueue_tq, 1, PI_NET,
2099 		    "mana txq p%u-tx%d", apc->port_idx, i);
2100 
2101 		mana_alloc_counters((counter_u64_t *)&txq->stats,
2102 		    sizeof(txq->stats));
2103 
2104 		/* Allocate and start the cleanup task on CQ */
2105 		cq->do_not_ring_db = false;
2106 
2107 		NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
2108 		cq->cleanup_tq =
2109 		    taskqueue_create_fast("mana tx cq cleanup",
2110 		    M_WAITOK, taskqueue_thread_enqueue,
2111 		    &cq->cleanup_tq);
2112 
2113 		if (apc->last_tx_cq_bind_cpu < 0)
2114 			apc->last_tx_cq_bind_cpu = CPU_FIRST();
2115 		cq->cpu = apc->last_tx_cq_bind_cpu;
2116 		apc->last_tx_cq_bind_cpu = CPU_NEXT(apc->last_tx_cq_bind_cpu);
2117 
2118 		if (apc->bind_cleanup_thread_cpu) {
2119 			cpuset_t cpu_mask;
2120 			CPU_SETOF(cq->cpu, &cpu_mask);
2121 			taskqueue_start_threads_cpuset(&cq->cleanup_tq,
2122 			    1, PI_NET, &cpu_mask,
2123 			    "mana cq p%u-tx%u-cpu%d",
2124 			    apc->port_idx, txq->idx, cq->cpu);
2125 		} else {
2126 			taskqueue_start_threads(&cq->cleanup_tq, 1,
2127 			    PI_NET, "mana cq p%u-tx%u",
2128 			    apc->port_idx, txq->idx);
2129 		}
2130 
2131 		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2132 	}
2133 
2134 	return 0;
2135 out:
2136 	mana_destroy_txq(apc);
2137 	return err;
2138 }
2139 
2140 static void
2141 mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq,
2142     bool validate_state)
2143 {
2144 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2145 	struct mana_recv_buf_oob *rx_oob;
2146 	int i;
2147 
2148 	if (!rxq)
2149 		return;
2150 
2151 	if (validate_state) {
2152 		/*
2153 		 * XXX Cancel and drain cleanup task queue here.
2154 		 */
2155 		;
2156 	}
2157 
2158 	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2159 
2160 	mana_deinit_cq(apc, &rxq->rx_cq);
2161 
2162 	mana_free_counters((counter_u64_t *)&rxq->stats,
2163 	    sizeof(rxq->stats));
2164 
2165 	/* Free LRO resources */
2166 	tcp_lro_free(&rxq->lro);
2167 
2168 	for (i = 0; i < rxq->num_rx_buf; i++) {
2169 		rx_oob = &rxq->rx_oobs[i];
2170 
2171 		if (rx_oob->mbuf)
2172 			mana_unload_rx_mbuf(apc, rxq, rx_oob, true);
2173 
2174 		bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2175 	}
2176 
2177 	if (rxq->gdma_rq)
2178 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
2179 
2180 	free(rxq, M_DEVBUF);
2181 }
2182 
2183 #define MANA_WQE_HEADER_SIZE 16
2184 #define MANA_WQE_SGE_SIZE 16
2185 
2186 static int
2187 mana_alloc_rx_wqe(struct mana_port_context *apc,
2188     struct mana_rxq *rxq, uint32_t *rxq_size, uint32_t *cq_size)
2189 {
2190 	struct mana_recv_buf_oob *rx_oob;
2191 	uint32_t buf_idx;
2192 	int err;
2193 
2194 	if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) {
2195 		mana_err(NULL,
2196 		    "WARNING: Invalid rxq datasize %u\n", rxq->datasize);
2197 	}
2198 
2199 	*rxq_size = 0;
2200 	*cq_size = 0;
2201 
2202 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2203 		rx_oob = &rxq->rx_oobs[buf_idx];
2204 		memset(rx_oob, 0, sizeof(*rx_oob));
2205 
2206 		err = bus_dmamap_create(apc->rx_buf_tag, 0,
2207 		    &rx_oob->dma_map);
2208 		if (err) {
2209 			mana_err(NULL,
2210 			    "Failed to  create rx DMA map for buf %d\n",
2211 			    buf_idx);
2212 			return err;
2213 		}
2214 
2215 		err = mana_load_rx_mbuf(apc, rxq, rx_oob, true);
2216 		if (err) {
2217 			mana_err(NULL,
2218 			    "Failed to  create rx DMA map for buf %d\n",
2219 			    buf_idx);
2220 			bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2221 			return err;
2222 		}
2223 
2224 		rx_oob->wqe_req.sgl = rx_oob->sgl;
2225 		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2226 		rx_oob->wqe_req.inline_oob_size = 0;
2227 		rx_oob->wqe_req.inline_oob_data = NULL;
2228 		rx_oob->wqe_req.flags = 0;
2229 		rx_oob->wqe_req.client_data_unit = 0;
2230 
2231 		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2232 				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2233 		*cq_size += COMP_ENTRY_SIZE;
2234 	}
2235 
2236 	return 0;
2237 }
2238 
2239 static int
2240 mana_push_wqe(struct mana_rxq *rxq)
2241 {
2242 	struct mana_recv_buf_oob *rx_oob;
2243 	uint32_t buf_idx;
2244 	int err;
2245 
2246 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2247 		rx_oob = &rxq->rx_oobs[buf_idx];
2248 
2249 		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2250 		    &rx_oob->wqe_inf);
2251 		if (err)
2252 			return ENOSPC;
2253 	}
2254 
2255 	return 0;
2256 }
2257 
2258 static struct mana_rxq *
2259 mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
2260     struct mana_eq *eq, struct ifnet *ndev)
2261 {
2262 	struct gdma_dev *gd = apc->ac->gdma_dev;
2263 	struct mana_obj_spec wq_spec;
2264 	struct mana_obj_spec cq_spec;
2265 	struct gdma_queue_spec spec;
2266 	struct mana_cq *cq = NULL;
2267 	uint32_t cq_size, rq_size;
2268 	struct gdma_context *gc;
2269 	struct mana_rxq *rxq;
2270 	int err;
2271 
2272 	gc = gd->gdma_context;
2273 
2274 	rxq = malloc(sizeof(*rxq) +
2275 	    RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
2276 	    M_DEVBUF, M_WAITOK | M_ZERO);
2277 	if (!rxq)
2278 		return NULL;
2279 
2280 	rxq->ndev = ndev;
2281 	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
2282 	rxq->rxq_idx = rxq_idx;
2283 	/*
2284 	 * Minimum size is MCLBYTES(2048) bytes for a mbuf cluster.
2285 	 * Now we just allow maximum size of 4096.
2286 	 */
2287 	rxq->datasize = ALIGN(apc->frame_size, MCLBYTES);
2288 	if (rxq->datasize > MAX_FRAME_SIZE)
2289 		rxq->datasize = MAX_FRAME_SIZE;
2290 
2291 	mana_dbg(NULL, "Setting rxq %d datasize %d\n",
2292 	    rxq_idx, rxq->datasize);
2293 
2294 	rxq->rxobj = INVALID_MANA_HANDLE;
2295 
2296 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2297 	if (err)
2298 		goto out;
2299 
2300 	/* Create LRO for the RQ */
2301 	if (ndev->if_capenable & IFCAP_LRO) {
2302 		err = tcp_lro_init(&rxq->lro);
2303 		if (err) {
2304 			if_printf(ndev, "Failed to create LRO for rxq %d\n",
2305 			    rxq_idx);
2306 		} else {
2307 			rxq->lro.ifp = ndev;
2308 		}
2309 	}
2310 
2311 	mana_alloc_counters((counter_u64_t *)&rxq->stats,
2312 	    sizeof(rxq->stats));
2313 
2314 	rq_size = ALIGN(rq_size, PAGE_SIZE);
2315 	cq_size = ALIGN(cq_size, PAGE_SIZE);
2316 
2317 	/* Create RQ */
2318 	memset(&spec, 0, sizeof(spec));
2319 	spec.type = GDMA_RQ;
2320 	spec.monitor_avl_buf = true;
2321 	spec.queue_size = rq_size;
2322 	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2323 	if (err)
2324 		goto out;
2325 
2326 	/* Create RQ's CQ */
2327 	cq = &rxq->rx_cq;
2328 	cq->type = MANA_CQ_TYPE_RX;
2329 	cq->rxq = rxq;
2330 
2331 	memset(&spec, 0, sizeof(spec));
2332 	spec.type = GDMA_CQ;
2333 	spec.monitor_avl_buf = false;
2334 	spec.queue_size = cq_size;
2335 	spec.cq.callback = mana_schedule_task;
2336 	spec.cq.parent_eq = eq->eq;
2337 	spec.cq.context = cq;
2338 	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2339 	if (err)
2340 		goto out;
2341 
2342 	memset(&wq_spec, 0, sizeof(wq_spec));
2343 	memset(&cq_spec, 0, sizeof(cq_spec));
2344 	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2345 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
2346 
2347 	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2348 	cq_spec.queue_size = cq->gdma_cq->queue_size;
2349 	cq_spec.modr_ctx_id = 0;
2350 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2351 
2352 	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2353 	    &wq_spec, &cq_spec, &rxq->rxobj);
2354 	if (err)
2355 		goto out;
2356 
2357 	rxq->gdma_rq->id = wq_spec.queue_index;
2358 	cq->gdma_cq->id = cq_spec.queue_index;
2359 
2360 	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2361 	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2362 
2363 	rxq->gdma_id = rxq->gdma_rq->id;
2364 	cq->gdma_id = cq->gdma_cq->id;
2365 
2366 	err = mana_push_wqe(rxq);
2367 	if (err)
2368 		goto out;
2369 
2370 	if (cq->gdma_id >= gc->max_num_cqs) {
2371 		err = EINVAL;
2372 		goto out;
2373 	}
2374 
2375 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2376 
2377 	/* Allocate and start the cleanup task on CQ */
2378 	cq->do_not_ring_db = false;
2379 
2380 	NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
2381 	cq->cleanup_tq =
2382 	    taskqueue_create_fast("mana rx cq cleanup",
2383 	    M_WAITOK, taskqueue_thread_enqueue,
2384 	    &cq->cleanup_tq);
2385 
2386 	if (apc->last_rx_cq_bind_cpu < 0)
2387 		apc->last_rx_cq_bind_cpu = CPU_FIRST();
2388 	cq->cpu = apc->last_rx_cq_bind_cpu;
2389 	apc->last_rx_cq_bind_cpu = CPU_NEXT(apc->last_rx_cq_bind_cpu);
2390 
2391 	if (apc->bind_cleanup_thread_cpu) {
2392 		cpuset_t cpu_mask;
2393 		CPU_SETOF(cq->cpu, &cpu_mask);
2394 		taskqueue_start_threads_cpuset(&cq->cleanup_tq,
2395 		    1, PI_NET, &cpu_mask,
2396 		    "mana cq p%u-rx%u-cpu%d",
2397 		    apc->port_idx, rxq->rxq_idx, cq->cpu);
2398 	} else {
2399 		taskqueue_start_threads(&cq->cleanup_tq, 1,
2400 		    PI_NET, "mana cq p%u-rx%u",
2401 		    apc->port_idx, rxq->rxq_idx);
2402 	}
2403 
2404 	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2405 out:
2406 	if (!err)
2407 		return rxq;
2408 
2409 	if_printf(ndev, "Failed to create RXQ: err = %d\n", err);
2410 
2411 	mana_destroy_rxq(apc, rxq, false);
2412 
2413 	if (cq)
2414 		mana_deinit_cq(apc, cq);
2415 
2416 	return NULL;
2417 }
2418 
2419 static int
2420 mana_add_rx_queues(struct mana_port_context *apc, struct ifnet *ndev)
2421 {
2422 	struct mana_context *ac = apc->ac;
2423 	struct mana_rxq *rxq;
2424 	int err = 0;
2425 	int i;
2426 
2427 	for (i = 0; i < apc->num_queues; i++) {
2428 		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2429 		if (!rxq) {
2430 			err = ENOMEM;
2431 			goto out;
2432 		}
2433 
2434 		apc->rxqs[i] = rxq;
2435 	}
2436 
2437 	apc->default_rxobj = apc->rxqs[0]->rxobj;
2438 out:
2439 	return err;
2440 }
2441 
2442 static void
2443 mana_destroy_vport(struct mana_port_context *apc)
2444 {
2445 	struct mana_rxq *rxq;
2446 	uint32_t rxq_idx;
2447 
2448 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2449 		rxq = apc->rxqs[rxq_idx];
2450 		if (!rxq)
2451 			continue;
2452 
2453 		mana_destroy_rxq(apc, rxq, true);
2454 		apc->rxqs[rxq_idx] = NULL;
2455 	}
2456 
2457 	mana_destroy_txq(apc);
2458 
2459 	mana_uncfg_vport(apc);
2460 }
2461 
2462 static int
2463 mana_create_vport(struct mana_port_context *apc, struct ifnet *net)
2464 {
2465 	struct gdma_dev *gd = apc->ac->gdma_dev;
2466 	int err;
2467 
2468 	apc->default_rxobj = INVALID_MANA_HANDLE;
2469 
2470 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2471 	if (err)
2472 		return err;
2473 
2474 	return mana_create_txq(apc, net);
2475 }
2476 
2477 
2478 static void mana_rss_table_init(struct mana_port_context *apc)
2479 {
2480 	int i;
2481 
2482 	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
2483 		apc->indir_table[i] = i % apc->num_queues;
2484 }
2485 
2486 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2487 		    bool update_hash, bool update_tab)
2488 {
2489 	uint32_t queue_idx;
2490 	int err;
2491 	int i;
2492 
2493 	if (update_tab) {
2494 		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
2495 			queue_idx = apc->indir_table[i];
2496 			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2497 		}
2498 	}
2499 
2500 	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2501 	if (err)
2502 		return err;
2503 
2504 	mana_fence_rqs(apc);
2505 
2506 	return 0;
2507 }
2508 
2509 static int
2510 mana_init_port(struct ifnet *ndev)
2511 {
2512 	struct mana_port_context *apc = if_getsoftc(ndev);
2513 	uint32_t max_txq, max_rxq, max_queues;
2514 	int port_idx = apc->port_idx;
2515 	uint32_t num_indirect_entries;
2516 	int err;
2517 
2518 	err = mana_init_port_context(apc);
2519 	if (err)
2520 		return err;
2521 
2522 	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2523 	    &num_indirect_entries);
2524 	if (err) {
2525 		if_printf(ndev, "Failed to query info for vPort %d\n",
2526 		    port_idx);
2527 		goto reset_apc;
2528 	}
2529 
2530 	max_queues = min_t(uint32_t, max_txq, max_rxq);
2531 	if (apc->max_queues > max_queues)
2532 		apc->max_queues = max_queues;
2533 
2534 	if (apc->num_queues > apc->max_queues)
2535 		apc->num_queues = apc->max_queues;
2536 
2537 	return 0;
2538 
2539 reset_apc:
2540 	bus_dma_tag_destroy(apc->rx_buf_tag);
2541 	apc->rx_buf_tag = NULL;
2542 	free(apc->rxqs, M_DEVBUF);
2543 	apc->rxqs = NULL;
2544 	return err;
2545 }
2546 
2547 int
2548 mana_alloc_queues(struct ifnet *ndev)
2549 {
2550 	struct mana_port_context *apc = if_getsoftc(ndev);
2551 	int err;
2552 
2553 	err = mana_create_vport(apc, ndev);
2554 	if (err)
2555 		return err;
2556 
2557 	err = mana_add_rx_queues(apc, ndev);
2558 	if (err)
2559 		goto destroy_vport;
2560 
2561 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2562 
2563 	mana_rss_table_init(apc);
2564 
2565 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2566 	if (err)
2567 		goto destroy_vport;
2568 
2569 	return 0;
2570 
2571 destroy_vport:
2572 	mana_destroy_vport(apc);
2573 	return err;
2574 }
2575 
2576 static int
2577 mana_up(struct mana_port_context *apc)
2578 {
2579 	int err;
2580 
2581 	mana_dbg(NULL, "mana_up called\n");
2582 
2583 	err = mana_alloc_queues(apc->ndev);
2584 	if (err) {
2585 		mana_err(NULL, "Faile alloc mana queues: %d\n", err);
2586 		return err;
2587 	}
2588 
2589 	/* Add queue specific sysctl */
2590 	mana_sysctl_add_queues(apc);
2591 
2592 	apc->port_is_up = true;
2593 
2594 	/* Ensure port state updated before txq state */
2595 	wmb();
2596 
2597 	if_link_state_change(apc->ndev, LINK_STATE_UP);
2598 	if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2599 
2600 	return 0;
2601 }
2602 
2603 
2604 static void
2605 mana_init(void *arg)
2606 {
2607 	struct mana_port_context *apc = (struct mana_port_context *)arg;
2608 
2609 	MANA_APC_LOCK_LOCK(apc);
2610 	if (!apc->port_is_up) {
2611 		mana_up(apc);
2612 	}
2613 	MANA_APC_LOCK_UNLOCK(apc);
2614 }
2615 
2616 static int
2617 mana_dealloc_queues(struct ifnet *ndev)
2618 {
2619 	struct mana_port_context *apc = if_getsoftc(ndev);
2620 	struct mana_txq *txq;
2621 	int i, err;
2622 
2623 	if (apc->port_is_up)
2624 		return EINVAL;
2625 
2626 	/* No packet can be transmitted now since apc->port_is_up is false.
2627 	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
2628 	 * a txq because it may not timely see apc->port_is_up being cleared
2629 	 * to false, but it doesn't matter since mana_start_xmit() drops any
2630 	 * new packets due to apc->port_is_up being false.
2631 	 *
2632 	 * Drain all the in-flight TX packets
2633 	 */
2634 	for (i = 0; i < apc->num_queues; i++) {
2635 		txq = &apc->tx_qp[i].txq;
2636 
2637 		struct mana_cq *tx_cq = &apc->tx_qp[i].tx_cq;
2638 		struct mana_cq *rx_cq = &(apc->rxqs[i]->rx_cq);
2639 
2640 		tx_cq->do_not_ring_db = true;
2641 		rx_cq->do_not_ring_db = true;
2642 
2643 		/* Schedule a cleanup task */
2644 		taskqueue_enqueue(tx_cq->cleanup_tq, &tx_cq->cleanup_task);
2645 
2646 		while (atomic_read(&txq->pending_sends) > 0)
2647 			usleep_range(1000, 2000);
2648 	}
2649 
2650 	/* We're 100% sure the queues can no longer be woken up, because
2651 	 * we're sure now mana_poll_tx_cq() can't be running.
2652 	 */
2653 
2654 	apc->rss_state = TRI_STATE_FALSE;
2655 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2656 	if (err) {
2657 		if_printf(ndev, "Failed to disable vPort: %d\n", err);
2658 		return err;
2659 	}
2660 
2661 	mana_destroy_vport(apc);
2662 
2663 	return 0;
2664 }
2665 
2666 static int
2667 mana_down(struct mana_port_context *apc)
2668 {
2669 	int err = 0;
2670 
2671 	apc->port_st_save = apc->port_is_up;
2672 	apc->port_is_up = false;
2673 
2674 	/* Ensure port state updated before txq state */
2675 	wmb();
2676 
2677 	if (apc->port_st_save) {
2678 		if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE,
2679 		    IFF_DRV_RUNNING);
2680 		if_link_state_change(apc->ndev, LINK_STATE_DOWN);
2681 
2682 		mana_sysctl_free_queues(apc);
2683 
2684 		err = mana_dealloc_queues(apc->ndev);
2685 		if (err) {
2686 			if_printf(apc->ndev,
2687 			    "Failed to bring down mana interface: %d\n", err);
2688 		}
2689 	}
2690 
2691 	return err;
2692 }
2693 
2694 int
2695 mana_detach(struct ifnet *ndev)
2696 {
2697 	struct mana_port_context *apc = if_getsoftc(ndev);
2698 	int err;
2699 
2700 	ether_ifdetach(ndev);
2701 
2702 	if (!apc)
2703 		return 0;
2704 
2705 	MANA_APC_LOCK_LOCK(apc);
2706 	err = mana_down(apc);
2707 	MANA_APC_LOCK_UNLOCK(apc);
2708 
2709 	mana_cleanup_port_context(apc);
2710 
2711 	MANA_APC_LOCK_DESTROY(apc);
2712 
2713 	free(apc, M_DEVBUF);
2714 
2715 	return err;
2716 }
2717 
2718 static int
2719 mana_probe_port(struct mana_context *ac, int port_idx,
2720     struct ifnet **ndev_storage)
2721 {
2722 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
2723 	struct mana_port_context *apc;
2724 	struct ifnet *ndev;
2725 	int err;
2726 
2727 	ndev = if_alloc_dev(IFT_ETHER, gc->dev);
2728 	if (!ndev) {
2729 		mana_err(NULL, "Failed to allocate ifnet struct\n");
2730 		return ENOMEM;
2731 	}
2732 
2733 	*ndev_storage = ndev;
2734 
2735 	apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
2736 	if (!apc) {
2737 		mana_err(NULL, "Failed to allocate port context\n");
2738 		err = ENOMEM;
2739 		goto free_net;
2740 	}
2741 
2742 	apc->ac = ac;
2743 	apc->ndev = ndev;
2744 	apc->max_queues = gc->max_num_queues;
2745 	apc->num_queues = min_t(unsigned int,
2746 	    gc->max_num_queues, MANA_MAX_NUM_QUEUES);
2747 	apc->port_handle = INVALID_MANA_HANDLE;
2748 	apc->port_idx = port_idx;
2749 	apc->frame_size = DEFAULT_FRAME_SIZE;
2750 	apc->last_tx_cq_bind_cpu = -1;
2751 	apc->last_rx_cq_bind_cpu = -1;
2752 	apc->vport_use_count = 0;
2753 
2754 	MANA_APC_LOCK_INIT(apc);
2755 
2756 	if_initname(ndev, device_get_name(gc->dev), port_idx);
2757 	if_setdev(ndev,gc->dev);
2758 	if_setsoftc(ndev, apc);
2759 
2760 	if_setflags(ndev, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2761 	if_setinitfn(ndev, mana_init);
2762 	if_settransmitfn(ndev, mana_start_xmit);
2763 	if_setqflushfn(ndev, mana_qflush);
2764 	if_setioctlfn(ndev, mana_ioctl);
2765 	if_setgetcounterfn(ndev, mana_get_counter);
2766 
2767 	if_setmtu(ndev, ETHERMTU);
2768 	if_setbaudrate(ndev, IF_Gbps(100));
2769 
2770 	mana_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2771 
2772 	err = mana_init_port(ndev);
2773 	if (err)
2774 		goto reset_apc;
2775 
2776 	ndev->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
2777 	ndev->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
2778 	ndev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
2779 
2780 	ndev->if_capabilities |= IFCAP_LRO | IFCAP_LINKSTATE;
2781 
2782 	/* Enable all available capabilities by default. */
2783 	ndev->if_capenable = ndev->if_capabilities;
2784 
2785 	/* TSO parameters */
2786 	ndev->if_hw_tsomax = MAX_MBUF_FRAGS * MANA_TSO_MAXSEG_SZ -
2787 	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2788 	ndev->if_hw_tsomaxsegcount = MAX_MBUF_FRAGS;
2789 	ndev->if_hw_tsomaxsegsize = PAGE_SIZE;
2790 
2791 	ifmedia_init(&apc->media, IFM_IMASK,
2792 	    mana_ifmedia_change, mana_ifmedia_status);
2793 	ifmedia_add(&apc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2794 	ifmedia_set(&apc->media, IFM_ETHER | IFM_AUTO);
2795 
2796 	ether_ifattach(ndev, apc->mac_addr);
2797 
2798 	/* Initialize statistics */
2799 	mana_alloc_counters((counter_u64_t *)&apc->port_stats,
2800 	    sizeof(struct mana_port_stats));
2801 	mana_sysctl_add_port(apc);
2802 
2803 	/* Tell the stack that the interface is not active */
2804 	if_setdrvflagbits(ndev, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2805 
2806 	return 0;
2807 
2808 reset_apc:
2809 	free(apc, M_DEVBUF);
2810 free_net:
2811 	*ndev_storage = NULL;
2812 	if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2813 	if_free(ndev);
2814 	return err;
2815 }
2816 
2817 int mana_probe(struct gdma_dev *gd)
2818 {
2819 	struct gdma_context *gc = gd->gdma_context;
2820 	device_t dev = gc->dev;
2821 	struct mana_context *ac;
2822 	int err;
2823 	int i;
2824 
2825 	device_printf(dev, "%s protocol version: %d.%d.%d\n", DEVICE_NAME,
2826 		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2827 
2828 	err = mana_gd_register_device(gd);
2829 	if (err)
2830 		return err;
2831 
2832 	ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
2833 	if (!ac)
2834 		return ENOMEM;
2835 
2836 	ac->gdma_dev = gd;
2837 	ac->num_ports = 1;
2838 	gd->driver_data = ac;
2839 
2840 	err = mana_create_eq(ac);
2841 	if (err)
2842 		goto out;
2843 
2844 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
2845 	    MANA_MICRO_VERSION, &ac->num_ports);
2846 	if (err)
2847 		goto out;
2848 
2849 	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2850 		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2851 
2852 	for (i = 0; i < ac->num_ports; i++) {
2853 		err = mana_probe_port(ac, i, &ac->ports[i]);
2854 		if (err) {
2855 			device_printf(dev,
2856 			    "Failed to probe mana port %d\n", i);
2857 			break;
2858 		}
2859 	}
2860 
2861 out:
2862 	if (err)
2863 		mana_remove(gd);
2864 
2865 	return err;
2866 }
2867 
2868 void
2869 mana_remove(struct gdma_dev *gd)
2870 {
2871 	struct gdma_context *gc = gd->gdma_context;
2872 	struct mana_context *ac = gd->driver_data;
2873 	device_t dev = gc->dev;
2874 	struct ifnet *ndev;
2875 	int i;
2876 
2877 	for (i = 0; i < ac->num_ports; i++) {
2878 		ndev = ac->ports[i];
2879 		if (!ndev) {
2880 			if (i == 0)
2881 				device_printf(dev, "No net device to remove\n");
2882 			goto out;
2883 		}
2884 
2885 		mana_detach(ndev);
2886 
2887 		if_free(ndev);
2888 	}
2889 
2890 	mana_destroy_eq(ac);
2891 
2892 out:
2893 	mana_gd_deregister_device(gd);
2894 	gd->driver_data = NULL;
2895 	gd->gdma_context = NULL;
2896 	free(ac, M_DEVBUF);
2897 }
2898