1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2021 Microsoft Corp.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 *
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/bus.h>
34 #include <sys/kernel.h>
35 #include <sys/kthread.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/smp.h>
39 #include <sys/socket.h>
40 #include <sys/sockio.h>
41 #include <sys/time.h>
42 #include <sys/eventhandler.h>
43
44 #include <machine/bus.h>
45 #include <machine/resource.h>
46 #include <machine/in_cksum.h>
47
48 #include <net/if.h>
49 #include <net/if_var.h>
50 #include <net/if_types.h>
51 #include <net/if_vlan_var.h>
52 #ifdef RSS
53 #include <net/rss_config.h>
54 #endif
55
56 #include <netinet/in_systm.h>
57 #include <netinet/in.h>
58 #include <netinet/if_ether.h>
59 #include <netinet/ip.h>
60 #include <netinet/ip6.h>
61 #include <netinet/tcp.h>
62 #include <netinet/udp.h>
63
64 #include "mana.h"
65 #include "mana_sysctl.h"
66
67 static int mana_up(struct mana_port_context *apc);
68 static int mana_down(struct mana_port_context *apc);
69
70 extern unsigned int mana_tx_req_size;
71 extern unsigned int mana_rx_req_size;
72 extern unsigned int mana_rx_refill_threshold;
73
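/*
 * Fill in the RSS hash key. The key is generated once with arc4random
 * and cached in a static buffer, so every port of this driver instance
 * uses the same Toeplitz key.
 */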
74 static void
75 mana_rss_key_fill(void *k, size_t size)
76 {
77 static bool rss_key_generated = false;
78 static uint8_t rss_key[MANA_HASH_KEY_SIZE];
79
80 KASSERT(size <= MANA_HASH_KEY_SIZE,
81 ("Request more buytes than MANA RSS key can hold"));
82
83 if (!rss_key_generated) {
84 arc4random_buf(rss_key, MANA_HASH_KEY_SIZE);
85 rss_key_generated = true;
86 }
87 memcpy(k, rss_key, size);
88 }
89
90 static int
91 mana_ifmedia_change(if_t ifp __unused)
92 {
93 return EOPNOTSUPP;
94 }
95
96 static void
97 mana_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
98 {
99 struct mana_port_context *apc = if_getsoftc(ifp);
100
101 if (!apc) {
102 if_printf(ifp, "Port not available\n");
103 return;
104 }
105
106 MANA_APC_LOCK_LOCK(apc);
107
108 ifmr->ifm_status = IFM_AVALID;
109 ifmr->ifm_active = IFM_ETHER;
110
111 if (!apc->port_is_up) {
112 MANA_APC_LOCK_UNLOCK(apc);
113 mana_dbg(NULL, "Port %u link is down\n", apc->port_idx);
114 return;
115 }
116
117 ifmr->ifm_status |= IFM_ACTIVE;
118 ifmr->ifm_active |= IFM_100G_DR | IFM_FDX;
119
120 MANA_APC_LOCK_UNLOCK(apc);
121 }
122
123 static uint64_t
124 mana_get_counter(if_t ifp, ift_counter cnt)
125 {
126 struct mana_port_context *apc = if_getsoftc(ifp);
127 struct mana_port_stats *stats = &apc->port_stats;
128
129 switch (cnt) {
130 case IFCOUNTER_IPACKETS:
131 return (counter_u64_fetch(stats->rx_packets));
132 case IFCOUNTER_OPACKETS:
133 return (counter_u64_fetch(stats->tx_packets));
134 case IFCOUNTER_IBYTES:
135 return (counter_u64_fetch(stats->rx_bytes));
136 case IFCOUNTER_OBYTES:
137 return (counter_u64_fetch(stats->tx_bytes));
138 case IFCOUNTER_IQDROPS:
139 return (counter_u64_fetch(stats->rx_drops));
140 case IFCOUNTER_OQDROPS:
141 return (counter_u64_fetch(stats->tx_drops));
142 default:
143 return (if_get_counter_default(ifp, cnt));
144 }
145 }
146
147 static void
148 mana_qflush(if_t ifp)
149 {
150 if_qflush(ifp);
151 }
152
153 int
154 mana_restart(struct mana_port_context *apc)
155 {
156 int rc = 0;
157
158 MANA_APC_LOCK_LOCK(apc);
159 if (apc->port_is_up)
160 mana_down(apc);
161
162 rc = mana_up(apc);
163 MANA_APC_LOCK_UNLOCK(apc);
164
165 return (rc);
166 }
167
168 static int
169 mana_ioctl(if_t ifp, u_long command, caddr_t data)
170 {
171 struct mana_port_context *apc = if_getsoftc(ifp);
172 struct ifrsskey *ifrk;
173 struct ifrsshash *ifrh;
174 struct ifreq *ifr;
175 uint16_t new_mtu;
176 int rc = 0, mask;
177
178 switch (command) {
179 case SIOCSIFMTU:
180 ifr = (struct ifreq *)data;
181 new_mtu = ifr->ifr_mtu;
182 if (if_getmtu(ifp) == new_mtu)
183 break;
184 if ((new_mtu + 18 > MAX_FRAME_SIZE) ||
185 (new_mtu + 18 < MIN_FRAME_SIZE)) {
186 if_printf(ifp, "Invalid MTU. new_mtu: %d, "
187 "max allowed: %d, min allowed: %d\n",
188 new_mtu, MAX_FRAME_SIZE - 18, MIN_FRAME_SIZE - 18);
189 return EINVAL;
190 }
191 MANA_APC_LOCK_LOCK(apc);
192 if (apc->port_is_up)
193 mana_down(apc);
194
195 apc->frame_size = new_mtu + 18;
196 if_setmtu(ifp, new_mtu);
197 mana_dbg(NULL, "Set MTU to %d\n", new_mtu);
198
199 rc = mana_up(apc);
200 MANA_APC_LOCK_UNLOCK(apc);
201 break;
202
203 case SIOCSIFFLAGS:
204 if (if_getflags(ifp) & IFF_UP) {
205 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
206 MANA_APC_LOCK_LOCK(apc);
207 if (!apc->port_is_up)
208 rc = mana_up(apc);
209 MANA_APC_LOCK_UNLOCK(apc);
210 }
211 } else {
212 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
213 MANA_APC_LOCK_LOCK(apc);
214 if (apc->port_is_up)
215 mana_down(apc);
216 MANA_APC_LOCK_UNLOCK(apc);
217 }
218 }
219 break;
220
221 case SIOCSIFCAP:
222 MANA_APC_LOCK_LOCK(apc);
223 ifr = (struct ifreq *)data;
224 /*
225 * Fix up requested capabilities w/ supported capabilities,
226 * since the supported capabilities could have been changed.
227 */
228 mask = (ifr->ifr_reqcap & if_getcapabilities(ifp)) ^
229 if_getcapenable(ifp);
230
231 if (mask & IFCAP_TXCSUM) {
232 if_togglecapenable(ifp, IFCAP_TXCSUM);
233 if_togglehwassist(ifp, (CSUM_TCP | CSUM_UDP | CSUM_IP));
234
235 if ((IFCAP_TSO4 & if_getcapenable(ifp)) &&
236 !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
237 mask &= ~IFCAP_TSO4;
238 if_setcapenablebit(ifp, 0, IFCAP_TSO4);
239 if_sethwassistbits(ifp, 0, CSUM_IP_TSO);
240 mana_warn(NULL,
241 "Also disabled tso4 due to -txcsum.\n");
242 }
243 }
244
245 if (mask & IFCAP_TXCSUM_IPV6) {
246 if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
247 if_togglehwassist(ifp, (CSUM_UDP_IPV6 | CSUM_TCP_IPV6));
248
249 if ((IFCAP_TSO6 & if_getcapenable(ifp)) &&
250 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
251 mask &= ~IFCAP_TSO6;
252 if_setcapenablebit(ifp, 0, IFCAP_TSO6);
253 if_sethwassistbits(ifp, 0, CSUM_IP6_TSO);
254 mana_warn(ifp,
255 "Also disabled tso6 due to -txcsum6.\n");
256 }
257 }
258
259 if (mask & IFCAP_RXCSUM)
260 if_togglecapenable(ifp, IFCAP_RXCSUM);
261 /* We can't distinguish IPv6 packets from IPv4 packets on the RX path. */
262 if (mask & IFCAP_RXCSUM_IPV6)
263 if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
264
265 if (mask & IFCAP_LRO)
266 if_togglecapenable(ifp, IFCAP_LRO);
267
268 if (mask & IFCAP_TSO4) {
269 if (!(IFCAP_TSO4 & if_getcapenable(ifp)) &&
270 !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
271 MANA_APC_LOCK_UNLOCK(apc);
272 if_printf(ifp, "Enable txcsum first.\n");
273 rc = EAGAIN;
274 goto out;
275 }
276 if_togglecapenable(ifp, IFCAP_TSO4);
277 if_togglehwassist(ifp, CSUM_IP_TSO);
278 }
279
280 if (mask & IFCAP_TSO6) {
281 if (!(IFCAP_TSO6 & if_getcapenable(ifp)) &&
282 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
283 MANA_APC_LOCK_UNLOCK(apc);
284 if_printf(ifp, "Enable txcsum6 first.\n");
285 rc = EAGAIN;
286 goto out;
287 }
288 if_togglecapenable(ifp, IFCAP_TSO6);
289 if_togglehwassist(ifp, CSUM_IP6_TSO);
290 }
291
292 MANA_APC_LOCK_UNLOCK(apc);
293 out:
294 break;
295
296 case SIOCSIFMEDIA:
297 case SIOCGIFMEDIA:
298 case SIOCGIFXMEDIA:
299 ifr = (struct ifreq *)data;
300 rc = ifmedia_ioctl(ifp, ifr, &apc->media, command);
301 break;
302
303 case SIOCGIFRSSKEY:
304 ifrk = (struct ifrsskey *)data;
305 ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
306 ifrk->ifrk_keylen = MANA_HASH_KEY_SIZE;
307 memcpy(ifrk->ifrk_key, apc->hashkey, MANA_HASH_KEY_SIZE);
308 break;
309
310 case SIOCGIFRSSHASH:
311 ifrh = (struct ifrsshash *)data;
312 ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
313 ifrh->ifrh_types =
314 RSS_TYPE_TCP_IPV4 |
315 RSS_TYPE_UDP_IPV4 |
316 RSS_TYPE_TCP_IPV6 |
317 RSS_TYPE_UDP_IPV6;
318 break;
319
320 default:
321 rc = ether_ioctl(ifp, command, data);
322 break;
323 }
324
325 return (rc);
326 }
327
328 static inline void
329 mana_alloc_counters(counter_u64_t *begin, int size)
330 {
331 counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
332
333 for (; begin < end; ++begin)
334 *begin = counter_u64_alloc(M_WAITOK);
335 }
336
337 static inline void
338 mana_free_counters(counter_u64_t *begin, int size)
339 {
340 counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
341
342 for (; begin < end; ++begin)
343 counter_u64_free(*begin);
344 }
345
346 static bool
347 mana_can_tx(struct gdma_queue *wq)
348 {
349 return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
350 }
351
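/*
 * DMA-map an outgoing mbuf chain and fill the WQE scatter/gather list.
 * If the chain has too many fragments, it is collapsed to at most
 * MAX_MBUF_FRAGS segments and the mapping is retried.
 */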
352 static inline int
353 mana_tx_map_mbuf(struct mana_port_context *apc,
354 struct mana_send_buf_info *tx_info,
355 struct mbuf **m_head, struct mana_tx_package *tp,
356 struct mana_stats *tx_stats)
357 {
358 struct gdma_dev *gd = apc->ac->gdma_dev;
359 bus_dma_segment_t segs[MAX_MBUF_FRAGS];
360 struct mbuf *m = *m_head;
361 int err, nsegs, i;
362
363 err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, tx_info->dma_map,
364 m, segs, &nsegs, BUS_DMA_NOWAIT);
365 if (err == EFBIG) {
366 struct mbuf *m_new;
367
368 counter_u64_add(tx_stats->collapse, 1);
369 m_new = m_collapse(m, M_NOWAIT, MAX_MBUF_FRAGS);
370 if (unlikely(m_new == NULL)) {
371 counter_u64_add(tx_stats->collapse_err, 1);
372 return ENOBUFS;
373 } else {
374 *m_head = m = m_new;
375 }
376
377 mana_warn(NULL,
378 "Too many segs in orig mbuf, m_collapse called\n");
379
380 err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag,
381 tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
382 }
383 if (!err) {
384 for (i = 0; i < nsegs; i++) {
385 tp->wqe_req.sgl[i].address = segs[i].ds_addr;
386 tp->wqe_req.sgl[i].mem_key = gd->gpa_mkey;
387 tp->wqe_req.sgl[i].size = segs[i].ds_len;
388 }
389 tp->wqe_req.num_sge = nsegs;
390
391 tx_info->mbuf = *m_head;
392
393 bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
394 BUS_DMASYNC_PREWRITE);
395 }
396
397 return err;
398 }
399
400 static inline void
401 mana_tx_unmap_mbuf(struct mana_port_context *apc,
402 struct mana_send_buf_info *tx_info)
403 {
404 bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
405 BUS_DMASYNC_POSTWRITE);
406 bus_dmamap_unload(apc->tx_buf_tag, tx_info->dma_map);
407 if (tx_info->mbuf) {
408 m_freem(tx_info->mbuf);
409 tx_info->mbuf = NULL;
410 }
411 }
412
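/*
 * Load an RX buffer into the DMA map and fill its single-entry SGL.
 * When alloc_mbuf is true a new cluster of rxq->datasize bytes is
 * allocated (falling back to a regular cluster); otherwise the mbuf
 * already attached to rx_oob is reused.
 */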
413 static inline int
414 mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
415 struct mana_recv_buf_oob *rx_oob, bool alloc_mbuf)
416 {
417 bus_dma_segment_t segs[1];
418 struct mbuf *mbuf;
419 int nsegs, err;
420 uint32_t mlen;
421
422 if (alloc_mbuf) {
423 mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize);
424 if (unlikely(mbuf == NULL)) {
425 mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
426 if (unlikely(mbuf == NULL)) {
427 return ENOMEM;
428 }
429 mlen = MCLBYTES;
430 } else {
431 mlen = rxq->datasize;
432 }
433
434 mbuf->m_pkthdr.len = mbuf->m_len = mlen;
435 } else {
436 if (rx_oob->mbuf) {
437 mbuf = rx_oob->mbuf;
438 mlen = rx_oob->mbuf->m_pkthdr.len;
439 } else {
440 return ENOMEM;
441 }
442 }
443
444 err = bus_dmamap_load_mbuf_sg(apc->rx_buf_tag, rx_oob->dma_map,
445 mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
446
447 if (unlikely((err != 0) || (nsegs != 1))) {
448 mana_warn(NULL, "Failed to map mbuf, error: %d, "
449 "nsegs: %d\n", err, nsegs);
450 counter_u64_add(rxq->stats.dma_mapping_err, 1);
451 goto error;
452 }
453
454 bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
455 BUS_DMASYNC_PREREAD);
456
457 rx_oob->mbuf = mbuf;
458 rx_oob->num_sge = 1;
459 rx_oob->sgl[0].address = segs[0].ds_addr;
460 rx_oob->sgl[0].size = mlen;
461 rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
462
463 return 0;
464
465 error:
466 m_freem(mbuf);
467 return EFAULT;
468 }
469
470 static inline void
471 mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
472 struct mana_recv_buf_oob *rx_oob, bool free_mbuf)
473 {
474 bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
475 BUS_DMASYNC_POSTREAD);
476 bus_dmamap_unload(apc->rx_buf_tag, rx_oob->dma_map);
477
478 if (free_mbuf && rx_oob->mbuf) {
479 m_freem(rx_oob->mbuf);
480 rx_oob->mbuf = NULL;
481 }
482 }
483
484
485 /* Use a couple of mbuf PH_loc fields for the L3 and L4 protocol types */
486 #define MANA_L3_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
487 #define MANA_L4_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
488
489 #define MANA_TXQ_FULL (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)
490
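/*
 * Drain the txq buf ring: map each mbuf, build the transmit OOB
 * (checksum/TSO offload fields), post the WQE and ring the SQ doorbell.
 * If the SQ runs out of space the queue is marked IFF_DRV_OACTIVE and
 * the completion path restarts transmission later. Called with the
 * txq mutex held.
 */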
491 static void
492 mana_xmit(struct mana_txq *txq)
493 {
494 enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
495 struct mana_send_buf_info *tx_info;
496 if_t ndev = txq->ndev;
497 struct mbuf *mbuf;
498 struct mana_port_context *apc = if_getsoftc(ndev);
499 unsigned int tx_queue_size = apc->tx_queue_size;
500 struct mana_port_stats *port_stats = &apc->port_stats;
501 struct gdma_dev *gd = apc->ac->gdma_dev;
502 uint64_t packets, bytes;
503 uint16_t next_to_use;
504 struct mana_tx_package pkg = {};
505 struct mana_stats *tx_stats;
506 struct gdma_queue *gdma_sq;
507 struct mana_cq *cq;
508 int err, len;
509 bool is_tso;
510
511 gdma_sq = txq->gdma_sq;
512 cq = &apc->tx_qp[txq->idx].tx_cq;
513 tx_stats = &txq->stats;
514
515 packets = 0;
516 bytes = 0;
517 next_to_use = txq->next_to_use;
518
519 while ((mbuf = drbr_peek(ndev, txq->txq_br)) != NULL) {
520 if (!apc->port_is_up ||
521 (if_getdrvflags(ndev) & MANA_TXQ_FULL) != IFF_DRV_RUNNING) {
522 drbr_putback(ndev, txq->txq_br, mbuf);
523 break;
524 }
525
526 if (!mana_can_tx(gdma_sq)) {
527 /* SQ is full. Set the IFF_DRV_OACTIVE flag */
528 if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, 0);
529 counter_u64_add(tx_stats->stop, 1);
530 uint64_t stops = counter_u64_fetch(tx_stats->stop);
531 uint64_t wakeups = counter_u64_fetch(tx_stats->wakeup);
532 #define MANA_TXQ_STOP_THRESHOLD 50
533 if (stops > MANA_TXQ_STOP_THRESHOLD && wakeups > 0 &&
534 stops > wakeups && txq->alt_txq_idx == txq->idx) {
535 txq->alt_txq_idx =
536 (txq->idx + (stops / wakeups))
537 % apc->num_queues;
538 counter_u64_add(tx_stats->alt_chg, 1);
539 }
540
541 drbr_putback(ndev, txq->txq_br, mbuf);
542
543 taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
544 break;
545 }
546
547 tx_info = &txq->tx_buf_info[next_to_use];
548
549 memset(&pkg, 0, sizeof(struct mana_tx_package));
550 pkg.wqe_req.sgl = pkg.sgl_array;
551
552 err = mana_tx_map_mbuf(apc, tx_info, &mbuf, &pkg, tx_stats);
553 if (unlikely(err)) {
554 mana_dbg(NULL,
555 "Failed to map tx mbuf, err %d\n", err);
556
557 counter_u64_add(tx_stats->dma_mapping_err, 1);
558
559 /* The mbuf is still there. Free it */
560 m_freem(mbuf);
561 /* Advance the drbr queue */
562 drbr_advance(ndev, txq->txq_br);
563 continue;
564 }
565
566 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
567 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
568
569 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
570 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
571 pkt_fmt = MANA_LONG_PKT_FMT;
572 } else {
573 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
574 }
575
576 pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
577
578 if (pkt_fmt == MANA_SHORT_PKT_FMT)
579 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
580 else
581 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
582
583 pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
584 pkg.wqe_req.flags = 0;
585 pkg.wqe_req.client_data_unit = 0;
586
587 is_tso = false;
588 if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
589 is_tso = true;
590
591 if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
592 pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
593 else
594 pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
595
596 pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
597 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
598 pkg.tx_oob.s_oob.trans_off = mbuf->m_pkthdr.l3hlen;
599
600 pkg.wqe_req.client_data_unit = mbuf->m_pkthdr.tso_segsz;
601 pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
602 } else if (mbuf->m_pkthdr.csum_flags &
603 (CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP6_UDP | CSUM_IP6_TCP)) {
604 if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP) {
605 pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
606 pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
607 } else {
608 pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
609 }
610
611 if (MANA_L4_PROTO(mbuf) == IPPROTO_TCP) {
612 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
613 pkg.tx_oob.s_oob.trans_off =
614 mbuf->m_pkthdr.l3hlen;
615 } else {
616 pkg.tx_oob.s_oob.comp_udp_csum = 1;
617 }
618 } else if (mbuf->m_pkthdr.csum_flags & CSUM_IP) {
619 pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
620 pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
621 } else {
622 if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
623 pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
624 else if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IPV6)
625 pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
626 }
627
628 len = mbuf->m_pkthdr.len;
629
630 err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
631 (struct gdma_posted_wqe_info *)&tx_info->wqe_inf);
632 if (unlikely(err)) {
633 /* Should not happen */
634 if_printf(ndev, "Failed to post TX OOB: %d\n", err);
635
636 mana_tx_unmap_mbuf(apc, tx_info);
637
638 drbr_advance(ndev, txq->txq_br);
639 continue;
640 }
641
642 next_to_use = MANA_IDX_NEXT(next_to_use, tx_queue_size);
643
644 (void)atomic_inc_return(&txq->pending_sends);
645
646 drbr_advance(ndev, txq->txq_br);
647
648 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
649
650 packets++;
651 bytes += len;
652
653 if (is_tso) {
654 txq->tso_pkts++;
655 txq->tso_bytes += len;
656 }
657 }
658
659 counter_enter();
660 counter_u64_add_protected(tx_stats->packets, packets);
661 counter_u64_add_protected(port_stats->tx_packets, packets);
662 counter_u64_add_protected(tx_stats->bytes, bytes);
663 counter_u64_add_protected(port_stats->tx_bytes, bytes);
664 counter_exit();
665
666 txq->next_to_use = next_to_use;
667 }
668
669 static void
670 mana_xmit_taskfunc(void *arg, int pending)
671 {
672 struct mana_txq *txq = (struct mana_txq *)arg;
673 if_t ndev = txq->ndev;
674 struct mana_port_context *apc = if_getsoftc(ndev);
675
676 while (!drbr_empty(ndev, txq->txq_br) && apc->port_is_up &&
677 (if_getdrvflags(ndev) & MANA_TXQ_FULL) == IFF_DRV_RUNNING) {
678 mtx_lock(&txq->txq_mtx);
679 mana_xmit(txq);
680 mtx_unlock(&txq->txq_mtx);
681 }
682 }
683
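/*
 * Make sure the first 'len' bytes of the mbuf chain are contiguous.
 * On failure m_pullup() has already freed the chain and the enclosing
 * function returns NULL.
 */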
684 #define PULLUP_HDR(m, len) \
685 do { \
686 if (unlikely((m)->m_len < (len))) { \
687 (m) = m_pullup((m), (len)); \
688 if ((m) == NULL) \
689 return (NULL); \
690 } \
691 } while (0)
692
693 /*
694 * If this function fails, the mbuf is freed and NULL is returned.
695 */
696 static inline struct mbuf *
697 mana_tso_fixup(struct mbuf *mbuf)
698 {
699 struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
700 struct tcphdr *th;
701 uint16_t etype;
702 int ehlen;
703
704 if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
705 etype = ntohs(eh->evl_proto);
706 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
707 } else {
708 etype = ntohs(eh->evl_encap_proto);
709 ehlen = ETHER_HDR_LEN;
710 }
711
712 if (etype == ETHERTYPE_IP) {
713 struct ip *ip;
714 int iphlen;
715
716 PULLUP_HDR(mbuf, ehlen + sizeof(*ip));
717 ip = mtodo(mbuf, ehlen);
718 iphlen = ip->ip_hl << 2;
719 mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
720
721 PULLUP_HDR(mbuf, ehlen + iphlen + sizeof(*th));
722 th = mtodo(mbuf, ehlen + iphlen);
723
724 ip->ip_len = 0;
725 ip->ip_sum = 0;
726 th->th_sum = in_pseudo(ip->ip_src.s_addr,
727 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
728 } else if (etype == ETHERTYPE_IPV6) {
729 struct ip6_hdr *ip6;
730
731 PULLUP_HDR(mbuf, ehlen + sizeof(*ip6) + sizeof(*th));
732 ip6 = mtodo(mbuf, ehlen);
733 if (ip6->ip6_nxt != IPPROTO_TCP) {
734 /* Something is really wrong; just return */
735 mana_dbg(NULL, "TSO mbuf not TCP, freed.\n");
736 m_freem(mbuf);
737 return NULL;
738 }
739 mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
740
741 th = mtodo(mbuf, ehlen + sizeof(*ip6));
742
743 ip6->ip6_plen = 0;
744 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
745 } else {
746 /* CSUM_TSO is set, but the packet is neither IPv4 nor IPv6. */
747 mana_warn(NULL, "TSO mbuf not right, freed.\n");
748 m_freem(mbuf);
749 return NULL;
750 }
751
752 MANA_L3_PROTO(mbuf) = etype;
753
754 return (mbuf);
755 }
756
757 /*
758 * If this function fails, the mbuf is freed and NULL is returned.
759 */
760 static inline struct mbuf *
761 mana_mbuf_csum_check(struct mbuf *mbuf)
762 {
763 struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
764 struct mbuf *mbuf_next;
765 uint16_t etype;
766 int offset;
767 int ehlen;
768
769 if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
770 etype = ntohs(eh->evl_proto);
771 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
772 } else {
773 etype = ntohs(eh->evl_encap_proto);
774 ehlen = ETHER_HDR_LEN;
775 }
776
777 mbuf_next = m_getptr(mbuf, ehlen, &offset);
778
779 MANA_L4_PROTO(mbuf) = 0;
780 if (etype == ETHERTYPE_IP) {
781 const struct ip *ip;
782 int iphlen;
783
784 ip = (struct ip *)(mtodo(mbuf_next, offset));
785 iphlen = ip->ip_hl << 2;
786 mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
787
788 MANA_L4_PROTO(mbuf) = ip->ip_p;
789 } else if (etype == ETHERTYPE_IPV6) {
790 const struct ip6_hdr *ip6;
791
792 ip6 = (struct ip6_hdr *)(mtodo(mbuf_next, offset));
793 mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
794
795 MANA_L4_PROTO(mbuf) = ip6->ip6_nxt;
796 } else {
797 MANA_L4_PROTO(mbuf) = 0;
798 }
799
800 MANA_L3_PROTO(mbuf) = etype;
801
802 return (mbuf);
803 }
804
805 static int
806 mana_start_xmit(if_t ifp, struct mbuf *m)
807 {
808 struct mana_port_context *apc = if_getsoftc(ifp);
809 struct mana_txq *txq;
810 int is_drbr_empty;
811 uint16_t txq_id;
812 int err;
813
814 if (unlikely((!apc->port_is_up) ||
815 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
816 return ENODEV;
817
818 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
819 m = mana_tso_fixup(m);
820 if (unlikely(m == NULL)) {
821 counter_enter();
822 counter_u64_add_protected(apc->port_stats.tx_drops, 1);
823 counter_exit();
824 return EIO;
825 }
826 } else {
827 m = mana_mbuf_csum_check(m);
828 if (unlikely(m == NULL)) {
829 counter_enter();
830 counter_u64_add_protected(apc->port_stats.tx_drops, 1);
831 counter_exit();
832 return EIO;
833 }
834 }
835
836 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
837 uint32_t hash = m->m_pkthdr.flowid;
838 txq_id = apc->indir_table[(hash) & MANA_INDIRECT_TABLE_MASK] %
839 apc->num_queues;
840 } else {
841 txq_id = m->m_pkthdr.flowid % apc->num_queues;
842 }
843
844 if (apc->enable_tx_altq)
845 txq_id = apc->tx_qp[txq_id].txq.alt_txq_idx;
846
847 txq = &apc->tx_qp[txq_id].txq;
848
849 is_drbr_empty = drbr_empty(ifp, txq->txq_br);
850 err = drbr_enqueue(ifp, txq->txq_br, m);
851 if (unlikely(err)) {
852 mana_warn(NULL, "txq %u failed to enqueue: %d\n",
853 txq_id, err);
854 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
855 return err;
856 }
857
858 if (is_drbr_empty && mtx_trylock(&txq->txq_mtx)) {
859 mana_xmit(txq);
860 mtx_unlock(&txq->txq_mtx);
861 } else {
862 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
863 }
864
865 return 0;
866 }
867
868 static void
869 mana_cleanup_port_context(struct mana_port_context *apc)
870 {
871 bus_dma_tag_destroy(apc->tx_buf_tag);
872 bus_dma_tag_destroy(apc->rx_buf_tag);
873 apc->rx_buf_tag = NULL;
874
875 free(apc->rxqs, M_DEVBUF);
876 apc->rxqs = NULL;
877
878 mana_free_counters((counter_u64_t *)&apc->port_stats,
879 sizeof(struct mana_port_stats));
880 }
881
882 static int
883 mana_init_port_context(struct mana_port_context *apc)
884 {
885 device_t dev = apc->ac->gdma_dev->gdma_context->dev;
886 uint32_t tso_maxsize;
887 int err;
888
889 tso_maxsize = MANA_TSO_MAX_SZ;
890
891 /* Create DMA tag for tx bufs */
892 err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
893 1, 0, /* alignment, boundary */
894 BUS_SPACE_MAXADDR, /* lowaddr */
895 BUS_SPACE_MAXADDR, /* highaddr */
896 NULL, NULL, /* filter, filterarg */
897 tso_maxsize, /* maxsize */
898 MAX_MBUF_FRAGS, /* nsegments */
899 tso_maxsize, /* maxsegsize */
900 0, /* flags */
901 NULL, NULL, /* lockfunc, lockfuncarg*/
902 &apc->tx_buf_tag);
903 if (unlikely(err)) {
904 device_printf(dev, "Feiled to create TX DMA tag\n");
905 return err;
906 }
907
908 /* Create DMA tag for rx bufs */
909 err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
910 64, 0, /* alignment, boundary */
911 BUS_SPACE_MAXADDR, /* lowaddr */
912 BUS_SPACE_MAXADDR, /* highaddr */
913 NULL, NULL, /* filter, filterarg */
914 MJUMPAGESIZE, /* maxsize */
915 1, /* nsegments */
916 MJUMPAGESIZE, /* maxsegsize */
917 0, /* flags */
918 NULL, NULL, /* lockfunc, lockfuncarg*/
919 &apc->rx_buf_tag);
920 if (unlikely(err)) {
921 device_printf(dev, "Feiled to create RX DMA tag\n");
922 return err;
923 }
924
925 apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
926 M_DEVBUF, M_WAITOK | M_ZERO);
927
928 return 0;
929 }
930
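/*
 * Send a management request to the device over the GDMA channel and
 * verify that the response carries the same dev_id and activity_id
 * as the request.
 */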
931 static int
932 mana_send_request(struct mana_context *ac, void *in_buf,
933 uint32_t in_len, void *out_buf, uint32_t out_len)
934 {
935 struct gdma_context *gc = ac->gdma_dev->gdma_context;
936 struct gdma_resp_hdr *resp = out_buf;
937 struct gdma_req_hdr *req = in_buf;
938 device_t dev = gc->dev;
939 static atomic_t activity_id;
940 int err;
941
942 req->dev_id = gc->mana.dev_id;
943 req->activity_id = atomic_inc_return(&activity_id);
944
945 mana_dbg(NULL, "activity_id = %u\n", activity_id);
946
947 err = mana_gd_send_request(gc, in_len, in_buf, out_len,
948 out_buf);
949 if (err || resp->status) {
950 device_printf(dev, "Failed to send mana message: %d, 0x%x\n",
951 err, resp->status);
952 return err ? err : EPROTO;
953 }
954
955 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
956 req->activity_id != resp->activity_id) {
957 device_printf(dev,
958 "Unexpected mana message response: %x,%x,%x,%x\n",
959 req->dev_id.as_uint32, resp->dev_id.as_uint32,
960 req->activity_id, resp->activity_id);
961 return EPROTO;
962 }
963
964 return 0;
965 }
966
967 static int
968 mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
969 const enum mana_command_code expected_code,
970 const uint32_t min_size)
971 {
972 if (resp_hdr->response.msg_type != expected_code)
973 return EPROTO;
974
975 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
976 return EPROTO;
977
978 if (resp_hdr->response.msg_size < min_size)
979 return EPROTO;
980
981 return 0;
982 }
983
984 static int
985 mana_query_device_cfg(struct mana_context *ac, uint32_t proto_major_ver,
986 uint32_t proto_minor_ver, uint32_t proto_micro_ver,
987 uint16_t *max_num_vports)
988 {
989 struct gdma_context *gc = ac->gdma_dev->gdma_context;
990 struct mana_query_device_cfg_resp resp = {};
991 struct mana_query_device_cfg_req req = {};
992 device_t dev = gc->dev;
993 int err = 0;
994
995 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
996 sizeof(req), sizeof(resp));
997 req.proto_major_ver = proto_major_ver;
998 req.proto_minor_ver = proto_minor_ver;
999 req.proto_micro_ver = proto_micro_ver;
1000
1001 err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
1002 if (err) {
1003 device_printf(dev, "Failed to query config: %d", err);
1004 return err;
1005 }
1006
1007 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
1008 sizeof(resp));
1009 if (err || resp.hdr.status) {
1010 device_printf(dev, "Invalid query result: %d, 0x%x\n", err,
1011 resp.hdr.status);
1012 if (!err)
1013 err = EPROTO;
1014 return err;
1015 }
1016
1017 *max_num_vports = resp.max_num_vports;
1018
1019 mana_dbg(NULL, "mana max_num_vports from device = %d\n",
1020 *max_num_vports);
1021
1022 return 0;
1023 }
1024
1025 static int
1026 mana_query_vport_cfg(struct mana_port_context *apc, uint32_t vport_index,
1027 uint32_t *max_sq, uint32_t *max_rq, uint32_t *num_indir_entry)
1028 {
1029 struct mana_query_vport_cfg_resp resp = {};
1030 struct mana_query_vport_cfg_req req = {};
1031 int err;
1032
1033 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
1034 sizeof(req), sizeof(resp));
1035
1036 req.vport_index = vport_index;
1037
1038 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1039 sizeof(resp));
1040 if (err)
1041 return err;
1042
1043 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
1044 sizeof(resp));
1045 if (err)
1046 return err;
1047
1048 if (resp.hdr.status)
1049 return EPROTO;
1050
1051 *max_sq = resp.max_num_sq;
1052 *max_rq = resp.max_num_rq;
1053 *num_indir_entry = resp.num_indirection_ent;
1054
1055 apc->port_handle = resp.vport;
1056 memcpy(apc->mac_addr, resp.mac_addr, ETHER_ADDR_LEN);
1057
1058 return 0;
1059 }
1060
1061 void
1062 mana_uncfg_vport(struct mana_port_context *apc)
1063 {
1064 apc->vport_use_count--;
1065 if (apc->vport_use_count < 0) {
1066 mana_err(NULL,
1067 "WARNING: vport_use_count less than 0: %u\n",
1068 apc->vport_use_count);
1069 }
1070 }
1071
1072 int
1073 mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
1074 uint32_t doorbell_pg_id)
1075 {
1076 struct mana_config_vport_resp resp = {};
1077 struct mana_config_vport_req req = {};
1078 int err;
1079
1080 /* This function is used to program the Ethernet port in the hardware
1081 * table. It can be called from the Ethernet driver or the RDMA driver.
1082 *
1083 * For Ethernet usage, the hardware supports only one active user on a
1084 * physical port. The driver checks on the port usage before programming
1085 * the hardware when creating the RAW QP (RDMA driver) or exposing the
1086 * device to kernel NET layer (Ethernet driver).
1087 *
1088 * Because the RDMA driver doesn't know in advance which QP type the
1089 * user will create, it exposes the device with all its ports. The user
1090 * may not be able to create a RAW QP on a port if that port is already
1091 * in use by the kernel Ethernet driver.
1092 *
1093 * This physical port limitation only applies to the RAW QP. For RC QP,
1094 * the hardware doesn't have this limitation. The user can create RC
1095 * QPs on a physical port up to the hardware limits independent of the
1096 * Ethernet usage on the same port.
1097 */
1098 if (apc->vport_use_count > 0) {
1099 return EBUSY;
1100 }
1101 apc->vport_use_count++;
1102
1103 mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
1104 sizeof(req), sizeof(resp));
1105 req.vport = apc->port_handle;
1106 req.pdid = protection_dom_id;
1107 req.doorbell_pageid = doorbell_pg_id;
1108
1109 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1110 sizeof(resp));
1111 if (err) {
1112 if_printf(apc->ndev, "Failed to configure vPort: %d\n", err);
1113 goto out;
1114 }
1115
1116 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1117 sizeof(resp));
1118 if (err || resp.hdr.status) {
1119 if_printf(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1120 err, resp.hdr.status);
1121 if (!err)
1122 err = EPROTO;
1123
1124 goto out;
1125 }
1126
1127 apc->tx_shortform_allowed = resp.short_form_allowed;
1128 apc->tx_vp_offset = resp.tx_vport_offset;
1129
1130 if_printf(apc->ndev, "Configured vPort %ju PD %u DB %u\n",
1131 apc->port_handle, protection_dom_id, doorbell_pg_id);
1132
1133 out:
1134 if (err)
1135 mana_uncfg_vport(apc);
1136
1137 return err;
1138 }
1139
1140 static int
1141 mana_cfg_vport_steering(struct mana_port_context *apc,
1142 enum TRI_STATE rx,
1143 bool update_default_rxobj, bool update_key,
1144 bool update_tab)
1145 {
1146 uint16_t num_entries = MANA_INDIRECT_TABLE_SIZE;
1147 struct mana_cfg_rx_steer_req *req = NULL;
1148 struct mana_cfg_rx_steer_resp resp = {};
1149 if_t ndev = apc->ndev;
1150 mana_handle_t *req_indir_tab;
1151 uint32_t req_buf_size;
1152 int err;
1153
1154 req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
1155 req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
1156
1157 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1158 sizeof(resp));
1159
1160 req->vport = apc->port_handle;
1161 req->num_indir_entries = num_entries;
1162 req->indir_tab_offset = sizeof(*req);
1163 req->rx_enable = rx;
1164 req->rss_enable = apc->rss_state;
1165 req->update_default_rxobj = update_default_rxobj;
1166 req->update_hashkey = update_key;
1167 req->update_indir_tab = update_tab;
1168 req->default_rxobj = apc->default_rxobj;
1169
1170 if (update_key)
1171 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1172
1173 if (update_tab) {
1174 req_indir_tab = (mana_handle_t *)(req + 1);
1175 memcpy(req_indir_tab, apc->rxobj_table,
1176 req->num_indir_entries * sizeof(mana_handle_t));
1177 }
1178
1179 err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1180 sizeof(resp));
1181 if (err) {
1182 if_printf(ndev, "Failed to configure vPort RX: %d\n", err);
1183 goto out;
1184 }
1185
1186 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1187 sizeof(resp));
1188 if (err) {
1189 if_printf(ndev, "vPort RX configuration failed: %d\n", err);
1190 goto out;
1191 }
1192
1193 if (resp.hdr.status) {
1194 if_printf(ndev, "vPort RX configuration failed: 0x%x\n",
1195 resp.hdr.status);
1196 err = EPROTO;
1197 }
1198
1199 if_printf(ndev, "Configured steering vPort %ju entries %u\n",
1200 apc->port_handle, num_entries);
1201
1202 out:
1203 free(req, M_DEVBUF);
1204 return err;
1205 }
1206
1207 int
1208 mana_create_wq_obj(struct mana_port_context *apc,
1209 mana_handle_t vport,
1210 uint32_t wq_type, struct mana_obj_spec *wq_spec,
1211 struct mana_obj_spec *cq_spec,
1212 mana_handle_t *wq_obj)
1213 {
1214 struct mana_create_wqobj_resp resp = {};
1215 struct mana_create_wqobj_req req = {};
1216 if_t ndev = apc->ndev;
1217 int err;
1218
1219 mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1220 sizeof(req), sizeof(resp));
1221 req.vport = vport;
1222 req.wq_type = wq_type;
1223 req.wq_gdma_region = wq_spec->gdma_region;
1224 req.cq_gdma_region = cq_spec->gdma_region;
1225 req.wq_size = wq_spec->queue_size;
1226 req.cq_size = cq_spec->queue_size;
1227 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1228 req.cq_parent_qid = cq_spec->attached_eq;
1229
1230 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1231 sizeof(resp));
1232 if (err) {
1233 if_printf(ndev, "Failed to create WQ object: %d\n", err);
1234 goto out;
1235 }
1236
1237 err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1238 sizeof(resp));
1239 if (err || resp.hdr.status) {
1240 if_printf(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1241 resp.hdr.status);
1242 if (!err)
1243 err = EPROTO;
1244 goto out;
1245 }
1246
1247 if (resp.wq_obj == INVALID_MANA_HANDLE) {
1248 if_printf(ndev, "Got an invalid WQ object handle\n");
1249 err = EPROTO;
1250 goto out;
1251 }
1252
1253 *wq_obj = resp.wq_obj;
1254 wq_spec->queue_index = resp.wq_id;
1255 cq_spec->queue_index = resp.cq_id;
1256
1257 return 0;
1258 out:
1259 return err;
1260 }
1261
1262 void
1263 mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
1264 mana_handle_t wq_obj)
1265 {
1266 struct mana_destroy_wqobj_resp resp = {};
1267 struct mana_destroy_wqobj_req req = {};
1268 if_t ndev = apc->ndev;
1269 int err;
1270
1271 mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1272 sizeof(req), sizeof(resp));
1273 req.wq_type = wq_type;
1274 req.wq_obj_handle = wq_obj;
1275
1276 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1277 sizeof(resp));
1278 if (err) {
1279 if_printf(ndev, "Failed to destroy WQ object: %d\n", err);
1280 return;
1281 }
1282
1283 err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1284 sizeof(resp));
1285 if (err || resp.hdr.status)
1286 if_printf(ndev, "Failed to destroy WQ object: %d, 0x%x\n",
1287 err, resp.hdr.status);
1288 }
1289
1290 static void
1291 mana_destroy_eq(struct mana_context *ac)
1292 {
1293 struct gdma_context *gc = ac->gdma_dev->gdma_context;
1294 struct gdma_queue *eq;
1295 int i;
1296
1297 if (!ac->eqs)
1298 return;
1299
1300 for (i = 0; i < gc->max_num_queues; i++) {
1301 eq = ac->eqs[i].eq;
1302 if (!eq)
1303 continue;
1304
1305 mana_gd_destroy_queue(gc, eq);
1306 }
1307
1308 free(ac->eqs, M_DEVBUF);
1309 ac->eqs = NULL;
1310 }
1311
1312 static int
1313 mana_create_eq(struct mana_context *ac)
1314 {
1315 struct gdma_dev *gd = ac->gdma_dev;
1316 struct gdma_context *gc = gd->gdma_context;
1317 struct gdma_queue_spec spec = {};
1318 int err;
1319 int i;
1320
1321 ac->eqs = mallocarray(gc->max_num_queues, sizeof(struct mana_eq),
1322 M_DEVBUF, M_WAITOK | M_ZERO);
1323
1324 spec.type = GDMA_EQ;
1325 spec.monitor_avl_buf = false;
1326 spec.queue_size = EQ_SIZE;
1327 spec.eq.callback = NULL;
1328 spec.eq.context = ac->eqs;
1329 spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1330
1331 for (i = 0; i < gc->max_num_queues; i++) {
1332 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
1333 if (err)
1334 goto out;
1335 }
1336
1337 return 0;
1338 out:
1339 mana_destroy_eq(ac);
1340 return err;
1341 }
1342
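/*
 * Fence an RQ: send a MANA_FENCE_RQ request and wait for the matching
 * CQE_RX_OBJECT_FENCE completion (delivered by mana_process_rx_cqe())
 * so no further receive completions are outstanding for this queue.
 */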
1343 static int
1344 mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1345 {
1346 struct mana_fence_rq_resp resp = {};
1347 struct mana_fence_rq_req req = {};
1348 int err;
1349
1350 init_completion(&rxq->fence_event);
1351
1352 mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
1353 sizeof(req), sizeof(resp));
1354 req.wq_obj_handle = rxq->rxobj;
1355
1356 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1357 sizeof(resp));
1358 if (err) {
1359 if_printf(apc->ndev, "Failed to fence RQ %u: %d\n",
1360 rxq->rxq_idx, err);
1361 return err;
1362 }
1363
1364 err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
1365 if (err || resp.hdr.status) {
1366 if_printf(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1367 rxq->rxq_idx, err, resp.hdr.status);
1368 if (!err)
1369 err = EPROTO;
1370
1371 return err;
1372 }
1373
1374 if (wait_for_completion_timeout(&rxq->fence_event, 10 * hz)) {
1375 if_printf(apc->ndev, "Failed to fence RQ %u: timed out\n",
1376 rxq->rxq_idx);
1377 return ETIMEDOUT;
1378 }
1379
1380 return 0;
1381 }
1382
1383 static void
1384 mana_fence_rqs(struct mana_port_context *apc)
1385 {
1386 unsigned int rxq_idx;
1387 struct mana_rxq *rxq;
1388 int err;
1389
1390 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1391 rxq = apc->rxqs[rxq_idx];
1392 err = mana_fence_rq(apc, rxq);
1393
1394 /* If the fence request failed, fall back to a short sleep. */
1395 if (err)
1396 gdma_msleep(100);
1397 }
1398 }
1399
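/*
 * Advance the work queue tail by the given number of completed
 * work-queue units, sanity-checking that the tail never passes the head.
 */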
1400 static int
1401 mana_move_wq_tail(struct gdma_queue *wq, uint32_t num_units)
1402 {
1403 uint32_t used_space_old;
1404 uint32_t used_space_new;
1405
1406 used_space_old = wq->head - wq->tail;
1407 used_space_new = wq->head - (wq->tail + num_units);
1408
1409 if (used_space_new > used_space_old) {
1410 mana_err(NULL,
1411 "WARNING: new used space %u greater than old one %u\n",
1412 used_space_new, used_space_old);
1413 return ERANGE;
1414 }
1415
1416 wq->tail += num_units;
1417 return 0;
1418 }
1419
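/*
 * Reap TX completions: unmap and free the transmitted mbufs, advance
 * the SQ tail, and clear IFF_DRV_OACTIVE (waking the enqueue task)
 * once the queue was stopped and enough WQ space is available again.
 */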
1420 static void
1421 mana_poll_tx_cq(struct mana_cq *cq)
1422 {
1423 struct gdma_comp *completions = cq->gdma_comp_buf;
1424 struct gdma_posted_wqe_info *wqe_info;
1425 struct mana_send_buf_info *tx_info;
1426 unsigned int pkt_transmitted = 0;
1427 unsigned int wqe_unit_cnt = 0;
1428 struct mana_txq *txq = cq->txq;
1429 struct mana_port_context *apc;
1430 unsigned int tx_queue_size;
1431 uint16_t next_to_complete;
1432 if_t ndev;
1433 int comp_read;
1434 int txq_idx = txq->idx;
1435 int i;
1436 int sa_drop = 0;
1437
1438 struct gdma_queue *gdma_wq;
1439 unsigned int avail_space;
1440 bool txq_full = false;
1441
1442 ndev = txq->ndev;
1443 apc = if_getsoftc(ndev);
1444 tx_queue_size = apc->tx_queue_size;
1445
1446 comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1447 CQE_POLLING_BUFFER);
1448
1449 if (comp_read < 1)
1450 return;
1451
1452 next_to_complete = txq->next_to_complete;
1453
1454 for (i = 0; i < comp_read; i++) {
1455 struct mana_tx_comp_oob *cqe_oob;
1456
1457 if (!completions[i].is_sq) {
1458 mana_err(NULL, "WARNING: Not for SQ\n");
1459 return;
1460 }
1461
1462 cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1463 if (cqe_oob->cqe_hdr.client_type !=
1464 MANA_CQE_COMPLETION) {
1465 mana_err(NULL,
1466 "WARNING: Invalid CQE client type %u\n",
1467 cqe_oob->cqe_hdr.client_type);
1468 return;
1469 }
1470
1471 switch (cqe_oob->cqe_hdr.cqe_type) {
1472 case CQE_TX_OKAY:
1473 break;
1474
1475 case CQE_TX_SA_DROP:
1476 case CQE_TX_MTU_DROP:
1477 case CQE_TX_INVALID_OOB:
1478 case CQE_TX_INVALID_ETH_TYPE:
1479 case CQE_TX_HDR_PROCESSING_ERROR:
1480 case CQE_TX_VF_DISABLED:
1481 case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1482 case CQE_TX_VPORT_DISABLED:
1483 case CQE_TX_VLAN_TAGGING_VIOLATION:
1484 sa_drop++;
1485 mana_dbg(NULL,
1486 "TX: txq %d CQE error %d, ntc = %d, "
1487 "pending sends = %d: err ignored.\n",
1488 txq_idx, cqe_oob->cqe_hdr.cqe_type,
1489 next_to_complete, txq->pending_sends);
1490 counter_u64_add(txq->stats.cqe_err, 1);
1491 break;
1492
1493 default:
1494 /* If the CQE type is unknown, log a debug msg,
1495 * and still free the mbuf, etc.
1496 */
1497 mana_dbg(NULL,
1498 "ERROR: TX: Unknown CQE type %d\n",
1499 cqe_oob->cqe_hdr.cqe_type);
1500 counter_u64_add(txq->stats.cqe_unknown_type, 1);
1501 break;
1502 }
1503 if (txq->gdma_txq_id != completions[i].wq_num) {
1504 mana_dbg(NULL,
1505 "txq gdma id not match completion wq num: "
1506 "%d != %d\n",
1507 txq->gdma_txq_id, completions[i].wq_num);
1508 break;
1509 }
1510
1511 tx_info = &txq->tx_buf_info[next_to_complete];
1512 if (!tx_info->mbuf) {
1513 mana_err(NULL,
1514 "WARNING: txq %d Empty mbuf on tx_info: %u, "
1515 "ntu = %u, pending_sends = %d, "
1516 "transmitted = %d, sa_drop = %d, i = %d, comp_read = %d\n",
1517 txq_idx, next_to_complete, txq->next_to_use,
1518 txq->pending_sends, pkt_transmitted, sa_drop,
1519 i, comp_read);
1520 break;
1521 }
1522
1523 wqe_info = &tx_info->wqe_inf;
1524 wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1525
1526 mana_tx_unmap_mbuf(apc, tx_info);
1527 mb();
1528
1529 next_to_complete =
1530 MANA_IDX_NEXT(next_to_complete, tx_queue_size);
1531
1532 pkt_transmitted++;
1533 }
1534
1535 txq->next_to_complete = next_to_complete;
1536
1537 if (wqe_unit_cnt == 0) {
1538 mana_err(NULL,
1539 "WARNING: TX ring not proceeding!\n");
1540 return;
1541 }
1542
1543 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1544
1545 /* Ensure tail updated before checking q stop */
1546 wmb();
1547
1548 gdma_wq = txq->gdma_sq;
1549 avail_space = mana_gd_wq_avail_space(gdma_wq);
1550
1551
1552 if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL) {
1553 txq_full = true;
1554 }
1555
1556 /* Ensure checking txq_full before apc->port_is_up. */
1557 rmb();
1558
1559 if (txq_full && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1560 /* Grab the txq lock and re-test */
1561 mtx_lock(&txq->txq_mtx);
1562 avail_space = mana_gd_wq_avail_space(gdma_wq);
1563
1564 if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL &&
1565 apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1566 /* Clear the Q full flag */
1567 if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING,
1568 IFF_DRV_OACTIVE);
1569 counter_u64_add(txq->stats.wakeup, 1);
1570 if (txq->alt_txq_idx != txq->idx) {
1571 uint64_t stops = counter_u64_fetch(txq->stats.stop);
1572 uint64_t wakeups = counter_u64_fetch(txq->stats.wakeup);
1573 /* Reset alt_txq_idx back if it is not overloaded */
1574 if (stops < wakeups) {
1575 txq->alt_txq_idx = txq->idx;
1576 counter_u64_add(txq->stats.alt_reset, 1);
1577 }
1578 }
1579 rmb();
1580 /* Schedule a tx enqueue task */
1581 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
1582 }
1583 mtx_unlock(&txq->txq_mtx);
1584 }
1585
1586 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1587 mana_err(NULL,
1588 "WARNING: TX %d pending_sends error: %d\n",
1589 txq->idx, txq->pending_sends);
1590
1591 cq->work_done = pkt_transmitted;
1592 }
1593
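/* Post the receive WQE for this buffer back onto the RQ. */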
1594 static void
1595 mana_post_pkt_rxq(struct mana_rxq *rxq,
1596 struct mana_recv_buf_oob *recv_buf_oob)
1597 {
1598 int err;
1599
1600 err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1601 &recv_buf_oob->wqe_inf);
1602 if (err) {
1603 mana_err(NULL, "WARNING: rxq %u post pkt err %d\n",
1604 rxq->rxq_idx, err);
1605 return;
1606 }
1607
1608 if (recv_buf_oob->wqe_inf.wqe_size_in_bu != 1) {
1609 mana_err(NULL, "WARNING: rxq %u wqe_size_in_bu %u\n",
1610 rxq->rxq_idx, recv_buf_oob->wqe_inf.wqe_size_in_bu);
1611 }
1612 }
1613
1614 static void
1615 mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe,
1616 struct mana_rxq *rxq)
1617 {
1618 struct mana_stats *rx_stats = &rxq->stats;
1619 if_t ndev = rxq->ndev;
1620 uint32_t pkt_len = cqe->ppi[0].pkt_len;
1621 uint16_t rxq_idx = rxq->rxq_idx;
1622 struct mana_port_context *apc;
1623 bool do_lro = false;
1624 bool do_if_input;
1625
1626 apc = if_getsoftc(ndev);
1627 rxq->rx_cq.work_done++;
1628
1629 if (!mbuf) {
1630 return;
1631 }
1632
1633 mbuf->m_flags |= M_PKTHDR;
1634 mbuf->m_pkthdr.len = pkt_len;
1635 mbuf->m_len = pkt_len;
1636 mbuf->m_pkthdr.rcvif = ndev;
1637
1638 if ((if_getcapenable(ndev) & IFCAP_RXCSUM ||
1639 if_getcapenable(ndev) & IFCAP_RXCSUM_IPV6) &&
1640 (cqe->rx_iphdr_csum_succeed)) {
1641 mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1642 mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1643 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) {
1644 mbuf->m_pkthdr.csum_flags |=
1645 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1646 mbuf->m_pkthdr.csum_data = 0xffff;
1647
1648 if (cqe->rx_tcp_csum_succeed)
1649 do_lro = true;
1650 }
1651 }
1652
1653 if (cqe->rx_hashtype != 0) {
1654 mbuf->m_pkthdr.flowid = cqe->ppi[0].pkt_hash;
1655
1656 uint16_t hashtype = cqe->rx_hashtype;
1657 if (hashtype & NDIS_HASH_IPV4_MASK) {
1658 hashtype &= NDIS_HASH_IPV4_MASK;
1659 switch (hashtype) {
1660 case NDIS_HASH_TCP_IPV4:
1661 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
1662 break;
1663 case NDIS_HASH_UDP_IPV4:
1664 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
1665 break;
1666 default:
1667 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
1668 }
1669 } else if (hashtype & NDIS_HASH_IPV6_MASK) {
1670 hashtype &= NDIS_HASH_IPV6_MASK;
1671 switch (hashtype) {
1672 case NDIS_HASH_TCP_IPV6:
1673 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
1674 break;
1675 case NDIS_HASH_TCP_IPV6_EX:
1676 M_HASHTYPE_SET(mbuf,
1677 M_HASHTYPE_RSS_TCP_IPV6_EX);
1678 break;
1679 case NDIS_HASH_UDP_IPV6:
1680 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
1681 break;
1682 case NDIS_HASH_UDP_IPV6_EX:
1683 M_HASHTYPE_SET(mbuf,
1684 M_HASHTYPE_RSS_UDP_IPV6_EX);
1685 break;
1686 default:
1687 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
1688 }
1689 } else {
1690 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
1691 }
1692 } else {
1693 mbuf->m_pkthdr.flowid = rxq_idx;
1694 M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
1695 }
1696
1697 do_if_input = true;
1698 if ((if_getcapenable(ndev) & IFCAP_LRO) && do_lro) {
1699 rxq->lro_tried++;
1700 if (rxq->lro.lro_cnt != 0 &&
1701 tcp_lro_rx(&rxq->lro, mbuf, 0) == 0)
1702 do_if_input = false;
1703 else
1704 rxq->lro_failed++;
1705 }
1706 if (do_if_input) {
1707 if_input(ndev, mbuf);
1708 }
1709
1710 counter_enter();
1711 counter_u64_add_protected(rx_stats->packets, 1);
1712 counter_u64_add_protected(apc->port_stats.rx_packets, 1);
1713 counter_u64_add_protected(rx_stats->bytes, pkt_len);
1714 counter_u64_add_protected(apc->port_stats.rx_bytes, pkt_len);
1715 counter_exit();
1716 }
1717
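/*
 * Allocate and post up to 'num' fresh receive buffers starting at
 * next_to_refill, stopping early on allocation failure or if the
 * refill index catches up with buf_index. Rings the RQ doorbell if
 * anything was posted and returns the number of buffers refilled.
 */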
1718 static int
1719 mana_refill_rx_mbufs(struct mana_port_context *apc,
1720 struct mana_rxq *rxq, uint32_t num)
1721 {
1722 struct mana_recv_buf_oob *rxbuf_oob;
1723 uint32_t next_to_refill;
1724 uint32_t i;
1725 int err;
1726
1727 next_to_refill = rxq->next_to_refill;
1728
1729 for (i = 0; i < num; i++) {
1730 if (next_to_refill == rxq->buf_index) {
1731 mana_warn(NULL, "refilling index reached current, "
1732 "aborted! rxq %u, oob idx %u\n",
1733 rxq->rxq_idx, next_to_refill);
1734 break;
1735 }
1736
1737 rxbuf_oob = &rxq->rx_oobs[next_to_refill];
1738
1739 if (likely(rxbuf_oob->mbuf == NULL)) {
1740 err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
1741 } else {
1742 mana_warn(NULL, "mbuf not null when refilling, "
1743 "rxq %u, oob idx %u, reusing\n",
1744 rxq->rxq_idx, next_to_refill);
1745 err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
1746 }
1747
1748 if (unlikely(err != 0)) {
1749 mana_dbg(NULL,
1750 "failed to load rx mbuf, err = %d, rxq = %u\n",
1751 err, rxq->rxq_idx);
1752 counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
1753 break;
1754 }
1755
1756 mana_post_pkt_rxq(rxq, rxbuf_oob);
1757
1758 next_to_refill = MANA_IDX_NEXT(next_to_refill,
1759 rxq->num_rx_buf);
1760 }
1761
1762 if (likely(i != 0)) {
1763 struct gdma_context *gc =
1764 rxq->gdma_rq->gdma_dev->gdma_context;
1765
1766 mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
1767 }
1768
1769 if (unlikely(i < num)) {
1770 counter_u64_add(rxq->stats.partial_refill, 1);
1771 mana_dbg(NULL,
1772 "refilled rxq %u with only %u mbufs (%u requested)\n",
1773 rxq->rxq_idx, i, num);
1774 }
1775
1776 rxq->next_to_refill = next_to_refill;
1777 return (i);
1778 }
1779
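/*
 * Handle a single RX completion: detach the filled mbuf from its slot,
 * pass it up the stack via mana_rx_mbuf(), advance the RQ tail and
 * buf_index, and trigger a refill once enough buffers were consumed.
 */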
1780 static void
1781 mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1782 struct gdma_comp *cqe)
1783 {
1784 struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1785 struct mana_recv_buf_oob *rxbuf_oob;
1786 if_t ndev = rxq->ndev;
1787 struct mana_port_context *apc;
1788 struct mbuf *old_mbuf;
1789 uint32_t refill_required;
1790 uint32_t curr, pktlen;
1791
1792 switch (oob->cqe_hdr.cqe_type) {
1793 case CQE_RX_OKAY:
1794 break;
1795
1796 case CQE_RX_TRUNCATED:
1797 apc = if_getsoftc(ndev);
1798 counter_u64_add(apc->port_stats.rx_drops, 1);
1799 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1800 if_printf(ndev, "Dropped a truncated packet\n");
1801 goto drop;
1802
1803 case CQE_RX_COALESCED_4:
1804 if_printf(ndev, "RX coalescing is unsupported\n");
1805 return;
1806
1807 case CQE_RX_OBJECT_FENCE:
1808 complete(&rxq->fence_event);
1809 return;
1810
1811 default:
1812 if_printf(ndev, "Unknown RX CQE type = %d\n",
1813 oob->cqe_hdr.cqe_type);
1814 return;
1815 }
1816
1817 if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
1818 return;
1819
1820 pktlen = oob->ppi[0].pkt_len;
1821
1822 if (pktlen == 0) {
1823 /* Data packets should never have a packet length of zero. */
1824 if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%jx\n",
1825 rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1826 return;
1827 }
1828
1829 curr = rxq->buf_index;
1830 rxbuf_oob = &rxq->rx_oobs[curr];
1831 if (rxbuf_oob->wqe_inf.wqe_size_in_bu != 1) {
1832 mana_err(NULL, "WARNING: Rx Incorrect complete "
1833 "WQE size %u\n",
1834 rxbuf_oob->wqe_inf.wqe_size_in_bu);
1835 }
1836
1837 apc = if_getsoftc(ndev);
1838
1839 old_mbuf = rxbuf_oob->mbuf;
1840
1841 /* Unload DMA map for the old mbuf */
1842 mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);
1843 /* Clear the mbuf pointer to avoid reuse */
1844 rxbuf_oob->mbuf = NULL;
1845
1846 mana_rx_mbuf(old_mbuf, oob, rxq);
1847
1848 drop:
1849 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1850
1851 rxq->buf_index = MANA_IDX_NEXT(rxq->buf_index, rxq->num_rx_buf);
1852
1853 /* Check if refill is needed */
1854 refill_required = MANA_GET_SPACE(rxq->next_to_refill,
1855 rxq->buf_index, rxq->num_rx_buf);
1856
1857 if (refill_required >= rxq->refill_thresh) {
1858 /* Refill empty rx_oobs with new mbufs */
1859 mana_refill_rx_mbufs(apc, rxq, refill_required);
1860 }
1861 }
1862
1863 static void
1864 mana_poll_rx_cq(struct mana_cq *cq)
1865 {
1866 struct gdma_comp *comp = cq->gdma_comp_buf;
1867 int comp_read, i;
1868
1869 comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1870 KASSERT(comp_read <= CQE_POLLING_BUFFER,
1871 ("comp_read %d great than buf size %d",
1872 comp_read, CQE_POLLING_BUFFER));
1873
1874 for (i = 0; i < comp_read; i++) {
1875 if (comp[i].is_sq == true) {
1876 mana_err(NULL,
1877 "WARNING: CQE not for receive queue\n");
1878 return;
1879 }
1880
1881 /* verify recv cqe references the right rxq */
1882 if (comp[i].wq_num != cq->rxq->gdma_id) {
1883 mana_err(NULL,
1884 "WARNING: Received CQE %d not for "
1885 "this receive queue %d\n",
1886 comp[i].wq_num, cq->rxq->gdma_id);
1887 return;
1888 }
1889
1890 mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
1891 }
1892
1893 if (comp_read > 0) {
1894 struct gdma_context *gc =
1895 cq->rxq->gdma_rq->gdma_dev->gdma_context;
1896
1897 mana_gd_wq_ring_doorbell(gc, cq->rxq->gdma_rq);
1898 }
1899
1900 tcp_lro_flush_all(&cq->rxq->lro);
1901 }
1902
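/*
 * Poll the RX or TX completion queue and re-arm it only when all
 * pending work was processed (work_done < budget) and ringing the
 * doorbell is allowed.
 */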
1903 static void
1904 mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1905 {
1906 struct mana_cq *cq = context;
1907 uint8_t arm_bit;
1908
1909 KASSERT(cq->gdma_cq == gdma_queue,
1910 ("cq do not match %p, %p", cq->gdma_cq, gdma_queue));
1911
1912 if (cq->type == MANA_CQ_TYPE_RX) {
1913 mana_poll_rx_cq(cq);
1914 } else {
1915 mana_poll_tx_cq(cq);
1916 }
1917
1918 if (cq->work_done < cq->budget && cq->do_not_ring_db == false)
1919 arm_bit = SET_ARM_BIT;
1920 else
1921 arm_bit = 0;
1922
1923 mana_gd_ring_cq(gdma_queue, arm_bit);
1924 }
1925
1926 #define MANA_POLL_BUDGET 256
1927 #define MANA_RX_BUDGET 8
1928 #define MANA_TX_BUDGET 8
1929
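/*
 * CQ cleanup task: poll the completion queue in passes with a small
 * budget, continuing while each pass consumes the full budget. The
 * final pass uses a budget large enough that mana_cq_handler()
 * re-arms the CQ.
 */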
1930 static void
1931 mana_poll(void *arg, int pending)
1932 {
1933 struct mana_cq *cq = arg;
1934 int i;
1935
1936 cq->work_done = 0;
1937 if (cq->type == MANA_CQ_TYPE_RX) {
1938 cq->budget = MANA_RX_BUDGET;
1939 } else {
1940 cq->budget = MANA_TX_BUDGET;
1941 }
1942
1943 for (i = 0; i < MANA_POLL_BUDGET; i++) {
1944 /*
1945 * If this is the last iteration, make the budget large
1946 * enough that the CQ gets re-armed regardless.
1947 */
1948 if (i == (MANA_POLL_BUDGET - 1))
1949 cq->budget = CQE_POLLING_BUFFER + 1;
1950
1951 mana_cq_handler(cq, cq->gdma_cq);
1952
1953 if (cq->work_done < cq->budget)
1954 break;
1955
1956 cq->work_done = 0;
1957 }
1958 }
1959
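/* CQ callback: defer completion processing to the cleanup taskqueue. */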
1960 static void
1961 mana_schedule_task(void *arg, struct gdma_queue *gdma_queue)
1962 {
1963 struct mana_cq *cq = arg;
1964
1965 taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
1966 }
1967
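/*
 * Cancel, drain and free the CQ cleanup taskqueue, then destroy the
 * GDMA completion queue.
 */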
1968 static void
1969 mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1970 {
1971 struct gdma_dev *gd = apc->ac->gdma_dev;
1972
1973 if (!cq->gdma_cq)
1974 return;
1975
1976 /* Drain cleanup taskqueue */
1977 if (cq->cleanup_tq) {
1978 while (taskqueue_cancel(cq->cleanup_tq,
1979 &cq->cleanup_task, NULL)) {
1980 taskqueue_drain(cq->cleanup_tq,
1981 &cq->cleanup_task);
1982 }
1983
1984 taskqueue_free(cq->cleanup_tq);
1985 }
1986
1987 mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1988 }
1989
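/*
 * Tear down a TX queue: warn if sends are still pending or buffers
 * have not completed, flush and free the buf_ring, drain and free the
 * enqueue taskqueue, unmap any in-flight mbufs, release the stats
 * counters, and destroy the GDMA SQ and the txq mutex.
 */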
1990 static void
1991 mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1992 {
1993 struct gdma_dev *gd = apc->ac->gdma_dev;
1994 struct mana_send_buf_info *txbuf_info;
1995 uint32_t pending_sends;
1996 int i;
1997
1998 if (!txq->gdma_sq)
1999 return;
2000
2001 if ((pending_sends = atomic_read(&txq->pending_sends)) > 0) {
2002 mana_err(NULL,
2003 "WARNING: txq pending sends not zero: %u\n",
2004 pending_sends);
2005 }
2006
2007 if (txq->next_to_use != txq->next_to_complete) {
2008 mana_err(NULL,
2009 "WARNING: txq buf not completed, "
2010 "next use %u, next complete %u\n",
2011 txq->next_to_use, txq->next_to_complete);
2012 }
2013
2014 /* Flush buf ring. Grab txq mtx lock */
2015 if (txq->txq_br) {
2016 mtx_lock(&txq->txq_mtx);
2017 drbr_flush(apc->ndev, txq->txq_br);
2018 mtx_unlock(&txq->txq_mtx);
2019 buf_ring_free(txq->txq_br, M_DEVBUF);
2020 }
2021
2022 /* Drain taskqueue */
2023 if (txq->enqueue_tq) {
2024 while (taskqueue_cancel(txq->enqueue_tq,
2025 &txq->enqueue_task, NULL)) {
2026 taskqueue_drain(txq->enqueue_tq,
2027 &txq->enqueue_task);
2028 }
2029
2030 taskqueue_free(txq->enqueue_tq);
2031 }
2032
2033 if (txq->tx_buf_info) {
2034 /* Free all mbufs which are still in-flight */
2035 for (i = 0; i < apc->tx_queue_size; i++) {
2036 txbuf_info = &txq->tx_buf_info[i];
2037 if (txbuf_info->mbuf) {
2038 mana_tx_unmap_mbuf(apc, txbuf_info);
2039 }
2040 }
2041
2042 free(txq->tx_buf_info, M_DEVBUF);
2043 }
2044
2045 mana_free_counters((counter_u64_t *)&txq->stats,
2046 sizeof(txq->stats));
2047
2048 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
2049
2050 mtx_destroy(&txq->txq_mtx);
2051 }
2052
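/*
 * Destroy every TX queue pair: the hardware WQ object, its completion
 * queue, and the txq itself.
 */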
2053 static void
2054 mana_destroy_txq(struct mana_port_context *apc)
2055 {
2056 int i;
2057
2058 if (!apc->tx_qp)
2059 return;
2060
2061 for (i = 0; i < apc->num_queues; i++) {
2062 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
2063
2064 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
2065
2066 mana_deinit_txq(apc, &apc->tx_qp[i].txq);
2067 }
2068
2069 free(apc->tx_qp, M_DEVBUF);
2070 apc->tx_qp = NULL;
2071 }
2072
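/*
 * Create one SQ/CQ pair per TX queue: allocate the GDMA queues, bind
 * the WQ object to the vPort, set up the per-queue buf_ring, the
 * deferred-send taskqueue and the CQ cleanup taskqueue, and arm the
 * CQ.
 */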
2073 static int
2074 mana_create_txq(struct mana_port_context *apc, if_t net)
2075 {
2076 struct mana_context *ac = apc->ac;
2077 struct gdma_dev *gd = ac->gdma_dev;
2078 struct mana_obj_spec wq_spec;
2079 struct mana_obj_spec cq_spec;
2080 struct gdma_queue_spec spec;
2081 struct gdma_context *gc;
2082 struct mana_txq *txq;
2083 struct mana_cq *cq;
2084 uint32_t txq_size;
2085 uint32_t cq_size;
2086 int err;
2087 int i;
2088
2089 apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
2090 M_DEVBUF, M_WAITOK | M_ZERO);
2091
2092 /* The minimum WQE size is 32 bytes, hence
2093 * apc->tx_queue_size represents the maximum number of WQEs
2094 * the SQ can store. This value is then used to size the other
2095 * queues to prevent overflow.
2096 * Also note that txq_size is always page aligned: the minimum
2097 * value of apc->tx_queue_size is 128, which makes
2098 * txq_size 128 * 32 = 4096, and all larger values of
2099 * apc->tx_queue_size are powers of two.
2100 */
2101 txq_size = apc->tx_queue_size * 32;
2102 KASSERT(IS_ALIGNED(txq_size, PAGE_SIZE),
2103 ("txq size not page aligned"));
2104
2105 cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;
2106 cq_size = ALIGN(cq_size, PAGE_SIZE);
2107
2108 gc = gd->gdma_context;
2109
2110 for (i = 0; i < apc->num_queues; i++) {
2111 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
2112
2113 /* Create SQ */
2114 txq = &apc->tx_qp[i].txq;
2115
2116 txq->ndev = net;
2117 txq->vp_offset = apc->tx_vp_offset;
2118 txq->idx = i;
2119 txq->alt_txq_idx = i;
2120
2121 memset(&spec, 0, sizeof(spec));
2122 spec.type = GDMA_SQ;
2123 spec.monitor_avl_buf = true;
2124 spec.queue_size = txq_size;
2125 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
2126 if (err)
2127 goto out;
2128
2129 /* Create SQ's CQ */
2130 cq = &apc->tx_qp[i].tx_cq;
2131 cq->type = MANA_CQ_TYPE_TX;
2132
2133 cq->txq = txq;
2134
2135 memset(&spec, 0, sizeof(spec));
2136 spec.type = GDMA_CQ;
2137 spec.monitor_avl_buf = false;
2138 spec.queue_size = cq_size;
2139 spec.cq.callback = mana_schedule_task;
2140 spec.cq.parent_eq = ac->eqs[i].eq;
2141 spec.cq.context = cq;
2142 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2143 if (err)
2144 goto out;
2145
2146 memset(&wq_spec, 0, sizeof(wq_spec));
2147 memset(&cq_spec, 0, sizeof(cq_spec));
2148
2149 wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
2150 wq_spec.queue_size = txq->gdma_sq->queue_size;
2151
2152 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2153 cq_spec.queue_size = cq->gdma_cq->queue_size;
2154 cq_spec.modr_ctx_id = 0;
2155 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2156
2157 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
2158 &wq_spec, &cq_spec, &apc->tx_qp[i].tx_object);
2159
2160 if (err)
2161 goto out;
2162
2163 txq->gdma_sq->id = wq_spec.queue_index;
2164 cq->gdma_cq->id = cq_spec.queue_index;
2165
2166 txq->gdma_sq->mem_info.dma_region_handle =
2167 GDMA_INVALID_DMA_REGION;
2168 cq->gdma_cq->mem_info.dma_region_handle =
2169 GDMA_INVALID_DMA_REGION;
2170
2171 txq->gdma_txq_id = txq->gdma_sq->id;
2172
2173 cq->gdma_id = cq->gdma_cq->id;
2174
2175 mana_dbg(NULL,
2176 "txq %d, txq gdma id %d, txq cq gdma id %d\n",
2177 i, txq->gdma_txq_id, cq->gdma_id);
2178
2179 if (cq->gdma_id >= gc->max_num_cqs) {
2180 if_printf(net, "CQ id %u too large.\n", cq->gdma_id);
2181 err = EINVAL;
2182 goto out;
2183 }
2184
2185 gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2186
2187 /* Initialize tx specific data */
2188 txq->tx_buf_info = malloc(apc->tx_queue_size *
2189 sizeof(struct mana_send_buf_info),
2190 M_DEVBUF, M_WAITOK | M_ZERO);
2191
2192 snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
2193 "mana:tx(%d)", i);
2194 mtx_init(&txq->txq_mtx, txq->txq_mtx_name, NULL, MTX_DEF);
2195
2196 txq->txq_br = buf_ring_alloc(4 * apc->tx_queue_size,
2197 M_DEVBUF, M_WAITOK, &txq->txq_mtx);
2198
2199 /* Allocate taskqueue for deferred send */
2200 TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
2201 txq->enqueue_tq = taskqueue_create_fast("mana_tx_enque",
2202 M_NOWAIT, taskqueue_thread_enqueue, &txq->enqueue_tq);
2203 if (unlikely(txq->enqueue_tq == NULL)) {
2204 if_printf(net,
2205 "Unable to create tx %d enqueue task queue\n", i);
2206 err = ENOMEM;
2207 goto out;
2208 }
2209 taskqueue_start_threads(&txq->enqueue_tq, 1, PI_NET,
2210 "mana txq p%u-tx%d", apc->port_idx, i);
2211
2212 mana_alloc_counters((counter_u64_t *)&txq->stats,
2213 sizeof(txq->stats));
2214
2215 /* Allocate and start the cleanup task on CQ */
2216 cq->do_not_ring_db = false;
2217
2218 NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
2219 cq->cleanup_tq =
2220 taskqueue_create_fast("mana tx cq cleanup",
2221 M_WAITOK, taskqueue_thread_enqueue,
2222 &cq->cleanup_tq);
2223
2224 if (apc->last_tx_cq_bind_cpu < 0)
2225 apc->last_tx_cq_bind_cpu = CPU_FIRST();
2226 cq->cpu = apc->last_tx_cq_bind_cpu;
2227 apc->last_tx_cq_bind_cpu = CPU_NEXT(apc->last_tx_cq_bind_cpu);
2228
2229 if (apc->bind_cleanup_thread_cpu) {
2230 cpuset_t cpu_mask;
2231 CPU_SETOF(cq->cpu, &cpu_mask);
2232 taskqueue_start_threads_cpuset(&cq->cleanup_tq,
2233 1, PI_NET, &cpu_mask,
2234 "mana cq p%u-tx%u-cpu%d",
2235 apc->port_idx, txq->idx, cq->cpu);
2236 } else {
2237 taskqueue_start_threads(&cq->cleanup_tq, 1,
2238 PI_NET, "mana cq p%u-tx%u",
2239 apc->port_idx, txq->idx);
2240 }
2241
2242 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2243 }
2244
2245 return 0;
2246 out:
2247 mana_destroy_txq(apc);
2248 return err;
2249 }
2250
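/*
 * Tear down an RX queue: destroy the hardware WQ object and its
 * completion queue, release the stats counters and LRO state, unload
 * any remaining rx mbufs and their DMA maps, destroy the GDMA RQ and
 * free the rxq.
 */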
2251 static void
2252 mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq,
2253 bool validate_state)
2254 {
2255 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2256 struct mana_recv_buf_oob *rx_oob;
2257 int i;
2258
2259 if (!rxq)
2260 return;
2261
2262 if (validate_state) {
2263 /*
2264 * XXX Cancel and drain cleanup task queue here.
2265 */
2266 ;
2267 }
2268
2269 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2270
2271 mana_deinit_cq(apc, &rxq->rx_cq);
2272
2273 mana_free_counters((counter_u64_t *)&rxq->stats,
2274 sizeof(rxq->stats));
2275
2276 /* Free LRO resources */
2277 tcp_lro_free(&rxq->lro);
2278
2279 for (i = 0; i < rxq->num_rx_buf; i++) {
2280 rx_oob = &rxq->rx_oobs[i];
2281
2282 if (rx_oob->mbuf)
2283 mana_unload_rx_mbuf(apc, rxq, rx_oob, true);
2284
2285 bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2286 }
2287
2288 if (rxq->gdma_rq)
2289 mana_gd_destroy_queue(gc, rxq->gdma_rq);
2290
2291 free(rxq, M_DEVBUF);
2292 }
2293
2294 #define MANA_WQE_HEADER_SIZE 16
2295 #define MANA_WQE_SGE_SIZE 16
2296
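/*
 * Prepare one receive WQE per rx buffer: create the DMA map, load an
 * mbuf and fill in the WQE request. The accumulated RQ and CQ sizes
 * needed to hold all of them are returned via rxq_size and cq_size.
 */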
2297 static int
2298 mana_alloc_rx_wqe(struct mana_port_context *apc,
2299 struct mana_rxq *rxq, uint32_t *rxq_size, uint32_t *cq_size)
2300 {
2301 struct mana_recv_buf_oob *rx_oob;
2302 uint32_t buf_idx;
2303 int err;
2304
2305 if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) {
2306 mana_err(NULL,
2307 "WARNING: Invalid rxq datasize %u\n", rxq->datasize);
2308 }
2309
2310 *rxq_size = 0;
2311 *cq_size = 0;
2312
2313 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2314 rx_oob = &rxq->rx_oobs[buf_idx];
2315 memset(rx_oob, 0, sizeof(*rx_oob));
2316
2317 err = bus_dmamap_create(apc->rx_buf_tag, 0,
2318 &rx_oob->dma_map);
2319 if (err) {
2320 mana_err(NULL,
2321 "Failed to create rx DMA map for buf %d\n",
2322 buf_idx);
2323 return err;
2324 }
2325
2326 err = mana_load_rx_mbuf(apc, rxq, rx_oob, true);
2327 if (err) {
2328 mana_err(NULL,
2329 "Failed to load rx mbuf for buf %d\n",
2330 buf_idx);
2331 bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2332 return err;
2333 }
2334
2335 rx_oob->wqe_req.sgl = rx_oob->sgl;
2336 rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2337 rx_oob->wqe_req.inline_oob_size = 0;
2338 rx_oob->wqe_req.inline_oob_data = NULL;
2339 rx_oob->wqe_req.flags = 0;
2340 rx_oob->wqe_req.client_data_unit = 0;
2341
2342 *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2343 MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2344 *cq_size += COMP_ENTRY_SIZE;
2345 }
2346
2347 return 0;
2348 }
2349
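/* Post the prepared receive WQEs to the RQ and ring its doorbell. */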
2350 static int
2351 mana_push_wqe(struct mana_rxq *rxq)
2352 {
2353 struct mana_recv_buf_oob *rx_oob;
2354 uint32_t buf_idx;
2355 int err;
2356
2357 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2358 rx_oob = &rxq->rx_oobs[buf_idx];
2359
2360 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2361 &rx_oob->wqe_inf);
2362 if (err)
2363 return ENOSPC;
2364 }
2365
2366 return 0;
2367 }
2368
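/*
 * Create an RX queue: allocate the rxq and its receive buffers, size
 * the buffers from the configured frame size, pick the mbuf refill
 * threshold, set up LRO, create the GDMA RQ and its completion queue,
 * bind the WQ object to the vPort, post the initial WQEs, start the
 * CQ cleanup taskqueue and arm the CQ.
 */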
2369 static struct mana_rxq *
2370 mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
2371 struct mana_eq *eq, if_t ndev)
2372 {
2373 struct gdma_dev *gd = apc->ac->gdma_dev;
2374 struct mana_obj_spec wq_spec;
2375 struct mana_obj_spec cq_spec;
2376 struct gdma_queue_spec spec;
2377 struct mana_cq *cq = NULL;
2378 uint32_t cq_size, rq_size;
2379 struct gdma_context *gc;
2380 struct mana_rxq *rxq;
2381 int err;
2382
2383 gc = gd->gdma_context;
2384
2385 rxq = malloc(sizeof(*rxq) +
2386 apc->rx_queue_size * sizeof(struct mana_recv_buf_oob),
2387 M_DEVBUF, M_WAITOK | M_ZERO);
2388 rxq->ndev = ndev;
2389 rxq->num_rx_buf = apc->rx_queue_size;
2390 rxq->rxq_idx = rxq_idx;
2391 /*
2392 * The minimum size is MCLBYTES (2048 bytes), one mbuf cluster.
2393 * Currently only a maximum size of 4096 is allowed.
2394 */
2395 rxq->datasize = ALIGN(apc->frame_size, MCLBYTES);
2396 if (rxq->datasize > MAX_FRAME_SIZE)
2397 rxq->datasize = MAX_FRAME_SIZE;
2398
2399 mana_dbg(NULL, "Setting rxq %d datasize %d\n",
2400 rxq_idx, rxq->datasize);
2401
2402 /*
2403 * Set the mbuf refill_thresh in two steps:
2404 * 1) If mana_rx_refill_threshold is set, honor it;
2405 * otherwise use the default value.
2406 * 2) Cap the result at 1/4 of the number of
2407 * rx buffers.
2408 */
2409 if (mana_rx_refill_threshold != 0)
2410 rxq->refill_thresh = mana_rx_refill_threshold;
2411 else
2412 rxq->refill_thresh = MANA_RX_REFILL_THRESH;
2413 rxq->refill_thresh = min_t(uint32_t,
2414 rxq->num_rx_buf / 4, rxq->refill_thresh);
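/*
 * For example, with 1024 rx buffers the threshold is capped at 256
 * no matter how large the tunable is set.
 */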
2415
2416 mana_dbg(NULL, "Setting rxq %d refill thresh %u\n",
2417 rxq_idx, rxq->refill_thresh);
2418
2419 rxq->rxobj = INVALID_MANA_HANDLE;
2420
2421 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2422 if (err)
2423 goto out;
2424
2425 /* Create LRO for the RQ */
2426 if (if_getcapenable(ndev) & IFCAP_LRO) {
2427 err = tcp_lro_init(&rxq->lro);
2428 if (err) {
2429 if_printf(ndev, "Failed to create LRO for rxq %d\n",
2430 rxq_idx);
2431 } else {
2432 rxq->lro.ifp = ndev;
2433 }
2434 }
2435
2436 mana_alloc_counters((counter_u64_t *)&rxq->stats,
2437 sizeof(rxq->stats));
2438
2439 rq_size = ALIGN(rq_size, PAGE_SIZE);
2440 cq_size = ALIGN(cq_size, PAGE_SIZE);
2441
2442 /* Create RQ */
2443 memset(&spec, 0, sizeof(spec));
2444 spec.type = GDMA_RQ;
2445 spec.monitor_avl_buf = true;
2446 spec.queue_size = rq_size;
2447 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2448 if (err)
2449 goto out;
2450
2451 /* Create RQ's CQ */
2452 cq = &rxq->rx_cq;
2453 cq->type = MANA_CQ_TYPE_RX;
2454 cq->rxq = rxq;
2455
2456 memset(&spec, 0, sizeof(spec));
2457 spec.type = GDMA_CQ;
2458 spec.monitor_avl_buf = false;
2459 spec.queue_size = cq_size;
2460 spec.cq.callback = mana_schedule_task;
2461 spec.cq.parent_eq = eq->eq;
2462 spec.cq.context = cq;
2463 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2464 if (err)
2465 goto out;
2466
2467 memset(&wq_spec, 0, sizeof(wq_spec));
2468 memset(&cq_spec, 0, sizeof(cq_spec));
2469 wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2470 wq_spec.queue_size = rxq->gdma_rq->queue_size;
2471
2472 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2473 cq_spec.queue_size = cq->gdma_cq->queue_size;
2474 cq_spec.modr_ctx_id = 0;
2475 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2476
2477 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2478 &wq_spec, &cq_spec, &rxq->rxobj);
2479 if (err)
2480 goto out;
2481
2482 rxq->gdma_rq->id = wq_spec.queue_index;
2483 cq->gdma_cq->id = cq_spec.queue_index;
2484
2485 rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2486 cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2487
2488 rxq->gdma_id = rxq->gdma_rq->id;
2489 cq->gdma_id = cq->gdma_cq->id;
2490
2491 err = mana_push_wqe(rxq);
2492 if (err)
2493 goto out;
2494
2495 if (cq->gdma_id >= gc->max_num_cqs) {
2496 err = EINVAL;
2497 goto out;
2498 }
2499
2500 gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2501
2502 /* Allocate and start the cleanup task on CQ */
2503 cq->do_not_ring_db = false;
2504
2505 NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
2506 cq->cleanup_tq =
2507 taskqueue_create_fast("mana rx cq cleanup",
2508 M_WAITOK, taskqueue_thread_enqueue,
2509 &cq->cleanup_tq);
2510
2511 if (apc->last_rx_cq_bind_cpu < 0)
2512 apc->last_rx_cq_bind_cpu = CPU_FIRST();
2513 cq->cpu = apc->last_rx_cq_bind_cpu;
2514 apc->last_rx_cq_bind_cpu = CPU_NEXT(apc->last_rx_cq_bind_cpu);
2515
2516 if (apc->bind_cleanup_thread_cpu) {
2517 cpuset_t cpu_mask;
2518 CPU_SETOF(cq->cpu, &cpu_mask);
2519 taskqueue_start_threads_cpuset(&cq->cleanup_tq,
2520 1, PI_NET, &cpu_mask,
2521 "mana cq p%u-rx%u-cpu%d",
2522 apc->port_idx, rxq->rxq_idx, cq->cpu);
2523 } else {
2524 taskqueue_start_threads(&cq->cleanup_tq, 1,
2525 PI_NET, "mana cq p%u-rx%u",
2526 apc->port_idx, rxq->rxq_idx);
2527 }
2528
2529 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2530 out:
2531 if (!err)
2532 return rxq;
2533
2534 if_printf(ndev, "Failed to create RXQ: err = %d\n", err);
2535
2536 mana_destroy_rxq(apc, rxq, false);
2537
2538 if (cq)
2539 mana_deinit_cq(apc, cq);
2540
2541 return NULL;
2542 }
2543
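/*
 * Create one RX queue per configured queue, each attached to its own
 * EQ, and record the first queue's RX object as the default.
 */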
2544 static int
2545 mana_add_rx_queues(struct mana_port_context *apc, if_t ndev)
2546 {
2547 struct mana_context *ac = apc->ac;
2548 struct mana_rxq *rxq;
2549 int err = 0;
2550 int i;
2551
2552 for (i = 0; i < apc->num_queues; i++) {
2553 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2554 if (!rxq) {
2555 err = ENOMEM;
2556 goto out;
2557 }
2558
2559 apc->rxqs[i] = rxq;
2560 }
2561
2562 apc->default_rxobj = apc->rxqs[0]->rxobj;
2563 out:
2564 return err;
2565 }
2566
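/* Destroy all RX queues, then the TX queues, and release the vPort. */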
2567 static void
2568 mana_destroy_vport(struct mana_port_context *apc)
2569 {
2570 struct mana_rxq *rxq;
2571 uint32_t rxq_idx;
2572
2573 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2574 rxq = apc->rxqs[rxq_idx];
2575 if (!rxq)
2576 continue;
2577
2578 mana_destroy_rxq(apc, rxq, true);
2579 apc->rxqs[rxq_idx] = NULL;
2580 }
2581
2582 mana_destroy_txq(apc);
2583
2584 mana_uncfg_vport(apc);
2585 }
2586
2587 static int
2588 mana_create_vport(struct mana_port_context *apc, if_t net)
2589 {
2590 struct gdma_dev *gd = apc->ac->gdma_dev;
2591 int err;
2592
2593 apc->default_rxobj = INVALID_MANA_HANDLE;
2594
2595 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2596 if (err)
2597 return err;
2598
2599 return mana_create_txq(apc, net);
2600 }
2601
2602
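/* Default indirection table: spread entries round-robin over the queues. */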
2603 static void mana_rss_table_init(struct mana_port_context *apc)
2604 {
2605 int i;
2606
2607 for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
2608 apc->indir_table[i] = i % apc->num_queues;
2609 }
2610
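/*
 * Apply the RSS configuration: optionally refresh the rxobj table from
 * the indirection table, push the steering settings to the vPort, and
 * fence the RQs so the hardware has switched over before returning.
 */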
2611 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2612 bool update_hash, bool update_tab)
2613 {
2614 uint32_t queue_idx;
2615 int err;
2616 int i;
2617
2618 if (update_tab) {
2619 for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
2620 queue_idx = apc->indir_table[i];
2621 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2622 }
2623 }
2624
2625 err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2626 if (err)
2627 return err;
2628
2629 mana_fence_rqs(apc);
2630
2631 return 0;
2632 }
2633
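/*
 * Initialize the port context and query the vPort for its limits,
 * clamping max_queues and num_queues accordingly.
 */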
2634 static int
2635 mana_init_port(if_t ndev)
2636 {
2637 struct mana_port_context *apc = if_getsoftc(ndev);
2638 uint32_t max_txq, max_rxq, max_queues;
2639 int port_idx = apc->port_idx;
2640 uint32_t num_indirect_entries;
2641 int err;
2642
2643 err = mana_init_port_context(apc);
2644 if (err)
2645 return err;
2646
2647 err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2648 &num_indirect_entries);
2649 if (err) {
2650 if_printf(ndev, "Failed to query info for vPort %d\n",
2651 port_idx);
2652 goto reset_apc;
2653 }
2654
2655 max_queues = min_t(uint32_t, max_txq, max_rxq);
2656 if (apc->max_queues > max_queues)
2657 apc->max_queues = max_queues;
2658
2659 if (apc->num_queues > apc->max_queues)
2660 apc->num_queues = apc->max_queues;
2661
2662 return 0;
2663
2664 reset_apc:
2665 bus_dma_tag_destroy(apc->rx_buf_tag);
2666 apc->rx_buf_tag = NULL;
2667 free(apc->rxqs, M_DEVBUF);
2668 apc->rxqs = NULL;
2669 return err;
2670 }
2671
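/*
 * Bring up the data path: create the vPort and its TX queues, add the
 * RX queues, then program the RSS table. RSS is enabled only when
 * more than one queue is configured.
 */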
2672 int
2673 mana_alloc_queues(if_t ndev)
2674 {
2675 struct mana_port_context *apc = if_getsoftc(ndev);
2676 int err;
2677
2678 err = mana_create_vport(apc, ndev);
2679 if (err)
2680 return err;
2681
2682 err = mana_add_rx_queues(apc, ndev);
2683 if (err)
2684 goto destroy_vport;
2685
2686 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2687
2688 mana_rss_table_init(apc);
2689
2690 err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2691 if (err)
2692 goto destroy_vport;
2693
2694 return 0;
2695
2696 destroy_vport:
2697 mana_destroy_vport(apc);
2698 return err;
2699 }
2700
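/*
 * Bring the port up: allocate all queues, register the per-queue
 * sysctls, mark the port up and report link up to the stack.
 */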
2701 static int
2702 mana_up(struct mana_port_context *apc)
2703 {
2704 int err;
2705
2706 mana_dbg(NULL, "mana_up called\n");
2707
2708 err = mana_alloc_queues(apc->ndev);
2709 if (err) {
2710 mana_err(NULL, "Failed to alloc mana queues: %d\n", err);
2711 return err;
2712 }
2713
2714 /* Add queue specific sysctl */
2715 mana_sysctl_add_queues(apc);
2716
2717 apc->port_is_up = true;
2718
2719 /* Ensure port state updated before txq state */
2720 wmb();
2721
2722 if_link_state_change(apc->ndev, LINK_STATE_UP);
2723 if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2724
2725 return 0;
2726 }
2727
2728
2729 static void
2730 mana_init(void *arg)
2731 {
2732 struct mana_port_context *apc = (struct mana_port_context *)arg;
2733
2734 MANA_APC_LOCK_LOCK(apc);
2735 if (!apc->port_is_up) {
2736 mana_up(apc);
2737 }
2738 MANA_APC_LOCK_UNLOCK(apc);
2739 }
2740
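/*
 * Quiesce and free the data path: suppress CQ doorbell arming, run a
 * final TX cleanup pass, wait for pending sends to drain, disable RSS
 * steering and destroy the vPort.
 */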
2741 static int
2742 mana_dealloc_queues(if_t ndev)
2743 {
2744 struct mana_port_context *apc = if_getsoftc(ndev);
2745 struct mana_txq *txq;
2746 int i, err;
2747
2748 if (apc->port_is_up)
2749 return EINVAL;
2750
2751 /* No packet can be transmitted now since apc->port_is_up is false.
2752 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
2753 * a txq, because it may not see apc->port_is_up being cleared to
2754 * false in time, but that does not matter since mana_start_xmit()
2755 * drops any new packets while apc->port_is_up is false.
2756 *
2757 * Drain all the in-flight TX packets.
2758 */
2759 for (i = 0; i < apc->num_queues; i++) {
2760 txq = &apc->tx_qp[i].txq;
2761
2762 struct mana_cq *tx_cq = &apc->tx_qp[i].tx_cq;
2763 struct mana_cq *rx_cq = &(apc->rxqs[i]->rx_cq);
2764
2765 tx_cq->do_not_ring_db = true;
2766 rx_cq->do_not_ring_db = true;
2767
2768 /* Schedule a cleanup task */
2769 taskqueue_enqueue(tx_cq->cleanup_tq, &tx_cq->cleanup_task);
2770
2771 while (atomic_read(&txq->pending_sends) > 0)
2772 usleep_range(1000, 2000);
2773 }
2774
2775 /* At this point the queues can no longer be woken up, because
2776 * mana_poll_tx_cq() can no longer be running.
2777 */
2778
2779 apc->rss_state = TRI_STATE_FALSE;
2780 err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2781 if (err) {
2782 if_printf(ndev, "Failed to disable vPort: %d\n", err);
2783 return err;
2784 }
2785
2786 mana_destroy_vport(apc);
2787
2788 return 0;
2789 }
2790
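/*
 * Bring the port down: mark it not running, report link down, remove
 * the per-queue sysctls and free the queues.
 */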
2791 static int
2792 mana_down(struct mana_port_context *apc)
2793 {
2794 int err = 0;
2795
2796 apc->port_st_save = apc->port_is_up;
2797 apc->port_is_up = false;
2798
2799 /* Ensure port state updated before txq state */
2800 wmb();
2801
2802 if (apc->port_st_save) {
2803 if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE,
2804 IFF_DRV_RUNNING);
2805 if_link_state_change(apc->ndev, LINK_STATE_DOWN);
2806
2807 mana_sysctl_free_queues(apc);
2808
2809 err = mana_dealloc_queues(apc->ndev);
2810 if (err) {
2811 if_printf(apc->ndev,
2812 "Failed to bring down mana interface: %d\n", err);
2813 }
2814 }
2815
2816 return err;
2817 }
2818
2819 int
2820 mana_detach(if_t ndev)
2821 {
2822 struct mana_port_context *apc = if_getsoftc(ndev);
2823 int err;
2824
2825 ether_ifdetach(ndev);
2826
2827 if (!apc)
2828 return 0;
2829
2830 MANA_APC_LOCK_LOCK(apc);
2831 err = mana_down(apc);
2832 MANA_APC_LOCK_UNLOCK(apc);
2833
2834 mana_cleanup_port_context(apc);
2835
2836 MANA_APC_LOCK_DESTROY(apc);
2837
2838 free(apc, M_DEVBUF);
2839
2840 return err;
2841 }
2842
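/*
 * Pick the TX queue size: round the requested value up to a power of
 * two, falling back to the default when the request is zero or the
 * result is out of range. The RX variant below does the same for
 * receive buffers.
 */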
2843 static unsigned int
2844 mana_get_tx_queue_size(int port_idx, unsigned int request_size)
2845 {
2846 unsigned int new_size;
2847
2848 if (request_size == 0)
2849 /* Uninitialized */
2850 new_size = DEF_SEND_BUFFERS_PER_QUEUE;
2851 else
2852 new_size = roundup_pow_of_two(request_size);
2853
2854 if (new_size < MIN_SEND_BUFFERS_PER_QUEUE ||
2855 new_size > MAX_SEND_BUFFERS_PER_QUEUE) {
2856 mana_info(NULL, "mana port %d: requested tx buffer "
2857 "size %u out of allowable range (%u - %u), "
2858 "setting to default\n",
2859 port_idx, request_size,
2860 MIN_SEND_BUFFERS_PER_QUEUE,
2861 MAX_SEND_BUFFERS_PER_QUEUE);
2862 new_size = DEF_SEND_BUFFERS_PER_QUEUE;
2863 }
2864 mana_info(NULL, "mana port %d: tx buffer size %u "
2865 "(%u requested)\n",
2866 port_idx, new_size, request_size);
2867
2868 return (new_size);
2869 }
2870
2871 static unsigned int
2872 mana_get_rx_queue_size(int port_idx, unsigned int request_size)
2873 {
2874 unsigned int new_size;
2875
2876 if (request_size == 0)
2877 /* Uninitialized */
2878 new_size = DEF_RX_BUFFERS_PER_QUEUE;
2879 else
2880 new_size = roundup_pow_of_two(request_size);
2881
2882 if (new_size < MIN_RX_BUFFERS_PER_QUEUE ||
2883 new_size > MAX_RX_BUFFERS_PER_QUEUE) {
2884 mana_info(NULL, "mana port %d: requested rx buffer "
2885 "size %u out of allowable range (%u - %u), "
2886 "setting to default\n",
2887 port_idx, request_size,
2888 MIN_RX_BUFFERS_PER_QUEUE,
2889 MAX_RX_BUFFERS_PER_QUEUE);
2890 new_size = DEF_RX_BUFFERS_PER_QUEUE;
2891 }
2892 mana_info(NULL, "mana port %d: rx buffer size %u "
2893 "(%u requested)\n",
2894 port_idx, new_size, request_size);
2895
2896 return (new_size);
2897 }
2898
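/*
 * Create and attach the network interface for one vPort: allocate the
 * port context, pick the queue sizes, register the ifnet methods,
 * initialize the port, advertise the offload capabilities, TSO limits
 * and media, and attach to the ethernet layer.
 */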
2899 static int
2900 mana_probe_port(struct mana_context *ac, int port_idx,
2901 if_t *ndev_storage)
2902 {
2903 struct gdma_context *gc = ac->gdma_dev->gdma_context;
2904 struct mana_port_context *apc;
2905 uint32_t hwassist;
2906 if_t ndev;
2907 int err;
2908
2909 ndev = if_alloc_dev(IFT_ETHER, gc->dev);
2910 *ndev_storage = ndev;
2911
2912 apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
2913 apc->ac = ac;
2914 apc->ndev = ndev;
2915 apc->max_queues = gc->max_num_queues;
2916 apc->num_queues = min_t(unsigned int,
2917 gc->max_num_queues, MANA_MAX_NUM_QUEUES);
2918 apc->tx_queue_size = mana_get_tx_queue_size(port_idx,
2919 mana_tx_req_size);
2920 apc->rx_queue_size = mana_get_rx_queue_size(port_idx,
2921 mana_rx_req_size);
2922 apc->port_handle = INVALID_MANA_HANDLE;
2923 apc->port_idx = port_idx;
2924 apc->frame_size = DEFAULT_FRAME_SIZE;
2925 apc->last_tx_cq_bind_cpu = -1;
2926 apc->last_rx_cq_bind_cpu = -1;
2927 apc->vport_use_count = 0;
2928
2929 MANA_APC_LOCK_INIT(apc);
2930
2931 if_initname(ndev, device_get_name(gc->dev), port_idx);
2932 if_setdev(ndev, gc->dev);
2933 if_setsoftc(ndev, apc);
2934
2935 if_setflags(ndev, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2936 if_setinitfn(ndev, mana_init);
2937 if_settransmitfn(ndev, mana_start_xmit);
2938 if_setqflushfn(ndev, mana_qflush);
2939 if_setioctlfn(ndev, mana_ioctl);
2940 if_setgetcounterfn(ndev, mana_get_counter);
2941
2942 if_setmtu(ndev, ETHERMTU);
2943 if_setbaudrate(ndev, IF_Gbps(100));
2944
2945 mana_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2946
2947 err = mana_init_port(ndev);
2948 if (err)
2949 goto reset_apc;
2950
2951 if_setcapabilitiesbit(ndev,
2952 IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
2953 IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
2954 IFCAP_TSO4 | IFCAP_TSO6 |
2955 IFCAP_LRO | IFCAP_LINKSTATE, 0);
2956
2957 /* Enable all available capabilities by default. */
2958 if_setcapenable(ndev, if_getcapabilities(ndev));
2959
2960 /* TSO parameters */
2961 if_sethwtsomax(ndev, MANA_TSO_MAX_SZ -
2962 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2963 if_sethwtsomaxsegcount(ndev, MAX_MBUF_FRAGS);
2964 if_sethwtsomaxsegsize(ndev, PAGE_SIZE);
2965
2966 hwassist = 0;
2967 if (if_getcapenable(ndev) & (IFCAP_TSO4 | IFCAP_TSO6))
2968 hwassist |= CSUM_TSO;
2969 if (if_getcapenable(ndev) & IFCAP_TXCSUM)
2970 hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2971 if (if_getcapenable(ndev) & IFCAP_TXCSUM_IPV6)
2972 hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2973 mana_dbg(NULL, "set hwassist 0x%x\n", hwassist);
2974 if_sethwassist(ndev, hwassist);
2975
2976 ifmedia_init(&apc->media, IFM_IMASK,
2977 mana_ifmedia_change, mana_ifmedia_status);
2978 ifmedia_add(&apc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2979 ifmedia_set(&apc->media, IFM_ETHER | IFM_AUTO);
2980
2981 ether_ifattach(ndev, apc->mac_addr);
2982
2983 /* Initialize statistics */
2984 mana_alloc_counters((counter_u64_t *)&apc->port_stats,
2985 sizeof(struct mana_port_stats));
2986 mana_sysctl_add_port(apc);
2987
2988 /* Tell the stack that the interface is not active */
2989 if_setdrvflagbits(ndev, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2990
2991 return 0;
2992
2993 reset_apc:
2994 free(apc, M_DEVBUF);
2995 *ndev_storage = NULL;
2996 if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2997 if_free(ndev);
2998 return err;
2999 }
3000
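/*
 * Device attach: register with the GDMA layer, create the EQs, query
 * the device configuration and probe each exposed port. On failure
 * everything is torn down via mana_remove().
 */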
3001 int mana_probe(struct gdma_dev *gd)
3002 {
3003 struct gdma_context *gc = gd->gdma_context;
3004 device_t dev = gc->dev;
3005 struct mana_context *ac;
3006 int err;
3007 int i;
3008
3009 device_printf(dev, "%s protocol version: %d.%d.%d\n", DEVICE_NAME,
3010 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
3011
3012 err = mana_gd_register_device(gd);
3013 if (err)
3014 return err;
3015
3016 ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
3017 ac->gdma_dev = gd;
3018 ac->num_ports = 1;
3019 gd->driver_data = ac;
3020
3021 err = mana_create_eq(ac);
3022 if (err)
3023 goto out;
3024
3025 err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
3026 MANA_MICRO_VERSION, &ac->num_ports);
3027 if (err)
3028 goto out;
3029
3030 if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
3031 ac->num_ports = MAX_PORTS_IN_MANA_DEV;
3032
3033 for (i = 0; i < ac->num_ports; i++) {
3034 err = mana_probe_port(ac, i, &ac->ports[i]);
3035 if (err) {
3036 device_printf(dev,
3037 "Failed to probe mana port %d\n", i);
3038 break;
3039 }
3040 }
3041
3042 out:
3043 if (err)
3044 mana_remove(gd);
3045
3046 return err;
3047 }
3048
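/*
 * Device detach: detach and free every port's ifnet, destroy the EQs
 * and deregister from the GDMA layer.
 */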
3049 void
3050 mana_remove(struct gdma_dev *gd)
3051 {
3052 struct gdma_context *gc = gd->gdma_context;
3053 struct mana_context *ac = gd->driver_data;
3054 device_t dev = gc->dev;
3055 if_t ndev;
3056 int i;
3057
3058 for (i = 0; i < ac->num_ports; i++) {
3059 ndev = ac->ports[i];
3060 if (!ndev) {
3061 if (i == 0)
3062 device_printf(dev, "No net device to remove\n");
3063 goto out;
3064 }
3065
3066 mana_detach(ndev);
3067
3068 if_free(ndev);
3069 }
3070
3071 mana_destroy_eq(ac);
3072
3073 out:
3074 mana_gd_deregister_device(gd);
3075 gd->driver_data = NULL;
3076 gd->gdma_context = NULL;
3077 free(ac, M_DEVBUF);
3078 }
3079