1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2021 Microsoft Corp.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 *
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/bus.h>
34 #include <sys/kernel.h>
35 #include <sys/kthread.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/smp.h>
39 #include <sys/socket.h>
40 #include <sys/sockio.h>
41 #include <sys/time.h>
42 #include <sys/eventhandler.h>
43
44 #include <machine/bus.h>
45 #include <machine/resource.h>
46 #include <machine/in_cksum.h>
47
48 #include <net/if.h>
49 #include <net/if_var.h>
50 #include <net/if_types.h>
51 #include <net/if_vlan_var.h>
52 #ifdef RSS
53 #include <net/rss_config.h>
54 #endif
55
56 #include <netinet/in_systm.h>
57 #include <netinet/in.h>
58 #include <netinet/if_ether.h>
59 #include <netinet/ip.h>
60 #include <netinet/ip6.h>
61 #include <netinet/tcp.h>
62 #include <netinet/udp.h>
63
64 #include "mana.h"
65 #include "mana_sysctl.h"
66
67 static int mana_up(struct mana_port_context *apc);
68 static int mana_down(struct mana_port_context *apc);
69
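/*
 * Fill in the RSS hash key. The key is generated once with arc4random_buf()
 * and cached, so every caller (and therefore every port) gets the same key.
 */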
70 static void
71 mana_rss_key_fill(void *k, size_t size)
72 {
73 static bool rss_key_generated = false;
74 static uint8_t rss_key[MANA_HASH_KEY_SIZE];
75
76 KASSERT(size <= MANA_HASH_KEY_SIZE,
77 	    ("Requested more bytes than the MANA RSS key can hold"));
78
79 if (!rss_key_generated) {
80 arc4random_buf(rss_key, MANA_HASH_KEY_SIZE);
81 rss_key_generated = true;
82 }
83 memcpy(k, rss_key, size);
84 }
85
86 static int
87 mana_ifmedia_change(if_t ifp __unused)
88 {
89 return EOPNOTSUPP;
90 }
91
92 static void
93 mana_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
94 {
95 struct mana_port_context *apc = if_getsoftc(ifp);
96
97 if (!apc) {
98 if_printf(ifp, "Port not available\n");
99 return;
100 }
101
102 MANA_APC_LOCK_LOCK(apc);
103
104 ifmr->ifm_status = IFM_AVALID;
105 ifmr->ifm_active = IFM_ETHER;
106
107 if (!apc->port_is_up) {
108 MANA_APC_LOCK_UNLOCK(apc);
109 mana_dbg(NULL, "Port %u link is down\n", apc->port_idx);
110 return;
111 }
112
113 ifmr->ifm_status |= IFM_ACTIVE;
114 ifmr->ifm_active |= IFM_100G_DR | IFM_FDX;
115
116 MANA_APC_LOCK_UNLOCK(apc);
117 }
118
119 static uint64_t
120 mana_get_counter(if_t ifp, ift_counter cnt)
121 {
122 struct mana_port_context *apc = if_getsoftc(ifp);
123 struct mana_port_stats *stats = &apc->port_stats;
124
125 switch (cnt) {
126 case IFCOUNTER_IPACKETS:
127 return (counter_u64_fetch(stats->rx_packets));
128 case IFCOUNTER_OPACKETS:
129 return (counter_u64_fetch(stats->tx_packets));
130 case IFCOUNTER_IBYTES:
131 return (counter_u64_fetch(stats->rx_bytes));
132 case IFCOUNTER_OBYTES:
133 return (counter_u64_fetch(stats->tx_bytes));
134 case IFCOUNTER_IQDROPS:
135 return (counter_u64_fetch(stats->rx_drops));
136 case IFCOUNTER_OQDROPS:
137 return (counter_u64_fetch(stats->tx_drops));
138 default:
139 return (if_get_counter_default(ifp, cnt));
140 }
141 }
142
143 static void
144 mana_qflush(if_t ifp)
145 {
146 if_qflush(ifp);
147 }
148
149 int
150 mana_restart(struct mana_port_context *apc)
151 {
152 int rc = 0;
153
154 MANA_APC_LOCK_LOCK(apc);
155 if (apc->port_is_up)
156 mana_down(apc);
157
158 rc = mana_up(apc);
159 MANA_APC_LOCK_UNLOCK(apc);
160
161 return (rc);
162 }
163
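/*
 * Interface ioctl handler. MTU changes restart the port with the new frame
 * size; capability changes keep the TSO bits consistent with the
 * corresponding TX checksum offload; media and RSS key/hash queries are
 * answered here as well.
 */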
164 static int
165 mana_ioctl(if_t ifp, u_long command, caddr_t data)
166 {
167 struct mana_port_context *apc = if_getsoftc(ifp);
168 struct ifrsskey *ifrk;
169 struct ifrsshash *ifrh;
170 struct ifreq *ifr;
171 uint16_t new_mtu;
172 int rc = 0, mask;
173
174 switch (command) {
175 case SIOCSIFMTU:
176 ifr = (struct ifreq *)data;
177 new_mtu = ifr->ifr_mtu;
178 if (if_getmtu(ifp) == new_mtu)
179 break;
180 if ((new_mtu + 18 > MAX_FRAME_SIZE) ||
181 (new_mtu + 18 < MIN_FRAME_SIZE)) {
182 if_printf(ifp, "Invalid MTU. new_mtu: %d, "
183 "max allowed: %d, min allowed: %d\n",
184 new_mtu, MAX_FRAME_SIZE - 18, MIN_FRAME_SIZE - 18);
185 return EINVAL;
186 }
187 MANA_APC_LOCK_LOCK(apc);
188 if (apc->port_is_up)
189 mana_down(apc);
190
191 apc->frame_size = new_mtu + 18;
192 if_setmtu(ifp, new_mtu);
193 mana_dbg(NULL, "Set MTU to %d\n", new_mtu);
194
195 rc = mana_up(apc);
196 MANA_APC_LOCK_UNLOCK(apc);
197 break;
198
199 case SIOCSIFFLAGS:
200 if (if_getflags(ifp) & IFF_UP) {
201 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
202 MANA_APC_LOCK_LOCK(apc);
203 if (!apc->port_is_up)
204 rc = mana_up(apc);
205 MANA_APC_LOCK_UNLOCK(apc);
206 }
207 } else {
208 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
209 MANA_APC_LOCK_LOCK(apc);
210 if (apc->port_is_up)
211 mana_down(apc);
212 MANA_APC_LOCK_UNLOCK(apc);
213 }
214 }
215 break;
216
217 case SIOCSIFCAP:
218 MANA_APC_LOCK_LOCK(apc);
219 ifr = (struct ifreq *)data;
220 /*
221 * Fix up requested capabilities w/ supported capabilities,
222 * since the supported capabilities could have been changed.
223 */
224 mask = (ifr->ifr_reqcap & if_getcapabilities(ifp)) ^
225 if_getcapenable(ifp);
226
227 if (mask & IFCAP_TXCSUM) {
228 if_togglecapenable(ifp, IFCAP_TXCSUM);
229 if_togglehwassist(ifp, (CSUM_TCP | CSUM_UDP | CSUM_IP));
230
231 if ((IFCAP_TSO4 & if_getcapenable(ifp)) &&
232 !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
233 mask &= ~IFCAP_TSO4;
234 if_setcapenablebit(ifp, 0, IFCAP_TSO4);
235 if_sethwassistbits(ifp, 0, CSUM_IP_TSO);
236 mana_warn(NULL,
237 "Also disabled tso4 due to -txcsum.\n");
238 }
239 }
240
241 if (mask & IFCAP_TXCSUM_IPV6) {
242 if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
243 if_togglehwassist(ifp, (CSUM_UDP_IPV6 | CSUM_TCP_IPV6));
244
245 if ((IFCAP_TSO6 & if_getcapenable(ifp)) &&
246 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
247 mask &= ~IFCAP_TSO6;
248 if_setcapenablebit(ifp, 0, IFCAP_TSO6);
249 if_sethwassistbits(ifp, 0, CSUM_IP6_TSO);
250 mana_warn(ifp,
251 "Also disabled tso6 due to -txcsum6.\n");
252 }
253 }
254
255 if (mask & IFCAP_RXCSUM)
256 if_togglecapenable(ifp, IFCAP_RXCSUM);
257 		/* We can't distinguish IPv6 packets from IPv4 packets on the RX path. */
258 if (mask & IFCAP_RXCSUM_IPV6)
259 if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
260
261 if (mask & IFCAP_LRO)
262 if_togglecapenable(ifp, IFCAP_LRO);
263
264 if (mask & IFCAP_TSO4) {
265 if (!(IFCAP_TSO4 & if_getcapenable(ifp)) &&
266 !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
267 MANA_APC_LOCK_UNLOCK(apc);
268 if_printf(ifp, "Enable txcsum first.\n");
269 rc = EAGAIN;
270 goto out;
271 }
272 if_togglecapenable(ifp, IFCAP_TSO4);
273 if_togglehwassist(ifp, CSUM_IP_TSO);
274 }
275
276 if (mask & IFCAP_TSO6) {
277 if (!(IFCAP_TSO6 & if_getcapenable(ifp)) &&
278 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
279 MANA_APC_LOCK_UNLOCK(apc);
280 if_printf(ifp, "Enable txcsum6 first.\n");
281 rc = EAGAIN;
282 goto out;
283 }
284 if_togglecapenable(ifp, IFCAP_TSO6);
285 if_togglehwassist(ifp, CSUM_IP6_TSO);
286 }
287
288 MANA_APC_LOCK_UNLOCK(apc);
289 out:
290 break;
291
292 case SIOCSIFMEDIA:
293 case SIOCGIFMEDIA:
294 case SIOCGIFXMEDIA:
295 ifr = (struct ifreq *)data;
296 rc = ifmedia_ioctl(ifp, ifr, &apc->media, command);
297 break;
298
299 case SIOCGIFRSSKEY:
300 ifrk = (struct ifrsskey *)data;
301 ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
302 ifrk->ifrk_keylen = MANA_HASH_KEY_SIZE;
303 memcpy(ifrk->ifrk_key, apc->hashkey, MANA_HASH_KEY_SIZE);
304 break;
305
306 case SIOCGIFRSSHASH:
307 ifrh = (struct ifrsshash *)data;
308 ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
309 ifrh->ifrh_types =
310 RSS_TYPE_TCP_IPV4 |
311 RSS_TYPE_UDP_IPV4 |
312 RSS_TYPE_TCP_IPV6 |
313 RSS_TYPE_UDP_IPV6;
314 break;
315
316 default:
317 rc = ether_ioctl(ifp, command, data);
318 break;
319 }
320
321 return (rc);
322 }
323
324 static inline void
325 mana_alloc_counters(counter_u64_t *begin, int size)
326 {
327 counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
328
329 for (; begin < end; ++begin)
330 *begin = counter_u64_alloc(M_WAITOK);
331 }
332
333 static inline void
334 mana_free_counters(counter_u64_t *begin, int size)
335 {
336 counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
337
338 for (; begin < end; ++begin)
339 counter_u64_free(*begin);
340 }
341
342 static bool
343 mana_can_tx(struct gdma_queue *wq)
344 {
345 return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
346 }
347
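/*
 * DMA-map an outgoing mbuf chain and fill in the WQE scatter/gather list.
 * If the chain has too many fragments, try m_collapse() once before giving
 * up.
 */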
348 static inline int
349 mana_tx_map_mbuf(struct mana_port_context *apc,
350 struct mana_send_buf_info *tx_info,
351 struct mbuf **m_head, struct mana_tx_package *tp,
352 struct mana_stats *tx_stats)
353 {
354 struct gdma_dev *gd = apc->ac->gdma_dev;
355 bus_dma_segment_t segs[MAX_MBUF_FRAGS];
356 struct mbuf *m = *m_head;
357 int err, nsegs, i;
358
359 err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, tx_info->dma_map,
360 m, segs, &nsegs, BUS_DMA_NOWAIT);
361 if (err == EFBIG) {
362 struct mbuf *m_new;
363
364 counter_u64_add(tx_stats->collapse, 1);
365 m_new = m_collapse(m, M_NOWAIT, MAX_MBUF_FRAGS);
366 if (unlikely(m_new == NULL)) {
367 counter_u64_add(tx_stats->collapse_err, 1);
368 return ENOBUFS;
369 } else {
370 *m_head = m = m_new;
371 }
372
373 mana_warn(NULL,
374 "Too many segs in orig mbuf, m_collapse called\n");
375
376 err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag,
377 tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
378 }
379 if (!err) {
380 for (i = 0; i < nsegs; i++) {
381 tp->wqe_req.sgl[i].address = segs[i].ds_addr;
382 tp->wqe_req.sgl[i].mem_key = gd->gpa_mkey;
383 tp->wqe_req.sgl[i].size = segs[i].ds_len;
384 }
385 tp->wqe_req.num_sge = nsegs;
386
387 tx_info->mbuf = *m_head;
388
389 bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
390 BUS_DMASYNC_PREWRITE);
391 }
392
393 return err;
394 }
395
396 static inline void
397 mana_tx_unmap_mbuf(struct mana_port_context *apc,
398 struct mana_send_buf_info *tx_info)
399 {
400 bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
401 BUS_DMASYNC_POSTWRITE);
402 bus_dmamap_unload(apc->tx_buf_tag, tx_info->dma_map);
403 if (tx_info->mbuf) {
404 m_freem(tx_info->mbuf);
405 tx_info->mbuf = NULL;
406 }
407 }
408
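/*
 * Allocate (or reuse) an RX mbuf sized to rxq->datasize, DMA-map it and
 * fill in the single-entry SGL used by the receive WQE. Falls back to a
 * regular cluster if a jumbo cluster cannot be allocated.
 */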
409 static inline int
410 mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
411 struct mana_recv_buf_oob *rx_oob, bool alloc_mbuf)
412 {
413 bus_dma_segment_t segs[1];
414 struct mbuf *mbuf;
415 int nsegs, err;
416 uint32_t mlen;
417
418 if (alloc_mbuf) {
419 mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize);
420 if (unlikely(mbuf == NULL)) {
421 mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
422 if (unlikely(mbuf == NULL)) {
423 return ENOMEM;
424 }
425 mlen = MCLBYTES;
426 } else {
427 mlen = rxq->datasize;
428 }
429
430 mbuf->m_pkthdr.len = mbuf->m_len = mlen;
431 } else {
432 if (rx_oob->mbuf) {
433 mbuf = rx_oob->mbuf;
434 mlen = rx_oob->mbuf->m_pkthdr.len;
435 } else {
436 return ENOMEM;
437 }
438 }
439
440 err = bus_dmamap_load_mbuf_sg(apc->rx_buf_tag, rx_oob->dma_map,
441 mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
442
443 if (unlikely((err != 0) || (nsegs != 1))) {
444 mana_warn(NULL, "Failed to map mbuf, error: %d, "
445 "nsegs: %d\n", err, nsegs);
446 counter_u64_add(rxq->stats.dma_mapping_err, 1);
447 goto error;
448 }
449
450 bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
451 BUS_DMASYNC_PREREAD);
452
453 rx_oob->mbuf = mbuf;
454 rx_oob->num_sge = 1;
455 rx_oob->sgl[0].address = segs[0].ds_addr;
456 rx_oob->sgl[0].size = mlen;
457 rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
458
459 return 0;
460
461 error:
462 m_freem(mbuf);
463 return EFAULT;
464 }
465
466 static inline void
467 mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
468 struct mana_recv_buf_oob *rx_oob, bool free_mbuf)
469 {
470 bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
471 BUS_DMASYNC_POSTREAD);
472 bus_dmamap_unload(apc->rx_buf_tag, rx_oob->dma_map);
473
474 if (free_mbuf && rx_oob->mbuf) {
475 m_freem(rx_oob->mbuf);
476 rx_oob->mbuf = NULL;
477 }
478 }
479
480
481 /* Use a couple of mbuf PH_loc spaces for the L3 and L4 protocol types */
482 #define MANA_L3_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
483 #define MANA_L4_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
484
485 #define MANA_TXQ_FULL (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)
486
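/*
 * Transmit path proper: drain the txq buf ring, DMA-map each mbuf, build
 * the per-packet out-of-band area (checksum/TSO offload bits), post the
 * work requests to the SQ and ring the doorbell. If the SQ runs out of
 * space, set IFF_DRV_OACTIVE and, when this queue looks persistently
 * overloaded, steer future packets to an alternate txq.
 */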
487 static void
488 mana_xmit(struct mana_txq *txq)
489 {
490 enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
491 struct mana_send_buf_info *tx_info;
492 if_t ndev = txq->ndev;
493 struct mbuf *mbuf;
494 struct mana_port_context *apc = if_getsoftc(ndev);
495 struct mana_port_stats *port_stats = &apc->port_stats;
496 struct gdma_dev *gd = apc->ac->gdma_dev;
497 uint64_t packets, bytes;
498 uint16_t next_to_use;
499 struct mana_tx_package pkg = {};
500 struct mana_stats *tx_stats;
501 struct gdma_queue *gdma_sq;
502 struct mana_cq *cq;
503 int err, len;
504 bool is_tso;
505
506 gdma_sq = txq->gdma_sq;
507 cq = &apc->tx_qp[txq->idx].tx_cq;
508 tx_stats = &txq->stats;
509
510 packets = 0;
511 bytes = 0;
512 next_to_use = txq->next_to_use;
513
514 while ((mbuf = drbr_peek(ndev, txq->txq_br)) != NULL) {
515 if (!apc->port_is_up ||
516 (if_getdrvflags(ndev) & MANA_TXQ_FULL) != IFF_DRV_RUNNING) {
517 drbr_putback(ndev, txq->txq_br, mbuf);
518 break;
519 }
520
521 if (!mana_can_tx(gdma_sq)) {
522 /* SQ is full. Set the IFF_DRV_OACTIVE flag */
523 if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, 0);
524 counter_u64_add(tx_stats->stop, 1);
525 uint64_t stops = counter_u64_fetch(tx_stats->stop);
526 uint64_t wakeups = counter_u64_fetch(tx_stats->wakeup);
527 #define MANA_TXQ_STOP_THRESHOLD 50
528 if (stops > MANA_TXQ_STOP_THRESHOLD && wakeups > 0 &&
529 stops > wakeups && txq->alt_txq_idx == txq->idx) {
530 txq->alt_txq_idx =
531 (txq->idx + (stops / wakeups))
532 % apc->num_queues;
533 counter_u64_add(tx_stats->alt_chg, 1);
534 }
535
536 drbr_putback(ndev, txq->txq_br, mbuf);
537
538 taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
539 break;
540 }
541
542 tx_info = &txq->tx_buf_info[next_to_use];
543
544 memset(&pkg, 0, sizeof(struct mana_tx_package));
545 pkg.wqe_req.sgl = pkg.sgl_array;
546
547 err = mana_tx_map_mbuf(apc, tx_info, &mbuf, &pkg, tx_stats);
548 if (unlikely(err)) {
549 mana_dbg(NULL,
550 "Failed to map tx mbuf, err %d\n", err);
551
552 counter_u64_add(tx_stats->dma_mapping_err, 1);
553
554 /* The mbuf is still there. Free it */
555 m_freem(mbuf);
556 /* Advance the drbr queue */
557 drbr_advance(ndev, txq->txq_br);
558 continue;
559 }
560
561 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
562 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
563
564 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
565 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
566 pkt_fmt = MANA_LONG_PKT_FMT;
567 } else {
568 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
569 }
570
571 pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
572
573 if (pkt_fmt == MANA_SHORT_PKT_FMT)
574 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
575 else
576 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
577
578 pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
579 pkg.wqe_req.flags = 0;
580 pkg.wqe_req.client_data_unit = 0;
581
582 is_tso = false;
583 if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
584 is_tso = true;
585
586 if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
587 pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
588 else
589 pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
590
591 pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
592 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
593 pkg.tx_oob.s_oob.trans_off = mbuf->m_pkthdr.l3hlen;
594
595 pkg.wqe_req.client_data_unit = mbuf->m_pkthdr.tso_segsz;
596 pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
597 } else if (mbuf->m_pkthdr.csum_flags &
598 (CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP6_UDP | CSUM_IP6_TCP)) {
599 if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP) {
600 pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
601 pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
602 } else {
603 pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
604 }
605
606 if (MANA_L4_PROTO(mbuf) == IPPROTO_TCP) {
607 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
608 pkg.tx_oob.s_oob.trans_off =
609 mbuf->m_pkthdr.l3hlen;
610 } else {
611 pkg.tx_oob.s_oob.comp_udp_csum = 1;
612 }
613 } else if (mbuf->m_pkthdr.csum_flags & CSUM_IP) {
614 pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
615 pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
616 } else {
617 if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
618 pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
619 else if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IPV6)
620 pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
621 }
622
623 len = mbuf->m_pkthdr.len;
624
625 err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
626 (struct gdma_posted_wqe_info *)&tx_info->wqe_inf);
627 if (unlikely(err)) {
628 /* Should not happen */
629 if_printf(ndev, "Failed to post TX OOB: %d\n", err);
630
631 mana_tx_unmap_mbuf(apc, tx_info);
632
633 drbr_advance(ndev, txq->txq_br);
634 continue;
635 }
636
637 next_to_use =
638 (next_to_use + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
639
640 (void)atomic_inc_return(&txq->pending_sends);
641
642 drbr_advance(ndev, txq->txq_br);
643
644 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
645
646 packets++;
647 bytes += len;
648
649 if (is_tso) {
650 txq->tso_pkts++;
651 txq->tso_bytes += len;
652 }
653 }
654
655 counter_enter();
656 counter_u64_add_protected(tx_stats->packets, packets);
657 counter_u64_add_protected(port_stats->tx_packets, packets);
658 counter_u64_add_protected(tx_stats->bytes, bytes);
659 counter_u64_add_protected(port_stats->tx_bytes, bytes);
660 counter_exit();
661
662 txq->next_to_use = next_to_use;
663 }
664
665 static void
666 mana_xmit_taskfunc(void *arg, int pending)
667 {
668 struct mana_txq *txq = (struct mana_txq *)arg;
669 if_t ndev = txq->ndev;
670 struct mana_port_context *apc = if_getsoftc(ndev);
671
672 while (!drbr_empty(ndev, txq->txq_br) && apc->port_is_up &&
673 (if_getdrvflags(ndev) & MANA_TXQ_FULL) == IFF_DRV_RUNNING) {
674 mtx_lock(&txq->txq_mtx);
675 mana_xmit(txq);
676 mtx_unlock(&txq->txq_mtx);
677 }
678 }
679
680 #define PULLUP_HDR(m, len) \
681 do { \
682 if (unlikely((m)->m_len < (len))) { \
683 (m) = m_pullup((m), (len)); \
684 if ((m) == NULL) \
685 return (NULL); \
686 } \
687 } while (0)
688
689 /*
690  * If this function fails, the mbuf is freed and NULL is returned.
691 */
692 static inline struct mbuf *
693 mana_tso_fixup(struct mbuf *mbuf)
694 {
695 struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
696 struct tcphdr *th;
697 uint16_t etype;
698 int ehlen;
699
700 if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
701 etype = ntohs(eh->evl_proto);
702 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
703 } else {
704 etype = ntohs(eh->evl_encap_proto);
705 ehlen = ETHER_HDR_LEN;
706 }
707
708 if (etype == ETHERTYPE_IP) {
709 struct ip *ip;
710 int iphlen;
711
712 PULLUP_HDR(mbuf, ehlen + sizeof(*ip));
713 ip = mtodo(mbuf, ehlen);
714 iphlen = ip->ip_hl << 2;
715 mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
716
717 PULLUP_HDR(mbuf, ehlen + iphlen + sizeof(*th));
718 th = mtodo(mbuf, ehlen + iphlen);
719
720 ip->ip_len = 0;
721 ip->ip_sum = 0;
722 th->th_sum = in_pseudo(ip->ip_src.s_addr,
723 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
724 } else if (etype == ETHERTYPE_IPV6) {
725 struct ip6_hdr *ip6;
726
727 PULLUP_HDR(mbuf, ehlen + sizeof(*ip6) + sizeof(*th));
728 ip6 = mtodo(mbuf, ehlen);
729 if (ip6->ip6_nxt != IPPROTO_TCP) {
730 			/* Something is really wrong; just free the mbuf and return */
731 mana_dbg(NULL, "TSO mbuf not TCP, freed.\n");
732 m_freem(mbuf);
733 return NULL;
734 }
735 mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
736
737 th = mtodo(mbuf, ehlen + sizeof(*ip6));
738
739 ip6->ip6_plen = 0;
740 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
741 } else {
742 /* CSUM_TSO is set but not IP protocol. */
743 mana_warn(NULL, "TSO mbuf not right, freed.\n");
744 m_freem(mbuf);
745 return NULL;
746 }
747
748 MANA_L3_PROTO(mbuf) = etype;
749
750 return (mbuf);
751 }
752
753 /*
754  * Record the mbuf's L3 ether type and L4 protocol for use on the TX path.
755 */
756 static inline struct mbuf *
757 mana_mbuf_csum_check(struct mbuf *mbuf)
758 {
759 struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
760 struct mbuf *mbuf_next;
761 uint16_t etype;
762 int offset;
763 int ehlen;
764
765 if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
766 etype = ntohs(eh->evl_proto);
767 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
768 } else {
769 etype = ntohs(eh->evl_encap_proto);
770 ehlen = ETHER_HDR_LEN;
771 }
772
773 mbuf_next = m_getptr(mbuf, ehlen, &offset);
774
775 MANA_L4_PROTO(mbuf) = 0;
776 if (etype == ETHERTYPE_IP) {
777 const struct ip *ip;
778 int iphlen;
779
780 ip = (struct ip *)(mtodo(mbuf_next, offset));
781 iphlen = ip->ip_hl << 2;
782 mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
783
784 MANA_L4_PROTO(mbuf) = ip->ip_p;
785 } else if (etype == ETHERTYPE_IPV6) {
786 const struct ip6_hdr *ip6;
787
788 ip6 = (struct ip6_hdr *)(mtodo(mbuf_next, offset));
789 mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
790
791 MANA_L4_PROTO(mbuf) = ip6->ip6_nxt;
792 } else {
793 MANA_L4_PROTO(mbuf) = 0;
794 }
795
796 MANA_L3_PROTO(mbuf) = etype;
797
798 return (mbuf);
799 }
800
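/*
 * Multiqueue transmit entry point: prepare the mbuf for checksum/TSO
 * offload, pick a TX queue from the RSS indirection table (or the flow ID),
 * enqueue the packet on that queue's buf ring and either transmit directly
 * or defer to the enqueue taskqueue.
 */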
801 static int
802 mana_start_xmit(if_t ifp, struct mbuf *m)
803 {
804 struct mana_port_context *apc = if_getsoftc(ifp);
805 struct mana_txq *txq;
806 int is_drbr_empty;
807 uint16_t txq_id;
808 int err;
809
810 if (unlikely((!apc->port_is_up) ||
811 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
812 return ENODEV;
813
814 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
815 m = mana_tso_fixup(m);
816 if (unlikely(m == NULL)) {
817 counter_enter();
818 counter_u64_add_protected(apc->port_stats.tx_drops, 1);
819 counter_exit();
820 return EIO;
821 }
822 } else {
823 m = mana_mbuf_csum_check(m);
824 if (unlikely(m == NULL)) {
825 counter_enter();
826 counter_u64_add_protected(apc->port_stats.tx_drops, 1);
827 counter_exit();
828 return EIO;
829 }
830 }
831
832 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
833 uint32_t hash = m->m_pkthdr.flowid;
834 txq_id = apc->indir_table[(hash) & MANA_INDIRECT_TABLE_MASK] %
835 apc->num_queues;
836 } else {
837 txq_id = m->m_pkthdr.flowid % apc->num_queues;
838 }
839
840 if (apc->enable_tx_altq)
841 txq_id = apc->tx_qp[txq_id].txq.alt_txq_idx;
842
843 txq = &apc->tx_qp[txq_id].txq;
844
845 is_drbr_empty = drbr_empty(ifp, txq->txq_br);
846 err = drbr_enqueue(ifp, txq->txq_br, m);
847 if (unlikely(err)) {
848 mana_warn(NULL, "txq %u failed to enqueue: %d\n",
849 txq_id, err);
850 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
851 return err;
852 }
853
854 if (is_drbr_empty && mtx_trylock(&txq->txq_mtx)) {
855 mana_xmit(txq);
856 mtx_unlock(&txq->txq_mtx);
857 } else {
858 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
859 }
860
861 return 0;
862 }
863
864 static void
865 mana_cleanup_port_context(struct mana_port_context *apc)
866 {
867 bus_dma_tag_destroy(apc->tx_buf_tag);
868 bus_dma_tag_destroy(apc->rx_buf_tag);
869 apc->rx_buf_tag = NULL;
870
871 free(apc->rxqs, M_DEVBUF);
872 apc->rxqs = NULL;
873
874 mana_free_counters((counter_u64_t *)&apc->port_stats,
875 sizeof(struct mana_port_stats));
876 }
877
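/*
 * Create the TX and RX DMA tags and allocate the rxq pointer array for
 * this port.
 */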
878 static int
879 mana_init_port_context(struct mana_port_context *apc)
880 {
881 device_t dev = apc->ac->gdma_dev->gdma_context->dev;
882 uint32_t tso_maxsize;
883 int err;
884
885 tso_maxsize = MANA_TSO_MAX_SZ;
886
887 /* Create DMA tag for tx bufs */
888 err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
889 1, 0, /* alignment, boundary */
890 BUS_SPACE_MAXADDR, /* lowaddr */
891 BUS_SPACE_MAXADDR, /* highaddr */
892 NULL, NULL, /* filter, filterarg */
893 tso_maxsize, /* maxsize */
894 MAX_MBUF_FRAGS, /* nsegments */
895 tso_maxsize, /* maxsegsize */
896 0, /* flags */
897 NULL, NULL, /* lockfunc, lockfuncarg*/
898 &apc->tx_buf_tag);
899 if (unlikely(err)) {
900 		device_printf(dev, "Failed to create TX DMA tag\n");
901 return err;
902 }
903
904 /* Create DMA tag for rx bufs */
905 err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
906 64, 0, /* alignment, boundary */
907 BUS_SPACE_MAXADDR, /* lowaddr */
908 BUS_SPACE_MAXADDR, /* highaddr */
909 NULL, NULL, /* filter, filterarg */
910 MJUMPAGESIZE, /* maxsize */
911 1, /* nsegments */
912 MJUMPAGESIZE, /* maxsegsize */
913 0, /* flags */
914 NULL, NULL, /* lockfunc, lockfuncarg*/
915 &apc->rx_buf_tag);
916 if (unlikely(err)) {
917 		device_printf(dev, "Failed to create RX DMA tag\n");
918 return err;
919 }
920
921 apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
922 M_DEVBUF, M_WAITOK | M_ZERO);
923
924 return 0;
925 }
926
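/*
 * Send a management request to the device over the GDMA channel. The
 * request is stamped with the MANA dev_id and a monotonically increasing
 * activity_id; the response must echo both back.
 */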
927 static int
928 mana_send_request(struct mana_context *ac, void *in_buf,
929 uint32_t in_len, void *out_buf, uint32_t out_len)
930 {
931 struct gdma_context *gc = ac->gdma_dev->gdma_context;
932 struct gdma_resp_hdr *resp = out_buf;
933 struct gdma_req_hdr *req = in_buf;
934 device_t dev = gc->dev;
935 static atomic_t activity_id;
936 int err;
937
938 req->dev_id = gc->mana.dev_id;
939 req->activity_id = atomic_inc_return(&activity_id);
940
941 mana_dbg(NULL, "activity_id = %u\n", activity_id);
942
943 err = mana_gd_send_request(gc, in_len, in_buf, out_len,
944 out_buf);
945 if (err || resp->status) {
946 device_printf(dev, "Failed to send mana message: %d, 0x%x\n",
947 err, resp->status);
948 return err ? err : EPROTO;
949 }
950
951 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
952 req->activity_id != resp->activity_id) {
953 device_printf(dev,
954 "Unexpected mana message response: %x,%x,%x,%x\n",
955 req->dev_id.as_uint32, resp->dev_id.as_uint32,
956 req->activity_id, resp->activity_id);
957 return EPROTO;
958 }
959
960 return 0;
961 }
962
963 static int
964 mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
965 const enum mana_command_code expected_code,
966 const uint32_t min_size)
967 {
968 if (resp_hdr->response.msg_type != expected_code)
969 return EPROTO;
970
971 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
972 return EPROTO;
973
974 if (resp_hdr->response.msg_size < min_size)
975 return EPROTO;
976
977 return 0;
978 }
979
980 static int
981 mana_query_device_cfg(struct mana_context *ac, uint32_t proto_major_ver,
982 uint32_t proto_minor_ver, uint32_t proto_micro_ver,
983 uint16_t *max_num_vports)
984 {
985 struct gdma_context *gc = ac->gdma_dev->gdma_context;
986 struct mana_query_device_cfg_resp resp = {};
987 struct mana_query_device_cfg_req req = {};
988 device_t dev = gc->dev;
989 int err = 0;
990
991 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
992 sizeof(req), sizeof(resp));
993 req.proto_major_ver = proto_major_ver;
994 req.proto_minor_ver = proto_minor_ver;
995 req.proto_micro_ver = proto_micro_ver;
996
997 err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
998 if (err) {
999 device_printf(dev, "Failed to query config: %d", err);
1000 return err;
1001 }
1002
1003 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
1004 sizeof(resp));
1005 if (err || resp.hdr.status) {
1006 device_printf(dev, "Invalid query result: %d, 0x%x\n", err,
1007 resp.hdr.status);
1008 if (!err)
1009 err = EPROTO;
1010 return err;
1011 }
1012
1013 *max_num_vports = resp.max_num_vports;
1014
1015 mana_dbg(NULL, "mana max_num_vports from device = %d\n",
1016 *max_num_vports);
1017
1018 return 0;
1019 }
1020
1021 static int
1022 mana_query_vport_cfg(struct mana_port_context *apc, uint32_t vport_index,
1023 uint32_t *max_sq, uint32_t *max_rq, uint32_t *num_indir_entry)
1024 {
1025 struct mana_query_vport_cfg_resp resp = {};
1026 struct mana_query_vport_cfg_req req = {};
1027 int err;
1028
1029 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
1030 sizeof(req), sizeof(resp));
1031
1032 req.vport_index = vport_index;
1033
1034 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1035 sizeof(resp));
1036 if (err)
1037 return err;
1038
1039 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
1040 sizeof(resp));
1041 if (err)
1042 return err;
1043
1044 if (resp.hdr.status)
1045 return EPROTO;
1046
1047 *max_sq = resp.max_num_sq;
1048 *max_rq = resp.max_num_rq;
1049 *num_indir_entry = resp.num_indirection_ent;
1050
1051 apc->port_handle = resp.vport;
1052 memcpy(apc->mac_addr, resp.mac_addr, ETHER_ADDR_LEN);
1053
1054 return 0;
1055 }
1056
1057 void
1058 mana_uncfg_vport(struct mana_port_context *apc)
1059 {
1060 apc->vport_use_count--;
1061 if (apc->vport_use_count < 0) {
1062 mana_err(NULL,
1063 		    "WARNING: vport_use_count less than 0: %d\n",
1064 apc->vport_use_count);
1065 }
1066 }
1067
1068 int
1069 mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
1070 uint32_t doorbell_pg_id)
1071 {
1072 struct mana_config_vport_resp resp = {};
1073 struct mana_config_vport_req req = {};
1074 int err;
1075
1076 /* This function is used to program the Ethernet port in the hardware
1077 * table. It can be called from the Ethernet driver or the RDMA driver.
1078 *
1079 * For Ethernet usage, the hardware supports only one active user on a
1080 * physical port. The driver checks on the port usage before programming
1081 * the hardware when creating the RAW QP (RDMA driver) or exposing the
1082 * device to kernel NET layer (Ethernet driver).
1083 *
1084 * Because the RDMA driver doesn't know in advance which QP type the
1085 * user will create, it exposes the device with all its ports. The user
1086 * may not be able to create RAW QP on a port if this port is already
1087 	 * in use by the Ethernet driver in the kernel.
1088 *
1089 * This physical port limitation only applies to the RAW QP. For RC QP,
1090 * the hardware doesn't have this limitation. The user can create RC
1091 * QPs on a physical port up to the hardware limits independent of the
1092 * Ethernet usage on the same port.
1093 */
1094 if (apc->vport_use_count > 0) {
1095 return EBUSY;
1096 }
1097 apc->vport_use_count++;
1098
1099 mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
1100 sizeof(req), sizeof(resp));
1101 req.vport = apc->port_handle;
1102 req.pdid = protection_dom_id;
1103 req.doorbell_pageid = doorbell_pg_id;
1104
1105 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1106 sizeof(resp));
1107 if (err) {
1108 if_printf(apc->ndev, "Failed to configure vPort: %d\n", err);
1109 goto out;
1110 }
1111
1112 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1113 sizeof(resp));
1114 if (err || resp.hdr.status) {
1115 if_printf(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1116 err, resp.hdr.status);
1117 if (!err)
1118 err = EPROTO;
1119
1120 goto out;
1121 }
1122
1123 apc->tx_shortform_allowed = resp.short_form_allowed;
1124 apc->tx_vp_offset = resp.tx_vport_offset;
1125
1126 if_printf(apc->ndev, "Configured vPort %ju PD %u DB %u\n",
1127 apc->port_handle, protection_dom_id, doorbell_pg_id);
1128
1129 out:
1130 if (err)
1131 mana_uncfg_vport(apc);
1132
1133 return err;
1134 }
1135
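/*
 * Configure RX steering for the vPort. The request carries the RSS state
 * and, optionally, the hash key and the RX object indirection table, which
 * is appended immediately after the fixed-size request header.
 */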
1136 static int
1137 mana_cfg_vport_steering(struct mana_port_context *apc,
1138 enum TRI_STATE rx,
1139 bool update_default_rxobj, bool update_key,
1140 bool update_tab)
1141 {
1142 uint16_t num_entries = MANA_INDIRECT_TABLE_SIZE;
1143 struct mana_cfg_rx_steer_req *req = NULL;
1144 struct mana_cfg_rx_steer_resp resp = {};
1145 if_t ndev = apc->ndev;
1146 mana_handle_t *req_indir_tab;
1147 uint32_t req_buf_size;
1148 int err;
1149
1150 req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
1151 req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
1152
1153 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1154 sizeof(resp));
1155
1156 req->vport = apc->port_handle;
1157 req->num_indir_entries = num_entries;
1158 req->indir_tab_offset = sizeof(*req);
1159 req->rx_enable = rx;
1160 req->rss_enable = apc->rss_state;
1161 req->update_default_rxobj = update_default_rxobj;
1162 req->update_hashkey = update_key;
1163 req->update_indir_tab = update_tab;
1164 req->default_rxobj = apc->default_rxobj;
1165
1166 if (update_key)
1167 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1168
1169 if (update_tab) {
1170 req_indir_tab = (mana_handle_t *)(req + 1);
1171 memcpy(req_indir_tab, apc->rxobj_table,
1172 req->num_indir_entries * sizeof(mana_handle_t));
1173 }
1174
1175 err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1176 sizeof(resp));
1177 if (err) {
1178 if_printf(ndev, "Failed to configure vPort RX: %d\n", err);
1179 goto out;
1180 }
1181
1182 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1183 sizeof(resp));
1184 if (err) {
1185 if_printf(ndev, "vPort RX configuration failed: %d\n", err);
1186 goto out;
1187 }
1188
1189 if (resp.hdr.status) {
1190 if_printf(ndev, "vPort RX configuration failed: 0x%x\n",
1191 resp.hdr.status);
1192 err = EPROTO;
1193 }
1194
1195 if_printf(ndev, "Configured steering vPort %ju entries %u\n",
1196 apc->port_handle, num_entries);
1197
1198 out:
1199 free(req, M_DEVBUF);
1200 return err;
1201 }
1202
1203 int
1204 mana_create_wq_obj(struct mana_port_context *apc,
1205 mana_handle_t vport,
1206 uint32_t wq_type, struct mana_obj_spec *wq_spec,
1207 struct mana_obj_spec *cq_spec,
1208 mana_handle_t *wq_obj)
1209 {
1210 struct mana_create_wqobj_resp resp = {};
1211 struct mana_create_wqobj_req req = {};
1212 if_t ndev = apc->ndev;
1213 int err;
1214
1215 mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1216 sizeof(req), sizeof(resp));
1217 req.vport = vport;
1218 req.wq_type = wq_type;
1219 req.wq_gdma_region = wq_spec->gdma_region;
1220 req.cq_gdma_region = cq_spec->gdma_region;
1221 req.wq_size = wq_spec->queue_size;
1222 req.cq_size = cq_spec->queue_size;
1223 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1224 req.cq_parent_qid = cq_spec->attached_eq;
1225
1226 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1227 sizeof(resp));
1228 if (err) {
1229 if_printf(ndev, "Failed to create WQ object: %d\n", err);
1230 goto out;
1231 }
1232
1233 err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1234 sizeof(resp));
1235 if (err || resp.hdr.status) {
1236 if_printf(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1237 resp.hdr.status);
1238 if (!err)
1239 err = EPROTO;
1240 goto out;
1241 }
1242
1243 if (resp.wq_obj == INVALID_MANA_HANDLE) {
1244 if_printf(ndev, "Got an invalid WQ object handle\n");
1245 err = EPROTO;
1246 goto out;
1247 }
1248
1249 *wq_obj = resp.wq_obj;
1250 wq_spec->queue_index = resp.wq_id;
1251 cq_spec->queue_index = resp.cq_id;
1252
1253 return 0;
1254 out:
1255 return err;
1256 }
1257
1258 void
1259 mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
1260 mana_handle_t wq_obj)
1261 {
1262 struct mana_destroy_wqobj_resp resp = {};
1263 struct mana_destroy_wqobj_req req = {};
1264 if_t ndev = apc->ndev;
1265 int err;
1266
1267 mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1268 sizeof(req), sizeof(resp));
1269 req.wq_type = wq_type;
1270 req.wq_obj_handle = wq_obj;
1271
1272 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1273 sizeof(resp));
1274 if (err) {
1275 if_printf(ndev, "Failed to destroy WQ object: %d\n", err);
1276 return;
1277 }
1278
1279 err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1280 sizeof(resp));
1281 if (err || resp.hdr.status)
1282 if_printf(ndev, "Failed to destroy WQ object: %d, 0x%x\n",
1283 err, resp.hdr.status);
1284 }
1285
1286 static void
1287 mana_destroy_eq(struct mana_context *ac)
1288 {
1289 struct gdma_context *gc = ac->gdma_dev->gdma_context;
1290 struct gdma_queue *eq;
1291 int i;
1292
1293 if (!ac->eqs)
1294 return;
1295
1296 for (i = 0; i < gc->max_num_queues; i++) {
1297 eq = ac->eqs[i].eq;
1298 if (!eq)
1299 continue;
1300
1301 mana_gd_destroy_queue(gc, eq);
1302 }
1303
1304 free(ac->eqs, M_DEVBUF);
1305 ac->eqs = NULL;
1306 }
1307
1308 static int
1309 mana_create_eq(struct mana_context *ac)
1310 {
1311 struct gdma_dev *gd = ac->gdma_dev;
1312 struct gdma_context *gc = gd->gdma_context;
1313 struct gdma_queue_spec spec = {};
1314 int err;
1315 int i;
1316
1317 ac->eqs = mallocarray(gc->max_num_queues, sizeof(struct mana_eq),
1318 M_DEVBUF, M_WAITOK | M_ZERO);
1319
1320 spec.type = GDMA_EQ;
1321 spec.monitor_avl_buf = false;
1322 spec.queue_size = EQ_SIZE;
1323 spec.eq.callback = NULL;
1324 spec.eq.context = ac->eqs;
1325 spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1326
1327 for (i = 0; i < gc->max_num_queues; i++) {
1328 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
1329 if (err)
1330 goto out;
1331 }
1332
1333 return 0;
1334 out:
1335 mana_destroy_eq(ac);
1336 return err;
1337 }
1338
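/*
 * Fence an RQ: issue a MANA_FENCE_RQ request and wait for the matching
 * CQE_RX_OBJECT_FENCE completion (delivered via mana_process_rx_cqe), so
 * no further receive completions are outstanding for this queue.
 */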
1339 static int
1340 mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1341 {
1342 struct mana_fence_rq_resp resp = {};
1343 struct mana_fence_rq_req req = {};
1344 int err;
1345
1346 init_completion(&rxq->fence_event);
1347
1348 mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
1349 sizeof(req), sizeof(resp));
1350 req.wq_obj_handle = rxq->rxobj;
1351
1352 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1353 sizeof(resp));
1354 if (err) {
1355 if_printf(apc->ndev, "Failed to fence RQ %u: %d\n",
1356 rxq->rxq_idx, err);
1357 return err;
1358 }
1359
1360 err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
1361 if (err || resp.hdr.status) {
1362 if_printf(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1363 rxq->rxq_idx, err, resp.hdr.status);
1364 if (!err)
1365 err = EPROTO;
1366
1367 return err;
1368 }
1369
1370 if (wait_for_completion_timeout(&rxq->fence_event, 10 * hz)) {
1371 if_printf(apc->ndev, "Failed to fence RQ %u: timed out\n",
1372 rxq->rxq_idx);
1373 return ETIMEDOUT;
1374 }
1375
1376 return 0;
1377 }
1378
1379 static void
1380 mana_fence_rqs(struct mana_port_context *apc)
1381 {
1382 unsigned int rxq_idx;
1383 struct mana_rxq *rxq;
1384 int err;
1385
1386 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1387 rxq = apc->rxqs[rxq_idx];
1388 err = mana_fence_rq(apc, rxq);
1389
1390 		/* On error, fall back to a short sleep instead of the fence event. */
1391 if (err)
1392 gdma_msleep(100);
1393 }
1394 }
1395
1396 static int
1397 mana_move_wq_tail(struct gdma_queue *wq, uint32_t num_units)
1398 {
1399 uint32_t used_space_old;
1400 uint32_t used_space_new;
1401
1402 used_space_old = wq->head - wq->tail;
1403 used_space_new = wq->head - (wq->tail + num_units);
1404
1405 if (used_space_new > used_space_old) {
1406 mana_err(NULL,
1407 "WARNING: new used space %u greater than old one %u\n",
1408 used_space_new, used_space_old);
1409 return ERANGE;
1410 }
1411
1412 wq->tail += num_units;
1413 return 0;
1414 }
1415
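/*
 * Process TX completions: unmap and free the mbufs of completed sends,
 * advance the SQ tail by the completed WQE units and, if the queue was
 * marked full, clear IFF_DRV_OACTIVE and kick the enqueue task once enough
 * space is available again.
 */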
1416 static void
1417 mana_poll_tx_cq(struct mana_cq *cq)
1418 {
1419 struct gdma_comp *completions = cq->gdma_comp_buf;
1420 struct gdma_posted_wqe_info *wqe_info;
1421 struct mana_send_buf_info *tx_info;
1422 unsigned int pkt_transmitted = 0;
1423 unsigned int wqe_unit_cnt = 0;
1424 struct mana_txq *txq = cq->txq;
1425 struct mana_port_context *apc;
1426 uint16_t next_to_complete;
1427 if_t ndev;
1428 int comp_read;
1429 int txq_idx = txq->idx;
1430 int i;
1431 int sa_drop = 0;
1432
1433 struct gdma_queue *gdma_wq;
1434 unsigned int avail_space;
1435 bool txq_full = false;
1436
1437 ndev = txq->ndev;
1438 apc = if_getsoftc(ndev);
1439
1440 comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1441 CQE_POLLING_BUFFER);
1442
1443 if (comp_read < 1)
1444 return;
1445
1446 next_to_complete = txq->next_to_complete;
1447
1448 for (i = 0; i < comp_read; i++) {
1449 struct mana_tx_comp_oob *cqe_oob;
1450
1451 if (!completions[i].is_sq) {
1452 mana_err(NULL, "WARNING: Not for SQ\n");
1453 return;
1454 }
1455
1456 cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1457 if (cqe_oob->cqe_hdr.client_type !=
1458 MANA_CQE_COMPLETION) {
1459 mana_err(NULL,
1460 "WARNING: Invalid CQE client type %u\n",
1461 cqe_oob->cqe_hdr.client_type);
1462 return;
1463 }
1464
1465 switch (cqe_oob->cqe_hdr.cqe_type) {
1466 case CQE_TX_OKAY:
1467 break;
1468
1469 case CQE_TX_SA_DROP:
1470 case CQE_TX_MTU_DROP:
1471 case CQE_TX_INVALID_OOB:
1472 case CQE_TX_INVALID_ETH_TYPE:
1473 case CQE_TX_HDR_PROCESSING_ERROR:
1474 case CQE_TX_VF_DISABLED:
1475 case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1476 case CQE_TX_VPORT_DISABLED:
1477 case CQE_TX_VLAN_TAGGING_VIOLATION:
1478 			sa_drop++;
1479 mana_dbg(NULL,
1480 "TX: txq %d CQE error %d, ntc = %d, "
1481 "pending sends = %d: err ignored.\n",
1482 txq_idx, cqe_oob->cqe_hdr.cqe_type,
1483 next_to_complete, txq->pending_sends);
1484 counter_u64_add(txq->stats.cqe_err, 1);
1485 break;
1486
1487 default:
1488 /* If the CQE type is unknown, log a debug msg,
1489 * and still free the mbuf, etc.
1490 */
1491 mana_dbg(NULL,
1492 "ERROR: TX: Unknown CQE type %d\n",
1493 cqe_oob->cqe_hdr.cqe_type);
1494 counter_u64_add(txq->stats.cqe_unknown_type, 1);
1495 break;
1496 }
1497 if (txq->gdma_txq_id != completions[i].wq_num) {
1498 mana_dbg(NULL,
1499 			    "txq gdma id does not match completion wq num: "
1500 "%d != %d\n",
1501 txq->gdma_txq_id, completions[i].wq_num);
1502 break;
1503 }
1504
1505 tx_info = &txq->tx_buf_info[next_to_complete];
1506 if (!tx_info->mbuf) {
1507 mana_err(NULL,
1508 "WARNING: txq %d Empty mbuf on tx_info: %u, "
1509 "ntu = %u, pending_sends = %d, "
1510 "transmitted = %d, sa_drop = %d, i = %d, comp_read = %d\n",
1511 txq_idx, next_to_complete, txq->next_to_use,
1512 txq->pending_sends, pkt_transmitted, sa_drop,
1513 i, comp_read);
1514 break;
1515 }
1516
1517 wqe_info = &tx_info->wqe_inf;
1518 wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1519
1520 mana_tx_unmap_mbuf(apc, tx_info);
1521 mb();
1522
1523 next_to_complete =
1524 (next_to_complete + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
1525
1526 pkt_transmitted++;
1527 }
1528
1529 txq->next_to_complete = next_to_complete;
1530
1531 if (wqe_unit_cnt == 0) {
1532 mana_err(NULL,
1533 "WARNING: TX ring not proceeding!\n");
1534 return;
1535 }
1536
1537 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1538
1539 /* Ensure tail updated before checking q stop */
1540 wmb();
1541
1542 gdma_wq = txq->gdma_sq;
1543 avail_space = mana_gd_wq_avail_space(gdma_wq);
1544
1545
1546 if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL) {
1547 txq_full = true;
1548 }
1549
1550 /* Ensure checking txq_full before apc->port_is_up. */
1551 rmb();
1552
1553 if (txq_full && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1554 /* Grab the txq lock and re-test */
1555 mtx_lock(&txq->txq_mtx);
1556 avail_space = mana_gd_wq_avail_space(gdma_wq);
1557
1558 if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL &&
1559 apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1560 /* Clear the Q full flag */
1561 if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING,
1562 IFF_DRV_OACTIVE);
1563 counter_u64_add(txq->stats.wakeup, 1);
1564 if (txq->alt_txq_idx != txq->idx) {
1565 uint64_t stops = counter_u64_fetch(txq->stats.stop);
1566 uint64_t wakeups = counter_u64_fetch(txq->stats.wakeup);
1567 /* Reset alt_txq_idx back if it is not overloaded */
1568 if (stops < wakeups) {
1569 txq->alt_txq_idx = txq->idx;
1570 counter_u64_add(txq->stats.alt_reset, 1);
1571 }
1572 }
1573 rmb();
1574 /* Schedule a tx enqueue task */
1575 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
1576 }
1577 mtx_unlock(&txq->txq_mtx);
1578 }
1579
1580 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1581 mana_err(NULL,
1582 "WARNING: TX %d pending_sends error: %d\n",
1583 txq->idx, txq->pending_sends);
1584
1585 cq->work_done = pkt_transmitted;
1586 }
1587
1588 static void
1589 mana_post_pkt_rxq(struct mana_rxq *rxq)
1590 {
1591 struct mana_recv_buf_oob *recv_buf_oob;
1592 uint32_t curr_index;
1593 int err;
1594
1595 curr_index = rxq->buf_index++;
1596 if (rxq->buf_index == rxq->num_rx_buf)
1597 rxq->buf_index = 0;
1598
1599 recv_buf_oob = &rxq->rx_oobs[curr_index];
1600
1601 err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1602 &recv_buf_oob->wqe_inf);
1603 if (err) {
1604 mana_err(NULL, "WARNING: rxq %u post pkt err %d\n",
1605 rxq->rxq_idx, err);
1606 return;
1607 }
1608
1609 if (recv_buf_oob->wqe_inf.wqe_size_in_bu != 1) {
1610 mana_err(NULL, "WARNING: rxq %u wqe_size_in_bu %u\n",
1611 rxq->rxq_idx, recv_buf_oob->wqe_inf.wqe_size_in_bu);
1612 }
1613 }
1614
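/*
 * Finish up a received mbuf: set the packet header fields, translate the
 * hardware checksum and RSS hash results into mbuf flags, then hand the
 * packet to LRO when possible, otherwise directly to if_input().
 */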
1615 static void
1616 mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe,
1617 struct mana_rxq *rxq)
1618 {
1619 struct mana_stats *rx_stats = &rxq->stats;
1620 if_t ndev = rxq->ndev;
1621 uint32_t pkt_len = cqe->ppi[0].pkt_len;
1622 uint16_t rxq_idx = rxq->rxq_idx;
1623 struct mana_port_context *apc;
1624 bool do_lro = false;
1625 bool do_if_input;
1626
1627 apc = if_getsoftc(ndev);
1628 rxq->rx_cq.work_done++;
1629
1630 if (!mbuf) {
1631 return;
1632 }
1633
1634 mbuf->m_flags |= M_PKTHDR;
1635 mbuf->m_pkthdr.len = pkt_len;
1636 mbuf->m_len = pkt_len;
1637 mbuf->m_pkthdr.rcvif = ndev;
1638
1639 if ((if_getcapenable(ndev) & IFCAP_RXCSUM ||
1640 if_getcapenable(ndev) & IFCAP_RXCSUM_IPV6) &&
1641 (cqe->rx_iphdr_csum_succeed)) {
1642 mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1643 mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1644 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) {
1645 mbuf->m_pkthdr.csum_flags |=
1646 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1647 mbuf->m_pkthdr.csum_data = 0xffff;
1648
1649 if (cqe->rx_tcp_csum_succeed)
1650 do_lro = true;
1651 }
1652 }
1653
1654 if (cqe->rx_hashtype != 0) {
1655 mbuf->m_pkthdr.flowid = cqe->ppi[0].pkt_hash;
1656
1657 uint16_t hashtype = cqe->rx_hashtype;
1658 if (hashtype & NDIS_HASH_IPV4_MASK) {
1659 hashtype &= NDIS_HASH_IPV4_MASK;
1660 switch (hashtype) {
1661 case NDIS_HASH_TCP_IPV4:
1662 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
1663 break;
1664 case NDIS_HASH_UDP_IPV4:
1665 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
1666 break;
1667 default:
1668 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
1669 }
1670 } else if (hashtype & NDIS_HASH_IPV6_MASK) {
1671 hashtype &= NDIS_HASH_IPV6_MASK;
1672 switch (hashtype) {
1673 case NDIS_HASH_TCP_IPV6:
1674 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
1675 break;
1676 case NDIS_HASH_TCP_IPV6_EX:
1677 M_HASHTYPE_SET(mbuf,
1678 M_HASHTYPE_RSS_TCP_IPV6_EX);
1679 break;
1680 case NDIS_HASH_UDP_IPV6:
1681 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
1682 break;
1683 case NDIS_HASH_UDP_IPV6_EX:
1684 M_HASHTYPE_SET(mbuf,
1685 M_HASHTYPE_RSS_UDP_IPV6_EX);
1686 break;
1687 default:
1688 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
1689 }
1690 } else {
1691 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
1692 }
1693 } else {
1694 mbuf->m_pkthdr.flowid = rxq_idx;
1695 M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
1696 }
1697
1698 do_if_input = true;
1699 if ((if_getcapenable(ndev) & IFCAP_LRO) && do_lro) {
1700 rxq->lro_tried++;
1701 if (rxq->lro.lro_cnt != 0 &&
1702 tcp_lro_rx(&rxq->lro, mbuf, 0) == 0)
1703 do_if_input = false;
1704 else
1705 rxq->lro_failed++;
1706 }
1707 if (do_if_input) {
1708 if_input(ndev, mbuf);
1709 }
1710
1711 counter_enter();
1712 counter_u64_add_protected(rx_stats->packets, 1);
1713 counter_u64_add_protected(apc->port_stats.rx_packets, 1);
1714 counter_u64_add_protected(rx_stats->bytes, pkt_len);
1715 counter_u64_add_protected(apc->port_stats.rx_bytes, pkt_len);
1716 counter_exit();
1717 }
1718
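/*
 * Handle a single RX CQE. For CQE_RX_OKAY, swap a freshly mapped mbuf into
 * the ring slot and pass the old one up the stack; if the replacement
 * allocation fails, the packet is dropped and the old mbuf is reused.
 * The consumed buffer is then reposted to the RQ. Truncated packets and
 * RX object fence completions are handled inline.
 */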
1719 static void
1720 mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1721 struct gdma_comp *cqe)
1722 {
1723 struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1724 struct mana_recv_buf_oob *rxbuf_oob;
1725 if_t ndev = rxq->ndev;
1726 struct mana_port_context *apc;
1727 struct mbuf *old_mbuf;
1728 uint32_t curr, pktlen;
1729 int err;
1730
1731 switch (oob->cqe_hdr.cqe_type) {
1732 case CQE_RX_OKAY:
1733 break;
1734
1735 case CQE_RX_TRUNCATED:
1736 apc = if_getsoftc(ndev);
1737 counter_u64_add(apc->port_stats.rx_drops, 1);
1738 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1739 if_printf(ndev, "Dropped a truncated packet\n");
1740 goto drop;
1741
1742 case CQE_RX_COALESCED_4:
1743 if_printf(ndev, "RX coalescing is unsupported\n");
1744 return;
1745
1746 case CQE_RX_OBJECT_FENCE:
1747 complete(&rxq->fence_event);
1748 return;
1749
1750 default:
1751 if_printf(ndev, "Unknown RX CQE type = %d\n",
1752 oob->cqe_hdr.cqe_type);
1753 return;
1754 }
1755
1756 if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
1757 return;
1758
1759 pktlen = oob->ppi[0].pkt_len;
1760
1761 if (pktlen == 0) {
1762 		/* Data packets should never have a packet length of zero */
1763 if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%jx\n",
1764 rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1765 return;
1766 }
1767
1768 curr = rxq->buf_index;
1769 rxbuf_oob = &rxq->rx_oobs[curr];
1770 if (rxbuf_oob->wqe_inf.wqe_size_in_bu != 1) {
1771 mana_err(NULL, "WARNING: Rx Incorrect complete "
1772 "WQE size %u\n",
1773 rxbuf_oob->wqe_inf.wqe_size_in_bu);
1774 }
1775
1776 apc = if_getsoftc(ndev);
1777
1778 old_mbuf = rxbuf_oob->mbuf;
1779
1780 /* Unload DMA map for the old mbuf */
1781 mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);
1782
1783 /* Load a new mbuf to replace the old one */
1784 err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
1785 if (err) {
1786 mana_dbg(NULL,
1787 "failed to load rx mbuf, err = %d, packet dropped.\n",
1788 err);
1789 counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
1790 /*
1791 * Failed to load new mbuf, rxbuf_oob->mbuf is still
1792 * pointing to the old one. Drop the packet.
1793 */
1794 old_mbuf = NULL;
1795 /* Reload the existing mbuf */
1796 mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
1797 }
1798
1799 mana_rx_mbuf(old_mbuf, oob, rxq);
1800
1801 drop:
1802 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1803
1804 mana_post_pkt_rxq(rxq);
1805 }
1806
1807 static void
1808 mana_poll_rx_cq(struct mana_cq *cq)
1809 {
1810 struct gdma_comp *comp = cq->gdma_comp_buf;
1811 int comp_read, i;
1812
1813 comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1814 KASSERT(comp_read <= CQE_POLLING_BUFFER,
1815 	    ("comp_read %d greater than buf size %d",
1816 comp_read, CQE_POLLING_BUFFER));
1817
1818 for (i = 0; i < comp_read; i++) {
1819 if (comp[i].is_sq == true) {
1820 mana_err(NULL,
1821 "WARNING: CQE not for receive queue\n");
1822 return;
1823 }
1824
1825 /* verify recv cqe references the right rxq */
1826 if (comp[i].wq_num != cq->rxq->gdma_id) {
1827 mana_err(NULL,
1828 "WARNING: Received CQE %d not for "
1829 "this receive queue %d\n",
1830 comp[i].wq_num, cq->rxq->gdma_id);
1831 return;
1832 }
1833
1834 mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
1835 }
1836
1837 if (comp_read > 0) {
1838 struct gdma_context *gc =
1839 cq->rxq->gdma_rq->gdma_dev->gdma_context;
1840
1841 mana_gd_wq_ring_doorbell(gc, cq->rxq->gdma_rq);
1842 }
1843
1844 tcp_lro_flush_all(&cq->rxq->lro);
1845 }
1846
1847 static void
1848 mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1849 {
1850 struct mana_cq *cq = context;
1851 uint8_t arm_bit;
1852
1853 KASSERT(cq->gdma_cq == gdma_queue,
1854 	    ("cq does not match %p, %p", cq->gdma_cq, gdma_queue));
1855
1856 if (cq->type == MANA_CQ_TYPE_RX) {
1857 mana_poll_rx_cq(cq);
1858 } else {
1859 mana_poll_tx_cq(cq);
1860 }
1861
1862 if (cq->work_done < cq->budget && cq->do_not_ring_db == false)
1863 arm_bit = SET_ARM_BIT;
1864 else
1865 arm_bit = 0;
1866
1867 mana_gd_ring_cq(gdma_queue, arm_bit);
1868 }
1869
1870 #define MANA_POLL_BUDGET 8
1871 #define MANA_RX_BUDGET 256
1872 #define MANA_TX_BUDGET MAX_SEND_BUFFERS_PER_QUEUE
1873
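/*
 * CQ cleanup task: poll the completion queue for up to MANA_POLL_BUDGET
 * passes. mana_cq_handler() re-arms the CQ when a pass finishes under
 * budget, so the final pass uses an oversized budget to make sure the CQ
 * is re-armed before returning.
 */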
1874 static void
1875 mana_poll(void *arg, int pending)
1876 {
1877 struct mana_cq *cq = arg;
1878 int i;
1879
1880 cq->work_done = 0;
1881 if (cq->type == MANA_CQ_TYPE_RX) {
1882 cq->budget = MANA_RX_BUDGET;
1883 } else {
1884 cq->budget = MANA_TX_BUDGET;
1885 }
1886
1887 for (i = 0; i < MANA_POLL_BUDGET; i++) {
1888 /*
1889 * If this is the last loop, set the budget big enough
1890 * so it will arm the CQ any way.
1891 */
1892 if (i == (MANA_POLL_BUDGET - 1))
1893 cq->budget = CQE_POLLING_BUFFER + 1;
1894
1895 mana_cq_handler(cq, cq->gdma_cq);
1896
1897 if (cq->work_done < cq->budget)
1898 break;
1899
1900 cq->work_done = 0;
1901 }
1902 }
1903
1904 static void
1905 mana_schedule_task(void *arg, struct gdma_queue *gdma_queue)
1906 {
1907 struct mana_cq *cq = arg;
1908
1909 taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
1910 }
1911
1912 static void
1913 mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1914 {
1915 struct gdma_dev *gd = apc->ac->gdma_dev;
1916
1917 if (!cq->gdma_cq)
1918 return;
1919
1920 /* Drain cleanup taskqueue */
1921 if (cq->cleanup_tq) {
1922 while (taskqueue_cancel(cq->cleanup_tq,
1923 &cq->cleanup_task, NULL)) {
1924 taskqueue_drain(cq->cleanup_tq,
1925 &cq->cleanup_task);
1926 }
1927
1928 taskqueue_free(cq->cleanup_tq);
1929 }
1930
1931 mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1932 }
1933
1934 static void
1935 mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1936 {
1937 struct gdma_dev *gd = apc->ac->gdma_dev;
1938 struct mana_send_buf_info *txbuf_info;
1939 uint32_t pending_sends;
1940 int i;
1941
1942 if (!txq->gdma_sq)
1943 return;
1944
1945 if ((pending_sends = atomic_read(&txq->pending_sends)) > 0) {
1946 mana_err(NULL,
1947 "WARNING: txq pending sends not zero: %u\n",
1948 pending_sends);
1949 }
1950
1951 if (txq->next_to_use != txq->next_to_complete) {
1952 mana_err(NULL,
1953 "WARNING: txq buf not completed, "
1954 "next use %u, next complete %u\n",
1955 txq->next_to_use, txq->next_to_complete);
1956 }
1957
1958 /* Flush buf ring. Grab txq mtx lock */
1959 if (txq->txq_br) {
1960 mtx_lock(&txq->txq_mtx);
1961 drbr_flush(apc->ndev, txq->txq_br);
1962 mtx_unlock(&txq->txq_mtx);
1963 buf_ring_free(txq->txq_br, M_DEVBUF);
1964 }
1965
1966 /* Drain taskqueue */
1967 if (txq->enqueue_tq) {
1968 while (taskqueue_cancel(txq->enqueue_tq,
1969 &txq->enqueue_task, NULL)) {
1970 taskqueue_drain(txq->enqueue_tq,
1971 &txq->enqueue_task);
1972 }
1973
1974 taskqueue_free(txq->enqueue_tq);
1975 }
1976
1977 if (txq->tx_buf_info) {
1978 /* Free all mbufs which are still in-flight */
1979 for (i = 0; i < MAX_SEND_BUFFERS_PER_QUEUE; i++) {
1980 txbuf_info = &txq->tx_buf_info[i];
1981 if (txbuf_info->mbuf) {
1982 mana_tx_unmap_mbuf(apc, txbuf_info);
1983 }
1984 }
1985
1986 free(txq->tx_buf_info, M_DEVBUF);
1987 }
1988
1989 mana_free_counters((counter_u64_t *)&txq->stats,
1990 sizeof(txq->stats));
1991
1992 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1993
1994 mtx_destroy(&txq->txq_mtx);
1995 }
1996
1997 static void
1998 mana_destroy_txq(struct mana_port_context *apc)
1999 {
2000 int i;
2001
2002 if (!apc->tx_qp)
2003 return;
2004
2005 for (i = 0; i < apc->num_queues; i++) {
2006 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
2007
2008 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
2009
2010 mana_deinit_txq(apc, &apc->tx_qp[i].txq);
2011 }
2012
2013 free(apc->tx_qp, M_DEVBUF);
2014 apc->tx_qp = NULL;
2015 }
2016
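/*
 * Create one SQ/CQ pair per configured queue.  For each queue this
 * allocates the GDMA send queue and its completion queue, registers
 * them with the hardware through mana_create_wq_obj(), and then sets
 * up the software side: the buf_ring and enqueue taskqueue used for
 * deferred transmit, per-queue statistics, and the CQ cleanup
 * taskqueue (optionally pinned to a CPU when
 * apc->bind_cleanup_thread_cpu is set).  On any failure the partially
 * constructed queues are unwound via mana_destroy_txq().
 */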
2017 static int
2018 mana_create_txq(struct mana_port_context *apc, if_t net)
2019 {
2020 struct mana_context *ac = apc->ac;
2021 struct gdma_dev *gd = ac->gdma_dev;
2022 struct mana_obj_spec wq_spec;
2023 struct mana_obj_spec cq_spec;
2024 struct gdma_queue_spec spec;
2025 struct gdma_context *gc;
2026 struct mana_txq *txq;
2027 struct mana_cq *cq;
2028 uint32_t txq_size;
2029 uint32_t cq_size;
2030 int err;
2031 int i;
2032
2033 apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
2034 M_DEVBUF, M_WAITOK | M_ZERO);
2035
2036 /* The minimum size of the WQE is 32 bytes, hence
2037 * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
2038 * the SQ can store. This value is then used to size other queues
2039 * to prevent overflow.
2040 */
2041 txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
2042 KASSERT(IS_ALIGNED(txq_size, PAGE_SIZE),
2043 ("txq size not page aligned"));
2044
2045 cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
2046 cq_size = ALIGN(cq_size, PAGE_SIZE);
2047
2048 gc = gd->gdma_context;
2049
2050 for (i = 0; i < apc->num_queues; i++) {
2051 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
2052
2053 /* Create SQ */
2054 txq = &apc->tx_qp[i].txq;
2055
2056 txq->ndev = net;
2057 txq->vp_offset = apc->tx_vp_offset;
2058 txq->idx = i;
2059 txq->alt_txq_idx = i;
2060
2061 memset(&spec, 0, sizeof(spec));
2062 spec.type = GDMA_SQ;
2063 spec.monitor_avl_buf = true;
2064 spec.queue_size = txq_size;
2065 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
2066 if (err)
2067 goto out;
2068
2069 /* Create SQ's CQ */
2070 cq = &apc->tx_qp[i].tx_cq;
2071 cq->type = MANA_CQ_TYPE_TX;
2072
2073 cq->txq = txq;
2074
2075 memset(&spec, 0, sizeof(spec));
2076 spec.type = GDMA_CQ;
2077 spec.monitor_avl_buf = false;
2078 spec.queue_size = cq_size;
2079 spec.cq.callback = mana_schedule_task;
2080 spec.cq.parent_eq = ac->eqs[i].eq;
2081 spec.cq.context = cq;
2082 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2083 if (err)
2084 goto out;
2085
2086 memset(&wq_spec, 0, sizeof(wq_spec));
2087 memset(&cq_spec, 0, sizeof(cq_spec));
2088
2089 wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
2090 wq_spec.queue_size = txq->gdma_sq->queue_size;
2091
2092 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2093 cq_spec.queue_size = cq->gdma_cq->queue_size;
2094 cq_spec.modr_ctx_id = 0;
2095 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2096
2097 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
2098 &wq_spec, &cq_spec, &apc->tx_qp[i].tx_object);
2099
2100 if (err)
2101 goto out;
2102
2103 txq->gdma_sq->id = wq_spec.queue_index;
2104 cq->gdma_cq->id = cq_spec.queue_index;
2105
2106 txq->gdma_sq->mem_info.dma_region_handle =
2107 GDMA_INVALID_DMA_REGION;
2108 cq->gdma_cq->mem_info.dma_region_handle =
2109 GDMA_INVALID_DMA_REGION;
2110
2111 txq->gdma_txq_id = txq->gdma_sq->id;
2112
2113 cq->gdma_id = cq->gdma_cq->id;
2114
2115 mana_dbg(NULL,
2116 "txq %d, txq gdma id %d, txq cq gdma id %d\n",
2117 i, txq->gdma_txq_id, cq->gdma_id);
2118
2119 if (cq->gdma_id >= gc->max_num_cqs) {
2120 if_printf(net, "CQ id %u too large.\n", cq->gdma_id);
2121 err = EINVAL;
2122 goto out;
2123 }
2124
2125 gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2126
2127 /* Initialize tx specific data */
2128 txq->tx_buf_info = malloc(MAX_SEND_BUFFERS_PER_QUEUE *
2129 sizeof(struct mana_send_buf_info),
2130 M_DEVBUF, M_WAITOK | M_ZERO);
2131
2132 snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
2133 "mana:tx(%d)", i);
2134 mtx_init(&txq->txq_mtx, txq->txq_mtx_name, NULL, MTX_DEF);
2135
2136 txq->txq_br = buf_ring_alloc(4 * MAX_SEND_BUFFERS_PER_QUEUE,
2137 M_DEVBUF, M_WAITOK, &txq->txq_mtx);
2138
2139 /* Allocate taskqueue for deferred send */
2140 TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
2141 txq->enqueue_tq = taskqueue_create_fast("mana_tx_enque",
2142 M_NOWAIT, taskqueue_thread_enqueue, &txq->enqueue_tq);
2143 if (unlikely(txq->enqueue_tq == NULL)) {
2144 if_printf(net,
2145 "Unable to create tx %d enqueue task queue\n", i);
2146 err = ENOMEM;
2147 goto out;
2148 }
2149 taskqueue_start_threads(&txq->enqueue_tq, 1, PI_NET,
2150 "mana txq p%u-tx%d", apc->port_idx, i);
2151
2152 mana_alloc_counters((counter_u64_t *)&txq->stats,
2153 sizeof(txq->stats));
2154
2155 /* Allocate and start the cleanup task on CQ */
2156 cq->do_not_ring_db = false;
2157
2158 NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
2159 cq->cleanup_tq =
2160 taskqueue_create_fast("mana tx cq cleanup",
2161 M_WAITOK, taskqueue_thread_enqueue,
2162 &cq->cleanup_tq);
2163
2164 if (apc->last_tx_cq_bind_cpu < 0)
2165 apc->last_tx_cq_bind_cpu = CPU_FIRST();
2166 cq->cpu = apc->last_tx_cq_bind_cpu;
2167 apc->last_tx_cq_bind_cpu = CPU_NEXT(apc->last_tx_cq_bind_cpu);
2168
2169 if (apc->bind_cleanup_thread_cpu) {
2170 cpuset_t cpu_mask;
2171 CPU_SETOF(cq->cpu, &cpu_mask);
2172 taskqueue_start_threads_cpuset(&cq->cleanup_tq,
2173 1, PI_NET, &cpu_mask,
2174 "mana cq p%u-tx%u-cpu%d",
2175 apc->port_idx, txq->idx, cq->cpu);
2176 } else {
2177 taskqueue_start_threads(&cq->cleanup_tq, 1,
2178 PI_NET, "mana cq p%u-tx%u",
2179 apc->port_idx, txq->idx);
2180 }
2181
2182 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2183 }
2184
2185 return 0;
2186 out:
2187 mana_destroy_txq(apc);
2188 return err;
2189 }
2190
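/*
 * Destroy an RX queue: release the hardware WQ object and its CQ, free
 * the stats counters and LRO state, unload and unmap any receive
 * buffers still attached, then destroy the GDMA RQ and free the rxq
 * structure itself.
 */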
2191 static void
2192 mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq,
2193 bool validate_state)
2194 {
2195 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2196 struct mana_recv_buf_oob *rx_oob;
2197 int i;
2198
2199 if (!rxq)
2200 return;
2201
2202 if (validate_state) {
2203 /*
2204 * XXX Cancel and drain cleanup task queue here.
2205 */
2206 ;
2207 }
2208
2209 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2210
2211 mana_deinit_cq(apc, &rxq->rx_cq);
2212
2213 mana_free_counters((counter_u64_t *)&rxq->stats,
2214 sizeof(rxq->stats));
2215
2216 /* Free LRO resources */
2217 tcp_lro_free(&rxq->lro);
2218
2219 for (i = 0; i < rxq->num_rx_buf; i++) {
2220 rx_oob = &rxq->rx_oobs[i];
2221
2222 if (rx_oob->mbuf)
2223 mana_unload_rx_mbuf(apc, rxq, rx_oob, true);
2224
2225 bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2226 }
2227
2228 if (rxq->gdma_rq)
2229 mana_gd_destroy_queue(gc, rxq->gdma_rq);
2230
2231 free(rxq, M_DEVBUF);
2232 }
2233
2234 #define MANA_WQE_HEADER_SIZE 16
2235 #define MANA_WQE_SGE_SIZE 16
2236
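/*
 * Prepare one receive WQE per RX buffer: create a DMA map, load an
 * mbuf into it via mana_load_rx_mbuf(), and fill in the SGL-based WQE
 * request.  The accumulated *rxq_size and *cq_size tell the caller how
 * large the RQ and its CQ must be to hold all posted WQEs and their
 * completions.
 */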
2237 static int
2238 mana_alloc_rx_wqe(struct mana_port_context *apc,
2239 struct mana_rxq *rxq, uint32_t *rxq_size, uint32_t *cq_size)
2240 {
2241 struct mana_recv_buf_oob *rx_oob;
2242 uint32_t buf_idx;
2243 int err;
2244
2245 if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) {
2246 mana_err(NULL,
2247 "WARNING: Invalid rxq datasize %u\n", rxq->datasize);
2248 }
2249
2250 *rxq_size = 0;
2251 *cq_size = 0;
2252
2253 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2254 rx_oob = &rxq->rx_oobs[buf_idx];
2255 memset(rx_oob, 0, sizeof(*rx_oob));
2256
2257 err = bus_dmamap_create(apc->rx_buf_tag, 0,
2258 &rx_oob->dma_map);
2259 if (err) {
2260 mana_err(NULL,
2261 "Failed to create rx DMA map for buf %d\n",
2262 buf_idx);
2263 return err;
2264 }
2265
2266 err = mana_load_rx_mbuf(apc, rxq, rx_oob, true);
2267 if (err) {
2268 mana_err(NULL,
2269 "Failed to create rx DMA map for buf %d\n",
2270 buf_idx);
2271 bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2272 return err;
2273 }
2274
2275 rx_oob->wqe_req.sgl = rx_oob->sgl;
2276 rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2277 rx_oob->wqe_req.inline_oob_size = 0;
2278 rx_oob->wqe_req.inline_oob_data = NULL;
2279 rx_oob->wqe_req.flags = 0;
2280 rx_oob->wqe_req.client_data_unit = 0;
2281
2282 *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2283 MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2284 *cq_size += COMP_ENTRY_SIZE;
2285 }
2286
2287 return 0;
2288 }
2289
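/*
 * Post every prepared receive WQE to the GDMA RQ; any posting failure
 * is reported as ENOSPC.
 */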
2290 static int
2291 mana_push_wqe(struct mana_rxq *rxq)
2292 {
2293 struct mana_recv_buf_oob *rx_oob;
2294 uint32_t buf_idx;
2295 int err;
2296
2297 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2298 rx_oob = &rxq->rx_oobs[buf_idx];
2299
2300 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2301 &rx_oob->wqe_inf);
2302 if (err)
2303 return ENOSPC;
2304 }
2305
2306 return 0;
2307 }
2308
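/*
 * Allocate and initialize one RX queue: size and load its receive
 * buffers, optionally set up LRO, create the GDMA RQ and CQ, register
 * them with the hardware through mana_create_wq_obj(), post the
 * initial receive WQEs and start the CQ cleanup taskqueue (optionally
 * pinned to a CPU).  Returns the new rxq, or NULL after cleaning up on
 * failure.
 */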
2309 static struct mana_rxq *
2310 mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
2311 struct mana_eq *eq, if_t ndev)
2312 {
2313 struct gdma_dev *gd = apc->ac->gdma_dev;
2314 struct mana_obj_spec wq_spec;
2315 struct mana_obj_spec cq_spec;
2316 struct gdma_queue_spec spec;
2317 struct mana_cq *cq = NULL;
2318 uint32_t cq_size, rq_size;
2319 struct gdma_context *gc;
2320 struct mana_rxq *rxq;
2321 int err;
2322
2323 gc = gd->gdma_context;
2324
2325 rxq = malloc(sizeof(*rxq) +
2326 RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
2327 M_DEVBUF, M_WAITOK | M_ZERO);
2328 rxq->ndev = ndev;
2329 rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
2330 rxq->rxq_idx = rxq_idx;
2331 /*
2332 	 * The minimum buffer size is MCLBYTES (2048 bytes), one mbuf cluster.
2333 	 * For now we only allow a maximum size of 4096 bytes.
2334 */
2335 rxq->datasize = ALIGN(apc->frame_size, MCLBYTES);
2336 if (rxq->datasize > MAX_FRAME_SIZE)
2337 rxq->datasize = MAX_FRAME_SIZE;
2338
2339 mana_dbg(NULL, "Setting rxq %d datasize %d\n",
2340 rxq_idx, rxq->datasize);
2341
2342 rxq->rxobj = INVALID_MANA_HANDLE;
2343
2344 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2345 if (err)
2346 goto out;
2347
2348 /* Create LRO for the RQ */
2349 if (if_getcapenable(ndev) & IFCAP_LRO) {
2350 err = tcp_lro_init(&rxq->lro);
2351 if (err) {
2352 if_printf(ndev, "Failed to create LRO for rxq %d\n",
2353 rxq_idx);
2354 } else {
2355 rxq->lro.ifp = ndev;
2356 }
2357 }
2358
2359 mana_alloc_counters((counter_u64_t *)&rxq->stats,
2360 sizeof(rxq->stats));
2361
2362 rq_size = ALIGN(rq_size, PAGE_SIZE);
2363 cq_size = ALIGN(cq_size, PAGE_SIZE);
2364
2365 /* Create RQ */
2366 memset(&spec, 0, sizeof(spec));
2367 spec.type = GDMA_RQ;
2368 spec.monitor_avl_buf = true;
2369 spec.queue_size = rq_size;
2370 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2371 if (err)
2372 goto out;
2373
2374 /* Create RQ's CQ */
2375 cq = &rxq->rx_cq;
2376 cq->type = MANA_CQ_TYPE_RX;
2377 cq->rxq = rxq;
2378
2379 memset(&spec, 0, sizeof(spec));
2380 spec.type = GDMA_CQ;
2381 spec.monitor_avl_buf = false;
2382 spec.queue_size = cq_size;
2383 spec.cq.callback = mana_schedule_task;
2384 spec.cq.parent_eq = eq->eq;
2385 spec.cq.context = cq;
2386 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2387 if (err)
2388 goto out;
2389
2390 memset(&wq_spec, 0, sizeof(wq_spec));
2391 memset(&cq_spec, 0, sizeof(cq_spec));
2392 wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2393 wq_spec.queue_size = rxq->gdma_rq->queue_size;
2394
2395 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2396 cq_spec.queue_size = cq->gdma_cq->queue_size;
2397 cq_spec.modr_ctx_id = 0;
2398 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2399
2400 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2401 &wq_spec, &cq_spec, &rxq->rxobj);
2402 if (err)
2403 goto out;
2404
2405 rxq->gdma_rq->id = wq_spec.queue_index;
2406 cq->gdma_cq->id = cq_spec.queue_index;
2407
2408 rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2409 cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2410
2411 rxq->gdma_id = rxq->gdma_rq->id;
2412 cq->gdma_id = cq->gdma_cq->id;
2413
2414 err = mana_push_wqe(rxq);
2415 if (err)
2416 goto out;
2417
2418 if (cq->gdma_id >= gc->max_num_cqs) {
2419 err = EINVAL;
2420 goto out;
2421 }
2422
2423 gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2424
2425 /* Allocate and start the cleanup task on CQ */
2426 cq->do_not_ring_db = false;
2427
2428 NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
2429 cq->cleanup_tq =
2430 taskqueue_create_fast("mana rx cq cleanup",
2431 M_WAITOK, taskqueue_thread_enqueue,
2432 &cq->cleanup_tq);
2433
2434 if (apc->last_rx_cq_bind_cpu < 0)
2435 apc->last_rx_cq_bind_cpu = CPU_FIRST();
2436 cq->cpu = apc->last_rx_cq_bind_cpu;
2437 apc->last_rx_cq_bind_cpu = CPU_NEXT(apc->last_rx_cq_bind_cpu);
2438
2439 if (apc->bind_cleanup_thread_cpu) {
2440 cpuset_t cpu_mask;
2441 CPU_SETOF(cq->cpu, &cpu_mask);
2442 taskqueue_start_threads_cpuset(&cq->cleanup_tq,
2443 1, PI_NET, &cpu_mask,
2444 "mana cq p%u-rx%u-cpu%d",
2445 apc->port_idx, rxq->rxq_idx, cq->cpu);
2446 } else {
2447 taskqueue_start_threads(&cq->cleanup_tq, 1,
2448 PI_NET, "mana cq p%u-rx%u",
2449 apc->port_idx, rxq->rxq_idx);
2450 }
2451
2452 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2453 out:
2454 if (!err)
2455 return rxq;
2456
2457 if_printf(ndev, "Failed to create RXQ: err = %d\n", err);
2458
2459 mana_destroy_rxq(apc, rxq, false);
2460
2461 if (cq)
2462 mana_deinit_cq(apc, cq);
2463
2464 return NULL;
2465 }
2466
2467 static int
2468 mana_add_rx_queues(struct mana_port_context *apc, if_t ndev)
2469 {
2470 struct mana_context *ac = apc->ac;
2471 struct mana_rxq *rxq;
2472 int err = 0;
2473 int i;
2474
2475 for (i = 0; i < apc->num_queues; i++) {
2476 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2477 if (!rxq) {
2478 err = ENOMEM;
2479 goto out;
2480 }
2481
2482 apc->rxqs[i] = rxq;
2483 }
2484
2485 apc->default_rxobj = apc->rxqs[0]->rxobj;
2486 out:
2487 return err;
2488 }
2489
2490 static void
2491 mana_destroy_vport(struct mana_port_context *apc)
2492 {
2493 struct mana_rxq *rxq;
2494 uint32_t rxq_idx;
2495
2496 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2497 rxq = apc->rxqs[rxq_idx];
2498 if (!rxq)
2499 continue;
2500
2501 mana_destroy_rxq(apc, rxq, true);
2502 apc->rxqs[rxq_idx] = NULL;
2503 }
2504
2505 mana_destroy_txq(apc);
2506
2507 mana_uncfg_vport(apc);
2508 }
2509
2510 static int
2511 mana_create_vport(struct mana_port_context *apc, if_t net)
2512 {
2513 struct gdma_dev *gd = apc->ac->gdma_dev;
2514 int err;
2515
2516 apc->default_rxobj = INVALID_MANA_HANDLE;
2517
2518 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2519 if (err)
2520 return err;
2521
2522 return mana_create_txq(apc, net);
2523 }
2524
2525
2526 static void mana_rss_table_init(struct mana_port_context *apc)
2527 {
2528 int i;
2529
2530 for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
2531 apc->indir_table[i] = i % apc->num_queues;
2532 }
2533
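/*
 * Apply the RSS configuration to the hardware: refresh the rxobj
 * indirection table from apc->indir_table when update_tab is set,
 * program the vPort steering (hash key and/or table) via
 * mana_cfg_vport_steering(), and finish with mana_fence_rqs().
 */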
2534 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2535 bool update_hash, bool update_tab)
2536 {
2537 uint32_t queue_idx;
2538 int err;
2539 int i;
2540
2541 if (update_tab) {
2542 for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
2543 queue_idx = apc->indir_table[i];
2544 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2545 }
2546 }
2547
2548 err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2549 if (err)
2550 return err;
2551
2552 mana_fence_rqs(apc);
2553
2554 return 0;
2555 }
2556
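/*
 * Initialize the port context, query the vPort configuration from the
 * hardware, and clamp the number of queues this port will use to what
 * both the device and the driver support.  On failure the RX buffer
 * DMA tag and rxq pointer array are released again.
 */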
2557 static int
2558 mana_init_port(if_t ndev)
2559 {
2560 struct mana_port_context *apc = if_getsoftc(ndev);
2561 uint32_t max_txq, max_rxq, max_queues;
2562 int port_idx = apc->port_idx;
2563 uint32_t num_indirect_entries;
2564 int err;
2565
2566 err = mana_init_port_context(apc);
2567 if (err)
2568 return err;
2569
2570 err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2571 &num_indirect_entries);
2572 if (err) {
2573 if_printf(ndev, "Failed to query info for vPort %d\n",
2574 port_idx);
2575 goto reset_apc;
2576 }
2577
2578 max_queues = min_t(uint32_t, max_txq, max_rxq);
2579 if (apc->max_queues > max_queues)
2580 apc->max_queues = max_queues;
2581
2582 if (apc->num_queues > apc->max_queues)
2583 apc->num_queues = apc->max_queues;
2584
2585 return 0;
2586
2587 reset_apc:
2588 bus_dma_tag_destroy(apc->rx_buf_tag);
2589 apc->rx_buf_tag = NULL;
2590 free(apc->rxqs, M_DEVBUF);
2591 apc->rxqs = NULL;
2592 return err;
2593 }
2594
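/*
 * Bring up the data path for a port: create the vPort (TX queues), add
 * the RX queues, initialize the RSS indirection table and push the RSS
 * configuration to the hardware.  Everything is unwound through
 * mana_destroy_vport() if any step fails.
 */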
2595 int
2596 mana_alloc_queues(if_t ndev)
2597 {
2598 struct mana_port_context *apc = if_getsoftc(ndev);
2599 int err;
2600
2601 err = mana_create_vport(apc, ndev);
2602 if (err)
2603 return err;
2604
2605 err = mana_add_rx_queues(apc, ndev);
2606 if (err)
2607 goto destroy_vport;
2608
2609 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2610
2611 mana_rss_table_init(apc);
2612
2613 err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2614 if (err)
2615 goto destroy_vport;
2616
2617 return 0;
2618
2619 destroy_vport:
2620 mana_destroy_vport(apc);
2621 return err;
2622 }
2623
2624 static int
2625 mana_up(struct mana_port_context *apc)
2626 {
2627 int err;
2628
2629 mana_dbg(NULL, "mana_up called\n");
2630
2631 err = mana_alloc_queues(apc->ndev);
2632 if (err) {
2633 		mana_err(NULL, "Failed to allocate mana queues: %d\n", err);
2634 return err;
2635 }
2636
2637 /* Add queue specific sysctl */
2638 mana_sysctl_add_queues(apc);
2639
2640 apc->port_is_up = true;
2641
2642 /* Ensure port state updated before txq state */
2643 wmb();
2644
2645 if_link_state_change(apc->ndev, LINK_STATE_UP);
2646 if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2647
2648 return 0;
2649 }
2650
2651
2652 static void
2653 mana_init(void *arg)
2654 {
2655 struct mana_port_context *apc = (struct mana_port_context *)arg;
2656
2657 MANA_APC_LOCK_LOCK(apc);
2658 if (!apc->port_is_up) {
2659 mana_up(apc);
2660 }
2661 MANA_APC_LOCK_UNLOCK(apc);
2662 }
2663
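/*
 * Tear down the data path of a port that has already been marked down.
 * Doorbell ringing is suppressed on every CQ, the TX cleanup tasks are
 * scheduled one last time and all pending sends are waited for before
 * RSS steering is disabled and the vPort is destroyed.
 */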
2664 static int
2665 mana_dealloc_queues(if_t ndev)
2666 {
2667 struct mana_port_context *apc = if_getsoftc(ndev);
2668 struct mana_txq *txq;
2669 int i, err;
2670
2671 if (apc->port_is_up)
2672 return EINVAL;
2673
2674 /* No packet can be transmitted now since apc->port_is_up is false.
2675 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
2676 	 * a txq because it may not see apc->port_is_up being cleared to false
2677 	 * in time, but that does not matter since mana_start_xmit() drops any
2678 * new packets due to apc->port_is_up being false.
2679 *
2680 * Drain all the in-flight TX packets
2681 */
2682 for (i = 0; i < apc->num_queues; i++) {
2683 txq = &apc->tx_qp[i].txq;
2684
2685 struct mana_cq *tx_cq = &apc->tx_qp[i].tx_cq;
2686 struct mana_cq *rx_cq = &(apc->rxqs[i]->rx_cq);
2687
2688 tx_cq->do_not_ring_db = true;
2689 rx_cq->do_not_ring_db = true;
2690
2691 /* Schedule a cleanup task */
2692 taskqueue_enqueue(tx_cq->cleanup_tq, &tx_cq->cleanup_task);
2693
2694 while (atomic_read(&txq->pending_sends) > 0)
2695 usleep_range(1000, 2000);
2696 }
2697
2698 	/* At this point the queues can no longer be woken up, because
2699 	 * mana_poll_tx_cq() can no longer be running.
2700 */
2701
2702 apc->rss_state = TRI_STATE_FALSE;
2703 err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2704 if (err) {
2705 if_printf(ndev, "Failed to disable vPort: %d\n", err);
2706 return err;
2707 }
2708
2709 mana_destroy_vport(apc);
2710
2711 return 0;
2712 }
2713
2714 static int
2715 mana_down(struct mana_port_context *apc)
2716 {
2717 int err = 0;
2718
2719 apc->port_st_save = apc->port_is_up;
2720 apc->port_is_up = false;
2721
2722 /* Ensure port state updated before txq state */
2723 wmb();
2724
2725 if (apc->port_st_save) {
2726 if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE,
2727 IFF_DRV_RUNNING);
2728 if_link_state_change(apc->ndev, LINK_STATE_DOWN);
2729
2730 mana_sysctl_free_queues(apc);
2731
2732 err = mana_dealloc_queues(apc->ndev);
2733 if (err) {
2734 if_printf(apc->ndev,
2735 "Failed to bring down mana interface: %d\n", err);
2736 }
2737 }
2738
2739 return err;
2740 }
2741
2742 int
2743 mana_detach(if_t ndev)
2744 {
2745 struct mana_port_context *apc = if_getsoftc(ndev);
2746 int err;
2747
2748 ether_ifdetach(ndev);
2749
2750 if (!apc)
2751 return 0;
2752
2753 MANA_APC_LOCK_LOCK(apc);
2754 err = mana_down(apc);
2755 MANA_APC_LOCK_UNLOCK(apc);
2756
2757 mana_cleanup_port_context(apc);
2758
2759 MANA_APC_LOCK_DESTROY(apc);
2760
2761 free(apc, M_DEVBUF);
2762
2763 return err;
2764 }
2765
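/*
 * Create the ifnet and per-port context for one vPort: initialize the
 * port via mana_init_port(), set up the standard ifnet callbacks,
 * capabilities, TSO limits and hardware checksum assistance, attach
 * ifmedia and the Ethernet interface, and register the per-port
 * statistics and sysctl nodes.  The interface is left marked not
 * running until mana_init()/mana_up() brings it up.
 */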
2766 static int
2767 mana_probe_port(struct mana_context *ac, int port_idx,
2768 if_t *ndev_storage)
2769 {
2770 struct gdma_context *gc = ac->gdma_dev->gdma_context;
2771 struct mana_port_context *apc;
2772 uint32_t hwassist;
2773 if_t ndev;
2774 int err;
2775
2776 ndev = if_alloc_dev(IFT_ETHER, gc->dev);
2777 *ndev_storage = ndev;
2778
2779 apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
2780 apc->ac = ac;
2781 apc->ndev = ndev;
2782 apc->max_queues = gc->max_num_queues;
2783 apc->num_queues = min_t(unsigned int,
2784 gc->max_num_queues, MANA_MAX_NUM_QUEUES);
2785 apc->port_handle = INVALID_MANA_HANDLE;
2786 apc->port_idx = port_idx;
2787 apc->frame_size = DEFAULT_FRAME_SIZE;
2788 apc->last_tx_cq_bind_cpu = -1;
2789 apc->last_rx_cq_bind_cpu = -1;
2790 apc->vport_use_count = 0;
2791
2792 MANA_APC_LOCK_INIT(apc);
2793
2794 if_initname(ndev, device_get_name(gc->dev), port_idx);
2795 if_setdev(ndev,gc->dev);
2796 if_setsoftc(ndev, apc);
2797
2798 if_setflags(ndev, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2799 if_setinitfn(ndev, mana_init);
2800 if_settransmitfn(ndev, mana_start_xmit);
2801 if_setqflushfn(ndev, mana_qflush);
2802 if_setioctlfn(ndev, mana_ioctl);
2803 if_setgetcounterfn(ndev, mana_get_counter);
2804
2805 if_setmtu(ndev, ETHERMTU);
2806 if_setbaudrate(ndev, IF_Gbps(100));
2807
2808 mana_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2809
2810 err = mana_init_port(ndev);
2811 if (err)
2812 goto reset_apc;
2813
2814 if_setcapabilitiesbit(ndev,
2815 IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
2816 IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
2817 IFCAP_TSO4 | IFCAP_TSO6 |
2818 IFCAP_LRO | IFCAP_LINKSTATE, 0);
2819
2820 /* Enable all available capabilities by default. */
2821 if_setcapenable(ndev, if_getcapabilities(ndev));
2822
2823 /* TSO parameters */
2824 if_sethwtsomax(ndev, MANA_TSO_MAX_SZ -
2825 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2826 if_sethwtsomaxsegcount(ndev, MAX_MBUF_FRAGS);
2827 if_sethwtsomaxsegsize(ndev, PAGE_SIZE);
2828
2829 hwassist = 0;
2830 if (if_getcapenable(ndev) & (IFCAP_TSO4 | IFCAP_TSO6))
2831 hwassist |= CSUM_TSO;
2832 if (if_getcapenable(ndev) & IFCAP_TXCSUM)
2833 hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2834 if (if_getcapenable(ndev) & IFCAP_TXCSUM_IPV6)
2835 hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2836 mana_dbg(NULL, "set hwassist 0x%x\n", hwassist);
2837 if_sethwassist(ndev, hwassist);
2838
2839 ifmedia_init(&apc->media, IFM_IMASK,
2840 mana_ifmedia_change, mana_ifmedia_status);
2841 ifmedia_add(&apc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2842 ifmedia_set(&apc->media, IFM_ETHER | IFM_AUTO);
2843
2844 ether_ifattach(ndev, apc->mac_addr);
2845
2846 /* Initialize statistics */
2847 mana_alloc_counters((counter_u64_t *)&apc->port_stats,
2848 sizeof(struct mana_port_stats));
2849 mana_sysctl_add_port(apc);
2850
2851 /* Tell the stack that the interface is not active */
2852 if_setdrvflagbits(ndev, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2853
2854 return 0;
2855
2856 reset_apc:
2857 free(apc, M_DEVBUF);
2858 *ndev_storage = NULL;
2859 if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2860 if_free(ndev);
2861 return err;
2862 }
2863
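/*
 * Main probe entry point for the MANA function: register the GDMA
 * device, allocate the shared mana_context, create the EQs, query the
 * device configuration for the number of ports, then probe each port.
 * mana_remove() is invoked to unwind everything on failure.
 */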
2864 int mana_probe(struct gdma_dev *gd)
2865 {
2866 struct gdma_context *gc = gd->gdma_context;
2867 device_t dev = gc->dev;
2868 struct mana_context *ac;
2869 int err;
2870 int i;
2871
2872 device_printf(dev, "%s protocol version: %d.%d.%d\n", DEVICE_NAME,
2873 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2874
2875 err = mana_gd_register_device(gd);
2876 if (err)
2877 return err;
2878
2879 ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
2880 ac->gdma_dev = gd;
2881 ac->num_ports = 1;
2882 gd->driver_data = ac;
2883
2884 err = mana_create_eq(ac);
2885 if (err)
2886 goto out;
2887
2888 err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
2889 MANA_MICRO_VERSION, &ac->num_ports);
2890 if (err)
2891 goto out;
2892
2893 if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2894 ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2895
2896 for (i = 0; i < ac->num_ports; i++) {
2897 err = mana_probe_port(ac, i, &ac->ports[i]);
2898 if (err) {
2899 device_printf(dev,
2900 "Failed to probe mana port %d\n", i);
2901 break;
2902 }
2903 }
2904
2905 out:
2906 if (err)
2907 mana_remove(gd);
2908
2909 return err;
2910 }
2911
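/*
 * Detach and free every probed port, destroy the EQs and deregister
 * the GDMA device.
 */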
2912 void
2913 mana_remove(struct gdma_dev *gd)
2914 {
2915 struct gdma_context *gc = gd->gdma_context;
2916 struct mana_context *ac = gd->driver_data;
2917 device_t dev = gc->dev;
2918 if_t ndev;
2919 int i;
2920
2921 for (i = 0; i < ac->num_ports; i++) {
2922 ndev = ac->ports[i];
2923 if (!ndev) {
2924 if (i == 0)
2925 device_printf(dev, "No net device to remove\n");
2926 goto out;
2927 }
2928
2929 mana_detach(ndev);
2930
2931 if_free(ndev);
2932 }
2933
2934 mana_destroy_eq(ac);
2935
2936 out:
2937 mana_gd_deregister_device(gd);
2938 gd->driver_data = NULL;
2939 gd->gdma_context = NULL;
2940 free(ac, M_DEVBUF);
2941 }
2942