/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/time.h>
#include <sys/eventhandler.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/in_cksum.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include "mana.h"
#include "mana_sysctl.h"

static int mana_up(struct mana_port_context *apc);
static int mana_down(struct mana_port_context *apc);

static void
mana_rss_key_fill(void *k, size_t size)
{
    static bool rss_key_generated = false;
    static uint8_t rss_key[MANA_HASH_KEY_SIZE];

    KASSERT(size <= MANA_HASH_KEY_SIZE,
        ("Requested more bytes than MANA RSS key can hold"));

    if (!rss_key_generated) {
        arc4random_buf(rss_key, MANA_HASH_KEY_SIZE);
        rss_key_generated = true;
    }
    memcpy(k, rss_key, size);
}

static int
mana_ifmedia_change(if_t ifp __unused)
{
    return EOPNOTSUPP;
}

static void
mana_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
{
    struct mana_port_context *apc = if_getsoftc(ifp);

    if (!apc) {
        if_printf(ifp, "Port not available\n");
        return;
    }

    MANA_APC_LOCK_LOCK(apc);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!apc->port_is_up) {
        MANA_APC_LOCK_UNLOCK(apc);
        mana_dbg(NULL, "Port %u link is down\n", apc->port_idx);
        return;
    }

    ifmr->ifm_status |= IFM_ACTIVE;
    ifmr->ifm_active |= IFM_100G_DR | IFM_FDX;

    MANA_APC_LOCK_UNLOCK(apc);
}

static uint64_t
120 mana_get_counter(if_t ifp, ift_counter cnt) 121 { 122 struct mana_port_context *apc = if_getsoftc(ifp); 123 struct mana_port_stats *stats = &apc->port_stats; 124 125 switch (cnt) { 126 case IFCOUNTER_IPACKETS: 127 return (counter_u64_fetch(stats->rx_packets)); 128 case IFCOUNTER_OPACKETS: 129 return (counter_u64_fetch(stats->tx_packets)); 130 case IFCOUNTER_IBYTES: 131 return (counter_u64_fetch(stats->rx_bytes)); 132 case IFCOUNTER_OBYTES: 133 return (counter_u64_fetch(stats->tx_bytes)); 134 case IFCOUNTER_IQDROPS: 135 return (counter_u64_fetch(stats->rx_drops)); 136 case IFCOUNTER_OQDROPS: 137 return (counter_u64_fetch(stats->tx_drops)); 138 default: 139 return (if_get_counter_default(ifp, cnt)); 140 } 141 } 142 143 static void 144 mana_qflush(if_t ifp) 145 { 146 if_qflush(ifp); 147 } 148 149 int 150 mana_restart(struct mana_port_context *apc) 151 { 152 int rc = 0; 153 154 MANA_APC_LOCK_LOCK(apc); 155 if (apc->port_is_up) 156 mana_down(apc); 157 158 rc = mana_up(apc); 159 MANA_APC_LOCK_UNLOCK(apc); 160 161 return (rc); 162 } 163 164 static int 165 mana_ioctl(if_t ifp, u_long command, caddr_t data) 166 { 167 struct mana_port_context *apc = if_getsoftc(ifp); 168 struct ifrsskey *ifrk; 169 struct ifrsshash *ifrh; 170 struct ifreq *ifr; 171 uint16_t new_mtu; 172 int rc = 0; 173 174 switch (command) { 175 case SIOCSIFMTU: 176 ifr = (struct ifreq *)data; 177 new_mtu = ifr->ifr_mtu; 178 if (if_getmtu(ifp) == new_mtu) 179 break; 180 if ((new_mtu + 18 > MAX_FRAME_SIZE) || 181 (new_mtu + 18 < MIN_FRAME_SIZE)) { 182 if_printf(ifp, "Invalid MTU. new_mtu: %d, " 183 "max allowed: %d, min allowed: %d\n", 184 new_mtu, MAX_FRAME_SIZE - 18, MIN_FRAME_SIZE - 18); 185 return EINVAL; 186 } 187 MANA_APC_LOCK_LOCK(apc); 188 if (apc->port_is_up) 189 mana_down(apc); 190 191 apc->frame_size = new_mtu + 18; 192 if_setmtu(ifp, new_mtu); 193 mana_dbg(NULL, "Set MTU to %d\n", new_mtu); 194 195 rc = mana_up(apc); 196 MANA_APC_LOCK_UNLOCK(apc); 197 break; 198 199 case SIOCSIFFLAGS: 200 if (if_getflags(ifp) & IFF_UP) { 201 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { 202 MANA_APC_LOCK_LOCK(apc); 203 if (!apc->port_is_up) 204 rc = mana_up(apc); 205 MANA_APC_LOCK_UNLOCK(apc); 206 } 207 } else { 208 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 209 MANA_APC_LOCK_LOCK(apc); 210 if (apc->port_is_up) 211 mana_down(apc); 212 MANA_APC_LOCK_UNLOCK(apc); 213 } 214 } 215 break; 216 217 case SIOCSIFMEDIA: 218 case SIOCGIFMEDIA: 219 case SIOCGIFXMEDIA: 220 ifr = (struct ifreq *)data; 221 rc = ifmedia_ioctl(ifp, ifr, &apc->media, command); 222 break; 223 224 case SIOCGIFRSSKEY: 225 ifrk = (struct ifrsskey *)data; 226 ifrk->ifrk_func = RSS_FUNC_TOEPLITZ; 227 ifrk->ifrk_keylen = MANA_HASH_KEY_SIZE; 228 memcpy(ifrk->ifrk_key, apc->hashkey, MANA_HASH_KEY_SIZE); 229 break; 230 231 case SIOCGIFRSSHASH: 232 ifrh = (struct ifrsshash *)data; 233 ifrh->ifrh_func = RSS_FUNC_TOEPLITZ; 234 ifrh->ifrh_types = 235 RSS_TYPE_TCP_IPV4 | 236 RSS_TYPE_UDP_IPV4 | 237 RSS_TYPE_TCP_IPV6 | 238 RSS_TYPE_UDP_IPV6; 239 break; 240 241 default: 242 rc = ether_ioctl(ifp, command, data); 243 break; 244 } 245 246 return (rc); 247 } 248 249 static inline void 250 mana_alloc_counters(counter_u64_t *begin, int size) 251 { 252 counter_u64_t *end = (counter_u64_t *)((char *)begin + size); 253 254 for (; begin < end; ++begin) 255 *begin = counter_u64_alloc(M_WAITOK); 256 } 257 258 static inline void 259 mana_free_counters(counter_u64_t *begin, int size) 260 { 261 counter_u64_t *end = (counter_u64_t *)((char *)begin + size); 262 263 for (; begin < end; ++begin) 
264 counter_u64_free(*begin); 265 } 266 267 static bool 268 mana_can_tx(struct gdma_queue *wq) 269 { 270 return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE; 271 } 272 273 static inline int 274 mana_tx_map_mbuf(struct mana_port_context *apc, 275 struct mana_send_buf_info *tx_info, 276 struct mbuf **m_head, struct mana_tx_package *tp, 277 struct mana_stats *tx_stats) 278 { 279 struct gdma_dev *gd = apc->ac->gdma_dev; 280 bus_dma_segment_t segs[MAX_MBUF_FRAGS]; 281 struct mbuf *m = *m_head; 282 int err, nsegs, i; 283 284 err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, tx_info->dma_map, 285 m, segs, &nsegs, BUS_DMA_NOWAIT); 286 if (err == EFBIG) { 287 struct mbuf *m_new; 288 289 counter_u64_add(tx_stats->collapse, 1); 290 m_new = m_collapse(m, M_NOWAIT, MAX_MBUF_FRAGS); 291 if (unlikely(m_new == NULL)) { 292 counter_u64_add(tx_stats->collapse_err, 1); 293 return ENOBUFS; 294 } else { 295 *m_head = m = m_new; 296 } 297 298 mana_warn(NULL, 299 "Too many segs in orig mbuf, m_collapse called\n"); 300 301 err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, 302 tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT); 303 } 304 if (!err) { 305 for (i = 0; i < nsegs; i++) { 306 tp->wqe_req.sgl[i].address = segs[i].ds_addr; 307 tp->wqe_req.sgl[i].mem_key = gd->gpa_mkey; 308 tp->wqe_req.sgl[i].size = segs[i].ds_len; 309 } 310 tp->wqe_req.num_sge = nsegs; 311 312 tx_info->mbuf = *m_head; 313 314 bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map, 315 BUS_DMASYNC_PREWRITE); 316 } 317 318 return err; 319 } 320 321 static inline void 322 mana_tx_unmap_mbuf(struct mana_port_context *apc, 323 struct mana_send_buf_info *tx_info) 324 { 325 bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map, 326 BUS_DMASYNC_POSTWRITE); 327 bus_dmamap_unload(apc->tx_buf_tag, tx_info->dma_map); 328 if (tx_info->mbuf) { 329 m_freem(tx_info->mbuf); 330 tx_info->mbuf = NULL; 331 } 332 } 333 334 static inline int 335 mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq, 336 struct mana_recv_buf_oob *rx_oob, bool alloc_mbuf) 337 { 338 bus_dma_segment_t segs[1]; 339 struct mbuf *mbuf; 340 int nsegs, err; 341 uint32_t mlen; 342 343 if (alloc_mbuf) { 344 mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize); 345 if (unlikely(mbuf == NULL)) { 346 mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 347 if (unlikely(mbuf == NULL)) { 348 return ENOMEM; 349 } 350 mlen = MCLBYTES; 351 } else { 352 mlen = rxq->datasize; 353 } 354 355 mbuf->m_pkthdr.len = mbuf->m_len = mlen; 356 } else { 357 if (rx_oob->mbuf) { 358 mbuf = rx_oob->mbuf; 359 mlen = rx_oob->mbuf->m_pkthdr.len; 360 } else { 361 return ENOMEM; 362 } 363 } 364 365 err = bus_dmamap_load_mbuf_sg(apc->rx_buf_tag, rx_oob->dma_map, 366 mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 367 368 if (unlikely((err != 0) || (nsegs != 1))) { 369 mana_warn(NULL, "Failed to map mbuf, error: %d, " 370 "nsegs: %d\n", err, nsegs); 371 counter_u64_add(rxq->stats.dma_mapping_err, 1); 372 goto error; 373 } 374 375 bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map, 376 BUS_DMASYNC_PREREAD); 377 378 rx_oob->mbuf = mbuf; 379 rx_oob->num_sge = 1; 380 rx_oob->sgl[0].address = segs[0].ds_addr; 381 rx_oob->sgl[0].size = mlen; 382 rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey; 383 384 return 0; 385 386 error: 387 m_freem(mbuf); 388 return EFAULT; 389 } 390 391 static inline void 392 mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq, 393 struct mana_recv_buf_oob *rx_oob, bool free_mbuf) 394 { 395 bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map, 396 BUS_DMASYNC_POSTREAD); 397 
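    /*
     * The POSTREAD sync above makes any device-written data visible to
     * the CPU before the DMA mapping is torn down below.
     */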
    bus_dmamap_unload(apc->rx_buf_tag, rx_oob->dma_map);

    if (free_mbuf && rx_oob->mbuf) {
        m_freem(rx_oob->mbuf);
        rx_oob->mbuf = NULL;
    }
}


/* Use a couple of mbuf PH_loc spaces for the L3 and L4 protocol types */
#define MANA_L3_PROTO(_mbuf)    ((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
#define MANA_L4_PROTO(_mbuf)    ((_mbuf)->m_pkthdr.PH_loc.sixteen[1])

#define MANA_TXQ_FULL   (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)

static void
mana_xmit(struct mana_txq *txq)
{
    enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
    struct mana_send_buf_info *tx_info;
    if_t ndev = txq->ndev;
    struct mbuf *mbuf;
    struct mana_port_context *apc = if_getsoftc(ndev);
    struct mana_port_stats *port_stats = &apc->port_stats;
    struct gdma_dev *gd = apc->ac->gdma_dev;
    uint64_t packets, bytes;
    uint16_t next_to_use;
    struct mana_tx_package pkg = {};
    struct mana_stats *tx_stats;
    struct gdma_queue *gdma_sq;
    struct mana_cq *cq;
    int err, len;

    gdma_sq = txq->gdma_sq;
    cq = &apc->tx_qp[txq->idx].tx_cq;
    tx_stats = &txq->stats;

    packets = 0;
    bytes = 0;
    next_to_use = txq->next_to_use;

    while ((mbuf = drbr_peek(ndev, txq->txq_br)) != NULL) {
        if (!apc->port_is_up ||
            (if_getdrvflags(ndev) & MANA_TXQ_FULL) != IFF_DRV_RUNNING) {
            drbr_putback(ndev, txq->txq_br, mbuf);
            break;
        }

        if (!mana_can_tx(gdma_sq)) {
            /* SQ is full. Set the IFF_DRV_OACTIVE flag */
            if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, 0);
            counter_u64_add(tx_stats->stop, 1);
            uint64_t stops = counter_u64_fetch(tx_stats->stop);
            uint64_t wakeups = counter_u64_fetch(tx_stats->wakeup);
#define MANA_TXQ_STOP_THRESHOLD 50
            if (stops > MANA_TXQ_STOP_THRESHOLD && wakeups > 0 &&
                stops > wakeups && txq->alt_txq_idx == txq->idx) {
                txq->alt_txq_idx =
                    (txq->idx + (stops / wakeups))
                    % apc->num_queues;
                counter_u64_add(tx_stats->alt_chg, 1);
            }

            drbr_putback(ndev, txq->txq_br, mbuf);

            taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
            break;
        }

        tx_info = &txq->tx_buf_info[next_to_use];

        memset(&pkg, 0, sizeof(struct mana_tx_package));
        pkg.wqe_req.sgl = pkg.sgl_array;

        err = mana_tx_map_mbuf(apc, tx_info, &mbuf, &pkg, tx_stats);
        if (unlikely(err)) {
            mana_dbg(NULL,
                "Failed to map tx mbuf, err %d\n", err);

            counter_u64_add(tx_stats->dma_mapping_err, 1);

            /* The mbuf is still there.
Free it */ 479 m_freem(mbuf); 480 /* Advance the drbr queue */ 481 drbr_advance(ndev, txq->txq_br); 482 continue; 483 } 484 485 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id; 486 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame; 487 488 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) { 489 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset; 490 pkt_fmt = MANA_LONG_PKT_FMT; 491 } else { 492 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset; 493 } 494 495 pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt; 496 497 if (pkt_fmt == MANA_SHORT_PKT_FMT) 498 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob); 499 else 500 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob); 501 502 pkg.wqe_req.inline_oob_data = &pkg.tx_oob; 503 pkg.wqe_req.flags = 0; 504 pkg.wqe_req.client_data_unit = 0; 505 506 if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) { 507 if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP) 508 pkg.tx_oob.s_oob.is_outer_ipv4 = 1; 509 else 510 pkg.tx_oob.s_oob.is_outer_ipv6 = 1; 511 512 pkg.tx_oob.s_oob.comp_iphdr_csum = 1; 513 pkg.tx_oob.s_oob.comp_tcp_csum = 1; 514 pkg.tx_oob.s_oob.trans_off = mbuf->m_pkthdr.l3hlen; 515 516 pkg.wqe_req.client_data_unit = mbuf->m_pkthdr.tso_segsz; 517 pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0; 518 } else if (mbuf->m_pkthdr.csum_flags & 519 (CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP6_UDP | CSUM_IP6_TCP)) { 520 if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP) { 521 pkg.tx_oob.s_oob.is_outer_ipv4 = 1; 522 pkg.tx_oob.s_oob.comp_iphdr_csum = 1; 523 } else { 524 pkg.tx_oob.s_oob.is_outer_ipv6 = 1; 525 } 526 527 if (MANA_L4_PROTO(mbuf) == IPPROTO_TCP) { 528 pkg.tx_oob.s_oob.comp_tcp_csum = 1; 529 pkg.tx_oob.s_oob.trans_off = 530 mbuf->m_pkthdr.l3hlen; 531 } else { 532 pkg.tx_oob.s_oob.comp_udp_csum = 1; 533 } 534 } else if (mbuf->m_pkthdr.csum_flags & CSUM_IP) { 535 pkg.tx_oob.s_oob.is_outer_ipv4 = 1; 536 pkg.tx_oob.s_oob.comp_iphdr_csum = 1; 537 } else { 538 if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP) 539 pkg.tx_oob.s_oob.is_outer_ipv4 = 1; 540 else if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IPV6) 541 pkg.tx_oob.s_oob.is_outer_ipv6 = 1; 542 } 543 544 len = mbuf->m_pkthdr.len; 545 546 err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req, 547 (struct gdma_posted_wqe_info *)&tx_info->wqe_inf); 548 if (unlikely(err)) { 549 /* Should not happen */ 550 if_printf(ndev, "Failed to post TX OOB: %d\n", err); 551 552 mana_tx_unmap_mbuf(apc, tx_info); 553 554 drbr_advance(ndev, txq->txq_br); 555 continue; 556 } 557 558 next_to_use = 559 (next_to_use + 1) % MAX_SEND_BUFFERS_PER_QUEUE; 560 561 (void)atomic_inc_return(&txq->pending_sends); 562 563 drbr_advance(ndev, txq->txq_br); 564 565 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq); 566 567 packets++; 568 bytes += len; 569 } 570 571 counter_enter(); 572 counter_u64_add_protected(tx_stats->packets, packets); 573 counter_u64_add_protected(port_stats->tx_packets, packets); 574 counter_u64_add_protected(tx_stats->bytes, bytes); 575 counter_u64_add_protected(port_stats->tx_bytes, bytes); 576 counter_exit(); 577 578 txq->next_to_use = next_to_use; 579 } 580 581 static void 582 mana_xmit_taskfunc(void *arg, int pending) 583 { 584 struct mana_txq *txq = (struct mana_txq *)arg; 585 if_t ndev = txq->ndev; 586 struct mana_port_context *apc = if_getsoftc(ndev); 587 588 while (!drbr_empty(ndev, txq->txq_br) && apc->port_is_up && 589 (if_getdrvflags(ndev) & MANA_TXQ_FULL) == IFF_DRV_RUNNING) { 590 mtx_lock(&txq->txq_mtx); 591 mana_xmit(txq); 592 mtx_unlock(&txq->txq_mtx); 593 } 594 } 595 596 #define PULLUP_HDR(m, len) \ 597 do { \ 598 if (unlikely((m)->m_len < 
(len))) {                              \
        (m) = m_pullup((m), (len));             \
        if ((m) == NULL)                        \
            return (NULL);                      \
    }                                           \
} while (0)

/*
 * If this function fails, the mbuf is freed.
 */
static inline struct mbuf *
mana_tso_fixup(struct mbuf *mbuf)
{
    struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
    struct tcphdr *th;
    uint16_t etype;
    int ehlen;

    if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
        etype = ntohs(eh->evl_proto);
        ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
    } else {
        etype = ntohs(eh->evl_encap_proto);
        ehlen = ETHER_HDR_LEN;
    }

    if (etype == ETHERTYPE_IP) {
        struct ip *ip;
        int iphlen;

        PULLUP_HDR(mbuf, ehlen + sizeof(*ip));
        ip = mtodo(mbuf, ehlen);
        iphlen = ip->ip_hl << 2;
        mbuf->m_pkthdr.l3hlen = ehlen + iphlen;

        PULLUP_HDR(mbuf, ehlen + iphlen + sizeof(*th));
        th = mtodo(mbuf, ehlen + iphlen);

        ip->ip_len = 0;
        ip->ip_sum = 0;
        th->th_sum = in_pseudo(ip->ip_src.s_addr,
            ip->ip_dst.s_addr, htons(IPPROTO_TCP));
    } else if (etype == ETHERTYPE_IPV6) {
        struct ip6_hdr *ip6;

        PULLUP_HDR(mbuf, ehlen + sizeof(*ip6) + sizeof(*th));
        ip6 = mtodo(mbuf, ehlen);
        if (ip6->ip6_nxt != IPPROTO_TCP) {
            /* Something is really wrong, just return */
            mana_dbg(NULL, "TSO mbuf not TCP, freed.\n");
            m_freem(mbuf);
            return NULL;
        }
        mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);

        th = mtodo(mbuf, ehlen + sizeof(*ip6));

        ip6->ip6_plen = 0;
        th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
    } else {
        /* CSUM_TSO is set but not IP protocol. */
        mana_warn(NULL, "TSO mbuf not right, freed.\n");
        m_freem(mbuf);
        return NULL;
    }

    MANA_L3_PROTO(mbuf) = etype;

    return (mbuf);
}

/*
 * If this function fails, the mbuf is freed.
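 * The caller must not touch the original mbuf pointer after a NULL return.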
671 */ 672 static inline struct mbuf * 673 mana_mbuf_csum_check(struct mbuf *mbuf) 674 { 675 struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *); 676 struct mbuf *mbuf_next; 677 uint16_t etype; 678 int offset; 679 int ehlen; 680 681 if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) { 682 etype = ntohs(eh->evl_proto); 683 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 684 } else { 685 etype = ntohs(eh->evl_encap_proto); 686 ehlen = ETHER_HDR_LEN; 687 } 688 689 mbuf_next = m_getptr(mbuf, ehlen, &offset); 690 691 MANA_L4_PROTO(mbuf) = 0; 692 if (etype == ETHERTYPE_IP) { 693 const struct ip *ip; 694 int iphlen; 695 696 ip = (struct ip *)(mtodo(mbuf_next, offset)); 697 iphlen = ip->ip_hl << 2; 698 mbuf->m_pkthdr.l3hlen = ehlen + iphlen; 699 700 MANA_L4_PROTO(mbuf) = ip->ip_p; 701 } else if (etype == ETHERTYPE_IPV6) { 702 const struct ip6_hdr *ip6; 703 704 ip6 = (struct ip6_hdr *)(mtodo(mbuf_next, offset)); 705 mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6); 706 707 MANA_L4_PROTO(mbuf) = ip6->ip6_nxt; 708 } else { 709 MANA_L4_PROTO(mbuf) = 0; 710 } 711 712 MANA_L3_PROTO(mbuf) = etype; 713 714 return (mbuf); 715 } 716 717 static int 718 mana_start_xmit(if_t ifp, struct mbuf *m) 719 { 720 struct mana_port_context *apc = if_getsoftc(ifp); 721 struct mana_txq *txq; 722 int is_drbr_empty; 723 uint16_t txq_id; 724 int err; 725 726 if (unlikely((!apc->port_is_up) || 727 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 728 return ENODEV; 729 730 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 731 m = mana_tso_fixup(m); 732 if (unlikely(m == NULL)) { 733 counter_enter(); 734 counter_u64_add_protected(apc->port_stats.tx_drops, 1); 735 counter_exit(); 736 return EIO; 737 } 738 } else { 739 m = mana_mbuf_csum_check(m); 740 if (unlikely(m == NULL)) { 741 counter_enter(); 742 counter_u64_add_protected(apc->port_stats.tx_drops, 1); 743 counter_exit(); 744 return EIO; 745 } 746 } 747 748 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { 749 uint32_t hash = m->m_pkthdr.flowid; 750 txq_id = apc->indir_table[(hash) & MANA_INDIRECT_TABLE_MASK] % 751 apc->num_queues; 752 } else { 753 txq_id = m->m_pkthdr.flowid % apc->num_queues; 754 } 755 756 if (apc->enable_tx_altq) 757 txq_id = apc->tx_qp[txq_id].txq.alt_txq_idx; 758 759 txq = &apc->tx_qp[txq_id].txq; 760 761 is_drbr_empty = drbr_empty(ifp, txq->txq_br); 762 err = drbr_enqueue(ifp, txq->txq_br, m); 763 if (unlikely(err)) { 764 mana_warn(NULL, "txq %u failed to enqueue: %d\n", 765 txq_id, err); 766 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task); 767 return err; 768 } 769 770 if (is_drbr_empty && mtx_trylock(&txq->txq_mtx)) { 771 mana_xmit(txq); 772 mtx_unlock(&txq->txq_mtx); 773 } else { 774 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task); 775 } 776 777 return 0; 778 } 779 780 static void 781 mana_cleanup_port_context(struct mana_port_context *apc) 782 { 783 bus_dma_tag_destroy(apc->tx_buf_tag); 784 bus_dma_tag_destroy(apc->rx_buf_tag); 785 apc->rx_buf_tag = NULL; 786 787 free(apc->rxqs, M_DEVBUF); 788 apc->rxqs = NULL; 789 790 mana_free_counters((counter_u64_t *)&apc->port_stats, 791 sizeof(struct mana_port_stats)); 792 } 793 794 static int 795 mana_init_port_context(struct mana_port_context *apc) 796 { 797 device_t dev = apc->ac->gdma_dev->gdma_context->dev; 798 uint32_t tso_maxsize; 799 int err; 800 801 tso_maxsize = MAX_MBUF_FRAGS * MANA_TSO_MAXSEG_SZ - 802 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 803 804 /* Create DMA tag for tx bufs */ 805 err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 806 1, 0, /* alignment, boundary */ 807 
        BUS_SPACE_MAXADDR,      /* lowaddr */
        BUS_SPACE_MAXADDR,      /* highaddr */
        NULL, NULL,             /* filter, filterarg */
        tso_maxsize,            /* maxsize */
        MAX_MBUF_FRAGS,         /* nsegments */
        tso_maxsize,            /* maxsegsize */
        0,                      /* flags */
        NULL, NULL,             /* lockfunc, lockfuncarg */
        &apc->tx_buf_tag);
    if (unlikely(err)) {
        device_printf(dev, "Failed to create TX DMA tag\n");
        return err;
    }

    /* Create DMA tag for rx bufs */
    err = bus_dma_tag_create(bus_get_dma_tag(dev),  /* parent */
        64, 0,                  /* alignment, boundary */
        BUS_SPACE_MAXADDR,      /* lowaddr */
        BUS_SPACE_MAXADDR,      /* highaddr */
        NULL, NULL,             /* filter, filterarg */
        MJUMPAGESIZE,           /* maxsize */
        1,                      /* nsegments */
        MJUMPAGESIZE,           /* maxsegsize */
        0,                      /* flags */
        NULL, NULL,             /* lockfunc, lockfuncarg */
        &apc->rx_buf_tag);
    if (unlikely(err)) {
        device_printf(dev, "Failed to create RX DMA tag\n");
        return err;
    }

    apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
        M_DEVBUF, M_WAITOK | M_ZERO);

    if (!apc->rxqs) {
        bus_dma_tag_destroy(apc->tx_buf_tag);
        bus_dma_tag_destroy(apc->rx_buf_tag);
        apc->rx_buf_tag = NULL;
        return ENOMEM;
    }

    return 0;
}

static int
mana_send_request(struct mana_context *ac, void *in_buf,
    uint32_t in_len, void *out_buf, uint32_t out_len)
{
    struct gdma_context *gc = ac->gdma_dev->gdma_context;
    struct gdma_resp_hdr *resp = out_buf;
    struct gdma_req_hdr *req = in_buf;
    device_t dev = gc->dev;
    static atomic_t activity_id;
    int err;

    req->dev_id = gc->mana.dev_id;
    req->activity_id = atomic_inc_return(&activity_id);

    mana_dbg(NULL, "activity_id = %u\n", activity_id);

    err = mana_gd_send_request(gc, in_len, in_buf, out_len,
        out_buf);
    if (err || resp->status) {
        device_printf(dev, "Failed to send mana message: %d, 0x%x\n",
            err, resp->status);
        return err ?
err : EPROTO; 873 } 874 875 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 || 876 req->activity_id != resp->activity_id) { 877 device_printf(dev, 878 "Unexpected mana message response: %x,%x,%x,%x\n", 879 req->dev_id.as_uint32, resp->dev_id.as_uint32, 880 req->activity_id, resp->activity_id); 881 return EPROTO; 882 } 883 884 return 0; 885 } 886 887 static int 888 mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr, 889 const enum mana_command_code expected_code, 890 const uint32_t min_size) 891 { 892 if (resp_hdr->response.msg_type != expected_code) 893 return EPROTO; 894 895 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1) 896 return EPROTO; 897 898 if (resp_hdr->response.msg_size < min_size) 899 return EPROTO; 900 901 return 0; 902 } 903 904 static int 905 mana_query_device_cfg(struct mana_context *ac, uint32_t proto_major_ver, 906 uint32_t proto_minor_ver, uint32_t proto_micro_ver, 907 uint16_t *max_num_vports) 908 { 909 struct gdma_context *gc = ac->gdma_dev->gdma_context; 910 struct mana_query_device_cfg_resp resp = {}; 911 struct mana_query_device_cfg_req req = {}; 912 device_t dev = gc->dev; 913 int err = 0; 914 915 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG, 916 sizeof(req), sizeof(resp)); 917 req.proto_major_ver = proto_major_ver; 918 req.proto_minor_ver = proto_minor_ver; 919 req.proto_micro_ver = proto_micro_ver; 920 921 err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp)); 922 if (err) { 923 device_printf(dev, "Failed to query config: %d", err); 924 return err; 925 } 926 927 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG, 928 sizeof(resp)); 929 if (err || resp.hdr.status) { 930 device_printf(dev, "Invalid query result: %d, 0x%x\n", err, 931 resp.hdr.status); 932 if (!err) 933 err = EPROTO; 934 return err; 935 } 936 937 *max_num_vports = resp.max_num_vports; 938 939 mana_dbg(NULL, "mana max_num_vports from device = %d\n", 940 *max_num_vports); 941 942 return 0; 943 } 944 945 static int 946 mana_query_vport_cfg(struct mana_port_context *apc, uint32_t vport_index, 947 uint32_t *max_sq, uint32_t *max_rq, uint32_t *num_indir_entry) 948 { 949 struct mana_query_vport_cfg_resp resp = {}; 950 struct mana_query_vport_cfg_req req = {}; 951 int err; 952 953 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG, 954 sizeof(req), sizeof(resp)); 955 956 req.vport_index = vport_index; 957 958 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 959 sizeof(resp)); 960 if (err) 961 return err; 962 963 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG, 964 sizeof(resp)); 965 if (err) 966 return err; 967 968 if (resp.hdr.status) 969 return EPROTO; 970 971 *max_sq = resp.max_num_sq; 972 *max_rq = resp.max_num_rq; 973 *num_indir_entry = resp.num_indirection_ent; 974 975 apc->port_handle = resp.vport; 976 memcpy(apc->mac_addr, resp.mac_addr, ETHER_ADDR_LEN); 977 978 return 0; 979 } 980 981 void 982 mana_uncfg_vport(struct mana_port_context *apc) 983 { 984 apc->vport_use_count--; 985 if (apc->vport_use_count < 0) { 986 mana_err(NULL, 987 "WARNING: vport_use_count less than 0: %u\n", 988 apc->vport_use_count); 989 } 990 } 991 992 int 993 mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id, 994 uint32_t doorbell_pg_id) 995 { 996 struct mana_config_vport_resp resp = {}; 997 struct mana_config_vport_req req = {}; 998 int err; 999 1000 /* This function is used to program the Ethernet port in the hardware 1001 * table. It can be called from the Ethernet driver or the RDMA driver. 
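 * The matching mana_uncfg_vport() call releases the use count taken here.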
1002 * 1003 * For Ethernet usage, the hardware supports only one active user on a 1004 * physical port. The driver checks on the port usage before programming 1005 * the hardware when creating the RAW QP (RDMA driver) or exposing the 1006 * device to kernel NET layer (Ethernet driver). 1007 * 1008 * Because the RDMA driver doesn't know in advance which QP type the 1009 * user will create, it exposes the device with all its ports. The user 1010 * may not be able to create RAW QP on a port if this port is already 1011 * in used by the Ethernet driver from the kernel. 1012 * 1013 * This physical port limitation only applies to the RAW QP. For RC QP, 1014 * the hardware doesn't have this limitation. The user can create RC 1015 * QPs on a physical port up to the hardware limits independent of the 1016 * Ethernet usage on the same port. 1017 */ 1018 if (apc->vport_use_count > 0) { 1019 return EBUSY; 1020 } 1021 apc->vport_use_count++; 1022 1023 mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX, 1024 sizeof(req), sizeof(resp)); 1025 req.vport = apc->port_handle; 1026 req.pdid = protection_dom_id; 1027 req.doorbell_pageid = doorbell_pg_id; 1028 1029 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 1030 sizeof(resp)); 1031 if (err) { 1032 if_printf(apc->ndev, "Failed to configure vPort: %d\n", err); 1033 goto out; 1034 } 1035 1036 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX, 1037 sizeof(resp)); 1038 if (err || resp.hdr.status) { 1039 if_printf(apc->ndev, "Failed to configure vPort: %d, 0x%x\n", 1040 err, resp.hdr.status); 1041 if (!err) 1042 err = EPROTO; 1043 1044 goto out; 1045 } 1046 1047 apc->tx_shortform_allowed = resp.short_form_allowed; 1048 apc->tx_vp_offset = resp.tx_vport_offset; 1049 1050 if_printf(apc->ndev, "Configured vPort %ju PD %u DB %u\n", 1051 apc->port_handle, protection_dom_id, doorbell_pg_id); 1052 1053 out: 1054 if (err) 1055 mana_uncfg_vport(apc); 1056 1057 return err; 1058 } 1059 1060 static int 1061 mana_cfg_vport_steering(struct mana_port_context *apc, 1062 enum TRI_STATE rx, 1063 bool update_default_rxobj, bool update_key, 1064 bool update_tab) 1065 { 1066 uint16_t num_entries = MANA_INDIRECT_TABLE_SIZE; 1067 struct mana_cfg_rx_steer_req *req = NULL; 1068 struct mana_cfg_rx_steer_resp resp = {}; 1069 if_t ndev = apc->ndev; 1070 mana_handle_t *req_indir_tab; 1071 uint32_t req_buf_size; 1072 int err; 1073 1074 req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries; 1075 req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO); 1076 if (!req) 1077 return ENOMEM; 1078 1079 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size, 1080 sizeof(resp)); 1081 1082 req->vport = apc->port_handle; 1083 req->num_indir_entries = num_entries; 1084 req->indir_tab_offset = sizeof(*req); 1085 req->rx_enable = rx; 1086 req->rss_enable = apc->rss_state; 1087 req->update_default_rxobj = update_default_rxobj; 1088 req->update_hashkey = update_key; 1089 req->update_indir_tab = update_tab; 1090 req->default_rxobj = apc->default_rxobj; 1091 1092 if (update_key) 1093 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE); 1094 1095 if (update_tab) { 1096 req_indir_tab = (mana_handle_t *)(req + 1); 1097 memcpy(req_indir_tab, apc->rxobj_table, 1098 req->num_indir_entries * sizeof(mana_handle_t)); 1099 } 1100 1101 err = mana_send_request(apc->ac, req, req_buf_size, &resp, 1102 sizeof(resp)); 1103 if (err) { 1104 if_printf(ndev, "Failed to configure vPort RX: %d\n", err); 1105 goto out; 1106 } 1107 1108 err = mana_verify_resp_hdr(&resp.hdr, 
MANA_CONFIG_VPORT_RX, 1109 sizeof(resp)); 1110 if (err) { 1111 if_printf(ndev, "vPort RX configuration failed: %d\n", err); 1112 goto out; 1113 } 1114 1115 if (resp.hdr.status) { 1116 if_printf(ndev, "vPort RX configuration failed: 0x%x\n", 1117 resp.hdr.status); 1118 err = EPROTO; 1119 } 1120 1121 if_printf(ndev, "Configured steering vPort %ju entries %u\n", 1122 apc->port_handle, num_entries); 1123 1124 out: 1125 free(req, M_DEVBUF); 1126 return err; 1127 } 1128 1129 int 1130 mana_create_wq_obj(struct mana_port_context *apc, 1131 mana_handle_t vport, 1132 uint32_t wq_type, struct mana_obj_spec *wq_spec, 1133 struct mana_obj_spec *cq_spec, 1134 mana_handle_t *wq_obj) 1135 { 1136 struct mana_create_wqobj_resp resp = {}; 1137 struct mana_create_wqobj_req req = {}; 1138 if_t ndev = apc->ndev; 1139 int err; 1140 1141 mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ, 1142 sizeof(req), sizeof(resp)); 1143 req.vport = vport; 1144 req.wq_type = wq_type; 1145 req.wq_gdma_region = wq_spec->gdma_region; 1146 req.cq_gdma_region = cq_spec->gdma_region; 1147 req.wq_size = wq_spec->queue_size; 1148 req.cq_size = cq_spec->queue_size; 1149 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id; 1150 req.cq_parent_qid = cq_spec->attached_eq; 1151 1152 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 1153 sizeof(resp)); 1154 if (err) { 1155 if_printf(ndev, "Failed to create WQ object: %d\n", err); 1156 goto out; 1157 } 1158 1159 err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ, 1160 sizeof(resp)); 1161 if (err || resp.hdr.status) { 1162 if_printf(ndev, "Failed to create WQ object: %d, 0x%x\n", err, 1163 resp.hdr.status); 1164 if (!err) 1165 err = EPROTO; 1166 goto out; 1167 } 1168 1169 if (resp.wq_obj == INVALID_MANA_HANDLE) { 1170 if_printf(ndev, "Got an invalid WQ object handle\n"); 1171 err = EPROTO; 1172 goto out; 1173 } 1174 1175 *wq_obj = resp.wq_obj; 1176 wq_spec->queue_index = resp.wq_id; 1177 cq_spec->queue_index = resp.cq_id; 1178 1179 return 0; 1180 out: 1181 return err; 1182 } 1183 1184 void 1185 mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type, 1186 mana_handle_t wq_obj) 1187 { 1188 struct mana_destroy_wqobj_resp resp = {}; 1189 struct mana_destroy_wqobj_req req = {}; 1190 if_t ndev = apc->ndev; 1191 int err; 1192 1193 mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ, 1194 sizeof(req), sizeof(resp)); 1195 req.wq_type = wq_type; 1196 req.wq_obj_handle = wq_obj; 1197 1198 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 1199 sizeof(resp)); 1200 if (err) { 1201 if_printf(ndev, "Failed to destroy WQ object: %d\n", err); 1202 return; 1203 } 1204 1205 err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ, 1206 sizeof(resp)); 1207 if (err || resp.hdr.status) 1208 if_printf(ndev, "Failed to destroy WQ object: %d, 0x%x\n", 1209 err, resp.hdr.status); 1210 } 1211 1212 static void 1213 mana_destroy_eq(struct mana_context *ac) 1214 { 1215 struct gdma_context *gc = ac->gdma_dev->gdma_context; 1216 struct gdma_queue *eq; 1217 int i; 1218 1219 if (!ac->eqs) 1220 return; 1221 1222 for (i = 0; i < gc->max_num_queues; i++) { 1223 eq = ac->eqs[i].eq; 1224 if (!eq) 1225 continue; 1226 1227 mana_gd_destroy_queue(gc, eq); 1228 } 1229 1230 free(ac->eqs, M_DEVBUF); 1231 ac->eqs = NULL; 1232 } 1233 1234 static int 1235 mana_create_eq(struct mana_context *ac) 1236 { 1237 struct gdma_dev *gd = ac->gdma_dev; 1238 struct gdma_context *gc = gd->gdma_context; 1239 struct gdma_queue_spec spec = {}; 1240 int err; 1241 int i; 1242 1243 ac->eqs = mallocarray(gc->max_num_queues, 
sizeof(struct mana_eq), 1244 M_DEVBUF, M_WAITOK | M_ZERO); 1245 if (!ac->eqs) 1246 return ENOMEM; 1247 1248 spec.type = GDMA_EQ; 1249 spec.monitor_avl_buf = false; 1250 spec.queue_size = EQ_SIZE; 1251 spec.eq.callback = NULL; 1252 spec.eq.context = ac->eqs; 1253 spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE; 1254 1255 for (i = 0; i < gc->max_num_queues; i++) { 1256 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq); 1257 if (err) 1258 goto out; 1259 } 1260 1261 return 0; 1262 out: 1263 mana_destroy_eq(ac); 1264 return err; 1265 } 1266 1267 static int 1268 mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq) 1269 { 1270 struct mana_fence_rq_resp resp = {}; 1271 struct mana_fence_rq_req req = {}; 1272 int err; 1273 1274 init_completion(&rxq->fence_event); 1275 1276 mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ, 1277 sizeof(req), sizeof(resp)); 1278 req.wq_obj_handle = rxq->rxobj; 1279 1280 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 1281 sizeof(resp)); 1282 if (err) { 1283 if_printf(apc->ndev, "Failed to fence RQ %u: %d\n", 1284 rxq->rxq_idx, err); 1285 return err; 1286 } 1287 1288 err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp)); 1289 if (err || resp.hdr.status) { 1290 if_printf(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n", 1291 rxq->rxq_idx, err, resp.hdr.status); 1292 if (!err) 1293 err = EPROTO; 1294 1295 return err; 1296 } 1297 1298 if (wait_for_completion_timeout(&rxq->fence_event, 10 * hz)) { 1299 if_printf(apc->ndev, "Failed to fence RQ %u: timed out\n", 1300 rxq->rxq_idx); 1301 return ETIMEDOUT; 1302 } 1303 1304 return 0; 1305 } 1306 1307 static void 1308 mana_fence_rqs(struct mana_port_context *apc) 1309 { 1310 unsigned int rxq_idx; 1311 struct mana_rxq *rxq; 1312 int err; 1313 1314 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { 1315 rxq = apc->rxqs[rxq_idx]; 1316 err = mana_fence_rq(apc, rxq); 1317 1318 /* In case of any error, use sleep instead. 
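         * The sleep is a best-effort substitute for the fence completion
         * and gives pending RX work a chance to drain before the caller
         * proceeds.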
         */
        if (err)
            gdma_msleep(100);
    }
}

static int
mana_move_wq_tail(struct gdma_queue *wq, uint32_t num_units)
{
    uint32_t used_space_old;
    uint32_t used_space_new;

    used_space_old = wq->head - wq->tail;
    used_space_new = wq->head - (wq->tail + num_units);

    if (used_space_new > used_space_old) {
        mana_err(NULL,
            "WARNING: new used space %u greater than old one %u\n",
            used_space_new, used_space_old);
        return ERANGE;
    }

    wq->tail += num_units;
    return 0;
}

static void
mana_poll_tx_cq(struct mana_cq *cq)
{
    struct gdma_comp *completions = cq->gdma_comp_buf;
    struct gdma_posted_wqe_info *wqe_info;
    struct mana_send_buf_info *tx_info;
    unsigned int pkt_transmitted = 0;
    unsigned int wqe_unit_cnt = 0;
    struct mana_txq *txq = cq->txq;
    struct mana_port_context *apc;
    uint16_t next_to_complete;
    if_t ndev;
    int comp_read;
    int txq_idx = txq->idx;
    int i;
    int sa_drop = 0;

    struct gdma_queue *gdma_wq;
    unsigned int avail_space;
    bool txq_full = false;

    ndev = txq->ndev;
    apc = if_getsoftc(ndev);

    comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
        CQE_POLLING_BUFFER);

    if (comp_read < 1)
        return;

    next_to_complete = txq->next_to_complete;

    for (i = 0; i < comp_read; i++) {
        struct mana_tx_comp_oob *cqe_oob;

        if (!completions[i].is_sq) {
            mana_err(NULL, "WARNING: Not for SQ\n");
            return;
        }

        cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
        if (cqe_oob->cqe_hdr.client_type !=
            MANA_CQE_COMPLETION) {
            mana_err(NULL,
                "WARNING: Invalid CQE client type %u\n",
                cqe_oob->cqe_hdr.client_type);
            return;
        }

        switch (cqe_oob->cqe_hdr.cqe_type) {
        case CQE_TX_OKAY:
            break;

        case CQE_TX_SA_DROP:
        case CQE_TX_MTU_DROP:
        case CQE_TX_INVALID_OOB:
        case CQE_TX_INVALID_ETH_TYPE:
        case CQE_TX_HDR_PROCESSING_ERROR:
        case CQE_TX_VF_DISABLED:
        case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
        case CQE_TX_VPORT_DISABLED:
        case CQE_TX_VLAN_TAGGING_VIOLATION:
            sa_drop++;
            mana_err(NULL,
                "TX: txq %d CQE error %d, ntc = %d, "
                "pending sends = %d: err ignored.\n",
                txq_idx, cqe_oob->cqe_hdr.cqe_type,
                next_to_complete, txq->pending_sends);
            break;

        default:
            /* If the CQE type is unexpected, log an error,
             * and go through the error path.
1417 */ 1418 mana_err(NULL, 1419 "ERROR: TX: Unexpected CQE type %d: HW BUG?\n", 1420 cqe_oob->cqe_hdr.cqe_type); 1421 return; 1422 } 1423 if (txq->gdma_txq_id != completions[i].wq_num) { 1424 mana_dbg(NULL, 1425 "txq gdma id not match completion wq num: " 1426 "%d != %d\n", 1427 txq->gdma_txq_id, completions[i].wq_num); 1428 break; 1429 } 1430 1431 tx_info = &txq->tx_buf_info[next_to_complete]; 1432 if (!tx_info->mbuf) { 1433 mana_err(NULL, 1434 "WARNING: txq %d Empty mbuf on tx_info: %u, " 1435 "ntu = %u, pending_sends = %d, " 1436 "transmitted = %d, sa_drop = %d, i = %d, comp_read = %d\n", 1437 txq_idx, next_to_complete, txq->next_to_use, 1438 txq->pending_sends, pkt_transmitted, sa_drop, 1439 i, comp_read); 1440 break; 1441 } 1442 1443 wqe_info = &tx_info->wqe_inf; 1444 wqe_unit_cnt += wqe_info->wqe_size_in_bu; 1445 1446 mana_tx_unmap_mbuf(apc, tx_info); 1447 mb(); 1448 1449 next_to_complete = 1450 (next_to_complete + 1) % MAX_SEND_BUFFERS_PER_QUEUE; 1451 1452 pkt_transmitted++; 1453 } 1454 1455 txq->next_to_complete = next_to_complete; 1456 1457 if (wqe_unit_cnt == 0) { 1458 mana_err(NULL, 1459 "WARNING: TX ring not proceeding!\n"); 1460 return; 1461 } 1462 1463 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt); 1464 1465 /* Ensure tail updated before checking q stop */ 1466 wmb(); 1467 1468 gdma_wq = txq->gdma_sq; 1469 avail_space = mana_gd_wq_avail_space(gdma_wq); 1470 1471 1472 if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL) { 1473 txq_full = true; 1474 } 1475 1476 /* Ensure checking txq_full before apc->port_is_up. */ 1477 rmb(); 1478 1479 if (txq_full && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) { 1480 /* Grab the txq lock and re-test */ 1481 mtx_lock(&txq->txq_mtx); 1482 avail_space = mana_gd_wq_avail_space(gdma_wq); 1483 1484 if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL && 1485 apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) { 1486 /* Clear the Q full flag */ 1487 if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, 1488 IFF_DRV_OACTIVE); 1489 counter_u64_add(txq->stats.wakeup, 1); 1490 if (txq->alt_txq_idx != txq->idx) { 1491 uint64_t stops = counter_u64_fetch(txq->stats.stop); 1492 uint64_t wakeups = counter_u64_fetch(txq->stats.wakeup); 1493 /* Reset alt_txq_idx back if it is not overloaded */ 1494 if (stops < wakeups) { 1495 txq->alt_txq_idx = txq->idx; 1496 counter_u64_add(txq->stats.alt_reset, 1); 1497 } 1498 } 1499 rmb(); 1500 /* Schedule a tx enqueue task */ 1501 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task); 1502 } 1503 mtx_unlock(&txq->txq_mtx); 1504 } 1505 1506 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0) 1507 mana_err(NULL, 1508 "WARNING: TX %d pending_sends error: %d\n", 1509 txq->idx, txq->pending_sends); 1510 1511 cq->work_done = pkt_transmitted; 1512 } 1513 1514 static void 1515 mana_post_pkt_rxq(struct mana_rxq *rxq) 1516 { 1517 struct mana_recv_buf_oob *recv_buf_oob; 1518 uint32_t curr_index; 1519 int err; 1520 1521 curr_index = rxq->buf_index++; 1522 if (rxq->buf_index == rxq->num_rx_buf) 1523 rxq->buf_index = 0; 1524 1525 recv_buf_oob = &rxq->rx_oobs[curr_index]; 1526 1527 err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req, 1528 &recv_buf_oob->wqe_inf); 1529 if (err) { 1530 mana_err(NULL, "WARNING: rxq %u post pkt err %d\n", 1531 rxq->rxq_idx, err); 1532 return; 1533 } 1534 1535 if (recv_buf_oob->wqe_inf.wqe_size_in_bu != 1) { 1536 mana_err(NULL, "WARNING: rxq %u wqe_size_in_bu %u\n", 1537 rxq->rxq_idx, recv_buf_oob->wqe_inf.wqe_size_in_bu); 1538 } 1539 } 1540 1541 static void 1542 
mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe, 1543 struct mana_rxq *rxq) 1544 { 1545 struct mana_stats *rx_stats = &rxq->stats; 1546 if_t ndev = rxq->ndev; 1547 uint32_t pkt_len = cqe->ppi[0].pkt_len; 1548 uint16_t rxq_idx = rxq->rxq_idx; 1549 struct mana_port_context *apc; 1550 bool do_lro = false; 1551 bool do_if_input; 1552 1553 apc = if_getsoftc(ndev); 1554 rxq->rx_cq.work_done++; 1555 1556 if (!mbuf) { 1557 return; 1558 } 1559 1560 mbuf->m_flags |= M_PKTHDR; 1561 mbuf->m_pkthdr.len = pkt_len; 1562 mbuf->m_len = pkt_len; 1563 mbuf->m_pkthdr.rcvif = ndev; 1564 1565 if ((if_getcapenable(ndev) & IFCAP_RXCSUM || 1566 if_getcapenable(ndev) & IFCAP_RXCSUM_IPV6) && 1567 (cqe->rx_iphdr_csum_succeed)) { 1568 mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED; 1569 mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1570 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) { 1571 mbuf->m_pkthdr.csum_flags |= 1572 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 1573 mbuf->m_pkthdr.csum_data = 0xffff; 1574 1575 if (cqe->rx_tcp_csum_succeed) 1576 do_lro = true; 1577 } 1578 } 1579 1580 if (cqe->rx_hashtype != 0) { 1581 mbuf->m_pkthdr.flowid = cqe->ppi[0].pkt_hash; 1582 1583 uint16_t hashtype = cqe->rx_hashtype; 1584 if (hashtype & NDIS_HASH_IPV4_MASK) { 1585 hashtype &= NDIS_HASH_IPV4_MASK; 1586 switch (hashtype) { 1587 case NDIS_HASH_TCP_IPV4: 1588 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4); 1589 break; 1590 case NDIS_HASH_UDP_IPV4: 1591 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4); 1592 break; 1593 default: 1594 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4); 1595 } 1596 } else if (hashtype & NDIS_HASH_IPV6_MASK) { 1597 hashtype &= NDIS_HASH_IPV6_MASK; 1598 switch (hashtype) { 1599 case NDIS_HASH_TCP_IPV6: 1600 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6); 1601 break; 1602 case NDIS_HASH_TCP_IPV6_EX: 1603 M_HASHTYPE_SET(mbuf, 1604 M_HASHTYPE_RSS_TCP_IPV6_EX); 1605 break; 1606 case NDIS_HASH_UDP_IPV6: 1607 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6); 1608 break; 1609 case NDIS_HASH_UDP_IPV6_EX: 1610 M_HASHTYPE_SET(mbuf, 1611 M_HASHTYPE_RSS_UDP_IPV6_EX); 1612 break; 1613 default: 1614 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6); 1615 } 1616 } else { 1617 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); 1618 } 1619 } else { 1620 mbuf->m_pkthdr.flowid = rxq_idx; 1621 M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE); 1622 } 1623 1624 do_if_input = true; 1625 if ((if_getcapenable(ndev) & IFCAP_LRO) && do_lro) { 1626 if (rxq->lro.lro_cnt != 0 && 1627 tcp_lro_rx(&rxq->lro, mbuf, 0) == 0) 1628 do_if_input = false; 1629 } 1630 if (do_if_input) { 1631 if_input(ndev, mbuf); 1632 } 1633 1634 counter_enter(); 1635 counter_u64_add_protected(rx_stats->packets, 1); 1636 counter_u64_add_protected(apc->port_stats.rx_packets, 1); 1637 counter_u64_add_protected(rx_stats->bytes, pkt_len); 1638 counter_u64_add_protected(apc->port_stats.rx_bytes, pkt_len); 1639 counter_exit(); 1640 } 1641 1642 static void 1643 mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, 1644 struct gdma_comp *cqe) 1645 { 1646 struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data; 1647 struct mana_recv_buf_oob *rxbuf_oob; 1648 if_t ndev = rxq->ndev; 1649 struct mana_port_context *apc; 1650 struct mbuf *old_mbuf; 1651 uint32_t curr, pktlen; 1652 int err; 1653 1654 switch (oob->cqe_hdr.cqe_type) { 1655 case CQE_RX_OKAY: 1656 break; 1657 1658 case CQE_RX_TRUNCATED: 1659 apc = if_getsoftc(ndev); 1660 counter_u64_add(apc->port_stats.rx_drops, 1); 1661 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index]; 1662 if_printf(ndev, "Dropped a truncated packet\n"); 
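        /*
         * Jump to the common path below, which returns the WQE units
         * to the RQ and reposts a receive buffer for this slot.
         */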
        goto drop;

    case CQE_RX_COALESCED_4:
        if_printf(ndev, "RX coalescing is unsupported\n");
        return;

    case CQE_RX_OBJECT_FENCE:
        complete(&rxq->fence_event);
        return;

    default:
        if_printf(ndev, "Unknown RX CQE type = %d\n",
            oob->cqe_hdr.cqe_type);
        return;
    }

    if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
        return;

    pktlen = oob->ppi[0].pkt_len;

    if (pktlen == 0) {
        /* data packets should never have a packet length of zero */
        if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%jx\n",
            rxq->gdma_id, cq->gdma_id, rxq->rxobj);
        return;
    }

    curr = rxq->buf_index;
    rxbuf_oob = &rxq->rx_oobs[curr];
    if (rxbuf_oob->wqe_inf.wqe_size_in_bu != 1) {
        mana_err(NULL, "WARNING: Rx Incorrect complete "
            "WQE size %u\n",
            rxbuf_oob->wqe_inf.wqe_size_in_bu);
    }

    apc = if_getsoftc(ndev);

    old_mbuf = rxbuf_oob->mbuf;

    /* Unload DMA map for the old mbuf */
    mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);

    /* Load a new mbuf to replace the old one */
    err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
    if (err) {
        mana_dbg(NULL,
            "failed to load rx mbuf, err = %d, packet dropped.\n",
            err);
        counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
        /*
         * Failed to load new mbuf, rxbuf_oob->mbuf is still
         * pointing to the old one. Drop the packet.
         */
        old_mbuf = NULL;
        /* Reload the existing mbuf */
        mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
    }

    mana_rx_mbuf(old_mbuf, oob, rxq);

drop:
    mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);

    mana_post_pkt_rxq(rxq);
}

static void
mana_poll_rx_cq(struct mana_cq *cq)
{
    struct gdma_comp *comp = cq->gdma_comp_buf;
    int comp_read, i;

    comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
    KASSERT(comp_read <= CQE_POLLING_BUFFER,
        ("comp_read %d greater than buf size %d",
        comp_read, CQE_POLLING_BUFFER));

    for (i = 0; i < comp_read; i++) {
        if (comp[i].is_sq == true) {
            mana_err(NULL,
                "WARNING: CQE not for receive queue\n");
            return;
        }

        /* verify recv cqe references the right rxq */
        if (comp[i].wq_num != cq->rxq->gdma_id) {
            mana_err(NULL,
                "WARNING: Received CQE %d not for "
                "this receive queue %d\n",
                comp[i].wq_num, cq->rxq->gdma_id);
            return;
        }

        mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
    }

    tcp_lro_flush_all(&cq->rxq->lro);
}

static void
mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
    struct mana_cq *cq = context;
    uint8_t arm_bit;

    KASSERT(cq->gdma_cq == gdma_queue,
        ("cq do not match %p, %p", cq->gdma_cq, gdma_queue));

    if (cq->type == MANA_CQ_TYPE_RX) {
        mana_poll_rx_cq(cq);
    } else {
        mana_poll_tx_cq(cq);
    }

    if (cq->work_done < cq->budget && cq->do_not_ring_db == false)
        arm_bit = SET_ARM_BIT;
    else
        arm_bit = 0;

    mana_gd_ring_cq(gdma_queue, arm_bit);
}

#define MANA_POLL_BUDGET    8
#define MANA_RX_BUDGET      256
#define MANA_TX_BUDGET      MAX_SEND_BUFFERS_PER_QUEUE

static void
mana_poll(void *arg, int pending)
{
    struct mana_cq *cq = arg;
    int i;

    cq->work_done = 0;
    if (cq->type == MANA_CQ_TYPE_RX) {
        cq->budget = MANA_RX_BUDGET;
    } else {
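        /* The TX budget covers the whole send ring. */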
cq->budget = MANA_TX_BUDGET; 1801 } 1802 1803 for (i = 0; i < MANA_POLL_BUDGET; i++) { 1804 /* 1805 * If this is the last loop, set the budget big enough 1806 * so it will arm the CQ any way. 1807 */ 1808 if (i == (MANA_POLL_BUDGET - 1)) 1809 cq->budget = CQE_POLLING_BUFFER + 1; 1810 1811 mana_cq_handler(cq, cq->gdma_cq); 1812 1813 if (cq->work_done < cq->budget) 1814 break; 1815 1816 cq->work_done = 0; 1817 } 1818 } 1819 1820 static void 1821 mana_schedule_task(void *arg, struct gdma_queue *gdma_queue) 1822 { 1823 struct mana_cq *cq = arg; 1824 1825 taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task); 1826 } 1827 1828 static void 1829 mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq) 1830 { 1831 struct gdma_dev *gd = apc->ac->gdma_dev; 1832 1833 if (!cq->gdma_cq) 1834 return; 1835 1836 /* Drain cleanup taskqueue */ 1837 if (cq->cleanup_tq) { 1838 while (taskqueue_cancel(cq->cleanup_tq, 1839 &cq->cleanup_task, NULL)) { 1840 taskqueue_drain(cq->cleanup_tq, 1841 &cq->cleanup_task); 1842 } 1843 1844 taskqueue_free(cq->cleanup_tq); 1845 } 1846 1847 mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq); 1848 } 1849 1850 static void 1851 mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq) 1852 { 1853 struct gdma_dev *gd = apc->ac->gdma_dev; 1854 struct mana_send_buf_info *txbuf_info; 1855 uint32_t pending_sends; 1856 int i; 1857 1858 if (!txq->gdma_sq) 1859 return; 1860 1861 if ((pending_sends = atomic_read(&txq->pending_sends)) > 0) { 1862 mana_err(NULL, 1863 "WARNING: txq pending sends not zero: %u\n", 1864 pending_sends); 1865 } 1866 1867 if (txq->next_to_use != txq->next_to_complete) { 1868 mana_err(NULL, 1869 "WARNING: txq buf not completed, " 1870 "next use %u, next complete %u\n", 1871 txq->next_to_use, txq->next_to_complete); 1872 } 1873 1874 /* Flush buf ring. 
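Any mbufs still queued in the ring are freed by drbr_flush().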
Grab txq mtx lock */ 1875 if (txq->txq_br) { 1876 mtx_lock(&txq->txq_mtx); 1877 drbr_flush(apc->ndev, txq->txq_br); 1878 mtx_unlock(&txq->txq_mtx); 1879 buf_ring_free(txq->txq_br, M_DEVBUF); 1880 } 1881 1882 /* Drain taskqueue */ 1883 if (txq->enqueue_tq) { 1884 while (taskqueue_cancel(txq->enqueue_tq, 1885 &txq->enqueue_task, NULL)) { 1886 taskqueue_drain(txq->enqueue_tq, 1887 &txq->enqueue_task); 1888 } 1889 1890 taskqueue_free(txq->enqueue_tq); 1891 } 1892 1893 if (txq->tx_buf_info) { 1894 /* Free all mbufs which are still in-flight */ 1895 for (i = 0; i < MAX_SEND_BUFFERS_PER_QUEUE; i++) { 1896 txbuf_info = &txq->tx_buf_info[i]; 1897 if (txbuf_info->mbuf) { 1898 mana_tx_unmap_mbuf(apc, txbuf_info); 1899 } 1900 } 1901 1902 free(txq->tx_buf_info, M_DEVBUF); 1903 } 1904 1905 mana_free_counters((counter_u64_t *)&txq->stats, 1906 sizeof(txq->stats)); 1907 1908 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq); 1909 1910 mtx_destroy(&txq->txq_mtx); 1911 } 1912 1913 static void 1914 mana_destroy_txq(struct mana_port_context *apc) 1915 { 1916 int i; 1917 1918 if (!apc->tx_qp) 1919 return; 1920 1921 for (i = 0; i < apc->num_queues; i++) { 1922 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); 1923 1924 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); 1925 1926 mana_deinit_txq(apc, &apc->tx_qp[i].txq); 1927 } 1928 1929 free(apc->tx_qp, M_DEVBUF); 1930 apc->tx_qp = NULL; 1931 } 1932 1933 static int 1934 mana_create_txq(struct mana_port_context *apc, if_t net) 1935 { 1936 struct mana_context *ac = apc->ac; 1937 struct gdma_dev *gd = ac->gdma_dev; 1938 struct mana_obj_spec wq_spec; 1939 struct mana_obj_spec cq_spec; 1940 struct gdma_queue_spec spec; 1941 struct gdma_context *gc; 1942 struct mana_txq *txq; 1943 struct mana_cq *cq; 1944 uint32_t txq_size; 1945 uint32_t cq_size; 1946 int err; 1947 int i; 1948 1949 apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp), 1950 M_DEVBUF, M_WAITOK | M_ZERO); 1951 if (!apc->tx_qp) 1952 return ENOMEM; 1953 1954 /* The minimum size of the WQE is 32 bytes, hence 1955 * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs 1956 * the SQ can store. This value is then used to size other queues 1957 * to prevent overflow. 
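     * The completion queue below is sized at one COMP_ENTRY_SIZE entry
     * per possible WQE for the same reason.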
     */
    txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
    KASSERT(IS_ALIGNED(txq_size, PAGE_SIZE),
        ("txq size not page aligned"));

    cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
    cq_size = ALIGN(cq_size, PAGE_SIZE);

    gc = gd->gdma_context;

    for (i = 0; i < apc->num_queues; i++) {
        apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;

        /* Create SQ */
        txq = &apc->tx_qp[i].txq;

        txq->ndev = net;
        txq->vp_offset = apc->tx_vp_offset;
        txq->idx = i;
        txq->alt_txq_idx = i;

        memset(&spec, 0, sizeof(spec));
        spec.type = GDMA_SQ;
        spec.monitor_avl_buf = true;
        spec.queue_size = txq_size;
        err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
        if (err)
            goto out;

        /* Create SQ's CQ */
        cq = &apc->tx_qp[i].tx_cq;
        cq->type = MANA_CQ_TYPE_TX;

        cq->txq = txq;

        memset(&spec, 0, sizeof(spec));
        spec.type = GDMA_CQ;
        spec.monitor_avl_buf = false;
        spec.queue_size = cq_size;
        spec.cq.callback = mana_schedule_task;
        spec.cq.parent_eq = ac->eqs[i].eq;
        spec.cq.context = cq;
        err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
        if (err)
            goto out;

        memset(&wq_spec, 0, sizeof(wq_spec));
        memset(&cq_spec, 0, sizeof(cq_spec));

        wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
        wq_spec.queue_size = txq->gdma_sq->queue_size;

        cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
        cq_spec.queue_size = cq->gdma_cq->queue_size;
        cq_spec.modr_ctx_id = 0;
        cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;

        err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
            &wq_spec, &cq_spec, &apc->tx_qp[i].tx_object);

        if (err)
            goto out;

        txq->gdma_sq->id = wq_spec.queue_index;
        cq->gdma_cq->id = cq_spec.queue_index;

        txq->gdma_sq->mem_info.dma_region_handle =
            GDMA_INVALID_DMA_REGION;
        cq->gdma_cq->mem_info.dma_region_handle =
            GDMA_INVALID_DMA_REGION;

        txq->gdma_txq_id = txq->gdma_sq->id;

        cq->gdma_id = cq->gdma_cq->id;

        mana_dbg(NULL,
            "txq %d, txq gdma id %d, txq cq gdma id %d\n",
            i, txq->gdma_txq_id, cq->gdma_id);

        if (cq->gdma_id >= gc->max_num_cqs) {
            if_printf(net, "CQ id %u too large.\n", cq->gdma_id);
            err = EINVAL;
            goto out;
        }

        gc->cq_table[cq->gdma_id] = cq->gdma_cq;

        /* Initialize tx specific data */
        txq->tx_buf_info = malloc(MAX_SEND_BUFFERS_PER_QUEUE *
            sizeof(struct mana_send_buf_info),
            M_DEVBUF, M_WAITOK | M_ZERO);
        if (unlikely(txq->tx_buf_info == NULL)) {
            if_printf(net,
                "Failed to allocate tx buf info for SQ %u\n",
                txq->gdma_sq->id);
            err = ENOMEM;
            goto out;
        }

        snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
            "mana:tx(%d)", i);
        mtx_init(&txq->txq_mtx, txq->txq_mtx_name, NULL, MTX_DEF);

        txq->txq_br = buf_ring_alloc(4 * MAX_SEND_BUFFERS_PER_QUEUE,
            M_DEVBUF, M_WAITOK, &txq->txq_mtx);
        if (unlikely(txq->txq_br == NULL)) {
            if_printf(net,
                "Failed to allocate buf ring for SQ %u\n",
                txq->gdma_sq->id);
            err = ENOMEM;
            goto out;
        }

        /* Allocate taskqueue for deferred send */
        TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
        txq->enqueue_tq = taskqueue_create_fast("mana_tx_enque",
            M_NOWAIT, taskqueue_thread_enqueue, &txq->enqueue_tq);
        if
		/* Allocate taskqueue for deferred send */
		TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
		txq->enqueue_tq = taskqueue_create_fast("mana_tx_enque",
		    M_NOWAIT, taskqueue_thread_enqueue, &txq->enqueue_tq);
		if (unlikely(txq->enqueue_tq == NULL)) {
			if_printf(net,
			    "Unable to create tx %d enqueue task queue\n", i);
			err = ENOMEM;
			goto out;
		}
		taskqueue_start_threads(&txq->enqueue_tq, 1, PI_NET,
		    "mana txq p%u-tx%d", apc->port_idx, i);

		mana_alloc_counters((counter_u64_t *)&txq->stats,
		    sizeof(txq->stats));

		/* Allocate and start the cleanup task on CQ */
		cq->do_not_ring_db = false;

		NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
		cq->cleanup_tq =
		    taskqueue_create_fast("mana tx cq cleanup",
		        M_WAITOK, taskqueue_thread_enqueue,
		        &cq->cleanup_tq);

		if (apc->last_tx_cq_bind_cpu < 0)
			apc->last_tx_cq_bind_cpu = CPU_FIRST();
		cq->cpu = apc->last_tx_cq_bind_cpu;
		apc->last_tx_cq_bind_cpu = CPU_NEXT(apc->last_tx_cq_bind_cpu);

		if (apc->bind_cleanup_thread_cpu) {
			cpuset_t cpu_mask;
			CPU_SETOF(cq->cpu, &cpu_mask);
			taskqueue_start_threads_cpuset(&cq->cleanup_tq,
			    1, PI_NET, &cpu_mask,
			    "mana cq p%u-tx%u-cpu%d",
			    apc->port_idx, txq->idx, cq->cpu);
		} else {
			taskqueue_start_threads(&cq->cleanup_tq, 1,
			    PI_NET, "mana cq p%u-tx%u",
			    apc->port_idx, txq->idx);
		}

		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
	}

	return 0;
out:
	mana_destroy_txq(apc);
	return err;
}

static void
mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq,
    bool validate_state)
{
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct mana_recv_buf_oob *rx_oob;
	int i;

	if (!rxq)
		return;

	if (validate_state) {
		/*
		 * XXX Cancel and drain cleanup task queue here.
		 */
		;
	}

	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);

	mana_deinit_cq(apc, &rxq->rx_cq);

	mana_free_counters((counter_u64_t *)&rxq->stats,
	    sizeof(rxq->stats));

	/* Free LRO resources */
	tcp_lro_free(&rxq->lro);

	for (i = 0; i < rxq->num_rx_buf; i++) {
		rx_oob = &rxq->rx_oobs[i];

		if (rx_oob->mbuf)
			mana_unload_rx_mbuf(apc, rxq, rx_oob, true);

		bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
	}

	if (rxq->gdma_rq)
		mana_gd_destroy_queue(gc, rxq->gdma_rq);

	free(rxq, M_DEVBUF);
}

#define MANA_WQE_HEADER_SIZE	16
#define MANA_WQE_SGE_SIZE	16

static int
mana_alloc_rx_wqe(struct mana_port_context *apc,
    struct mana_rxq *rxq, uint32_t *rxq_size, uint32_t *cq_size)
{
	struct mana_recv_buf_oob *rx_oob;
	uint32_t buf_idx;
	int err;

	if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) {
		mana_err(NULL,
		    "WARNING: Invalid rxq datasize %u\n", rxq->datasize);
	}

	*rxq_size = 0;
	*cq_size = 0;

	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
		rx_oob = &rxq->rx_oobs[buf_idx];
		memset(rx_oob, 0, sizeof(*rx_oob));

		err = bus_dmamap_create(apc->rx_buf_tag, 0,
		    &rx_oob->dma_map);
		if (err) {
			mana_err(NULL,
			    "Failed to create rx DMA map for buf %d\n",
			    buf_idx);
			return err;
		}

		err = mana_load_rx_mbuf(apc, rxq, rx_oob, true);
		if (err) {
			mana_err(NULL,
			    "Failed to load rx mbuf for buf %d\n",
			    buf_idx);
			bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
			return err;
		}

		rx_oob->wqe_req.sgl = rx_oob->sgl;
		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
		rx_oob->wqe_req.inline_oob_size = 0;
		rx_oob->wqe_req.inline_oob_data = NULL;
		rx_oob->wqe_req.flags = 0;
		rx_oob->wqe_req.client_data_unit = 0;

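		/*
		 * Per-buffer accounting (illustrative): each receive WQE
		 * needs ALIGN(MANA_WQE_HEADER_SIZE +
		 * MANA_WQE_SGE_SIZE * num_sge, 32) bytes of RQ space, so a
		 * single-SGE buffer costs ALIGN(16 + 16, 32) = 32 bytes,
		 * and each buffer also reserves one COMP_ENTRY_SIZE slot
		 * in the RQ's completion queue.
		 */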
		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
		    MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
		*cq_size += COMP_ENTRY_SIZE;
	}

	return 0;
}

static int
mana_push_wqe(struct mana_rxq *rxq)
{
	struct mana_recv_buf_oob *rx_oob;
	uint32_t buf_idx;
	int err;

	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
		rx_oob = &rxq->rx_oobs[buf_idx];

		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
		    &rx_oob->wqe_inf);
		if (err)
			return ENOSPC;
	}

	return 0;
}

static struct mana_rxq *
mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
    struct mana_eq *eq, if_t ndev)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct mana_obj_spec wq_spec;
	struct mana_obj_spec cq_spec;
	struct gdma_queue_spec spec;
	struct mana_cq *cq = NULL;
	uint32_t cq_size, rq_size;
	struct gdma_context *gc;
	struct mana_rxq *rxq;
	int err;

	gc = gd->gdma_context;

	rxq = malloc(sizeof(*rxq) +
	    RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (!rxq)
		return NULL;

	rxq->ndev = ndev;
	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
	rxq->rxq_idx = rxq_idx;
	/*
	 * The minimum receive buffer size is MCLBYTES (a 2048-byte mbuf
	 * cluster); the maximum currently allowed is 4096 bytes.
	 */
	rxq->datasize = ALIGN(apc->frame_size, MCLBYTES);
	if (rxq->datasize > MAX_FRAME_SIZE)
		rxq->datasize = MAX_FRAME_SIZE;

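	/*
	 * Example of the sizing above (assuming MAX_FRAME_SIZE is 4096, as
	 * the comment states): a 1518-byte frame_size rounds up to one
	 * 2048-byte cluster, a 3000-byte frame_size rounds up to 4096, and
	 * anything larger is clamped to 4096.
	 */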
	mana_dbg(NULL, "Setting rxq %d datasize %d\n",
	    rxq_idx, rxq->datasize);

	rxq->rxobj = INVALID_MANA_HANDLE;

	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
	if (err)
		goto out;

	/* Create LRO for the RQ */
	if (if_getcapenable(ndev) & IFCAP_LRO) {
		err = tcp_lro_init(&rxq->lro);
		if (err) {
			if_printf(ndev, "Failed to create LRO for rxq %d\n",
			    rxq_idx);
		} else {
			rxq->lro.ifp = ndev;
		}
	}

	mana_alloc_counters((counter_u64_t *)&rxq->stats,
	    sizeof(rxq->stats));

	rq_size = ALIGN(rq_size, PAGE_SIZE);
	cq_size = ALIGN(cq_size, PAGE_SIZE);

	/* Create RQ */
	memset(&spec, 0, sizeof(spec));
	spec.type = GDMA_RQ;
	spec.monitor_avl_buf = true;
	spec.queue_size = rq_size;
	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
	if (err)
		goto out;

	/* Create RQ's CQ */
	cq = &rxq->rx_cq;
	cq->type = MANA_CQ_TYPE_RX;
	cq->rxq = rxq;

	memset(&spec, 0, sizeof(spec));
	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = cq_size;
	spec.cq.callback = mana_schedule_task;
	spec.cq.parent_eq = eq->eq;
	spec.cq.context = cq;
	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
	if (err)
		goto out;

	memset(&wq_spec, 0, sizeof(wq_spec));
	memset(&cq_spec, 0, sizeof(cq_spec));
	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
	wq_spec.queue_size = rxq->gdma_rq->queue_size;

	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
	cq_spec.queue_size = cq->gdma_cq->queue_size;
	cq_spec.modr_ctx_id = 0;
	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;

	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
	    &wq_spec, &cq_spec, &rxq->rxobj);
	if (err)
		goto out;

	rxq->gdma_rq->id = wq_spec.queue_index;
	cq->gdma_cq->id = cq_spec.queue_index;

	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;

	rxq->gdma_id = rxq->gdma_rq->id;
	cq->gdma_id = cq->gdma_cq->id;

	err = mana_push_wqe(rxq);
	if (err)
		goto out;

	if (cq->gdma_id >= gc->max_num_cqs) {
		err = EINVAL;
		goto out;
	}

	gc->cq_table[cq->gdma_id] = cq->gdma_cq;

	/* Allocate and start the cleanup task on CQ */
	cq->do_not_ring_db = false;

	NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
	cq->cleanup_tq =
	    taskqueue_create_fast("mana rx cq cleanup",
	        M_WAITOK, taskqueue_thread_enqueue,
	        &cq->cleanup_tq);

	if (apc->last_rx_cq_bind_cpu < 0)
		apc->last_rx_cq_bind_cpu = CPU_FIRST();
	cq->cpu = apc->last_rx_cq_bind_cpu;
	apc->last_rx_cq_bind_cpu = CPU_NEXT(apc->last_rx_cq_bind_cpu);

	if (apc->bind_cleanup_thread_cpu) {
		cpuset_t cpu_mask;
		CPU_SETOF(cq->cpu, &cpu_mask);
		taskqueue_start_threads_cpuset(&cq->cleanup_tq,
		    1, PI_NET, &cpu_mask,
		    "mana cq p%u-rx%u-cpu%d",
		    apc->port_idx, rxq->rxq_idx, cq->cpu);
	} else {
		taskqueue_start_threads(&cq->cleanup_tq, 1,
		    PI_NET, "mana cq p%u-rx%u",
		    apc->port_idx, rxq->rxq_idx);
	}

	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
out:
	if (!err)
		return rxq;

	if_printf(ndev, "Failed to create RXQ: err = %d\n", err);

	mana_destroy_rxq(apc, rxq, false);

	if (cq)
		mana_deinit_cq(apc, cq);

	return NULL;
}

static int
mana_add_rx_queues(struct mana_port_context *apc, if_t ndev)
{
	struct mana_context *ac = apc->ac;
	struct mana_rxq *rxq;
	int err = 0;
	int i;

	for (i = 0; i < apc->num_queues; i++) {
		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
		if (!rxq) {
			err = ENOMEM;
			goto out;
		}

		apc->rxqs[i] = rxq;
	}

	apc->default_rxobj = apc->rxqs[0]->rxobj;
out:
	return err;
}

static void
mana_destroy_vport(struct mana_port_context *apc)
{
	struct mana_rxq *rxq;
	uint32_t rxq_idx;

	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
		rxq = apc->rxqs[rxq_idx];
		if (!rxq)
			continue;

		mana_destroy_rxq(apc, rxq, true);
		apc->rxqs[rxq_idx] = NULL;
	}

	mana_destroy_txq(apc);

	mana_uncfg_vport(apc);
}

static int
mana_create_vport(struct mana_port_context *apc, if_t net)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	int err;

	apc->default_rxobj = INVALID_MANA_HANDLE;

	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
	if (err)
		return err;

	return mana_create_txq(apc, net);
}

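/*
 * Round-robin illustration: with num_queues = 4 the indirection table
 * below becomes {0, 1, 2, 3, 0, 1, ...} across all
 * MANA_INDIRECT_TABLE_SIZE entries; mana_config_rss() later translates
 * each queue index into the matching rxq's rxobj handle.
 */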
static void
mana_rss_table_init(struct mana_port_context *apc)
{
	int i;

	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
		apc->indir_table[i] = i % apc->num_queues;
}

int
mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
    bool update_hash, bool update_tab)
{
	uint32_t queue_idx;
	int err;
	int i;

	if (update_tab) {
		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
			queue_idx = apc->indir_table[i];
			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
		}
	}

	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
	if (err)
		return err;

	mana_fence_rqs(apc);

	return 0;
}

static int
mana_init_port(if_t ndev)
{
	struct mana_port_context *apc = if_getsoftc(ndev);
	uint32_t max_txq, max_rxq, max_queues;
	int port_idx = apc->port_idx;
	uint32_t num_indirect_entries;
	int err;

	err = mana_init_port_context(apc);
	if (err)
		return err;

	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
	    &num_indirect_entries);
	if (err) {
		if_printf(ndev, "Failed to query info for vPort %d\n",
		    port_idx);
		goto reset_apc;
	}

	max_queues = min_t(uint32_t, max_txq, max_rxq);
	if (apc->max_queues > max_queues)
		apc->max_queues = max_queues;

	if (apc->num_queues > apc->max_queues)
		apc->num_queues = apc->max_queues;

	return 0;

reset_apc:
	bus_dma_tag_destroy(apc->rx_buf_tag);
	apc->rx_buf_tag = NULL;
	free(apc->rxqs, M_DEVBUF);
	apc->rxqs = NULL;
	return err;
}

int
mana_alloc_queues(if_t ndev)
{
	struct mana_port_context *apc = if_getsoftc(ndev);
	int err;

	err = mana_create_vport(apc, ndev);
	if (err)
		return err;

	err = mana_add_rx_queues(apc, ndev);
	if (err)
		goto destroy_vport;

	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;

	mana_rss_table_init(apc);

	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
	if (err)
		goto destroy_vport;

	return 0;

destroy_vport:
	mana_destroy_vport(apc);
	return err;
}

static int
mana_up(struct mana_port_context *apc)
{
	int err;

	mana_dbg(NULL, "mana_up called\n");

	err = mana_alloc_queues(apc->ndev);
	if (err) {
		mana_err(NULL, "Failed to allocate mana queues: %d\n", err);
		return err;
	}

	/* Add queue specific sysctl */
	mana_sysctl_add_queues(apc);

	apc->port_is_up = true;

	/* Ensure port state updated before txq state */
	wmb();

	if_link_state_change(apc->ndev, LINK_STATE_UP);
	if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	return 0;
}

static void
mana_init(void *arg)
{
	struct mana_port_context *apc = (struct mana_port_context *)arg;

	MANA_APC_LOCK_LOCK(apc);
	if (!apc->port_is_up) {
		mana_up(apc);
	}
	MANA_APC_LOCK_UNLOCK(apc);
}

static int
mana_dealloc_queues(if_t ndev)
{
	struct mana_port_context *apc = if_getsoftc(ndev);
	struct mana_txq *txq;
	int i, err;

	if (apc->port_is_up)
		return EINVAL;

	/* No packet can be transmitted now since apc->port_is_up is false.
	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
	 * a txq because it may not see apc->port_is_up being cleared to
	 * false in time, but it doesn't matter since mana_start_xmit()
	 * drops any new packets due to apc->port_is_up being false.
	 *
	 * Drain all the in-flight TX packets.
	 */
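	/*
	 * The drain below works in three steps: doorbell rings are
	 * suppressed on both the TX and RX completion queues
	 * (do_not_ring_db), the TX CQ cleanup task is queued once more to
	 * reap whatever completions are already outstanding, and the loop
	 * then waits for each txq's pending_sends to drop to zero.
	 */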
	for (i = 0; i < apc->num_queues; i++) {
		txq = &apc->tx_qp[i].txq;

		struct mana_cq *tx_cq = &apc->tx_qp[i].tx_cq;
		struct mana_cq *rx_cq = &(apc->rxqs[i]->rx_cq);

		tx_cq->do_not_ring_db = true;
		rx_cq->do_not_ring_db = true;

		/* Schedule a cleanup task */
		taskqueue_enqueue(tx_cq->cleanup_tq, &tx_cq->cleanup_task);

		while (atomic_read(&txq->pending_sends) > 0)
			usleep_range(1000, 2000);
	}

	/* We're 100% sure the queues can no longer be woken up, because
	 * we're sure now mana_poll_tx_cq() can't be running.
	 */

	apc->rss_state = TRI_STATE_FALSE;
	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
	if (err) {
		if_printf(ndev, "Failed to disable vPort: %d\n", err);
		return err;
	}

	mana_destroy_vport(apc);

	return 0;
}

static int
mana_down(struct mana_port_context *apc)
{
	int err = 0;

	apc->port_st_save = apc->port_is_up;
	apc->port_is_up = false;

	/* Ensure port state updated before txq state */
	wmb();

	if (apc->port_st_save) {
		if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE,
		    IFF_DRV_RUNNING);
		if_link_state_change(apc->ndev, LINK_STATE_DOWN);

		mana_sysctl_free_queues(apc);

		err = mana_dealloc_queues(apc->ndev);
		if (err) {
			if_printf(apc->ndev,
			    "Failed to bring down mana interface: %d\n", err);
		}
	}

	return err;
}

int
mana_detach(if_t ndev)
{
	struct mana_port_context *apc = if_getsoftc(ndev);
	int err;

	ether_ifdetach(ndev);

	if (!apc)
		return 0;

	MANA_APC_LOCK_LOCK(apc);
	err = mana_down(apc);
	MANA_APC_LOCK_UNLOCK(apc);

	mana_cleanup_port_context(apc);

	MANA_APC_LOCK_DESTROY(apc);

	free(apc, M_DEVBUF);

	return err;
}

static int
mana_probe_port(struct mana_context *ac, int port_idx,
    if_t *ndev_storage)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_port_context *apc;
	if_t ndev;
	int err;

	ndev = if_alloc_dev(IFT_ETHER, gc->dev);
	if (!ndev) {
		mana_err(NULL, "Failed to allocate ifnet struct\n");
		return ENOMEM;
	}

	*ndev_storage = ndev;

	apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
	if (!apc) {
		mana_err(NULL, "Failed to allocate port context\n");
		err = ENOMEM;
		goto free_net;
	}

	apc->ac = ac;
	apc->ndev = ndev;
	apc->max_queues = gc->max_num_queues;
	apc->num_queues = min_t(unsigned int,
	    gc->max_num_queues, MANA_MAX_NUM_QUEUES);
	apc->port_handle = INVALID_MANA_HANDLE;
	apc->port_idx = port_idx;
	apc->frame_size = DEFAULT_FRAME_SIZE;
	apc->last_tx_cq_bind_cpu = -1;
	apc->last_rx_cq_bind_cpu = -1;
	apc->vport_use_count = 0;

	MANA_APC_LOCK_INIT(apc);

	if_initname(ndev, device_get_name(gc->dev), port_idx);
	if_setdev(ndev, gc->dev);
	if_setsoftc(ndev, apc);

	if_setflags(ndev, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setinitfn(ndev, mana_init);
	if_settransmitfn(ndev, mana_start_xmit);
	if_setqflushfn(ndev, mana_qflush);
	if_setioctlfn(ndev, mana_ioctl);
	if_setgetcounterfn(ndev, mana_get_counter);

	if_setmtu(ndev, ETHERMTU);
	if_setbaudrate(ndev, IF_Gbps(100));

	mana_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);

	err = mana_init_port(ndev);
	if (err)
		goto reset_apc;

	if_setcapabilitiesbit(ndev,
	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
	    IFCAP_TSO4 | IFCAP_TSO6 |
	    IFCAP_LRO | IFCAP_LINKSTATE, 0);

	/* Enable all available capabilities by default. */
	if_setcapenable(ndev, if_getcapabilities(ndev));

	/* TSO parameters */
	if_sethwtsomax(ndev, MAX_MBUF_FRAGS * MANA_TSO_MAXSEG_SZ -
	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
	if_sethwtsomaxsegcount(ndev, MAX_MBUF_FRAGS);
	if_sethwtsomaxsegsize(ndev, PAGE_SIZE);

	ifmedia_init(&apc->media, IFM_IMASK,
	    mana_ifmedia_change, mana_ifmedia_status);
	ifmedia_add(&apc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&apc->media, IFM_ETHER | IFM_AUTO);

	ether_ifattach(ndev, apc->mac_addr);

	/* Initialize statistics */
	mana_alloc_counters((counter_u64_t *)&apc->port_stats,
	    sizeof(struct mana_port_stats));
	mana_sysctl_add_port(apc);

	/* Tell the stack that the interface is not active */
	if_setdrvflagbits(ndev, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	return 0;

reset_apc:
	free(apc, M_DEVBUF);
free_net:
	*ndev_storage = NULL;
	if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
	if_free(ndev);
	return err;
}

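/*
 * Top-level attach path: register this client with the GDMA bus layer,
 * create the event queues shared by all ports, query the device for how
 * many vPorts it actually exposes, and then probe each port in turn.  On
 * any failure the whole device is torn down again via mana_remove().
 */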
int
mana_probe(struct gdma_dev *gd)
{
	struct gdma_context *gc = gd->gdma_context;
	device_t dev = gc->dev;
	struct mana_context *ac;
	int err;
	int i;

	device_printf(dev, "%s protocol version: %d.%d.%d\n", DEVICE_NAME,
	    MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);

	err = mana_gd_register_device(gd);
	if (err)
		return err;

	ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
	if (!ac)
		return ENOMEM;

	ac->gdma_dev = gd;
	ac->num_ports = 1;
	gd->driver_data = ac;

	err = mana_create_eq(ac);
	if (err)
		goto out;

	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
	    MANA_MICRO_VERSION, &ac->num_ports);
	if (err)
		goto out;

	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
		ac->num_ports = MAX_PORTS_IN_MANA_DEV;

	for (i = 0; i < ac->num_ports; i++) {
		err = mana_probe_port(ac, i, &ac->ports[i]);
		if (err) {
			device_printf(dev,
			    "Failed to probe mana port %d\n", i);
			break;
		}
	}

out:
	if (err)
		mana_remove(gd);

	return err;
}

void
mana_remove(struct gdma_dev *gd)
{
	struct gdma_context *gc = gd->gdma_context;
	struct mana_context *ac = gd->driver_data;
	device_t dev = gc->dev;
	if_t ndev;
	int i;

	for (i = 0; i < ac->num_ports; i++) {
		ndev = ac->ports[i];
		if (!ndev) {
			if (i == 0)
				device_printf(dev, "No net device to remove\n");
			goto out;
		}

		mana_detach(ndev);

		if_free(ndev);
	}

	mana_destroy_eq(ac);

out:
	mana_gd_deregister_device(gd);
	gd->driver_data = NULL;
	gd->gdma_context = NULL;
	free(ac, M_DEVBUF);
}