/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef DEV_NETMAP
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <machine/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"

extern int fl_pad;	/* XXXNM */
extern int spg_len;	/* XXXNM */
extern int fl_pktshift;	/* XXXNM */

SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe netmap parameters");

/*
 * 0 = normal netmap rx
 * 1 = black hole
 * 2 = supermassive black hole (buffer packing enabled)
 */
int black_hole = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RDTUN, &black_hole, 0,
    "Sink incoming packets.");

int rx_ndesc = 256;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN,
    &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated.");

int holdoff_tmr_idx = 2;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN,
    &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues.");

/* netmap ifnet routines */
static void cxgbe_nm_init(void *);
static int cxgbe_nm_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_nm_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_nm_qflush(struct ifnet *);

static int cxgbe_nm_init_synchronized(struct port_info *);
static int cxgbe_nm_uninit_synchronized(struct port_info *);
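/*
 * A note on serialization (a reader's summary of existing behavior, not a new
 * rule): the if_init/ioctl style configuration paths below all run inside
 * begin_synchronized_op()/end_synchronized_op() on the adapter, and the
 * helpers that rely on that protection assert it with ASSERT_SYNCHRONIZED_OP.
 */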
static void
cxgbe_nm_init(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nminit") != 0)
		return;
	cxgbe_nm_init_synchronized(pi);
	end_synchronized_op(sc, 0);

	return;
}

static int
cxgbe_nm_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->nm_ifp;
	int rc = 0;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return (0);	/* already running */

	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	if (!(pi->flags & PORT_INIT_DONE) &&
	    ((rc = port_full_init(pi)) != 0))
		return (rc);	/* error message displayed already */

	rc = update_mac_settings(ifp, XGMAC_ALL);
	if (rc)
		return (rc);	/* error message displayed already */

	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return (rc);
}

static int
cxgbe_nm_uninit_synchronized(struct port_info *pi)
{
#ifdef INVARIANTS
	struct adapter *sc = pi->adapter;
#endif
	struct ifnet *ifp = pi->nm_ifp;

	ASSERT_SYNCHRONIZED_OP(sc);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	return (0);
}

static int
cxgbe_nm_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	MPASS(pi->nm_ifp == ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nmtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MTU);
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nflg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->nmif_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else
				rc = cxgbe_nm_init_synchronized(pi);
			pi->nmif_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_nm_uninit_synchronized(pi);
		end_synchronized_op(sc, 0);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4nmulti");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->nm_media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}
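/*
 * The two handlers below are deliberate stubs (summarizing what the code
 * does, for clarity): the netmap-only ifnet never transmits through the
 * regular kernel stack, so anything handed to if_transmit is simply freed
 * and qflush has nothing to flush.
 */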
static int
cxgbe_nm_transmit(struct ifnet *ifp, struct mbuf *m)
{

	m_freem(m);
	return (0);
}

static void
cxgbe_nm_qflush(struct ifnet *ifp)
{

	return;
}
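/*
 * Hardware queue setup for a netmap rx queue, in outline (a summary of the
 * code below, not a spec): an ingress queue with an attached freelist is
 * created with FW_IQ_CMD, the returned context ids are entered in the
 * adapter's iq/eq maps, the congestion manager context is programmed on T5
 * when a congestion channel map was supplied (cong >= 0), and the queue is
 * finally armed with the configured holdoff timer.
 */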
static int
alloc_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int cong)
{
	int rc, cntxt_id, i;
	__be32 v;
	struct adapter *sc = pi->adapter;
	struct netmap_adapter *na = NA(pi->nm_ifp);
	struct fw_iq_cmd c;

	MPASS(na != NULL);
	MPASS(nm_rxq->iq_desc != NULL);
	MPASS(nm_rxq->fl_desc != NULL);

	bzero(nm_rxq->iq_desc, pi->qsize_rxq * IQ_ESIZE);
	bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + spg_len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));
	if (pi->flags & INTR_NM_RXQ) {
		KASSERT(nm_rxq->intr_idx < sc->intr_count,
		    ("%s: invalid direct intr_idx %d", __func__,
		    nm_rxq->intr_idx));
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
	} else {
		CXGBE_UNIMPLEMENTED(__func__);	/* XXXNM: needs review */
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx) |
		    F_FW_IQ_CMD_IQANDST;
	}
	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(pi->nm_viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
	c.iqsize = htobe16(pi->qsize_rxq);
	c.iqaddr = htobe64(nm_rxq->iq_ba);
	if (cong >= 0) {
		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
		    V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF |
		    F_FW_IQ_CMD_FL0CONGEN);
	}
	c.iqns_to_fl0congen |=
	    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
	    (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
	    (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0));
	c.fl0dcaen_to_fl0cidxfthresh =
	    htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_128B) |
	    V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
	c.fl0size = htobe16(na->num_rx_desc / 8 + spg_len / EQ_ESIZE);
	c.fl0addr = htobe64(nm_rxq->fl_ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create netmap ingress queue: %d\n", rc);
		return (rc);
	}

	nm_rxq->iq_cidx = 0;
	MPASS(nm_rxq->iq_sidx == pi->qsize_rxq - spg_len / IQ_ESIZE);
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->iq_cntxt_id = be16toh(c.iqid);
	nm_rxq->iq_abs_id = be16toh(c.physiqid);
	cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
	if (cntxt_id >= sc->sge.niq) {
		panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.niq - 1);
	}
	sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
	cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq) {
		panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	}
	sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_db_val = F_DBPRIO | V_QID(nm_rxq->fl_cntxt_id) | V_PIDX(0);
	if (is_t5(sc))
		nm_rxq->fl_db_val |= F_DBTYPE;

	if (is_t5(sc) && cong >= 0) {
		uint32_t param, val;

		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
		    V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id);
		if (cong == 0)
			val = 1 << 19;
		else {
			val = 2 << 19;
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					val |= 1 << (i << 2);
			}
		}

		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* report error but carry on */
			device_printf(sc->dev,
			    "failed to set congestion manager context for "
			    "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc);
		}
	}

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
	    V_INGRESSQID(nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));

	return (rc);
}

static int
free_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq)
{
	struct adapter *sc = pi->adapter;
	int rc;

	rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
	    nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n",
		    __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc);
	return (rc);
}
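/*
 * Netmap tx queue setup, in outline (again a summary of the code that
 * follows): an ethernet egress queue is created with FW_EQ_ETH_CMD and bound
 * to the corresponding netmap rx queue's ingress queue for egress updates,
 * the returned context id is entered in the eq map, and the doorbell
 * mechanism is chosen from what the adapter advertises (user doorbells,
 * write-combined doorbell writes, or the kernel doorbell register).
 */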
static int
alloc_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
{
	int rc, cntxt_id;
	size_t len;
	struct adapter *sc = pi->adapter;
	struct netmap_adapter *na = NA(pi->nm_ifp);
	struct fw_eq_eth_cmd c;

	MPASS(na != NULL);
	MPASS(nm_txq->desc != NULL);

	len = na->num_tx_desc * EQ_ESIZE + spg_len;
	bzero(nm_txq->desc, len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(pi->nm_viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
	    V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
	c.eqaddr = htobe64(nm_txq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(pi->dev,
		    "failed to create netmap egress queue: %d\n", rc);
		return (rc);
	}

	nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = (void *)nm_txq;

	nm_txq->pidx = nm_txq->cidx = 0;
	MPASS(nm_txq->sidx == na->num_tx_desc);
	nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;

	nm_txq->doorbells = sc->doorbells;
	if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
	    isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
	    isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
		uint32_t s_qpp = sc->sge.eq_s_qpp;
		uint32_t mask = (1 << s_qpp) - 1;
		volatile uint8_t *udb;

		udb = sc->udbs_base + UDBS_DB_OFFSET;
		udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
		nm_txq->udb_qid = nm_txq->cntxt_id & mask;
		if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
			clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
		else {
			udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
			nm_txq->udb_qid = 0;
		}
		nm_txq->udb = (volatile void *)udb;
	}

	return (rc);
}

static int
free_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
{
	struct adapter *sc = pi->adapter;
	int rc;

	rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__,
		    nm_txq->cntxt_id, rc);
	return (rc);
}
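/*
 * Switching the interface into netmap mode, in outline (a summary of
 * cxgbe_netmap_on below): find the hardware buffer size index that matches
 * NETMAP_BUF_SIZE, create the hardware rx/tx queues, seed each freelist with
 * netmap buffers (the hardware pidx is expressed in units of 8 buffers),
 * program the RSS indirection table with the netmap ingress queue ids, and
 * enable the netmap virtual interface.
 */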
static int
cxgbe_netmap_on(struct adapter *sc, struct port_info *pi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	struct netmap_slot *slot;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
	int rc, i, j, hwidx;
	struct hw_buf_info *hwb;
	uint16_t *rss;

	ASSERT_SYNCHRONIZED_OP(sc);

	if ((pi->flags & PORT_INIT_DONE) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (EAGAIN);

	hwb = &sc->sge.hw_buf_info[0];
	for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
		if (hwb->size == NETMAP_BUF_SIZE(na))
			break;
	}
	if (i >= SGE_FLBUF_SIZES) {
		if_printf(ifp, "no hwidx for netmap buffer size %d.\n",
		    NETMAP_BUF_SIZE(na));
		return (ENXIO);
	}
	hwidx = i;

	/* Must set caps before calling netmap_reset */
	nm_set_native_flags(na);

	for_each_nm_rxq(pi, i, nm_rxq) {
		alloc_nm_rxq_hwq(pi, nm_rxq, tnl_cong(pi));
		nm_rxq->fl_hwidx = hwidx;
		slot = netmap_reset(na, NR_RX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */

		/* We deal with 8 bufs at a time */
		MPASS((na->num_rx_desc & 7) == 0);
		MPASS(na->num_rx_desc == nm_rxq->fl_sidx);
		for (j = 0; j < nm_rxq->fl_sidx; j++) {
			uint64_t ba;

			PNMB(na, &slot[j], &ba);
			MPASS(ba != 0);
			nm_rxq->fl_desc[j] = htobe64(ba | hwidx);
		}
		j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8;
		MPASS((j & 7) == 0);
		j /= 8;	/* driver pidx to hardware pidx */
		wmb();
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
		    nm_rxq->fl_db_val | V_PIDX(j));
	}

	for_each_nm_txq(pi, i, nm_txq) {
		alloc_nm_txq_hwq(pi, nm_txq);
		slot = netmap_reset(na, NR_TX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */
	}

	rss = malloc(pi->nm_rss_size * sizeof(*rss), M_CXGBE, M_ZERO |
	    M_WAITOK);
	for (i = 0; i < pi->nm_rss_size;) {
		for_each_nm_rxq(pi, j, nm_rxq) {
			rss[i++] = nm_rxq->iq_abs_id;
			if (i == pi->nm_rss_size)
				break;
		}
	}
	rc = -t4_config_rss_range(sc, sc->mbox, pi->nm_viid, 0, pi->nm_rss_size,
	    rss, pi->nm_rss_size);
	if (rc != 0)
		if_printf(ifp, "netmap rss_config failed: %d\n", rc);
	free(rss, M_CXGBE);

	rc = -t4_enable_vi(sc, sc->mbox, pi->nm_viid, true, true);
	if (rc != 0)
		if_printf(ifp, "netmap enable_vi failed: %d\n", rc);

	return (rc);
}
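/*
 * Switching netmap mode back off (summarizing cxgbe_netmap_off below): the
 * virtual interface is disabled first, then the driver waits for the hardware
 * to consume everything it was given -- the status page's pidx must catch up
 * with the driver's pidx, and its cidx must then catch up with that pidx --
 * before the hardware queues are freed.
 */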
static int
cxgbe_netmap_off(struct adapter *sc, struct port_info *pi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	int rc, i;
	struct sge_nm_txq *nm_txq;
	struct sge_nm_rxq *nm_rxq;

	ASSERT_SYNCHRONIZED_OP(sc);

	rc = -t4_enable_vi(sc, sc->mbox, pi->nm_viid, false, false);
	if (rc != 0)
		if_printf(ifp, "netmap disable_vi failed: %d\n", rc);
	nm_clear_native_flags(na);

	for_each_nm_txq(pi, i, nm_txq) {
		struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];

		/* Wait for hw pidx to catch up ... */
		while (be16toh(nm_txq->pidx) != spg->pidx)
			pause("nmpidx", 1);

		/* ... and then for the cidx. */
		while (spg->pidx != spg->cidx)
			pause("nmcidx", 1);

		free_nm_txq_hwq(pi, nm_txq);
	}
	for_each_nm_rxq(pi, i, nm_rxq) {
		free_nm_rxq_hwq(pi, nm_rxq);
	}

	return (rc);
}

static int
cxgbe_netmap_reg(struct netmap_adapter *na, int on)
{
	struct ifnet *ifp = na->ifp;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	int rc;

	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nmreg");
	if (rc != 0)
		return (rc);
	if (on)
		rc = cxgbe_netmap_on(sc, pi, ifp, na);
	else
		rc = cxgbe_netmap_off(sc, pi, ifp, na);
	end_synchronized_op(sc, 0);

	return (rc);
}

/* How many packets can a single type1 WR carry in n descriptors */
static inline int
ndesc_to_npkt(const int n)
{

	MPASS(n > 0 && n <= SGE_MAX_WR_NDESC);

	return (n * 2 - 1);
}
#define MAX_NPKT_IN_TYPE1_WR	(ndesc_to_npkt(SGE_MAX_WR_NDESC))

/* Space (in descriptors) needed for a type1 WR that carries n packets */
static inline int
npkt_to_ndesc(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return ((n + 2) / 2);
}

/* Space (in 16B units) needed for a type1 WR that carries n packets */
static inline int
npkt_to_len16(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return (n * 2 + 1);
}
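/*
 * Where those constants come from (a worked example rather than anything
 * normative): a type1 FW_ETH_TX_PKTS_WR is a 16B work request header followed
 * by, per frame, a cpl_tx_pkt_core (16B) and a single-entry ulptx_sgl (16B),
 * i.e. 16 + 32n bytes = (2n + 1) 16B units for n frames -- hence
 * npkt_to_len16().  With 64B (EQ_ESIZE) hardware descriptors that rounds up
 * to (n + 2) / 2 descriptors per WR (npkt_to_ndesc()), and inverting it gives
 * 2n - 1 frames in n descriptors (ndesc_to_npkt()): 1 frame fits in 1
 * descriptor, 3 frames in 2, 7 frames in 4, and so on.
 */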
#define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx)

static void
ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
{
	int n;
	u_int db = nm_txq->doorbells;

	MPASS(nm_txq->pidx != nm_txq->dbidx);

	n = NMIDXDIFF(nm_txq, dbidx);
	if (n > 1)
		clrbit(&db, DOORBELL_WCWR);
	wmb();

	switch (ffs(db) - 1) {
	case DOORBELL_UDB:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		break;

	case DOORBELL_WCWR: {
		volatile uint64_t *dst, *src;

		/*
		 * Queues whose 128B doorbell segment fits in the page do not
		 * use relative qid (udb_qid is always 0).  Only queues with
		 * doorbell segments can do WCWR.
		 */
		KASSERT(nm_txq->udb_qid == 0 && n == 1,
		    ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
		    __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));

		dst = (volatile void *)((uintptr_t)nm_txq->udb +
		    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
		src = (void *)&nm_txq->desc[nm_txq->dbidx];
		while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
			*dst++ = *src++;
		wmb();
		break;
	}

	case DOORBELL_UDBWC:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		wmb();
		break;

	case DOORBELL_KDB:
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
		    V_QID(nm_txq->cntxt_id) | V_PIDX(n));
		break;
	}
	nm_txq->dbidx = nm_txq->pidx;
}

int lazy_tx_credit_flush = 1;

/*
 * Write work requests to send 'npkt' frames and ring the doorbell to send them
 * on their way.  No need to check for wraparound.
 */
static void
cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
    struct netmap_kring *kring, int npkt, int npkt_remaining, int txcsum)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot;
	const u_int lim = kring->nkr_num_slots - 1;
	struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
	uint16_t len;
	uint64_t ba;
	struct cpl_tx_pkt_core *cpl;
	struct ulptx_sgl *usgl;
	int i, n;

	while (npkt) {
		n = min(npkt, MAX_NPKT_IN_TYPE1_WR);
		len = 0;

		wr = (void *)&nm_txq->desc[nm_txq->pidx];
		wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
		wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n)));
		wr->npkt = n;
		wr->r3 = 0;
		wr->type = 1;
		cpl = (void *)(wr + 1);

		for (i = 0; i < n; i++) {
			slot = &ring->slot[kring->nr_hwcur];
			PNMB(kring->na, slot, &ba);
			MPASS(ba != 0);

			cpl->ctrl0 = nm_txq->cpl_ctrl0;
			cpl->pack = 0;
			cpl->len = htobe16(slot->len);
			/*
			 * netmap(4) says "netmap does not use features such as
			 * checksum offloading, TCP segmentation offloading,
			 * encryption, VLAN encapsulation/decapsulation, etc."
			 *
			 * So the ncxl interfaces have tx hardware checksumming
			 * disabled by default.  But you can override netmap by
			 * enabling IFCAP_TXCSUM on the interface manually.
			 */
			cpl->ctrl1 = txcsum ? 0 :
			    htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

			usgl = (void *)(cpl + 1);
			usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
			    V_ULPTX_NSGE(1));
			usgl->len0 = htobe32(slot->len);
			usgl->addr0 = htobe64(ba);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			cpl = (void *)(usgl + 1);
			MPASS(slot->len + len <= UINT16_MAX);
			len += slot->len;
			kring->nr_hwcur = nm_next(kring->nr_hwcur, lim);
		}
		wr->plen = htobe16(len);

		npkt -= n;
		nm_txq->pidx += npkt_to_ndesc(n);
		MPASS(nm_txq->pidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
			/*
			 * This routine doesn't know how to write WRs that wrap
			 * around.  Make sure it wasn't asked to.
			 */
			MPASS(npkt == 0);
			nm_txq->pidx = 0;
		}

		if (npkt == 0 && npkt_remaining == 0) {
			/* All done. */
			if (lazy_tx_credit_flush == 0) {
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
				    F_FW_WR_EQUIQ);
				nm_txq->equeqidx = nm_txq->pidx;
				nm_txq->equiqidx = nm_txq->pidx;
			}
			ring_nm_txq_db(sc, nm_txq);
			return;
		}

		if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
			    F_FW_WR_EQUIQ);
			nm_txq->equeqidx = nm_txq->pidx;
			nm_txq->equiqidx = nm_txq->pidx;
		} else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
			nm_txq->equeqidx = nm_txq->pidx;
		}
		if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC)
			ring_nm_txq_db(sc, nm_txq);
	}

	/* Will get called again. */
	MPASS(npkt_remaining);
}
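/*
 * Completion handling, in outline (a reader's note ahead of the helpers
 * below): the hardware reports its consumer index in the status page that
 * sits just past the last descriptor of the ring, and reclaim_nm_tx_desc()
 * walks the work requests between the driver's cidx and that snapshot,
 * counting the frames they carried so txsync can move netmap's hwtail
 * forward.
 */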
/* How many contiguous free descriptors starting at pidx */
static inline int
contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
{

	if (nm_txq->cidx > nm_txq->pidx)
		return (nm_txq->cidx - nm_txq->pidx - 1);
	else if (nm_txq->cidx > 0)
		return (nm_txq->sidx - nm_txq->pidx);
	else
		return (nm_txq->sidx - nm_txq->pidx - 1);
}

static int
reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
{
	struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
	uint16_t hw_cidx = spg->cidx;	/* snapshot */
	struct fw_eth_tx_pkts_wr *wr;
	int n = 0;

	hw_cidx = be16toh(hw_cidx);

	while (nm_txq->cidx != hw_cidx) {
		wr = (void *)&nm_txq->desc[nm_txq->cidx];

		MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)));
		MPASS(wr->type == 1);
		MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR);

		n += wr->npkt;
		nm_txq->cidx += npkt_to_ndesc(wr->npkt);

		/*
		 * We never sent a WR that wrapped around so the credits coming
		 * back, WR by WR, should never cause the cidx to wrap around
		 * either.
		 */
		MPASS(nm_txq->cidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->cidx == nm_txq->sidx))
			nm_txq->cidx = 0;
	}

	return (n);
}

static int
cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[pi->first_nm_txq + kring->ring_id];
	const u_int head = kring->rhead;
	u_int reclaimed = 0;
	int n, d, npkt_remaining, ndesc_remaining, txcsum;

	/*
	 * Tx was at kring->nr_hwcur last time around and now we need to advance
	 * to kring->rhead.  Note that the driver's pidx moves independently of
	 * netmap's kring->nr_hwcur (pidx counts descriptors and the relation
	 * between descriptors and frames isn't 1:1).
	 */

	npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	txcsum = ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
	while (npkt_remaining) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		ndesc_remaining = contiguous_ndesc_available(nm_txq);
		/* Can't run out of descriptors with packets still remaining */
		MPASS(ndesc_remaining > 0);

		/* # of desc needed to tx all remaining packets */
		d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC;
		if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR)
			d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR);

		if (d <= ndesc_remaining)
			n = npkt_remaining;
		else {
			/* Can't send all, calculate how many can be sent */
			n = (ndesc_remaining / SGE_MAX_WR_NDESC) *
			    MAX_NPKT_IN_TYPE1_WR;
			if (ndesc_remaining % SGE_MAX_WR_NDESC)
				n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC);
		}

		/* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
		npkt_remaining -= n;
		cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining, txcsum);
	}
	MPASS(npkt_remaining == 0);
	MPASS(kring->nr_hwcur == head);
	MPASS(nm_txq->dbidx == nm_txq->pidx);

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		kring->nr_hwtail += reclaimed;
		if (kring->nr_hwtail >= kring->nkr_num_slots)
			kring->nr_hwtail -= kring->nkr_num_slots;
	}

	nm_txsync_finalize(kring);

	return (0);
}
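/*
 * The rx side (summarizing cxgbe_netmap_rxsync below): the freelist's cidx,
 * updated by the interrupt handler, is what drives netmap's nr_hwtail, and
 * buffers released by userspace are recycled back onto the freelist eight at
 * a time, with the doorbell written in batches rather than per buffer.
 */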
static int
cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	struct ifnet *ifp = na->ifp;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[pi->first_nm_rxq + kring->ring_id];
	u_int const head = nm_rxsync_prologue(kring);
	u_int n;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	if (black_hole)
		return (0);	/* No updates ever. */

	if (netmap_no_pendintr || force_update) {
		kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx);
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/* Userspace done with buffers from kring->nr_hwcur to head */
	n = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	n &= ~7U;
	if (n > 0) {
		u_int fl_pidx = nm_rxq->fl_pidx;
		struct netmap_slot *slot = &ring->slot[fl_pidx];
		uint64_t ba;
		int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx;

		/*
		 * We always deal with 8 buffers at a time.  We must have
		 * stopped at an 8-buffer boundary (fl_pidx) last time around
		 * and we must have a multiple of 8 buffers to give to the
		 * freelist.
		 */
		MPASS((fl_pidx & 7) == 0);
		MPASS((n & 7) == 0);

		IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots);
		IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx);

		while (n > 0) {
			for (i = 0; i < 8; i++, fl_pidx++, slot++) {
				PNMB(na, slot, &ba);
				MPASS(ba != 0);
				nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx);
				slot->flags &= ~NS_BUF_CHANGED;
				MPASS(fl_pidx <= nm_rxq->fl_sidx);
			}
			n -= 8;
			if (fl_pidx == nm_rxq->fl_sidx) {
				fl_pidx = 0;
				slot = &ring->slot[0];
			}
			if (++dbinc == 8 && n >= 32) {
				wmb();
				t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
				    nm_rxq->fl_db_val | V_PIDX(dbinc));
				dbinc = 0;
			}
		}
		MPASS(nm_rxq->fl_pidx == fl_pidx);

		if (dbinc > 0) {
			wmb();
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
			    nm_rxq->fl_db_val | V_PIDX(dbinc));
		}
	}

	nm_rxsync_finalize(kring);

	return (0);
}
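/*
 * Notes on the netmap ifnet created below (a summary of the code, not new
 * policy): each port gets a separate "ncxgbe"/"ncxl" ifnet backed by its own
 * virtual interface, the ifnet declares L3/L4 checksum capabilities but
 * leaves them disabled so that netmap's no-offload default holds unless the
 * user enables them, and the netmap ring sizes are derived from the port's
 * qsize_txq/qsize_rxq with the status page accounted for.
 */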
/*
 * Create an ifnet solely for netmap use and register it with the kernel.
 */
int
create_netmap_ifnet(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct netmap_adapter na;
	struct ifnet *ifp;
	device_t dev = pi->dev;
	uint8_t mac[ETHER_ADDR_LEN];
	int rc;

	if (pi->nnmtxq <= 0 || pi->nnmrxq <= 0)
		return (0);
	MPASS(pi->nm_ifp == NULL);

	/*
	 * Allocate a virtual interface exclusively for netmap use.  Give it the
	 * MAC address normally reserved for use by a TOE interface.  (The TOE
	 * driver on FreeBSD doesn't use it).
	 */
	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, &mac[0],
	    &pi->nm_rss_size, FW_VI_FUNC_OFLD, 0);
	if (rc < 0) {
		device_printf(dev, "unable to allocate netmap virtual "
		    "interface for port %d: %d\n", pi->port_id, -rc);
		return (-rc);
	}
	pi->nm_viid = rc;
	pi->nm_xact_addr_filt = -1;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate netmap ifnet\n");
		return (ENOMEM);
	}
	pi->nm_ifp = ifp;
	ifp->if_softc = pi;

	if_initname(ifp, is_t4(pi->adapter) ? "ncxgbe" : "ncxl",
	    device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_nm_init;
	ifp->if_ioctl = cxgbe_nm_ioctl;
	ifp->if_transmit = cxgbe_nm_transmit;
	ifp->if_qflush = cxgbe_nm_qflush;

	/*
	 * netmap(4) says "netmap does not use features such as checksum
	 * offloading, TCP segmentation offloading, encryption, VLAN
	 * encapsulation/decapsulation, etc."
	 *
	 * By default we comply with the statement above.  But we do declare the
	 * ifnet capable of L3/L4 checksumming so that a user can override
	 * netmap and have the hardware do the L3/L4 checksums.
	 */
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_JUMBO_MTU |
	    IFCAP_HWCSUM_IPV6;
	ifp->if_capenable = 0;
	ifp->if_hwassist = 0;

	/* nm_media has already been setup by the caller */

	ether_ifattach(ifp, mac);

	/*
	 * Register with netmap in the kernel.
	 */
	bzero(&na, sizeof(na));

	na.ifp = pi->nm_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;

	/* Netmap doesn't know about the space reserved for the status page. */
	na.num_tx_desc = pi->qsize_txq - spg_len / EQ_ESIZE;

	/*
	 * The freelist's cidx/pidx drives netmap's rx cidx/pidx.  So
	 * num_rx_desc is based on the number of buffers that can be held in the
	 * freelist, and not the number of entries in the iq.  (These two are
	 * not exactly the same due to the space taken up by the status page).
	 */
	na.num_rx_desc = (pi->qsize_rxq / 8) * 8;
	na.nm_txsync = cxgbe_netmap_txsync;
	na.nm_rxsync = cxgbe_netmap_rxsync;
	na.nm_register = cxgbe_netmap_reg;
	na.num_tx_rings = pi->nnmtxq;
	na.num_rx_rings = pi->nnmrxq;
	netmap_attach(&na);	/* This adds IFCAP_NETMAP to if_capabilities */

	return (0);
}
int
destroy_netmap_ifnet(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;

	if (pi->nm_ifp == NULL)
		return (0);

	netmap_detach(pi->nm_ifp);
	ifmedia_removeall(&pi->nm_media);
	ether_ifdetach(pi->nm_ifp);
	if_free(pi->nm_ifp);
	t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->nm_viid);

	return (0);
}

static void
handle_nm_fw6_msg(struct adapter *sc, struct ifnet *ifp,
    const struct cpl_fw6_msg *cpl)
{
	const struct cpl_sge_egr_update *egr;
	uint32_t oq;
	struct sge_nm_txq *nm_txq;

	if (cpl->type != FW_TYPE_RSSCPL && cpl->type != FW6_TYPE_RSSCPL)
		panic("%s: FW_TYPE 0x%x on nm_rxq.", __func__, cpl->type);

	/* data[0] is RSS header */
	egr = (const void *)&cpl->data[1];
	oq = be32toh(egr->opcode_qid);
	MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE);
	nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];

	netmap_tx_irq(ifp, nm_txq->nid);
}
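/*
 * The rx interrupt handler, in outline (summarizing t4_nm_intr below): it
 * walks the ingress queue until the generation bit flips, turning CPL_RX_PKT
 * entries into netmap slots and egress updates into tx completions, updates
 * the hardware cidx every rx_ndesc entries, and either hands the new frames
 * to netmap or, in black-hole mode, recycles the buffers straight back onto
 * the freelist eight at a time.
 */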
void
t4_nm_intr(void *arg)
{
	struct sge_nm_rxq *nm_rxq = arg;
	struct port_info *pi = nm_rxq->pi;
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->nm_ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->rx_rings[nm_rxq->nid];
	struct netmap_ring *ring = kring->ring;
	struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
	uint32_t lq;
	u_int n = 0, work = 0;
	uint8_t opcode;
	uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx);
	u_int fl_credits = fl_cidx & 7;

	while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) {

		rmb();

		lq = be32toh(d->rsp.pldbuflen_qid);
		opcode = d->rss.opcode;

		switch (G_RSPD_TYPE(d->rsp.u.type_gen)) {
		case X_RSPD_TYPE_FLBUF:
			if (black_hole != 2) {
				/* No buffer packing so new buf every time */
				MPASS(lq & F_RSPD_NEWBUF);
			}

			/* fall through */

		case X_RSPD_TYPE_CPL:
			MPASS(opcode < NUM_CPL_CMDS);

			switch (opcode) {
			case CPL_FW4_MSG:
			case CPL_FW6_MSG:
				handle_nm_fw6_msg(sc, ifp,
				    (const void *)&d->cpl[0]);
				break;
			case CPL_RX_PKT:
				ring->slot[fl_cidx].len = G_RSPD_LEN(lq) - fl_pktshift;
				ring->slot[fl_cidx].flags = kring->nkr_slot_flags;
				fl_cidx += (lq & F_RSPD_NEWBUF) ? 1 : 0;
				fl_credits += (lq & F_RSPD_NEWBUF) ? 1 : 0;
				if (__predict_false(fl_cidx == nm_rxq->fl_sidx))
					fl_cidx = 0;
				break;
			default:
				panic("%s: unexpected opcode 0x%x on nm_rxq %p",
				    __func__, opcode, nm_rxq);
			}
			break;

		case X_RSPD_TYPE_INTR:
			/* Not equipped to handle forwarded interrupts. */
			panic("%s: netmap queue received interrupt for iq %u\n",
			    __func__, lq);

		default:
			panic("%s: illegal response type %d on nm_rxq %p",
			    __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq);
		}

		d++;
		if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) {
			nm_rxq->iq_cidx = 0;
			d = &nm_rxq->iq_desc[0];
			nm_rxq->iq_gen ^= F_RSPD_GEN;
		}

		if (__predict_false(++n == rx_ndesc)) {
			atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
			if (black_hole && fl_credits >= 8) {
				fl_credits /= 8;
				IDXINCR(nm_rxq->fl_pidx, fl_credits * 8,
				    nm_rxq->fl_sidx);
				t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
				    nm_rxq->fl_db_val | V_PIDX(fl_credits));
				fl_credits = fl_cidx & 7;
			} else if (!black_hole) {
				netmap_rx_irq(ifp, nm_rxq->nid, &work);
				MPASS(work != 0);
			}
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
			    V_CIDXINC(n) | V_INGRESSQID(nm_rxq->iq_cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			n = 0;
		}
	}

	atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
	if (black_hole) {
		fl_credits /= 8;
		IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx);
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
		    nm_rxq->fl_db_val | V_PIDX(fl_credits));
	} else
		netmap_rx_irq(ifp, nm_rxq->nid, &work);

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(n) |
	    V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
}
#endif