/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef DEV_NETMAP
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <machine/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"

extern int fl_pad;	/* XXXNM */

SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe netmap parameters");

/*
 * 0 = normal netmap rx
 * 1 = black hole
 * 2 = supermassive black hole (buffer packing enabled)
 */
int black_hole = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RDTUN, &black_hole, 0,
    "Sink incoming packets.");

int rx_ndesc = 256;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN,
    &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated.");

int holdoff_tmr_idx = 2;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN,
    &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues.");

/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int nm_cong_drop = 1;
TUNABLE_INT("hw.cxgbe.nm_cong_drop", &nm_cong_drop);

static int
alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
{
	int rc, cntxt_id, i;
	__be32 v;
	struct adapter *sc = vi->pi->adapter;
	struct sge_params *sp = &sc->params.sge;
	struct netmap_adapter *na = NA(vi->ifp);
	struct fw_iq_cmd c;

	MPASS(na != NULL);
	MPASS(nm_rxq->iq_desc != NULL);
	MPASS(nm_rxq->fl_desc != NULL);

	bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE);
	bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));
	if (vi->flags & INTR_RXQ) {
		KASSERT(nm_rxq->intr_idx < sc->intr_count,
		    ("%s: invalid direct intr_idx %d", __func__,
		    nm_rxq->intr_idx));
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
	} else {
		CXGBE_UNIMPLEMENTED(__func__);	/* XXXNM: needs review */
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx) |
		    F_FW_IQ_CMD_IQANDST;
	}
	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(vi->viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
	c.iqsize = htobe16(vi->qsize_rxq);
	c.iqaddr = htobe64(nm_rxq->iq_ba);
	if (cong >= 0) {
		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
		    V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF |
		    F_FW_IQ_CMD_FL0CONGEN);
	}
	c.iqns_to_fl0congen |=
	    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
	    (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
	    (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0));
	c.fl0dcaen_to_fl0cidxfthresh =
	    htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
	    X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) |
	    V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
	    X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
	c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE);
	c.fl0addr = htobe64(nm_rxq->fl_ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create netmap ingress queue: %d\n", rc);
		return (rc);
	}

	nm_rxq->iq_cidx = 0;
	MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE);
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->iq_cntxt_id = be16toh(c.iqid);
	nm_rxq->iq_abs_id = be16toh(c.physiqid);
	cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
	if (cntxt_id >= sc->sge.niq) {
		panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.niq - 1);
	}
	sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
	cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq) {
		panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	}
	sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) |
	    sc->chip_params->sge_fl_db;

	if (chip_id(sc) >= CHELSIO_T5 && cong >= 0) {
		uint32_t param, val;

		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
		    V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id);
		if (cong == 0)
			val = 1 << 19;
		else {
			val = 2 << 19;
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					val |= 1 << (i << 2);
			}
		}

		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* report error but carry on */
			device_printf(sc->dev,
			    "failed to set congestion manager context for "
			    "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc);
		}
	}

	t4_write_reg(sc, sc->sge_gts_reg,
	    V_INGRESSQID(nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));

	return (rc);
}

static int
free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
	struct adapter *sc = vi->pi->adapter;
	int rc;

	rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
	    nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n",
		    __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc);
	return (rc);
}

static int
alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	int rc, cntxt_id;
	size_t len;
	struct adapter *sc = vi->pi->adapter;
	struct netmap_adapter *na = NA(vi->ifp);
	struct fw_eq_eth_cmd c;

	MPASS(na != NULL);
	MPASS(nm_txq->desc != NULL);

	len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
	bzero(nm_txq->desc, len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
	    V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
	c.eqaddr = htobe64(nm_txq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(vi->dev,
		    "failed to create netmap egress queue: %d\n", rc);
		return (rc);
	}

	nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = (void *)nm_txq;

	nm_txq->pidx = nm_txq->cidx = 0;
	MPASS(nm_txq->sidx == na->num_tx_desc);
	nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;

	nm_txq->doorbells = sc->doorbells;
	if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
	    isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
	    isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
		uint32_t s_qpp = sc->params.sge.eq_s_qpp;
		uint32_t mask = (1 << s_qpp) - 1;
		volatile uint8_t *udb;

		udb = sc->udbs_base + UDBS_DB_OFFSET;
		udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
		nm_txq->udb_qid = nm_txq->cntxt_id & mask;
		if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
			clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
		else {
			udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
			nm_txq->udb_qid = 0;
		}
		nm_txq->udb = (volatile void *)udb;
	}

	return (rc);
}

static int
free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	struct adapter *sc = vi->pi->adapter;
	int rc;

	rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__,
		    nm_txq->cntxt_id, rc);
	return (rc);
}

static int
cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	struct netmap_slot *slot;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
	int rc, i, j, hwidx;
	struct hw_buf_info *hwb;

	ASSERT_SYNCHRONIZED_OP(sc);

	if ((vi->flags & VI_INIT_DONE) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (EAGAIN);

	hwb = &sc->sge.hw_buf_info[0];
	for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
		if (hwb->size == NETMAP_BUF_SIZE(na))
			break;
	}
	if (i >= SGE_FLBUF_SIZES) {
		if_printf(ifp, "no hwidx for netmap buffer size %d.\n",
		    NETMAP_BUF_SIZE(na));
		return (ENXIO);
	}
	hwidx = i;

	/* Must set caps before calling netmap_reset */
	nm_set_native_flags(na);

	for_each_nm_rxq(vi, i, nm_rxq) {
		struct irq *irq = &sc->irq[vi->first_intr + i];

		alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop));
		nm_rxq->fl_hwidx = hwidx;
		slot = netmap_reset(na, NR_RX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */

		/* We deal with 8 bufs at a time */
		MPASS((na->num_rx_desc & 7) == 0);
		MPASS(na->num_rx_desc == nm_rxq->fl_sidx);
		for (j = 0; j < nm_rxq->fl_sidx; j++) {
			uint64_t ba;

			PNMB(na, &slot[j], &ba);
			MPASS(ba != 0);
			nm_rxq->fl_desc[j] = htobe64(ba | hwidx);
		}
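		/*
		 * Hand all but the last 8 buffers to the hardware.  The
		 * freelist doorbell's pidx is in units of 8 buffers, which is
		 * why the driver index is divided by 8 below.
		 */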
		j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8;
		MPASS((j & 7) == 0);
		j /= 8;	/* driver pidx to hardware pidx */
		wmb();
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(j));

		atomic_cmpset_int(&irq->nm_state, NM_OFF, NM_ON);
	}

	for_each_nm_txq(vi, i, nm_txq) {
		alloc_nm_txq_hwq(vi, nm_txq);
		slot = netmap_reset(na, NR_TX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */
	}

	if (vi->nm_rss == NULL) {
		vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE,
		    M_ZERO | M_WAITOK);
	}
	for (i = 0; i < vi->rss_size;) {
		for_each_nm_rxq(vi, j, nm_rxq) {
			vi->nm_rss[i++] = nm_rxq->iq_abs_id;
			if (i == vi->rss_size)
				break;
		}
	}
	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
	    vi->nm_rss, vi->rss_size);
	if (rc != 0)
		if_printf(ifp, "netmap rss_config failed: %d\n", rc);

	return (rc);
}

static int
cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	int rc, i;
	struct sge_nm_txq *nm_txq;
	struct sge_nm_rxq *nm_rxq;

	ASSERT_SYNCHRONIZED_OP(sc);

	if ((vi->flags & VI_INIT_DONE) == 0)
		return (0);

	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
	    vi->rss, vi->rss_size);
	if (rc != 0)
		if_printf(ifp, "failed to restore RSS config: %d\n", rc);
	nm_clear_native_flags(na);

	for_each_nm_txq(vi, i, nm_txq) {
		struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];

		/* Wait for hw pidx to catch up ... */
		while (be16toh(nm_txq->pidx) != spg->pidx)
			pause("nmpidx", 1);

		/* ... and then for the cidx. */
		while (spg->pidx != spg->cidx)
			pause("nmcidx", 1);

		free_nm_txq_hwq(vi, nm_txq);
	}
	for_each_nm_rxq(vi, i, nm_rxq) {
		struct irq *irq = &sc->irq[vi->first_intr + i];

		while (!atomic_cmpset_int(&irq->nm_state, NM_ON, NM_OFF))
			pause("nmst", 1);

		free_nm_rxq_hwq(vi, nm_rxq);
	}

	return (rc);
}

static int
cxgbe_netmap_reg(struct netmap_adapter *na, int on)
{
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	int rc;

	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg");
	if (rc != 0)
		return (rc);
	if (on)
		rc = cxgbe_netmap_on(sc, vi, ifp, na);
	else
		rc = cxgbe_netmap_off(sc, vi, ifp, na);
	end_synchronized_op(sc, 0);

	return (rc);
}

/* How many packets can a single type1 WR carry in n descriptors */
static inline int
ndesc_to_npkt(const int n)
{

	MPASS(n > 0 && n <= SGE_MAX_WR_NDESC);

	return (n * 2 - 1);
}
#define MAX_NPKT_IN_TYPE1_WR	(ndesc_to_npkt(SGE_MAX_WR_NDESC))

/* Space (in descriptors) needed for a type1 WR that carries n packets */
static inline int
npkt_to_ndesc(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return ((n + 2) / 2);
}

/* Space (in 16B units) needed for a type1 WR that carries n packets */
static inline int
npkt_to_len16(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return (n * 2 + 1);
}

#define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx)

static void
ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
{
	int n;
	u_int db = nm_txq->doorbells;

	MPASS(nm_txq->pidx != nm_txq->dbidx);
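
	/*
	 * NMIDXDIFF(nm_txq, dbidx) is the number of descriptors written since
	 * the last doorbell.  A WCWR doorbell (the WR written directly into
	 * the doorbell region) only covers a single descriptor, so fall back
	 * to an ordinary doorbell write when there is more than one.
	 */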
	n = NMIDXDIFF(nm_txq, dbidx);
	if (n > 1)
		clrbit(&db, DOORBELL_WCWR);
	wmb();

	switch (ffs(db) - 1) {
	case DOORBELL_UDB:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		break;

	case DOORBELL_WCWR: {
		volatile uint64_t *dst, *src;

		/*
		 * Queues whose 128B doorbell segment fits in the page do not
		 * use relative qid (udb_qid is always 0).  Only queues with
		 * doorbell segments can do WCWR.
		 */
		KASSERT(nm_txq->udb_qid == 0 && n == 1,
		    ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
		    __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));

		dst = (volatile void *)((uintptr_t)nm_txq->udb +
		    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
		src = (void *)&nm_txq->desc[nm_txq->dbidx];
		while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
			*dst++ = *src++;
		wmb();
		break;
	}

	case DOORBELL_UDBWC:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		wmb();
		break;

	case DOORBELL_KDB:
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    V_QID(nm_txq->cntxt_id) | V_PIDX(n));
		break;
	}
	nm_txq->dbidx = nm_txq->pidx;
}

int lazy_tx_credit_flush = 1;

/*
 * Write work requests to send 'npkt' frames and ring the doorbell to send them
 * on their way.  No need to check for wraparound.
 */
static void
cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
    struct netmap_kring *kring, int npkt, int npkt_remaining, int txcsum)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot;
	const u_int lim = kring->nkr_num_slots - 1;
	struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
	uint16_t len;
	uint64_t ba;
	struct cpl_tx_pkt_core *cpl;
	struct ulptx_sgl *usgl;
	int i, n;

	while (npkt) {
		n = min(npkt, MAX_NPKT_IN_TYPE1_WR);
		len = 0;

		wr = (void *)&nm_txq->desc[nm_txq->pidx];
		wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
		wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n)));
		wr->npkt = n;
		wr->r3 = 0;
		wr->type = 1;
		cpl = (void *)(wr + 1);

		for (i = 0; i < n; i++) {
			slot = &ring->slot[kring->nr_hwcur];
			PNMB(kring->na, slot, &ba);
			MPASS(ba != 0);

			cpl->ctrl0 = nm_txq->cpl_ctrl0;
			cpl->pack = 0;
			cpl->len = htobe16(slot->len);
			/*
			 * netmap(4) says "netmap does not use features such as
			 * checksum offloading, TCP segmentation offloading,
			 * encryption, VLAN encapsulation/decapsulation, etc."
			 *
			 * So the ncxl interfaces have tx hardware checksumming
			 * disabled by default.  But you can override netmap by
			 * enabling IFCAP_TXCSUM on the interface manually.
			 */
			cpl->ctrl1 = txcsum ? 0 :
			    htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

			usgl = (void *)(cpl + 1);
			usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
			    V_ULPTX_NSGE(1));
			usgl->len0 = htobe32(slot->len);
			usgl->addr0 = htobe64(ba);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			cpl = (void *)(usgl + 1);
			MPASS(slot->len + len <= UINT16_MAX);
			len += slot->len;
			kring->nr_hwcur = nm_next(kring->nr_hwcur, lim);
		}
		wr->plen = htobe16(len);

		npkt -= n;
		nm_txq->pidx += npkt_to_ndesc(n);
		MPASS(nm_txq->pidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
			/*
			 * This routine doesn't know how to write WRs that wrap
			 * around.  Make sure it wasn't asked to.
			 */
			MPASS(npkt == 0);
			nm_txq->pidx = 0;
		}

		if (npkt == 0 && npkt_remaining == 0) {
			/* All done. */
			if (lazy_tx_credit_flush == 0) {
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
				    F_FW_WR_EQUIQ);
				nm_txq->equeqidx = nm_txq->pidx;
				nm_txq->equiqidx = nm_txq->pidx;
			}
			ring_nm_txq_db(sc, nm_txq);
			return;
		}

		if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
			    F_FW_WR_EQUIQ);
			nm_txq->equeqidx = nm_txq->pidx;
			nm_txq->equiqidx = nm_txq->pidx;
		} else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
			nm_txq->equeqidx = nm_txq->pidx;
		}
		if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC)
			ring_nm_txq_db(sc, nm_txq);
	}

	/* Will get called again. */
	MPASS(npkt_remaining);
}

/* How many contiguous free descriptors starting at pidx */
static inline int
contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
{

	if (nm_txq->cidx > nm_txq->pidx)
		return (nm_txq->cidx - nm_txq->pidx - 1);
	else if (nm_txq->cidx > 0)
		return (nm_txq->sidx - nm_txq->pidx);
	else
		return (nm_txq->sidx - nm_txq->pidx - 1);
}

static int
reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
{
	struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
	uint16_t hw_cidx = spg->cidx;	/* snapshot */
	struct fw_eth_tx_pkts_wr *wr;
	int n = 0;

	hw_cidx = be16toh(hw_cidx);

	while (nm_txq->cidx != hw_cidx) {
		wr = (void *)&nm_txq->desc[nm_txq->cidx];

		MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)));
		MPASS(wr->type == 1);
		MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR);

		n += wr->npkt;
		nm_txq->cidx += npkt_to_ndesc(wr->npkt);

		/*
		 * We never sent a WR that wrapped around so the credits coming
		 * back, WR by WR, should never cause the cidx to wrap around
		 * either.
		 */
		MPASS(nm_txq->cidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->cidx == nm_txq->sidx))
			nm_txq->cidx = 0;
	}

	return (n);
}

static int
cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct sge_nm_txq *nm_txq =
	    &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id];
	const u_int head = kring->rhead;
	u_int reclaimed = 0;
	int n, d, npkt_remaining, ndesc_remaining, txcsum;

	/*
	 * Tx was at kring->nr_hwcur last time around and now we need to
	 * advance to kring->rhead.  Note that the driver's pidx moves
	 * independently of netmap's kring->nr_hwcur (pidx counts descriptors
	 * and the relation between descriptors and frames isn't 1:1).
	 */

	npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	txcsum = ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
	while (npkt_remaining) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		ndesc_remaining = contiguous_ndesc_available(nm_txq);
		/* Can't run out of descriptors with packets still remaining */
		MPASS(ndesc_remaining > 0);

		/* # of desc needed to tx all remaining packets */
		d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC;
		if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR)
			d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR);

		if (d <= ndesc_remaining)
			n = npkt_remaining;
		else {
			/* Can't send all, calculate how many can be sent */
			n = (ndesc_remaining / SGE_MAX_WR_NDESC) *
			    MAX_NPKT_IN_TYPE1_WR;
			if (ndesc_remaining % SGE_MAX_WR_NDESC)
				n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC);
		}

		/* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
		npkt_remaining -= n;
		cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining, txcsum);
	}
	MPASS(npkt_remaining == 0);
	MPASS(kring->nr_hwcur == head);
	MPASS(nm_txq->dbidx == nm_txq->pidx);

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		kring->nr_hwtail += reclaimed;
		if (kring->nr_hwtail >= kring->nkr_num_slots)
			kring->nr_hwtail -= kring->nkr_num_slots;
	}

	return (0);
}

static int
cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct sge_nm_rxq *nm_rxq =
	    &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id];
	u_int const head = kring->rhead;
	u_int n;
	int force_update = (flags & NAF_FORCE_READ) ||
	    kring->nr_kflags & NKR_PENDINTR;

	if (black_hole)
		return (0);	/* No updates ever. */

	if (netmap_no_pendintr || force_update) {
		kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx);
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/* Userspace done with buffers from kring->nr_hwcur to head */
	n = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	n &= ~7U;
	if (n > 0) {
		u_int fl_pidx = nm_rxq->fl_pidx;
		struct netmap_slot *slot = &ring->slot[fl_pidx];
		uint64_t ba;
		int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx;

		/*
		 * We always deal with 8 buffers at a time.  We must have
		 * stopped at a multiple of 8 (fl_pidx) last time around and
		 * the number of buffers given to the freelist must also be a
		 * multiple of 8.
		 */
		MPASS((fl_pidx & 7) == 0);
		MPASS((n & 7) == 0);

		IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots);
		IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx);

		while (n > 0) {
			for (i = 0; i < 8; i++, fl_pidx++, slot++) {
				PNMB(na, slot, &ba);
				MPASS(ba != 0);
				nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx);
				slot->flags &= ~NS_BUF_CHANGED;
				MPASS(fl_pidx <= nm_rxq->fl_sidx);
			}
			n -= 8;
			if (fl_pidx == nm_rxq->fl_sidx) {
				fl_pidx = 0;
				slot = &ring->slot[0];
			}
			if (++dbinc == 8 && n >= 32) {
				wmb();
				t4_write_reg(sc, sc->sge_kdoorbell_reg,
				    nm_rxq->fl_db_val | V_PIDX(dbinc));
				dbinc = 0;
			}
		}
		MPASS(nm_rxq->fl_pidx == fl_pidx);

		if (dbinc > 0) {
			wmb();
			t4_write_reg(sc, sc->sge_kdoorbell_reg,
			    nm_rxq->fl_db_val | V_PIDX(dbinc));
		}
	}

	return (0);
}

void
cxgbe_nm_attach(struct vi_info *vi)
{
	struct port_info *pi;
	struct adapter *sc;
	struct netmap_adapter na;

	MPASS(vi->nnmrxq > 0);
	MPASS(vi->ifp != NULL);

	pi = vi->pi;
	sc = pi->adapter;

	bzero(&na, sizeof(na));

	na.ifp = vi->ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;

	/* Netmap doesn't know about the space reserved for the status page. */
	na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE;

	/*
	 * The freelist's cidx/pidx drives netmap's rx cidx/pidx.  So
	 * num_rx_desc is based on the number of buffers that can be held in the
	 * freelist, and not the number of entries in the iq.  (These two are
	 * not exactly the same due to the space taken up by the status page).
	 */
	na.num_rx_desc = rounddown(vi->qsize_rxq, 8);
	na.nm_txsync = cxgbe_netmap_txsync;
	na.nm_rxsync = cxgbe_netmap_rxsync;
	na.nm_register = cxgbe_netmap_reg;
	na.num_tx_rings = vi->nnmtxq;
	na.num_rx_rings = vi->nnmrxq;
	netmap_attach(&na);
}

void
cxgbe_nm_detach(struct vi_info *vi)
{

	MPASS(vi->nnmrxq > 0);
	MPASS(vi->ifp != NULL);

	netmap_detach(vi->ifp);
}

static inline const void *
unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl)
{

	MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL);

	/* data[0] is RSS header */
	return (&cpl->data[1]);
}

static void
handle_nm_sge_egr_update(struct adapter *sc, struct ifnet *ifp,
    const struct cpl_sge_egr_update *egr)
{
	uint32_t oq;
	struct sge_nm_txq *nm_txq;

	oq = be32toh(egr->opcode_qid);
	MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE);
	nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];

	netmap_tx_irq(ifp, nm_txq->nid);
}

void
t4_nm_intr(void *arg)
{
	struct sge_nm_rxq *nm_rxq = arg;
	struct vi_info *vi = nm_rxq->vi;
	struct adapter *sc = vi->pi->adapter;
	struct ifnet *ifp = vi->ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->rx_rings[nm_rxq->nid];
	struct netmap_ring *ring = kring->ring;
	struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
	const void *cpl;
	uint32_t lq;
	u_int n = 0, work = 0;
	uint8_t opcode;
	uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx);
	u_int fl_credits = fl_cidx & 7;

	while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) {

		rmb();

		lq = be32toh(d->rsp.pldbuflen_qid);
		opcode = d->rss.opcode;
		cpl = &d->cpl[0];

		switch (G_RSPD_TYPE(d->rsp.u.type_gen)) {
		case X_RSPD_TYPE_FLBUF:
			if (black_hole != 2) {
				/* No buffer packing so new buf every time */
				MPASS(lq & F_RSPD_NEWBUF);
			}

			/* fall through */

		case X_RSPD_TYPE_CPL:
			MPASS(opcode < NUM_CPL_CMDS);

			switch (opcode) {
			case CPL_FW4_MSG:
			case CPL_FW6_MSG:
				cpl = unwrap_nm_fw6_msg(cpl);
				/* fall through */
			case CPL_SGE_EGR_UPDATE:
				handle_nm_sge_egr_update(sc, ifp, cpl);
				break;
			case CPL_RX_PKT:
				ring->slot[fl_cidx].len = G_RSPD_LEN(lq) -
				    sc->params.sge.fl_pktshift;
				ring->slot[fl_cidx].flags = kring->nkr_slot_flags;
				fl_cidx += (lq & F_RSPD_NEWBUF) ? 1 : 0;
				fl_credits += (lq & F_RSPD_NEWBUF) ? 1 : 0;
				if (__predict_false(fl_cidx == nm_rxq->fl_sidx))
					fl_cidx = 0;
				break;
			default:
				panic("%s: unexpected opcode 0x%x on nm_rxq %p",
				    __func__, opcode, nm_rxq);
			}
			break;

		case X_RSPD_TYPE_INTR:
			/* Not equipped to handle forwarded interrupts. */
			panic("%s: netmap queue received interrupt for iq %u\n",
			    __func__, lq);

		default:
			panic("%s: illegal response type %d on nm_rxq %p",
			    __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq);
		}

		d++;
		if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) {
			nm_rxq->iq_cidx = 0;
			d = &nm_rxq->iq_desc[0];
			nm_rxq->iq_gen ^= F_RSPD_GEN;
		}

		if (__predict_false(++n == rx_ndesc)) {
			atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
			if (black_hole && fl_credits >= 8) {
				fl_credits /= 8;
				IDXINCR(nm_rxq->fl_pidx, fl_credits * 8,
				    nm_rxq->fl_sidx);
				t4_write_reg(sc, sc->sge_kdoorbell_reg,
				    nm_rxq->fl_db_val | V_PIDX(fl_credits));
				fl_credits = fl_cidx & 7;
			} else if (!black_hole) {
				netmap_rx_irq(ifp, nm_rxq->nid, &work);
				MPASS(work != 0);
			}
			t4_write_reg(sc, sc->sge_gts_reg,
			    V_CIDXINC(n) | V_INGRESSQID(nm_rxq->iq_cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			n = 0;
		}
	}

	atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
	if (black_hole) {
		fl_credits /= 8;
		IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx);
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(fl_credits));
	} else
		netmap_rx_irq(ifp, nm_rxq->nid, &work);

	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(n) |
	    V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
}
#endif