1 /*- 2 * Copyright (c) 2014 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 #include "opt_inet.h" 30 #include "opt_inet6.h" 31 32 #ifdef DEV_NETMAP 33 #include <sys/param.h> 34 #include <sys/bus.h> 35 #include <sys/eventhandler.h> 36 #include <sys/lock.h> 37 #include <sys/mbuf.h> 38 #include <sys/module.h> 39 #include <sys/selinfo.h> 40 #include <sys/socket.h> 41 #include <sys/sockio.h> 42 #include <machine/bus.h> 43 #include <net/ethernet.h> 44 #include <net/if.h> 45 #include <net/if_media.h> 46 #include <net/if_var.h> 47 #include <net/if_clone.h> 48 #include <net/if_types.h> 49 #include <net/netmap.h> 50 #include <dev/netmap/netmap_kern.h> 51 52 #include "common/common.h" 53 #include "common/t4_regs.h" 54 #include "common/t4_regs_values.h" 55 56 extern int fl_pad; /* XXXNM */ 57 58 /* 59 * 0 = normal netmap rx 60 * 1 = black hole 61 * 2 = supermassive black hole (buffer packing enabled) 62 */ 63 int black_hole = 0; 64 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RWTUN, &black_hole, 0, 65 "Sink incoming packets."); 66 67 int rx_ndesc = 256; 68 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN, 69 &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated."); 70 71 int rx_nframes = 64; 72 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_nframes, CTLFLAG_RWTUN, 73 &rx_nframes, 0, "max # of frames received before waking up netmap rx."); 74 75 int holdoff_tmr_idx = 2; 76 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN, 77 &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues."); 78 79 /* 80 * Congestion drops. 81 * -1: no congestion feedback (not recommended). 82 * 0: backpressure the channel instead of dropping packets right away. 83 * 1: no backpressure, drop packets for the congested queue immediately. 84 */ 85 static int nm_cong_drop = 1; 86 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_cong_drop, CTLFLAG_RWTUN, 87 &nm_cong_drop, 0, 88 "Congestion control for netmap rx queues (0 = backpressure, 1 = drop"); 89 90 int starve_fl = 0; 91 SYSCTL_INT(_hw_cxgbe, OID_AUTO, starve_fl, CTLFLAG_RWTUN, 92 &starve_fl, 0, "Don't ring fl db for netmap rx queues."); 93 94 /* 95 * Try to process tx credits in bulk. This may cause a delay in the return of 96 * tx credits and is suitable for bursty or non-stop tx only. 97 */ 98 int lazy_tx_credit_flush = 1; 99 SYSCTL_INT(_hw_cxgbe, OID_AUTO, lazy_tx_credit_flush, CTLFLAG_RWTUN, 100 &lazy_tx_credit_flush, 0, "lazy credit flush for netmap tx queues."); 101 102 /* 103 * Split the netmap rx queues into two groups that populate separate halves of 104 * the RSS indirection table. This allows filters with hashmask to steer to a 105 * particular group of queues. 106 */ 107 static int nm_split_rss = 0; 108 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_split_rss, CTLFLAG_RWTUN, 109 &nm_split_rss, 0, "Split the netmap rx queues into two groups."); 110 111 /* 112 * netmap(4) says "netmap does not use features such as checksum offloading, TCP 113 * segmentation offloading, encryption, VLAN encapsulation/decapsulation, etc." 114 * but this knob can be used to get the hardware to checksum all tx traffic 115 * anyway. 116 */ 117 static int nm_txcsum = 0; 118 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_txcsum, CTLFLAG_RWTUN, 119 &nm_txcsum, 0, "Enable transmit checksum offloading."); 120 121 static int free_nm_rxq_hwq(struct vi_info *, struct sge_nm_rxq *); 122 static int free_nm_txq_hwq(struct vi_info *, struct sge_nm_txq *); 123 124 int 125 alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx, 126 int idx) 127 { 128 int rc; 129 struct sysctl_oid *oid; 130 struct sysctl_oid_list *children; 131 struct sysctl_ctx_list *ctx; 132 char name[16]; 133 size_t len; 134 struct adapter *sc = vi->adapter; 135 struct netmap_adapter *na = NA(vi->ifp); 136 137 MPASS(na != NULL); 138 139 len = vi->qsize_rxq * IQ_ESIZE; 140 rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map, 141 &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc); 142 if (rc != 0) 143 return (rc); 144 145 len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len; 146 rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map, 147 &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc); 148 if (rc != 0) 149 return (rc); 150 151 nm_rxq->vi = vi; 152 nm_rxq->nid = idx; 153 nm_rxq->iq_cidx = 0; 154 nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE; 155 nm_rxq->iq_gen = F_RSPD_GEN; 156 nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 157 nm_rxq->fl_sidx = na->num_rx_desc; 158 nm_rxq->fl_sidx2 = nm_rxq->fl_sidx; /* copy for rxsync cacheline */ 159 nm_rxq->intr_idx = intr_idx; 160 nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID; 161 162 ctx = &vi->ctx; 163 children = SYSCTL_CHILDREN(vi->nm_rxq_oid); 164 165 snprintf(name, sizeof(name), "%d", idx); 166 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, 167 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue"); 168 children = SYSCTL_CHILDREN(oid); 169 170 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD, 171 &nm_rxq->iq_abs_id, 0, "absolute id of the queue"); 172 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 173 &nm_rxq->iq_cntxt_id, 0, "SGE context id of the queue"); 174 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 175 &nm_rxq->iq_cidx, 0, "consumer index"); 176 177 children = SYSCTL_CHILDREN(oid); 178 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", 179 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist"); 180 children = SYSCTL_CHILDREN(oid); 181 182 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 183 &nm_rxq->fl_cntxt_id, 0, "SGE context id of the freelist"); 184 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 185 &nm_rxq->fl_cidx, 0, "consumer index"); 186 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 187 &nm_rxq->fl_pidx, 0, "producer index"); 188 189 return (rc); 190 } 191 192 int 193 free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) 194 { 195 struct adapter *sc = vi->adapter; 196 197 if (!(vi->flags & VI_INIT_DONE)) 198 return (0); 199 200 if (nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID) 201 free_nm_rxq_hwq(vi, nm_rxq); 202 MPASS(nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID); 203 204 free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba, 205 nm_rxq->iq_desc); 206 free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba, 207 nm_rxq->fl_desc); 208 209 return (0); 210 } 211 212 int 213 alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx) 214 { 215 int rc; 216 size_t len; 217 struct port_info *pi = vi->pi; 218 struct adapter *sc = pi->adapter; 219 struct netmap_adapter *na = NA(vi->ifp); 220 char name[16]; 221 struct sysctl_oid *oid; 222 struct sysctl_oid_list *children = SYSCTL_CHILDREN(vi->nm_txq_oid); 223 224 len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; 225 rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map, 226 &nm_txq->ba, (void **)&nm_txq->desc); 227 if (rc) 228 return (rc); 229 230 nm_txq->pidx = nm_txq->cidx = 0; 231 nm_txq->sidx = na->num_tx_desc; 232 nm_txq->nid = idx; 233 nm_txq->iqidx = iqidx; 234 nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 235 V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) | 236 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); 237 if (sc->params.fw_vers >= FW_VERSION32(1, 24, 11, 0)) 238 nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)); 239 else 240 nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 241 nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID; 242 243 snprintf(name, sizeof(name), "%d", idx); 244 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, 245 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queue"); 246 children = SYSCTL_CHILDREN(oid); 247 248 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 249 &nm_txq->cntxt_id, 0, "SGE context id of the queue"); 250 SYSCTL_ADD_U16(&vi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 251 &nm_txq->cidx, 0, "consumer index"); 252 SYSCTL_ADD_U16(&vi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 253 &nm_txq->pidx, 0, "producer index"); 254 255 return (rc); 256 } 257 258 int 259 free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 260 { 261 struct adapter *sc = vi->adapter; 262 263 if (!(vi->flags & VI_INIT_DONE)) 264 return (0); 265 266 if (nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID) 267 free_nm_txq_hwq(vi, nm_txq); 268 MPASS(nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID); 269 270 free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba, 271 nm_txq->desc); 272 273 return (0); 274 } 275 276 static int 277 alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) 278 { 279 int rc, cntxt_id, cong_map; 280 __be32 v; 281 struct adapter *sc = vi->adapter; 282 struct port_info *pi = vi->pi; 283 struct sge_params *sp = &sc->params.sge; 284 struct netmap_adapter *na = NA(vi->ifp); 285 struct fw_iq_cmd c; 286 const int cong_drop = nm_cong_drop; 287 288 MPASS(na != NULL); 289 MPASS(nm_rxq->iq_desc != NULL); 290 MPASS(nm_rxq->fl_desc != NULL); 291 292 bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE); 293 bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len); 294 295 bzero(&c, sizeof(c)); 296 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 297 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 298 V_FW_IQ_CMD_VFN(0)); 299 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_IQSTART | FW_LEN16(c)); 300 if (nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID) 301 c.alloc_to_len16 |= htobe32(F_FW_IQ_CMD_ALLOC); 302 else { 303 c.iqid = htobe16(nm_rxq->iq_cntxt_id); 304 c.fl0id = htobe16(nm_rxq->fl_cntxt_id); 305 c.fl1id = htobe16(0xffff); 306 c.physiqid = htobe16(nm_rxq->iq_abs_id); 307 } 308 MPASS(!forwarding_intr_to_fwq(sc)); 309 KASSERT(nm_rxq->intr_idx < sc->intr_count, 310 ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx)); 311 v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx); 312 c.type_to_iqandstindex = htobe32(v | 313 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 314 V_FW_IQ_CMD_VIID(vi->viid) | 315 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 316 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->hw_port) | 317 F_FW_IQ_CMD_IQGTSMODE | 318 V_FW_IQ_CMD_IQINTCNTTHRESH(0) | 319 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); 320 c.iqsize = htobe16(vi->qsize_rxq); 321 c.iqaddr = htobe64(nm_rxq->iq_ba); 322 if (cong_drop != -1) { 323 if (chip_id(sc) >= CHELSIO_T7) 324 cong_map = 1 << pi->hw_port; 325 else 326 cong_map = pi->rx_e_chan_map; 327 c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN | 328 V_FW_IQ_CMD_FL0CNGCHMAP(cong_map) | F_FW_IQ_CMD_FL0CONGCIF | 329 F_FW_IQ_CMD_FL0CONGEN); 330 } 331 c.iqns_to_fl0congen |= 332 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 333 V_FW_IQ_CMD_IQTYPE(FW_IQ_IQTYPE_NIC) | 334 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 335 (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 336 (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0)); 337 c.fl0dcaen_to_fl0cidxfthresh = 338 htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ? 339 X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) | 340 V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ? 341 X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); 342 c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE); 343 c.fl0addr = htobe64(nm_rxq->fl_ba); 344 345 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 346 if (rc != 0) { 347 device_printf(sc->dev, 348 "failed to create netmap ingress queue: %d\n", rc); 349 return (rc); 350 } 351 352 nm_rxq->iq_cidx = 0; 353 MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE); 354 nm_rxq->iq_gen = F_RSPD_GEN; 355 nm_rxq->iq_cntxt_id = be16toh(c.iqid); 356 nm_rxq->iq_abs_id = be16toh(c.physiqid); 357 cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start; 358 if (cntxt_id >= sc->sge.iqmap_sz) { 359 panic ("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)", 360 __func__, cntxt_id, sc->sge.iqmap_sz - 1); 361 } 362 sc->sge.iqmap[cntxt_id] = (void *)nm_rxq; 363 364 nm_rxq->fl_cntxt_id = be16toh(c.fl0id); 365 nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 366 nm_rxq->fl_db_saved = 0; 367 /* matches the X_FETCHBURSTMAX_512B or X_FETCHBURSTMAX_256B above. */ 368 nm_rxq->fl_db_threshold = chip_id(sc) <= CHELSIO_T5 ? 8 : 4; 369 MPASS(nm_rxq->fl_sidx == na->num_rx_desc); 370 cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start; 371 if (cntxt_id >= sc->sge.eqmap_sz) { 372 panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)", 373 __func__, cntxt_id, sc->sge.eqmap_sz - 1); 374 } 375 sc->sge.eqmap[cntxt_id] = (void *)nm_rxq; 376 377 nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) | 378 sc->chip_params->sge_fl_db; 379 380 if (chip_id(sc) >= CHELSIO_T5 && cong_drop != -1) { 381 t4_sge_set_conm_context(sc, nm_rxq->iq_cntxt_id, cong_drop, 382 cong_map); 383 } 384 385 t4_write_reg(sc, sc->sge_gts_reg, 386 V_INGRESSQID(nm_rxq->iq_cntxt_id) | 387 V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx))); 388 389 return (rc); 390 } 391 392 static int 393 free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) 394 { 395 struct adapter *sc = vi->adapter; 396 int rc; 397 398 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, 399 nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff); 400 if (rc != 0) 401 device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n", 402 __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc); 403 nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID; 404 return (rc); 405 } 406 407 static int 408 alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 409 { 410 int rc, cntxt_id; 411 size_t len; 412 struct adapter *sc = vi->adapter; 413 struct netmap_adapter *na = NA(vi->ifp); 414 struct fw_eq_eth_cmd c; 415 416 MPASS(na != NULL); 417 MPASS(nm_txq->desc != NULL); 418 419 len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; 420 bzero(nm_txq->desc, len); 421 422 bzero(&c, sizeof(c)); 423 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 424 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 425 V_FW_EQ_ETH_CMD_VFN(0)); 426 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 427 if (nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID) { 428 const int core = sc->params.ncores > 1 ? 429 nm_txq->nid % sc->params.ncores : 0; 430 431 c.alloc_to_len16 |= htobe32(F_FW_EQ_ETH_CMD_ALLOC | 432 V_FW_EQ_ETH_CMD_COREGROUP(core)); 433 } else 434 c.eqid_pkd = htobe32(V_FW_EQ_ETH_CMD_EQID(nm_txq->cntxt_id)); 435 c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | 436 F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); 437 c.fetchszm_to_iqid = 438 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 439 V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->hw_port) | F_FW_EQ_ETH_CMD_FETCHRO | 440 V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id)); 441 c.dcaen_to_eqsize = 442 htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 443 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 444 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 445 V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE)); 446 c.eqaddr = htobe64(nm_txq->ba); 447 448 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 449 if (rc != 0) { 450 device_printf(vi->dev, 451 "failed to create netmap egress queue: %d\n", rc); 452 return (rc); 453 } 454 455 nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 456 cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start; 457 if (cntxt_id >= sc->sge.eqmap_sz) 458 panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__, 459 cntxt_id, sc->sge.eqmap_sz - 1); 460 sc->sge.eqmap[cntxt_id] = (void *)nm_txq; 461 462 nm_txq->pidx = nm_txq->cidx = 0; 463 MPASS(nm_txq->sidx == na->num_tx_desc); 464 nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0; 465 466 nm_txq->doorbells = sc->doorbells; 467 if (isset(&nm_txq->doorbells, DOORBELL_UDB) || 468 isset(&nm_txq->doorbells, DOORBELL_UDBWC) || 469 isset(&nm_txq->doorbells, DOORBELL_WCWR)) { 470 uint32_t s_qpp = sc->params.sge.eq_s_qpp; 471 uint32_t mask = (1 << s_qpp) - 1; 472 volatile uint8_t *udb; 473 474 udb = sc->udbs_base + UDBS_DB_OFFSET; 475 udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT; 476 nm_txq->udb_qid = nm_txq->cntxt_id & mask; 477 if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) 478 clrbit(&nm_txq->doorbells, DOORBELL_WCWR); 479 else { 480 udb += nm_txq->udb_qid << UDBS_SEG_SHIFT; 481 nm_txq->udb_qid = 0; 482 } 483 nm_txq->udb = (volatile void *)udb; 484 } 485 486 if (sc->params.fw_vers < FW_VERSION32(1, 25, 1, 0)) { 487 uint32_t param, val; 488 489 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 490 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH) | 491 V_FW_PARAMS_PARAM_YZ(nm_txq->cntxt_id); 492 val = 0xff; 493 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 494 if (rc != 0) { 495 device_printf(vi->dev, 496 "failed to bind netmap txq %d to class 0xff: %d\n", 497 nm_txq->cntxt_id, rc); 498 rc = 0; 499 } 500 } 501 502 return (rc); 503 } 504 505 static int 506 free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 507 { 508 struct adapter *sc = vi->adapter; 509 int rc; 510 511 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id); 512 if (rc != 0) 513 device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__, 514 nm_txq->cntxt_id, rc); 515 nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID; 516 return (rc); 517 } 518 519 static int 520 cxgbe_netmap_simple_rss(struct adapter *sc, struct vi_info *vi, 521 if_t ifp, struct netmap_adapter *na) 522 { 523 struct netmap_kring *kring; 524 struct sge_nm_rxq *nm_rxq; 525 int rc, i, j, nm_state, defq; 526 uint16_t *rss; 527 528 /* 529 * Check if there's at least one active (or about to go active) netmap 530 * rx queue. 531 */ 532 defq = -1; 533 for_each_nm_rxq(vi, j, nm_rxq) { 534 nm_state = atomic_load_int(&nm_rxq->nm_state); 535 kring = na->rx_rings[nm_rxq->nid]; 536 if ((nm_state != NM_OFF && !nm_kring_pending_off(kring)) || 537 (nm_state == NM_OFF && nm_kring_pending_on(kring))) { 538 MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 539 if (defq == -1) { 540 defq = nm_rxq->iq_abs_id; 541 break; 542 } 543 } 544 } 545 546 if (defq == -1) { 547 /* No active netmap queues. Switch back to NIC queues. */ 548 rss = vi->rss; 549 defq = vi->rss[0]; 550 } else { 551 for (i = 0; i < vi->rss_size;) { 552 for_each_nm_rxq(vi, j, nm_rxq) { 553 nm_state = atomic_load_int(&nm_rxq->nm_state); 554 kring = na->rx_rings[nm_rxq->nid]; 555 if ((nm_state != NM_OFF && 556 !nm_kring_pending_off(kring)) || 557 (nm_state == NM_OFF && 558 nm_kring_pending_on(kring))) { 559 MPASS(nm_rxq->iq_cntxt_id != 560 INVALID_NM_RXQ_CNTXT_ID); 561 vi->nm_rss[i++] = nm_rxq->iq_abs_id; 562 if (i == vi->rss_size) 563 break; 564 } 565 } 566 } 567 rss = vi->nm_rss; 568 } 569 570 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, 571 vi->rss_size); 572 if (rc != 0) 573 if_printf(ifp, "netmap rss_config failed: %d\n", rc); 574 575 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0); 576 if (rc != 0) { 577 if_printf(ifp, "netmap defaultq config failed: %d\n", rc); 578 } 579 580 return (rc); 581 } 582 583 /* 584 * Odd number of rx queues work best for split RSS mode as the first queue can 585 * be dedicated for non-RSS traffic and the rest divided into two equal halves. 586 */ 587 static int 588 cxgbe_netmap_split_rss(struct adapter *sc, struct vi_info *vi, 589 if_t ifp, struct netmap_adapter *na) 590 { 591 struct netmap_kring *kring; 592 struct sge_nm_rxq *nm_rxq; 593 int rc, i, j, nm_state, defq; 594 int nactive[2] = {0, 0}; 595 int dq[2] = {-1, -1}; 596 bool dq_norss; /* default queue should not be in RSS table. */ 597 598 MPASS(nm_split_rss != 0); 599 MPASS(vi->nnmrxq > 1); 600 601 for_each_nm_rxq(vi, i, nm_rxq) { 602 j = i / ((vi->nnmrxq + 1) / 2); 603 nm_state = atomic_load_int(&nm_rxq->nm_state); 604 kring = na->rx_rings[nm_rxq->nid]; 605 if ((nm_state != NM_OFF && !nm_kring_pending_off(kring)) || 606 (nm_state == NM_OFF && nm_kring_pending_on(kring))) { 607 MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 608 nactive[j]++; 609 if (dq[j] == -1) 610 dq[j] = nm_rxq->iq_abs_id; 611 } 612 } 613 614 if (nactive[0] == 0 || nactive[1] == 0) 615 return (cxgbe_netmap_simple_rss(sc, vi, ifp, na)); 616 617 MPASS(dq[0] != -1 && dq[1] != -1); 618 if (nactive[0] > nactive[1]) { 619 defq = dq[0]; 620 dq_norss = true; 621 } else if (nactive[0] < nactive[1]) { 622 defq = dq[1]; 623 dq_norss = true; 624 } else { 625 defq = dq[0]; 626 dq_norss = false; 627 } 628 629 i = 0; 630 nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq]; 631 while (i < vi->rss_size / 2) { 632 for (j = 0; j < (vi->nnmrxq + 1) / 2; j++) { 633 nm_state = atomic_load_int(&nm_rxq[j].nm_state); 634 kring = na->rx_rings[nm_rxq[j].nid]; 635 if ((nm_state == NM_OFF && 636 !nm_kring_pending_on(kring)) || 637 (nm_state == NM_ON && 638 nm_kring_pending_off(kring))) { 639 continue; 640 } 641 MPASS(nm_rxq[j].iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 642 if (dq_norss && defq == nm_rxq[j].iq_abs_id) 643 continue; 644 vi->nm_rss[i++] = nm_rxq[j].iq_abs_id; 645 if (i == vi->rss_size / 2) 646 break; 647 } 648 } 649 while (i < vi->rss_size) { 650 for (j = (vi->nnmrxq + 1) / 2; j < vi->nnmrxq; j++) { 651 nm_state = atomic_load_int(&nm_rxq[j].nm_state); 652 kring = na->rx_rings[nm_rxq[j].nid]; 653 if ((nm_state == NM_OFF && 654 !nm_kring_pending_on(kring)) || 655 (nm_state == NM_ON && 656 nm_kring_pending_off(kring))) { 657 continue; 658 } 659 MPASS(nm_rxq[j].iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 660 if (dq_norss && defq == nm_rxq[j].iq_abs_id) 661 continue; 662 vi->nm_rss[i++] = nm_rxq[j].iq_abs_id; 663 if (i == vi->rss_size) 664 break; 665 } 666 } 667 668 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, 669 vi->nm_rss, vi->rss_size); 670 if (rc != 0) 671 if_printf(ifp, "netmap split_rss_config failed: %d\n", rc); 672 673 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0); 674 if (rc != 0) 675 if_printf(ifp, "netmap defaultq config failed: %d\n", rc); 676 677 return (rc); 678 } 679 680 static inline int 681 cxgbe_netmap_rss(struct adapter *sc, struct vi_info *vi, if_t ifp, 682 struct netmap_adapter *na) 683 { 684 685 if (nm_split_rss == 0 || vi->nnmrxq == 1) 686 return (cxgbe_netmap_simple_rss(sc, vi, ifp, na)); 687 else 688 return (cxgbe_netmap_split_rss(sc, vi, ifp, na)); 689 } 690 691 static int 692 cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, if_t ifp, 693 struct netmap_adapter *na) 694 { 695 struct netmap_slot *slot; 696 struct netmap_kring *kring; 697 struct sge_nm_rxq *nm_rxq; 698 struct sge_nm_txq *nm_txq; 699 int i, j, hwidx; 700 struct rx_buf_info *rxb; 701 702 ASSERT_SYNCHRONIZED_OP(sc); 703 MPASS(vi->nnmrxq > 0); 704 MPASS(vi->nnmtxq > 0); 705 706 if ((vi->flags & VI_INIT_DONE) == 0 || 707 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { 708 if_printf(ifp, "cannot enable netmap operation because " 709 "interface is not UP.\n"); 710 return (EAGAIN); 711 } 712 713 rxb = &sc->sge.rx_buf_info[0]; 714 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) { 715 if (rxb->size1 == NETMAP_BUF_SIZE(na)) { 716 hwidx = rxb->hwidx1; 717 break; 718 } 719 if (rxb->size2 == NETMAP_BUF_SIZE(na)) { 720 hwidx = rxb->hwidx2; 721 break; 722 } 723 } 724 if (i >= SW_ZONE_SIZES) { 725 if_printf(ifp, "no hwidx for netmap buffer size %d.\n", 726 NETMAP_BUF_SIZE(na)); 727 return (ENXIO); 728 } 729 730 /* Must set caps before calling netmap_reset */ 731 nm_set_native_flags(na); 732 733 for_each_nm_rxq(vi, i, nm_rxq) { 734 kring = na->rx_rings[nm_rxq->nid]; 735 if (!nm_kring_pending_on(kring)) 736 continue; 737 738 alloc_nm_rxq_hwq(vi, nm_rxq); 739 nm_rxq->fl_hwidx = hwidx; 740 slot = netmap_reset(na, NR_RX, i, 0); 741 MPASS(slot != NULL); /* XXXNM: error check, not assert */ 742 743 /* We deal with 8 bufs at a time */ 744 MPASS((na->num_rx_desc & 7) == 0); 745 MPASS(na->num_rx_desc == nm_rxq->fl_sidx); 746 for (j = 0; j < nm_rxq->fl_sidx; j++) { 747 uint64_t ba; 748 749 PNMB(na, &slot[j], &ba); 750 MPASS(ba != 0); 751 nm_rxq->fl_desc[j] = htobe64(ba | hwidx); 752 } 753 j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8; 754 MPASS((j & 7) == 0); 755 j /= 8; /* driver pidx to hardware pidx */ 756 wmb(); 757 t4_write_reg(sc, sc->sge_kdoorbell_reg, 758 nm_rxq->fl_db_val | V_PIDX(j)); 759 760 (void) atomic_cmpset_int(&nm_rxq->nm_state, NM_OFF, NM_ON); 761 } 762 763 for_each_nm_txq(vi, i, nm_txq) { 764 kring = na->tx_rings[nm_txq->nid]; 765 if (!nm_kring_pending_on(kring)) 766 continue; 767 768 alloc_nm_txq_hwq(vi, nm_txq); 769 slot = netmap_reset(na, NR_TX, i, 0); 770 MPASS(slot != NULL); /* XXXNM: error check, not assert */ 771 } 772 773 if (vi->nm_rss == NULL) { 774 vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE, 775 M_ZERO | M_WAITOK); 776 } 777 778 return (cxgbe_netmap_rss(sc, vi, ifp, na)); 779 } 780 781 static int 782 cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, if_t ifp, 783 struct netmap_adapter *na) 784 { 785 struct netmap_kring *kring; 786 int rc, i, nm_state, nactive; 787 struct sge_nm_txq *nm_txq; 788 struct sge_nm_rxq *nm_rxq; 789 790 ASSERT_SYNCHRONIZED_OP(sc); 791 MPASS(vi->nnmrxq > 0); 792 MPASS(vi->nnmtxq > 0); 793 794 if (!nm_netmap_on(na)) 795 return (0); 796 797 if ((vi->flags & VI_INIT_DONE) == 0) 798 return (0); 799 800 /* First remove the queues that are stopping from the RSS table. */ 801 rc = cxgbe_netmap_rss(sc, vi, ifp, na); 802 if (rc != 0) 803 return (rc); /* error message logged already. */ 804 805 for_each_nm_txq(vi, i, nm_txq) { 806 kring = na->tx_rings[nm_txq->nid]; 807 if (!nm_kring_pending_off(kring)) 808 continue; 809 MPASS(nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID); 810 811 rc = -t4_eth_eq_stop(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id); 812 if (rc != 0) { 813 device_printf(vi->dev, 814 "failed to stop nm_txq[%d]: %d.\n", i, rc); 815 return (rc); 816 } 817 818 /* XXX: netmap, not the driver, should do this. */ 819 kring->rhead = kring->rcur = kring->nr_hwcur = 0; 820 kring->rtail = kring->nr_hwtail = kring->nkr_num_slots - 1; 821 } 822 nactive = 0; 823 for_each_nm_rxq(vi, i, nm_rxq) { 824 nm_state = atomic_load_int(&nm_rxq->nm_state); 825 kring = na->rx_rings[nm_rxq->nid]; 826 if (nm_state != NM_OFF && !nm_kring_pending_off(kring)) 827 nactive++; 828 if (!nm_kring_pending_off(kring)) 829 continue; 830 MPASS(nm_state != NM_OFF); 831 MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 832 833 rc = -t4_iq_stop(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, 834 nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff); 835 if (rc != 0) { 836 device_printf(vi->dev, 837 "failed to stop nm_rxq[%d]: %d.\n", i, rc); 838 return (rc); 839 } 840 841 while (!atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_OFF)) 842 pause("nmst", 1); 843 844 /* XXX: netmap, not the driver, should do this. */ 845 kring->rhead = kring->rcur = kring->nr_hwcur = 0; 846 kring->rtail = kring->nr_hwtail = 0; 847 } 848 netmap_krings_mode_commit(na, 0); 849 if (nactive == 0) 850 nm_clear_native_flags(na); 851 852 return (rc); 853 } 854 855 static int 856 cxgbe_netmap_reg(struct netmap_adapter *na, int on) 857 { 858 if_t ifp = na->ifp; 859 struct vi_info *vi = if_getsoftc(ifp); 860 struct adapter *sc = vi->adapter; 861 int rc; 862 863 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg"); 864 if (rc != 0) 865 return (rc); 866 if (on) 867 rc = cxgbe_netmap_on(sc, vi, ifp, na); 868 else 869 rc = cxgbe_netmap_off(sc, vi, ifp, na); 870 end_synchronized_op(sc, 0); 871 872 return (rc); 873 } 874 875 /* How many packets can a single type1 WR carry in n descriptors */ 876 static inline int 877 ndesc_to_npkt(const int n) 878 { 879 880 MPASS(n > 0 && n <= SGE_MAX_WR_NDESC); 881 882 return (n * 2 - 1); 883 } 884 #define MAX_NPKT_IN_TYPE1_WR (ndesc_to_npkt(SGE_MAX_WR_NDESC)) 885 886 /* 887 * Space (in descriptors) needed for a type1 WR (TX_PKTS or TX_PKTS2) that 888 * carries n packets 889 */ 890 static inline int 891 npkt_to_ndesc(const int n) 892 { 893 894 MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR); 895 896 return ((n + 2) / 2); 897 } 898 899 /* 900 * Space (in 16B units) needed for a type1 WR (TX_PKTS or TX_PKTS2) that 901 * carries n packets 902 */ 903 static inline int 904 npkt_to_len16(const int n) 905 { 906 907 MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR); 908 909 return (n * 2 + 1); 910 } 911 912 #define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx) 913 914 static void 915 ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq) 916 { 917 int n; 918 u_int db = nm_txq->doorbells; 919 920 MPASS(nm_txq->pidx != nm_txq->dbidx); 921 922 n = NMIDXDIFF(nm_txq, dbidx); 923 if (n > 1) 924 clrbit(&db, DOORBELL_WCWR); 925 wmb(); 926 927 switch (ffs(db) - 1) { 928 case DOORBELL_UDB: 929 *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n)); 930 break; 931 932 case DOORBELL_WCWR: { 933 volatile uint64_t *dst, *src; 934 935 /* 936 * Queues whose 128B doorbell segment fits in the page do not 937 * use relative qid (udb_qid is always 0). Only queues with 938 * doorbell segments can do WCWR. 939 */ 940 KASSERT(nm_txq->udb_qid == 0 && n == 1, 941 ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p", 942 __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq)); 943 944 dst = (volatile void *)((uintptr_t)nm_txq->udb + 945 UDBS_WR_OFFSET - UDBS_DB_OFFSET); 946 src = (void *)&nm_txq->desc[nm_txq->dbidx]; 947 while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1]) 948 *dst++ = *src++; 949 wmb(); 950 break; 951 } 952 953 case DOORBELL_UDBWC: 954 *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n)); 955 wmb(); 956 break; 957 958 case DOORBELL_KDB: 959 t4_write_reg(sc, sc->sge_kdoorbell_reg, 960 V_QID(nm_txq->cntxt_id) | V_PIDX(n)); 961 break; 962 } 963 nm_txq->dbidx = nm_txq->pidx; 964 } 965 966 /* 967 * Write work requests to send 'npkt' frames and ring the doorbell to send them 968 * on their way. No need to check for wraparound. 969 */ 970 static void 971 cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq, 972 struct netmap_kring *kring, int npkt, int npkt_remaining) 973 { 974 struct netmap_ring *ring = kring->ring; 975 struct netmap_slot *slot; 976 const u_int lim = kring->nkr_num_slots - 1; 977 struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx]; 978 uint16_t len; 979 uint64_t ba; 980 struct cpl_tx_pkt_core *cpl; 981 struct ulptx_sgl *usgl; 982 int i, n; 983 984 while (npkt) { 985 n = min(npkt, MAX_NPKT_IN_TYPE1_WR); 986 len = 0; 987 988 wr = (void *)&nm_txq->desc[nm_txq->pidx]; 989 wr->op_pkd = nm_txq->op_pkd; 990 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n))); 991 wr->npkt = n; 992 wr->r3 = 0; 993 wr->type = 1; 994 cpl = (void *)(wr + 1); 995 996 for (i = 0; i < n; i++) { 997 slot = &ring->slot[kring->nr_hwcur]; 998 PNMB(kring->na, slot, &ba); 999 MPASS(ba != 0); 1000 1001 cpl->ctrl0 = nm_txq->cpl_ctrl0; 1002 cpl->pack = 0; 1003 cpl->len = htobe16(slot->len); 1004 cpl->ctrl1 = nm_txcsum ? 0 : 1005 htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS); 1006 1007 usgl = (void *)(cpl + 1); 1008 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 1009 V_ULPTX_NSGE(1)); 1010 usgl->len0 = htobe32(slot->len); 1011 usgl->addr0 = htobe64(ba + nm_get_offset(kring, slot)); 1012 1013 slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); 1014 cpl = (void *)(usgl + 1); 1015 MPASS(slot->len + len <= UINT16_MAX); 1016 len += slot->len; 1017 kring->nr_hwcur = nm_next(kring->nr_hwcur, lim); 1018 } 1019 wr->plen = htobe16(len); 1020 1021 npkt -= n; 1022 nm_txq->pidx += npkt_to_ndesc(n); 1023 MPASS(nm_txq->pidx <= nm_txq->sidx); 1024 if (__predict_false(nm_txq->pidx == nm_txq->sidx)) { 1025 /* 1026 * This routine doesn't know how to write WRs that wrap 1027 * around. Make sure it wasn't asked to. 1028 */ 1029 MPASS(npkt == 0); 1030 nm_txq->pidx = 0; 1031 } 1032 1033 if (npkt + npkt_remaining == 0) { 1034 /* All done. */ 1035 if (lazy_tx_credit_flush == 0 || 1036 NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) { 1037 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | 1038 F_FW_WR_EQUIQ); 1039 nm_txq->equeqidx = nm_txq->pidx; 1040 nm_txq->equiqidx = nm_txq->pidx; 1041 } else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) { 1042 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 1043 nm_txq->equeqidx = nm_txq->pidx; 1044 } 1045 ring_nm_txq_db(sc, nm_txq); 1046 return; 1047 } 1048 if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC) { 1049 if (NMIDXDIFF(nm_txq, equeqidx) >= 64) { 1050 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 1051 nm_txq->equeqidx = nm_txq->pidx; 1052 } 1053 ring_nm_txq_db(sc, nm_txq); 1054 } 1055 } 1056 1057 /* Will get called again. */ 1058 MPASS(npkt_remaining); 1059 } 1060 1061 /* How many contiguous free descriptors starting at pidx */ 1062 static inline int 1063 contiguous_ndesc_available(struct sge_nm_txq *nm_txq) 1064 { 1065 1066 if (nm_txq->cidx > nm_txq->pidx) 1067 return (nm_txq->cidx - nm_txq->pidx - 1); 1068 else if (nm_txq->cidx > 0) 1069 return (nm_txq->sidx - nm_txq->pidx); 1070 else 1071 return (nm_txq->sidx - nm_txq->pidx - 1); 1072 } 1073 1074 static int 1075 reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq) 1076 { 1077 struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx]; 1078 uint16_t hw_cidx = spg->cidx; /* snapshot */ 1079 struct fw_eth_tx_pkts_wr *wr; 1080 int n = 0; 1081 1082 hw_cidx = be16toh(hw_cidx); 1083 1084 while (nm_txq->cidx != hw_cidx) { 1085 wr = (void *)&nm_txq->desc[nm_txq->cidx]; 1086 1087 MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)) || 1088 wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR))); 1089 MPASS(wr->type == 1); 1090 MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR); 1091 1092 n += wr->npkt; 1093 nm_txq->cidx += npkt_to_ndesc(wr->npkt); 1094 1095 /* 1096 * We never sent a WR that wrapped around so the credits coming 1097 * back, WR by WR, should never cause the cidx to wrap around 1098 * either. 1099 */ 1100 MPASS(nm_txq->cidx <= nm_txq->sidx); 1101 if (__predict_false(nm_txq->cidx == nm_txq->sidx)) 1102 nm_txq->cidx = 0; 1103 } 1104 1105 return (n); 1106 } 1107 1108 static int 1109 cxgbe_netmap_txsync(struct netmap_kring *kring, int flags) 1110 { 1111 struct netmap_adapter *na = kring->na; 1112 if_t ifp = na->ifp; 1113 struct vi_info *vi = if_getsoftc(ifp); 1114 struct adapter *sc = vi->adapter; 1115 struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id]; 1116 const u_int head = kring->rhead; 1117 u_int reclaimed = 0; 1118 int n, d, npkt_remaining, ndesc_remaining; 1119 1120 /* 1121 * Tx was at kring->nr_hwcur last time around and now we need to advance 1122 * to kring->rhead. Note that the driver's pidx moves independent of 1123 * netmap's kring->nr_hwcur (pidx counts descriptors and the relation 1124 * between descriptors and frames isn't 1:1). 1125 */ 1126 1127 npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur : 1128 kring->nkr_num_slots - kring->nr_hwcur + head; 1129 while (npkt_remaining) { 1130 reclaimed += reclaim_nm_tx_desc(nm_txq); 1131 ndesc_remaining = contiguous_ndesc_available(nm_txq); 1132 /* Can't run out of descriptors with packets still remaining */ 1133 MPASS(ndesc_remaining > 0); 1134 1135 /* # of desc needed to tx all remaining packets */ 1136 d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC; 1137 if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR) 1138 d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR); 1139 1140 if (d <= ndesc_remaining) 1141 n = npkt_remaining; 1142 else { 1143 /* Can't send all, calculate how many can be sent */ 1144 n = (ndesc_remaining / SGE_MAX_WR_NDESC) * 1145 MAX_NPKT_IN_TYPE1_WR; 1146 if (ndesc_remaining % SGE_MAX_WR_NDESC) 1147 n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC); 1148 } 1149 1150 /* Send n packets and update nm_txq->pidx and kring->nr_hwcur */ 1151 npkt_remaining -= n; 1152 cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining); 1153 } 1154 MPASS(npkt_remaining == 0); 1155 MPASS(kring->nr_hwcur == head); 1156 MPASS(nm_txq->dbidx == nm_txq->pidx); 1157 1158 /* 1159 * Second part: reclaim buffers for completed transmissions. 1160 */ 1161 if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) { 1162 reclaimed += reclaim_nm_tx_desc(nm_txq); 1163 kring->nr_hwtail += reclaimed; 1164 if (kring->nr_hwtail >= kring->nkr_num_slots) 1165 kring->nr_hwtail -= kring->nkr_num_slots; 1166 } 1167 1168 return (0); 1169 } 1170 1171 static int 1172 cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags) 1173 { 1174 struct netmap_adapter *na = kring->na; 1175 struct netmap_ring *ring = kring->ring; 1176 if_t ifp = na->ifp; 1177 struct vi_info *vi = if_getsoftc(ifp); 1178 struct adapter *sc = vi->adapter; 1179 struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id]; 1180 u_int const head = kring->rhead; 1181 u_int n; 1182 int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; 1183 1184 if (black_hole) 1185 return (0); /* No updates ever. */ 1186 1187 if (netmap_no_pendintr || force_update) { 1188 kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx); 1189 kring->nr_kflags &= ~NKR_PENDINTR; 1190 } 1191 1192 if (nm_rxq->fl_db_saved > 0 && starve_fl == 0) { 1193 wmb(); 1194 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1195 nm_rxq->fl_db_val | V_PIDX(nm_rxq->fl_db_saved)); 1196 nm_rxq->fl_db_saved = 0; 1197 } 1198 1199 /* Userspace done with buffers from kring->nr_hwcur to head */ 1200 n = head >= kring->nr_hwcur ? head - kring->nr_hwcur : 1201 kring->nkr_num_slots - kring->nr_hwcur + head; 1202 n &= ~7U; 1203 if (n > 0) { 1204 u_int fl_pidx = nm_rxq->fl_pidx; 1205 struct netmap_slot *slot = &ring->slot[fl_pidx]; 1206 uint64_t ba; 1207 int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx; 1208 1209 /* 1210 * We always deal with 8 buffers at a time. We must have 1211 * stopped at an 8B boundary (fl_pidx) last time around and we 1212 * must have a multiple of 8B buffers to give to the freelist. 1213 */ 1214 MPASS((fl_pidx & 7) == 0); 1215 MPASS((n & 7) == 0); 1216 1217 IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots); 1218 IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx2); 1219 1220 while (n > 0) { 1221 for (i = 0; i < 8; i++, fl_pidx++, slot++) { 1222 PNMB(na, slot, &ba); 1223 MPASS(ba != 0); 1224 nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx); 1225 slot->flags &= ~NS_BUF_CHANGED; 1226 MPASS(fl_pidx <= nm_rxq->fl_sidx2); 1227 } 1228 n -= 8; 1229 if (fl_pidx == nm_rxq->fl_sidx2) { 1230 fl_pidx = 0; 1231 slot = &ring->slot[0]; 1232 } 1233 if (++dbinc == nm_rxq->fl_db_threshold) { 1234 wmb(); 1235 if (starve_fl) 1236 nm_rxq->fl_db_saved += dbinc; 1237 else { 1238 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1239 nm_rxq->fl_db_val | V_PIDX(dbinc)); 1240 } 1241 dbinc = 0; 1242 } 1243 } 1244 MPASS(nm_rxq->fl_pidx == fl_pidx); 1245 1246 if (dbinc > 0) { 1247 wmb(); 1248 if (starve_fl) 1249 nm_rxq->fl_db_saved += dbinc; 1250 else { 1251 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1252 nm_rxq->fl_db_val | V_PIDX(dbinc)); 1253 } 1254 } 1255 } 1256 1257 return (0); 1258 } 1259 1260 void 1261 cxgbe_nm_attach(struct vi_info *vi) 1262 { 1263 struct port_info *pi; 1264 struct adapter *sc; 1265 struct netmap_adapter na; 1266 1267 MPASS(vi->nnmrxq > 0); 1268 MPASS(vi->ifp != NULL); 1269 1270 pi = vi->pi; 1271 sc = pi->adapter; 1272 1273 bzero(&na, sizeof(na)); 1274 1275 na.ifp = vi->ifp; 1276 na.na_flags = NAF_BDG_MAYSLEEP | NAF_OFFSETS; 1277 1278 /* Netmap doesn't know about the space reserved for the status page. */ 1279 na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE; 1280 1281 /* 1282 * The freelist's cidx/pidx drives netmap's rx cidx/pidx. So 1283 * num_rx_desc is based on the number of buffers that can be held in the 1284 * freelist, and not the number of entries in the iq. (These two are 1285 * not exactly the same due to the space taken up by the status page). 1286 */ 1287 na.num_rx_desc = rounddown(vi->qsize_rxq, 8); 1288 na.nm_txsync = cxgbe_netmap_txsync; 1289 na.nm_rxsync = cxgbe_netmap_rxsync; 1290 na.nm_register = cxgbe_netmap_reg; 1291 na.num_tx_rings = vi->nnmtxq; 1292 na.num_rx_rings = vi->nnmrxq; 1293 na.rx_buf_maxsize = MAX_MTU + sc->params.sge.fl_pktshift; 1294 netmap_attach(&na); /* This adds IFCAP_NETMAP to if_capabilities */ 1295 } 1296 1297 void 1298 cxgbe_nm_detach(struct vi_info *vi) 1299 { 1300 1301 MPASS(vi->nnmrxq > 0); 1302 MPASS(vi->ifp != NULL); 1303 1304 netmap_detach(vi->ifp); 1305 } 1306 1307 static inline const void * 1308 unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl) 1309 { 1310 1311 MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL); 1312 1313 /* data[0] is RSS header */ 1314 return (&cpl->data[1]); 1315 } 1316 1317 static void 1318 handle_nm_sge_egr_update(struct adapter *sc, if_t ifp, 1319 const struct cpl_sge_egr_update *egr) 1320 { 1321 uint32_t oq; 1322 struct sge_nm_txq *nm_txq; 1323 1324 oq = be32toh(egr->opcode_qid); 1325 MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE); 1326 nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start]; 1327 1328 netmap_tx_irq(ifp, nm_txq->nid); 1329 } 1330 1331 void 1332 service_nm_rxq(struct sge_nm_rxq *nm_rxq) 1333 { 1334 struct vi_info *vi = nm_rxq->vi; 1335 struct adapter *sc = vi->adapter; 1336 if_t ifp = vi->ifp; 1337 struct netmap_adapter *na = NA(ifp); 1338 struct netmap_kring *kring = na->rx_rings[nm_rxq->nid]; 1339 struct netmap_ring *ring = kring->ring; 1340 struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx]; 1341 const void *cpl; 1342 uint32_t lq; 1343 u_int work = 0; 1344 uint8_t opcode; 1345 uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx); 1346 u_int fl_credits = fl_cidx & 7; 1347 u_int ndesc = 0; /* desc processed since last cidx update */ 1348 u_int nframes = 0; /* frames processed since last netmap wakeup */ 1349 1350 while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) { 1351 1352 rmb(); 1353 1354 lq = be32toh(d->rsp.pldbuflen_qid); 1355 opcode = d->rss.opcode; 1356 cpl = &d->cpl[0]; 1357 1358 switch (G_RSPD_TYPE(d->rsp.u.type_gen)) { 1359 case X_RSPD_TYPE_FLBUF: 1360 1361 /* fall through */ 1362 1363 case X_RSPD_TYPE_CPL: 1364 MPASS(opcode < NUM_CPL_CMDS); 1365 1366 switch (opcode) { 1367 case CPL_FW4_MSG: 1368 case CPL_FW6_MSG: 1369 cpl = unwrap_nm_fw6_msg(cpl); 1370 /* fall through */ 1371 case CPL_SGE_EGR_UPDATE: 1372 handle_nm_sge_egr_update(sc, ifp, cpl); 1373 break; 1374 case CPL_RX_PKT: 1375 /* 1376 * Note that the application must have netmap 1377 * offsets (NETMAP_REQ_OPT_OFFSETS) enabled on 1378 * the ring or its rx will not work correctly 1379 * when fl_pktshift > 0. 1380 */ 1381 nm_write_offset(kring, &ring->slot[fl_cidx], 1382 sc->params.sge.fl_pktshift); 1383 ring->slot[fl_cidx].len = G_RSPD_LEN(lq) - 1384 sc->params.sge.fl_pktshift; 1385 ring->slot[fl_cidx].flags = 0; 1386 nframes++; 1387 if (!(lq & F_RSPD_NEWBUF)) { 1388 MPASS(black_hole == 2); 1389 break; 1390 } 1391 fl_credits++; 1392 if (__predict_false(++fl_cidx == nm_rxq->fl_sidx)) 1393 fl_cidx = 0; 1394 break; 1395 default: 1396 panic("%s: unexpected opcode 0x%x on nm_rxq %p", 1397 __func__, opcode, nm_rxq); 1398 } 1399 break; 1400 1401 case X_RSPD_TYPE_INTR: 1402 /* Not equipped to handle forwarded interrupts. */ 1403 panic("%s: netmap queue received interrupt for iq %u\n", 1404 __func__, lq); 1405 1406 default: 1407 panic("%s: illegal response type %d on nm_rxq %p", 1408 __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq); 1409 } 1410 1411 d++; 1412 if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) { 1413 nm_rxq->iq_cidx = 0; 1414 d = &nm_rxq->iq_desc[0]; 1415 nm_rxq->iq_gen ^= F_RSPD_GEN; 1416 } 1417 1418 if (__predict_false(++nframes == rx_nframes) && !black_hole) { 1419 atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx); 1420 netmap_rx_irq(ifp, nm_rxq->nid, &work); 1421 nframes = 0; 1422 } 1423 1424 if (__predict_false(++ndesc == rx_ndesc)) { 1425 if (black_hole && fl_credits >= 8) { 1426 fl_credits /= 8; 1427 IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, 1428 nm_rxq->fl_sidx); 1429 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1430 nm_rxq->fl_db_val | V_PIDX(fl_credits)); 1431 fl_credits = fl_cidx & 7; 1432 } 1433 t4_write_reg(sc, sc->sge_gts_reg, 1434 V_CIDXINC(ndesc) | 1435 V_INGRESSQID(nm_rxq->iq_cntxt_id) | 1436 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 1437 ndesc = 0; 1438 } 1439 } 1440 1441 atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx); 1442 if (black_hole) { 1443 fl_credits /= 8; 1444 IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx); 1445 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1446 nm_rxq->fl_db_val | V_PIDX(fl_credits)); 1447 } else if (nframes > 0) 1448 netmap_rx_irq(ifp, nm_rxq->nid, &work); 1449 1450 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndesc) | 1451 V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) | 1452 V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx))); 1453 } 1454 #endif 1455