1 /*- 2 * Copyright (c) 2014 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 #include "opt_inet6.h" 33 34 #ifdef DEV_NETMAP 35 #include <sys/param.h> 36 #include <sys/bus.h> 37 #include <sys/eventhandler.h> 38 #include <sys/lock.h> 39 #include <sys/mbuf.h> 40 #include <sys/module.h> 41 #include <sys/selinfo.h> 42 #include <sys/socket.h> 43 #include <sys/sockio.h> 44 #include <machine/bus.h> 45 #include <net/ethernet.h> 46 #include <net/if.h> 47 #include <net/if_media.h> 48 #include <net/if_var.h> 49 #include <net/if_clone.h> 50 #include <net/if_types.h> 51 #include <net/netmap.h> 52 #include <dev/netmap/netmap_kern.h> 53 54 #include "common/common.h" 55 #include "common/t4_regs.h" 56 #include "common/t4_regs_values.h" 57 58 extern int fl_pad; /* XXXNM */ 59 60 SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe netmap parameters"); 61 62 /* 63 * 0 = normal netmap rx 64 * 1 = black hole 65 * 2 = supermassive black hole (buffer packing enabled) 66 */ 67 int black_hole = 0; 68 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RDTUN, &black_hole, 0, 69 "Sink incoming packets."); 70 71 int rx_ndesc = 256; 72 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN, 73 &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated."); 74 75 int rx_nframes = 64; 76 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_nframes, CTLFLAG_RWTUN, 77 &rx_nframes, 0, "max # of frames received before waking up netmap rx."); 78 79 int holdoff_tmr_idx = 2; 80 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN, 81 &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues."); 82 83 /* 84 * Congestion drops. 85 * -1: no congestion feedback (not recommended). 86 * 0: backpressure the channel instead of dropping packets right away. 87 * 1: no backpressure, drop packets for the congested queue immediately. 88 */ 89 static int nm_cong_drop = 1; 90 TUNABLE_INT("hw.cxgbe.nm_cong_drop", &nm_cong_drop); 91 92 int starve_fl = 0; 93 SYSCTL_INT(_hw_cxgbe, OID_AUTO, starve_fl, CTLFLAG_RWTUN, 94 &starve_fl, 0, "Don't ring fl db for netmap rx queues."); 95 96 /* 97 * Try to process tx credits in bulk. This may cause a delay in the return of 98 * tx credits and is suitable for bursty or non-stop tx only. 99 */ 100 int lazy_tx_credit_flush = 1; 101 SYSCTL_INT(_hw_cxgbe, OID_AUTO, lazy_tx_credit_flush, CTLFLAG_RWTUN, 102 &lazy_tx_credit_flush, 0, "lazy credit flush for netmap tx queues."); 103 104 static int 105 alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong) 106 { 107 int rc, cntxt_id, i; 108 __be32 v; 109 struct adapter *sc = vi->pi->adapter; 110 struct sge_params *sp = &sc->params.sge; 111 struct netmap_adapter *na = NA(vi->ifp); 112 struct fw_iq_cmd c; 113 114 MPASS(na != NULL); 115 MPASS(nm_rxq->iq_desc != NULL); 116 MPASS(nm_rxq->fl_desc != NULL); 117 118 bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE); 119 bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len); 120 121 bzero(&c, sizeof(c)); 122 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 123 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 124 V_FW_IQ_CMD_VFN(0)); 125 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | 126 FW_LEN16(c)); 127 MPASS(!forwarding_intr_to_fwq(sc)); 128 KASSERT(nm_rxq->intr_idx < sc->intr_count, 129 ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx)); 130 v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx); 131 c.type_to_iqandstindex = htobe32(v | 132 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 133 V_FW_IQ_CMD_VIID(vi->viid) | 134 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 135 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) | 136 F_FW_IQ_CMD_IQGTSMODE | 137 V_FW_IQ_CMD_IQINTCNTTHRESH(0) | 138 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); 139 c.iqsize = htobe16(vi->qsize_rxq); 140 c.iqaddr = htobe64(nm_rxq->iq_ba); 141 if (cong >= 0) { 142 c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN | 143 V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF | 144 F_FW_IQ_CMD_FL0CONGEN); 145 } 146 c.iqns_to_fl0congen |= 147 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 148 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 149 (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 150 (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0)); 151 c.fl0dcaen_to_fl0cidxfthresh = 152 htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ? 153 X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) | 154 V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ? 155 X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); 156 c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE); 157 c.fl0addr = htobe64(nm_rxq->fl_ba); 158 159 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 160 if (rc != 0) { 161 device_printf(sc->dev, 162 "failed to create netmap ingress queue: %d\n", rc); 163 return (rc); 164 } 165 166 nm_rxq->iq_cidx = 0; 167 MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE); 168 nm_rxq->iq_gen = F_RSPD_GEN; 169 nm_rxq->iq_cntxt_id = be16toh(c.iqid); 170 nm_rxq->iq_abs_id = be16toh(c.physiqid); 171 cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start; 172 if (cntxt_id >= sc->sge.niq) { 173 panic ("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)", 174 __func__, cntxt_id, sc->sge.niq - 1); 175 } 176 sc->sge.iqmap[cntxt_id] = (void *)nm_rxq; 177 178 nm_rxq->fl_cntxt_id = be16toh(c.fl0id); 179 nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 180 MPASS(nm_rxq->fl_sidx == na->num_rx_desc); 181 cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start; 182 if (cntxt_id >= sc->sge.neq) { 183 panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)", 184 __func__, cntxt_id, sc->sge.neq - 1); 185 } 186 sc->sge.eqmap[cntxt_id] = (void *)nm_rxq; 187 188 nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) | 189 sc->chip_params->sge_fl_db; 190 191 if (chip_id(sc) >= CHELSIO_T5 && cong >= 0) { 192 uint32_t param, val; 193 194 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 195 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 196 V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id); 197 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 198 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 199 V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id); 200 if (cong == 0) 201 val = 1 << 19; 202 else { 203 val = 2 << 19; 204 for (i = 0; i < 4; i++) { 205 if (cong & (1 << i)) 206 val |= 1 << (i << 2); 207 } 208 } 209 210 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 211 if (rc != 0) { 212 /* report error but carry on */ 213 device_printf(sc->dev, 214 "failed to set congestion manager context for " 215 "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc); 216 } 217 } 218 219 t4_write_reg(sc, sc->sge_gts_reg, 220 V_INGRESSQID(nm_rxq->iq_cntxt_id) | 221 V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx))); 222 223 return (rc); 224 } 225 226 static int 227 free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) 228 { 229 struct adapter *sc = vi->pi->adapter; 230 int rc; 231 232 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, 233 nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff); 234 if (rc != 0) 235 device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n", 236 __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc); 237 nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID; 238 return (rc); 239 } 240 241 static int 242 alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 243 { 244 int rc, cntxt_id; 245 size_t len; 246 struct adapter *sc = vi->pi->adapter; 247 struct netmap_adapter *na = NA(vi->ifp); 248 struct fw_eq_eth_cmd c; 249 250 MPASS(na != NULL); 251 MPASS(nm_txq->desc != NULL); 252 253 len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; 254 bzero(nm_txq->desc, len); 255 256 bzero(&c, sizeof(c)); 257 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 258 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 259 V_FW_EQ_ETH_CMD_VFN(0)); 260 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | 261 F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 262 c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | 263 F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); 264 c.fetchszm_to_iqid = 265 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 266 V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | 267 V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id)); 268 c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 269 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 270 V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE)); 271 c.eqaddr = htobe64(nm_txq->ba); 272 273 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 274 if (rc != 0) { 275 device_printf(vi->dev, 276 "failed to create netmap egress queue: %d\n", rc); 277 return (rc); 278 } 279 280 nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 281 cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start; 282 if (cntxt_id >= sc->sge.neq) 283 panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__, 284 cntxt_id, sc->sge.neq - 1); 285 sc->sge.eqmap[cntxt_id] = (void *)nm_txq; 286 287 nm_txq->pidx = nm_txq->cidx = 0; 288 MPASS(nm_txq->sidx == na->num_tx_desc); 289 nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0; 290 291 nm_txq->doorbells = sc->doorbells; 292 if (isset(&nm_txq->doorbells, DOORBELL_UDB) || 293 isset(&nm_txq->doorbells, DOORBELL_UDBWC) || 294 isset(&nm_txq->doorbells, DOORBELL_WCWR)) { 295 uint32_t s_qpp = sc->params.sge.eq_s_qpp; 296 uint32_t mask = (1 << s_qpp) - 1; 297 volatile uint8_t *udb; 298 299 udb = sc->udbs_base + UDBS_DB_OFFSET; 300 udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT; 301 nm_txq->udb_qid = nm_txq->cntxt_id & mask; 302 if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) 303 clrbit(&nm_txq->doorbells, DOORBELL_WCWR); 304 else { 305 udb += nm_txq->udb_qid << UDBS_SEG_SHIFT; 306 nm_txq->udb_qid = 0; 307 } 308 nm_txq->udb = (volatile void *)udb; 309 } 310 311 return (rc); 312 } 313 314 static int 315 free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 316 { 317 struct adapter *sc = vi->pi->adapter; 318 int rc; 319 320 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id); 321 if (rc != 0) 322 device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__, 323 nm_txq->cntxt_id, rc); 324 nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID; 325 return (rc); 326 } 327 328 static int 329 cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp, 330 struct netmap_adapter *na) 331 { 332 struct netmap_slot *slot; 333 struct netmap_kring *kring; 334 struct sge_nm_rxq *nm_rxq; 335 struct sge_nm_txq *nm_txq; 336 int rc, i, j, hwidx; 337 struct hw_buf_info *hwb; 338 339 ASSERT_SYNCHRONIZED_OP(sc); 340 341 if ((vi->flags & VI_INIT_DONE) == 0 || 342 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 343 return (EAGAIN); 344 345 hwb = &sc->sge.hw_buf_info[0]; 346 for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) { 347 if (hwb->size == NETMAP_BUF_SIZE(na)) 348 break; 349 } 350 if (i >= SGE_FLBUF_SIZES) { 351 if_printf(ifp, "no hwidx for netmap buffer size %d.\n", 352 NETMAP_BUF_SIZE(na)); 353 return (ENXIO); 354 } 355 hwidx = i; 356 357 /* Must set caps before calling netmap_reset */ 358 nm_set_native_flags(na); 359 360 for_each_nm_rxq(vi, i, nm_rxq) { 361 struct irq *irq = &sc->irq[vi->first_intr + i]; 362 363 kring = na->rx_rings[nm_rxq->nid]; 364 if (!nm_kring_pending_on(kring) || 365 nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID) 366 continue; 367 368 alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop)); 369 nm_rxq->fl_hwidx = hwidx; 370 slot = netmap_reset(na, NR_RX, i, 0); 371 MPASS(slot != NULL); /* XXXNM: error check, not assert */ 372 373 /* We deal with 8 bufs at a time */ 374 MPASS((na->num_rx_desc & 7) == 0); 375 MPASS(na->num_rx_desc == nm_rxq->fl_sidx); 376 for (j = 0; j < nm_rxq->fl_sidx; j++) { 377 uint64_t ba; 378 379 PNMB(na, &slot[j], &ba); 380 MPASS(ba != 0); 381 nm_rxq->fl_desc[j] = htobe64(ba | hwidx); 382 } 383 j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8; 384 MPASS((j & 7) == 0); 385 j /= 8; /* driver pidx to hardware pidx */ 386 wmb(); 387 t4_write_reg(sc, sc->sge_kdoorbell_reg, 388 nm_rxq->fl_db_val | V_PIDX(j)); 389 390 atomic_cmpset_int(&irq->nm_state, NM_OFF, NM_ON); 391 } 392 393 for_each_nm_txq(vi, i, nm_txq) { 394 kring = na->tx_rings[nm_txq->nid]; 395 if (!nm_kring_pending_on(kring) || 396 nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID) 397 continue; 398 399 alloc_nm_txq_hwq(vi, nm_txq); 400 slot = netmap_reset(na, NR_TX, i, 0); 401 MPASS(slot != NULL); /* XXXNM: error check, not assert */ 402 } 403 404 if (vi->nm_rss == NULL) { 405 vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE, 406 M_ZERO | M_WAITOK); 407 } 408 for (i = 0; i < vi->rss_size;) { 409 for_each_nm_rxq(vi, j, nm_rxq) { 410 vi->nm_rss[i++] = nm_rxq->iq_abs_id; 411 if (i == vi->rss_size) 412 break; 413 } 414 } 415 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, 416 vi->nm_rss, vi->rss_size); 417 if (rc != 0) 418 if_printf(ifp, "netmap rss_config failed: %d\n", rc); 419 420 return (rc); 421 } 422 423 static int 424 cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp, 425 struct netmap_adapter *na) 426 { 427 struct netmap_kring *kring; 428 int rc, i; 429 struct sge_nm_txq *nm_txq; 430 struct sge_nm_rxq *nm_rxq; 431 432 ASSERT_SYNCHRONIZED_OP(sc); 433 434 if ((vi->flags & VI_INIT_DONE) == 0) 435 return (0); 436 437 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, 438 vi->rss, vi->rss_size); 439 if (rc != 0) 440 if_printf(ifp, "failed to restore RSS config: %d\n", rc); 441 nm_clear_native_flags(na); 442 443 for_each_nm_txq(vi, i, nm_txq) { 444 struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx]; 445 446 kring = na->tx_rings[nm_txq->nid]; 447 if (!nm_kring_pending_off(kring) || 448 nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID) 449 continue; 450 451 /* Wait for hw pidx to catch up ... */ 452 while (be16toh(nm_txq->pidx) != spg->pidx) 453 pause("nmpidx", 1); 454 455 /* ... and then for the cidx. */ 456 while (spg->pidx != spg->cidx) 457 pause("nmcidx", 1); 458 459 free_nm_txq_hwq(vi, nm_txq); 460 } 461 for_each_nm_rxq(vi, i, nm_rxq) { 462 struct irq *irq = &sc->irq[vi->first_intr + i]; 463 464 kring = na->rx_rings[nm_rxq->nid]; 465 if (!nm_kring_pending_off(kring) || 466 nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID) 467 continue; 468 469 while (!atomic_cmpset_int(&irq->nm_state, NM_ON, NM_OFF)) 470 pause("nmst", 1); 471 472 free_nm_rxq_hwq(vi, nm_rxq); 473 } 474 475 return (rc); 476 } 477 478 static int 479 cxgbe_netmap_reg(struct netmap_adapter *na, int on) 480 { 481 struct ifnet *ifp = na->ifp; 482 struct vi_info *vi = ifp->if_softc; 483 struct adapter *sc = vi->pi->adapter; 484 int rc; 485 486 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg"); 487 if (rc != 0) 488 return (rc); 489 if (on) 490 rc = cxgbe_netmap_on(sc, vi, ifp, na); 491 else 492 rc = cxgbe_netmap_off(sc, vi, ifp, na); 493 end_synchronized_op(sc, 0); 494 495 return (rc); 496 } 497 498 /* How many packets can a single type1 WR carry in n descriptors */ 499 static inline int 500 ndesc_to_npkt(const int n) 501 { 502 503 MPASS(n > 0 && n <= SGE_MAX_WR_NDESC); 504 505 return (n * 2 - 1); 506 } 507 #define MAX_NPKT_IN_TYPE1_WR (ndesc_to_npkt(SGE_MAX_WR_NDESC)) 508 509 /* Space (in descriptors) needed for a type1 WR that carries n packets */ 510 static inline int 511 npkt_to_ndesc(const int n) 512 { 513 514 MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR); 515 516 return ((n + 2) / 2); 517 } 518 519 /* Space (in 16B units) needed for a type1 WR that carries n packets */ 520 static inline int 521 npkt_to_len16(const int n) 522 { 523 524 MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR); 525 526 return (n * 2 + 1); 527 } 528 529 #define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx) 530 531 static void 532 ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq) 533 { 534 int n; 535 u_int db = nm_txq->doorbells; 536 537 MPASS(nm_txq->pidx != nm_txq->dbidx); 538 539 n = NMIDXDIFF(nm_txq, dbidx); 540 if (n > 1) 541 clrbit(&db, DOORBELL_WCWR); 542 wmb(); 543 544 switch (ffs(db) - 1) { 545 case DOORBELL_UDB: 546 *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n)); 547 break; 548 549 case DOORBELL_WCWR: { 550 volatile uint64_t *dst, *src; 551 552 /* 553 * Queues whose 128B doorbell segment fits in the page do not 554 * use relative qid (udb_qid is always 0). Only queues with 555 * doorbell segments can do WCWR. 556 */ 557 KASSERT(nm_txq->udb_qid == 0 && n == 1, 558 ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p", 559 __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq)); 560 561 dst = (volatile void *)((uintptr_t)nm_txq->udb + 562 UDBS_WR_OFFSET - UDBS_DB_OFFSET); 563 src = (void *)&nm_txq->desc[nm_txq->dbidx]; 564 while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1]) 565 *dst++ = *src++; 566 wmb(); 567 break; 568 } 569 570 case DOORBELL_UDBWC: 571 *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n)); 572 wmb(); 573 break; 574 575 case DOORBELL_KDB: 576 t4_write_reg(sc, sc->sge_kdoorbell_reg, 577 V_QID(nm_txq->cntxt_id) | V_PIDX(n)); 578 break; 579 } 580 nm_txq->dbidx = nm_txq->pidx; 581 } 582 583 /* 584 * Write work requests to send 'npkt' frames and ring the doorbell to send them 585 * on their way. No need to check for wraparound. 586 */ 587 static void 588 cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq, 589 struct netmap_kring *kring, int npkt, int npkt_remaining, int txcsum) 590 { 591 struct netmap_ring *ring = kring->ring; 592 struct netmap_slot *slot; 593 const u_int lim = kring->nkr_num_slots - 1; 594 struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx]; 595 uint16_t len; 596 uint64_t ba; 597 struct cpl_tx_pkt_core *cpl; 598 struct ulptx_sgl *usgl; 599 int i, n; 600 601 while (npkt) { 602 n = min(npkt, MAX_NPKT_IN_TYPE1_WR); 603 len = 0; 604 605 wr = (void *)&nm_txq->desc[nm_txq->pidx]; 606 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 607 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n))); 608 wr->npkt = n; 609 wr->r3 = 0; 610 wr->type = 1; 611 cpl = (void *)(wr + 1); 612 613 for (i = 0; i < n; i++) { 614 slot = &ring->slot[kring->nr_hwcur]; 615 PNMB(kring->na, slot, &ba); 616 MPASS(ba != 0); 617 618 cpl->ctrl0 = nm_txq->cpl_ctrl0; 619 cpl->pack = 0; 620 cpl->len = htobe16(slot->len); 621 /* 622 * netmap(4) says "netmap does not use features such as 623 * checksum offloading, TCP segmentation offloading, 624 * encryption, VLAN encapsulation/decapsulation, etc." 625 * 626 * So the ncxl interfaces have tx hardware checksumming 627 * disabled by default. But you can override netmap by 628 * enabling IFCAP_TXCSUM on the interface manully. 629 */ 630 cpl->ctrl1 = txcsum ? 0 : 631 htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS); 632 633 usgl = (void *)(cpl + 1); 634 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 635 V_ULPTX_NSGE(1)); 636 usgl->len0 = htobe32(slot->len); 637 usgl->addr0 = htobe64(ba); 638 639 slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); 640 cpl = (void *)(usgl + 1); 641 MPASS(slot->len + len <= UINT16_MAX); 642 len += slot->len; 643 kring->nr_hwcur = nm_next(kring->nr_hwcur, lim); 644 } 645 wr->plen = htobe16(len); 646 647 npkt -= n; 648 nm_txq->pidx += npkt_to_ndesc(n); 649 MPASS(nm_txq->pidx <= nm_txq->sidx); 650 if (__predict_false(nm_txq->pidx == nm_txq->sidx)) { 651 /* 652 * This routine doesn't know how to write WRs that wrap 653 * around. Make sure it wasn't asked to. 654 */ 655 MPASS(npkt == 0); 656 nm_txq->pidx = 0; 657 } 658 659 if (npkt == 0 && npkt_remaining == 0) { 660 /* All done. */ 661 if (lazy_tx_credit_flush == 0) { 662 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | 663 F_FW_WR_EQUIQ); 664 nm_txq->equeqidx = nm_txq->pidx; 665 nm_txq->equiqidx = nm_txq->pidx; 666 } 667 ring_nm_txq_db(sc, nm_txq); 668 return; 669 } 670 671 if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) { 672 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | 673 F_FW_WR_EQUIQ); 674 nm_txq->equeqidx = nm_txq->pidx; 675 nm_txq->equiqidx = nm_txq->pidx; 676 } else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) { 677 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 678 nm_txq->equeqidx = nm_txq->pidx; 679 } 680 if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC) 681 ring_nm_txq_db(sc, nm_txq); 682 } 683 684 /* Will get called again. */ 685 MPASS(npkt_remaining); 686 } 687 688 /* How many contiguous free descriptors starting at pidx */ 689 static inline int 690 contiguous_ndesc_available(struct sge_nm_txq *nm_txq) 691 { 692 693 if (nm_txq->cidx > nm_txq->pidx) 694 return (nm_txq->cidx - nm_txq->pidx - 1); 695 else if (nm_txq->cidx > 0) 696 return (nm_txq->sidx - nm_txq->pidx); 697 else 698 return (nm_txq->sidx - nm_txq->pidx - 1); 699 } 700 701 static int 702 reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq) 703 { 704 struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx]; 705 uint16_t hw_cidx = spg->cidx; /* snapshot */ 706 struct fw_eth_tx_pkts_wr *wr; 707 int n = 0; 708 709 hw_cidx = be16toh(hw_cidx); 710 711 while (nm_txq->cidx != hw_cidx) { 712 wr = (void *)&nm_txq->desc[nm_txq->cidx]; 713 714 MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR))); 715 MPASS(wr->type == 1); 716 MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR); 717 718 n += wr->npkt; 719 nm_txq->cidx += npkt_to_ndesc(wr->npkt); 720 721 /* 722 * We never sent a WR that wrapped around so the credits coming 723 * back, WR by WR, should never cause the cidx to wrap around 724 * either. 725 */ 726 MPASS(nm_txq->cidx <= nm_txq->sidx); 727 if (__predict_false(nm_txq->cidx == nm_txq->sidx)) 728 nm_txq->cidx = 0; 729 } 730 731 return (n); 732 } 733 734 static int 735 cxgbe_netmap_txsync(struct netmap_kring *kring, int flags) 736 { 737 struct netmap_adapter *na = kring->na; 738 struct ifnet *ifp = na->ifp; 739 struct vi_info *vi = ifp->if_softc; 740 struct adapter *sc = vi->pi->adapter; 741 struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id]; 742 const u_int head = kring->rhead; 743 u_int reclaimed = 0; 744 int n, d, npkt_remaining, ndesc_remaining, txcsum; 745 746 /* 747 * Tx was at kring->nr_hwcur last time around and now we need to advance 748 * to kring->rhead. Note that the driver's pidx moves independent of 749 * netmap's kring->nr_hwcur (pidx counts descriptors and the relation 750 * between descriptors and frames isn't 1:1). 751 */ 752 753 npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur : 754 kring->nkr_num_slots - kring->nr_hwcur + head; 755 txcsum = ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6); 756 while (npkt_remaining) { 757 reclaimed += reclaim_nm_tx_desc(nm_txq); 758 ndesc_remaining = contiguous_ndesc_available(nm_txq); 759 /* Can't run out of descriptors with packets still remaining */ 760 MPASS(ndesc_remaining > 0); 761 762 /* # of desc needed to tx all remaining packets */ 763 d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC; 764 if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR) 765 d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR); 766 767 if (d <= ndesc_remaining) 768 n = npkt_remaining; 769 else { 770 /* Can't send all, calculate how many can be sent */ 771 n = (ndesc_remaining / SGE_MAX_WR_NDESC) * 772 MAX_NPKT_IN_TYPE1_WR; 773 if (ndesc_remaining % SGE_MAX_WR_NDESC) 774 n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC); 775 } 776 777 /* Send n packets and update nm_txq->pidx and kring->nr_hwcur */ 778 npkt_remaining -= n; 779 cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining, txcsum); 780 } 781 MPASS(npkt_remaining == 0); 782 MPASS(kring->nr_hwcur == head); 783 MPASS(nm_txq->dbidx == nm_txq->pidx); 784 785 /* 786 * Second part: reclaim buffers for completed transmissions. 787 */ 788 if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) { 789 reclaimed += reclaim_nm_tx_desc(nm_txq); 790 kring->nr_hwtail += reclaimed; 791 if (kring->nr_hwtail >= kring->nkr_num_slots) 792 kring->nr_hwtail -= kring->nkr_num_slots; 793 } 794 795 return (0); 796 } 797 798 static int 799 cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags) 800 { 801 struct netmap_adapter *na = kring->na; 802 struct netmap_ring *ring = kring->ring; 803 struct ifnet *ifp = na->ifp; 804 struct vi_info *vi = ifp->if_softc; 805 struct adapter *sc = vi->pi->adapter; 806 struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id]; 807 u_int const head = kring->rhead; 808 u_int n; 809 int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; 810 811 if (black_hole) 812 return (0); /* No updates ever. */ 813 814 if (netmap_no_pendintr || force_update) { 815 kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx); 816 kring->nr_kflags &= ~NKR_PENDINTR; 817 } 818 819 if (nm_rxq->fl_db_saved > 0 && starve_fl == 0) { 820 wmb(); 821 t4_write_reg(sc, sc->sge_kdoorbell_reg, 822 nm_rxq->fl_db_val | V_PIDX(nm_rxq->fl_db_saved)); 823 nm_rxq->fl_db_saved = 0; 824 } 825 826 /* Userspace done with buffers from kring->nr_hwcur to head */ 827 n = head >= kring->nr_hwcur ? head - kring->nr_hwcur : 828 kring->nkr_num_slots - kring->nr_hwcur + head; 829 n &= ~7U; 830 if (n > 0) { 831 u_int fl_pidx = nm_rxq->fl_pidx; 832 struct netmap_slot *slot = &ring->slot[fl_pidx]; 833 uint64_t ba; 834 int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx; 835 836 /* 837 * We always deal with 8 buffers at a time. We must have 838 * stopped at an 8B boundary (fl_pidx) last time around and we 839 * must have a multiple of 8B buffers to give to the freelist. 840 */ 841 MPASS((fl_pidx & 7) == 0); 842 MPASS((n & 7) == 0); 843 844 IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots); 845 IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx); 846 847 while (n > 0) { 848 for (i = 0; i < 8; i++, fl_pidx++, slot++) { 849 PNMB(na, slot, &ba); 850 MPASS(ba != 0); 851 nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx); 852 slot->flags &= ~NS_BUF_CHANGED; 853 MPASS(fl_pidx <= nm_rxq->fl_sidx); 854 } 855 n -= 8; 856 if (fl_pidx == nm_rxq->fl_sidx) { 857 fl_pidx = 0; 858 slot = &ring->slot[0]; 859 } 860 if (++dbinc == 8 && n >= 32) { 861 wmb(); 862 if (starve_fl) 863 nm_rxq->fl_db_saved += dbinc; 864 else { 865 t4_write_reg(sc, sc->sge_kdoorbell_reg, 866 nm_rxq->fl_db_val | V_PIDX(dbinc)); 867 } 868 dbinc = 0; 869 } 870 } 871 MPASS(nm_rxq->fl_pidx == fl_pidx); 872 873 if (dbinc > 0) { 874 wmb(); 875 if (starve_fl) 876 nm_rxq->fl_db_saved += dbinc; 877 else { 878 t4_write_reg(sc, sc->sge_kdoorbell_reg, 879 nm_rxq->fl_db_val | V_PIDX(dbinc)); 880 } 881 } 882 } 883 884 return (0); 885 } 886 887 void 888 cxgbe_nm_attach(struct vi_info *vi) 889 { 890 struct port_info *pi; 891 struct adapter *sc; 892 struct netmap_adapter na; 893 894 MPASS(vi->nnmrxq > 0); 895 MPASS(vi->ifp != NULL); 896 897 pi = vi->pi; 898 sc = pi->adapter; 899 900 bzero(&na, sizeof(na)); 901 902 na.ifp = vi->ifp; 903 na.na_flags = NAF_BDG_MAYSLEEP; 904 905 /* Netmap doesn't know about the space reserved for the status page. */ 906 na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE; 907 908 /* 909 * The freelist's cidx/pidx drives netmap's rx cidx/pidx. So 910 * num_rx_desc is based on the number of buffers that can be held in the 911 * freelist, and not the number of entries in the iq. (These two are 912 * not exactly the same due to the space taken up by the status page). 913 */ 914 na.num_rx_desc = rounddown(vi->qsize_rxq, 8); 915 na.nm_txsync = cxgbe_netmap_txsync; 916 na.nm_rxsync = cxgbe_netmap_rxsync; 917 na.nm_register = cxgbe_netmap_reg; 918 na.num_tx_rings = vi->nnmtxq; 919 na.num_rx_rings = vi->nnmrxq; 920 netmap_attach(&na); 921 } 922 923 void 924 cxgbe_nm_detach(struct vi_info *vi) 925 { 926 927 MPASS(vi->nnmrxq > 0); 928 MPASS(vi->ifp != NULL); 929 930 netmap_detach(vi->ifp); 931 } 932 933 static inline const void * 934 unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl) 935 { 936 937 MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL); 938 939 /* data[0] is RSS header */ 940 return (&cpl->data[1]); 941 } 942 943 static void 944 handle_nm_sge_egr_update(struct adapter *sc, struct ifnet *ifp, 945 const struct cpl_sge_egr_update *egr) 946 { 947 uint32_t oq; 948 struct sge_nm_txq *nm_txq; 949 950 oq = be32toh(egr->opcode_qid); 951 MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE); 952 nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start]; 953 954 netmap_tx_irq(ifp, nm_txq->nid); 955 } 956 957 void 958 t4_nm_intr(void *arg) 959 { 960 struct sge_nm_rxq *nm_rxq = arg; 961 struct vi_info *vi = nm_rxq->vi; 962 struct adapter *sc = vi->pi->adapter; 963 struct ifnet *ifp = vi->ifp; 964 struct netmap_adapter *na = NA(ifp); 965 struct netmap_kring *kring = na->rx_rings[nm_rxq->nid]; 966 struct netmap_ring *ring = kring->ring; 967 struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx]; 968 const void *cpl; 969 uint32_t lq; 970 u_int work = 0; 971 uint8_t opcode; 972 uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx); 973 u_int fl_credits = fl_cidx & 7; 974 u_int ndesc = 0; /* desc processed since last cidx update */ 975 u_int nframes = 0; /* frames processed since last netmap wakeup */ 976 977 while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) { 978 979 rmb(); 980 981 lq = be32toh(d->rsp.pldbuflen_qid); 982 opcode = d->rss.opcode; 983 cpl = &d->cpl[0]; 984 985 switch (G_RSPD_TYPE(d->rsp.u.type_gen)) { 986 case X_RSPD_TYPE_FLBUF: 987 988 /* fall through */ 989 990 case X_RSPD_TYPE_CPL: 991 MPASS(opcode < NUM_CPL_CMDS); 992 993 switch (opcode) { 994 case CPL_FW4_MSG: 995 case CPL_FW6_MSG: 996 cpl = unwrap_nm_fw6_msg(cpl); 997 /* fall through */ 998 case CPL_SGE_EGR_UPDATE: 999 handle_nm_sge_egr_update(sc, ifp, cpl); 1000 break; 1001 case CPL_RX_PKT: 1002 ring->slot[fl_cidx].len = G_RSPD_LEN(lq) - 1003 sc->params.sge.fl_pktshift; 1004 ring->slot[fl_cidx].flags = 0; 1005 nframes++; 1006 if (!(lq & F_RSPD_NEWBUF)) { 1007 MPASS(black_hole == 2); 1008 break; 1009 } 1010 fl_credits++; 1011 if (__predict_false(++fl_cidx == nm_rxq->fl_sidx)) 1012 fl_cidx = 0; 1013 break; 1014 default: 1015 panic("%s: unexpected opcode 0x%x on nm_rxq %p", 1016 __func__, opcode, nm_rxq); 1017 } 1018 break; 1019 1020 case X_RSPD_TYPE_INTR: 1021 /* Not equipped to handle forwarded interrupts. */ 1022 panic("%s: netmap queue received interrupt for iq %u\n", 1023 __func__, lq); 1024 1025 default: 1026 panic("%s: illegal response type %d on nm_rxq %p", 1027 __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq); 1028 } 1029 1030 d++; 1031 if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) { 1032 nm_rxq->iq_cidx = 0; 1033 d = &nm_rxq->iq_desc[0]; 1034 nm_rxq->iq_gen ^= F_RSPD_GEN; 1035 } 1036 1037 if (__predict_false(++nframes == rx_nframes) && !black_hole) { 1038 atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx); 1039 netmap_rx_irq(ifp, nm_rxq->nid, &work); 1040 nframes = 0; 1041 } 1042 1043 if (__predict_false(++ndesc == rx_ndesc)) { 1044 if (black_hole && fl_credits >= 8) { 1045 fl_credits /= 8; 1046 IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, 1047 nm_rxq->fl_sidx); 1048 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1049 nm_rxq->fl_db_val | V_PIDX(fl_credits)); 1050 fl_credits = fl_cidx & 7; 1051 } 1052 t4_write_reg(sc, sc->sge_gts_reg, 1053 V_CIDXINC(ndesc) | 1054 V_INGRESSQID(nm_rxq->iq_cntxt_id) | 1055 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 1056 ndesc = 0; 1057 } 1058 } 1059 1060 atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx); 1061 if (black_hole) { 1062 fl_credits /= 8; 1063 IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx); 1064 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1065 nm_rxq->fl_db_val | V_PIDX(fl_credits)); 1066 } else if (nframes > 0) 1067 netmap_rx_irq(ifp, nm_rxq->nid, &work); 1068 1069 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndesc) | 1070 V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) | 1071 V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx))); 1072 } 1073 #endif 1074