Lines Matching +full:p0 +full:- +full:retry +full:- +full:params

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
308 * Each tunable is set to a default value here if it's known at compile-time.
309 * Otherwise it is set to -n as an indication to tweak_tunables() that it should
326 int t4_ntxq = -NTXQ;
332 int t4_nrxq = -NRXQ;
338 static int t4_ntxq_vi = -NTXQ_VI;
343 static int t4_nrxq_vi = -NRXQ_VI;
349 0, "Reserve TX queue 0 of each VI for non-flowid packets");
353 static int t4_nofldtxq = -NOFLDTXQ;
358 static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
365 static int t4_nofldrxq = -NOFLDRXQ;
370 static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
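The negative defaults above use the convention described at the top of this tunables block: a compile-time value of -n means "not set by the administrator", and tweak_tunables() computes the real default at attach time. A minimal sketch of that idea (the helper name and clamping policy are illustrative, not the driver's exact code):

	/*
	 * Illustrative only: a non-positive tunable is replaced with a
	 * computed default, here capped by the CPU count and by the
	 * magnitude of the compiled-in hint.
	 */
	static void
	normalize_queue_tunable(int *t, int ncpus)
	{
		if (*t > 0)
			return;			/* administrator set it explicitly */
		*t = max(1, min(ncpus, -*t));	/* computed default */
	}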
379 #define PKTC_IDX_OFLD (-1)
384 /* 0 means chip/fw default, non-zero number is value in microseconds */
389 /* 0 means chip/fw default, non-zero number is value in microseconds */
394 /* 0 means chip/fw default, non-zero number is # of keepalives before abort */
399 /* 0 means chip/fw default, non-zero number is value in microseconds */
404 /* 0 means chip/fw default, non-zero number is value in microseconds */
409 /* 0 means chip/fw default, non-zero number is # of rexmt before abort */
414 /* -1 means chip/fw default, other values are raw backoff values to use */
416 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
472 static int t4_nnmtxq = -NNMTXQ;
477 static int t4_nnmrxq = -NNMRXQ;
482 static int t4_nnmtxq_vi = -NNMTXQ_VI;
487 static int t4_nnmrxq_vi = -NNMRXQ_VI;
501 #define PKTC_IDX (-1)
519 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
523 0, "Interrupt types allowed (bit 0 = INTx, 1 = MSI, 2 = MSI-X)");
529 #define BUILTIN_CF "built-in"
553 * -1 to run with the firmware default. Same as FEC_AUTO (bit 5)
556 static int t4_fec = -1;
561 t4_fec_bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2\6auto\7module";
567 * -1 to set FORCE_FEC iff requested_fec != AUTO. Multiple FEC bits are okay.
574 static int t4_force_fec = -1;
580 * -1 to run with the firmware default.
584 static int t4_autoneg = -1;
589 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
590 * encouraged respectively). '-n' is the same as 'n' except the firmware
595 "Firmware auto-install (0 = prohibited, 1 = allowed, 2 = encouraged)");
614 static int t4_nvmecaps_allowed = -1;
628 static int t4_toecaps_allowed = -1;
632 static int t4_rdmacaps_allowed = -1;
636 static int t4_cryptocaps_allowed = -1;
640 static int t4_iscsicaps_allowed = -1;
663 * -1: driver should figure out a good value.
668 static int pcie_relaxed_ordering = -1;
694 * Set to non-zero to enable the attack filter. A packet that matches any of
767 &t4_tls_short_records, 0, "Use cipher-only mode for short records.");
771 &t4_tls_partial_ghash, 0, "Use partial GHASH for AES-GCM records.");
786 uint16_t intr_type; /* INTx, MSI, or MSI-X */
957 {0x4400, "Chelsio T440-dbg"},
958 {0x4401, "Chelsio T420-CR"},
959 {0x4402, "Chelsio T422-CR"},
960 {0x4403, "Chelsio T440-CR"},
961 {0x4404, "Chelsio T420-BCH"},
962 {0x4405, "Chelsio T440-BCH"},
963 {0x4406, "Chelsio T440-CH"},
964 {0x4407, "Chelsio T420-SO"},
965 {0x4408, "Chelsio T420-CX"},
966 {0x4409, "Chelsio T420-BT"},
967 {0x440a, "Chelsio T404-BT"},
968 {0x440e, "Chelsio T440-LP-CR"},
971 {0x5400, "Chelsio T580-dbg"},
972 {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */
973 {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */
974 {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */
975 {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */
976 {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */
977 {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */
978 {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */
979 {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */
980 {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */
981 {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */
982 {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */
983 {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */
984 {0x5415, "Chelsio T502-BT"}, /* 2 x 1G */
985 {0x5418, "Chelsio T540-BT"}, /* 4 x 10GBaseT */
986 {0x5419, "Chelsio T540-LP-BT"}, /* 4 x 10GBaseT */
987 {0x541a, "Chelsio T540-SO-BT"}, /* 4 x 10GBaseT, nomem */
988 {0x541b, "Chelsio T540-SO-CR"}, /* 4 x 10G, nomem */
991 {0x5483, "Custom T540-CR"},
992 {0x5484, "Custom T540-BT"},
995 {0x6400, "Chelsio T6-DBG-25"}, /* 2 x 10/25G, debug */
996 {0x6401, "Chelsio T6225-CR"}, /* 2 x 10/25G */
997 {0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */
998 {0x6403, "Chelsio T6425-CR"}, /* 4 x 10/25G */
999 {0x6404, "Chelsio T6425-SO-CR"}, /* 4 x 10/25G, nomem */
1000 {0x6405, "Chelsio T6225-SO-OCP3"}, /* 2 x 10/25G, nomem */
1001 {0x6406, "Chelsio T6225-OCP3"}, /* 2 x 10/25G */
1002 {0x6407, "Chelsio T62100-LP-CR"}, /* 2 x 40/50/100G */
1003 {0x6408, "Chelsio T62100-SO-CR"}, /* 2 x 40/50/100G, nomem */
1004 {0x6409, "Chelsio T6210-BT"}, /* 2 x 10GBASE-T */
1005 {0x640d, "Chelsio T62100-CR"}, /* 2 x 40/50/100G */
1006 {0x6410, "Chelsio T6-DBG-100"}, /* 2 x 40/50/100G, debug */
1007 {0x6411, "Chelsio T6225-LL-CR"}, /* 2 x 10/25G */
1008 {0x6414, "Chelsio T62100-SO-OCP3"}, /* 2 x 40/50/100G, nomem */
1009 {0x6415, "Chelsio T6201-BT"}, /* 2 x 1000BASE-T */
1012 {0x6480, "Custom T6225-CR"},
1013 {0x6481, "Custom T62100-CR"},
1014 {0x6482, "Custom T6225-CR"},
1015 {0x6483, "Custom T62100-CR"},
1016 {0x6484, "Custom T64100-CR"},
1017 {0x6485, "Custom T6240-SO"},
1018 {0x6486, "Custom T6225-SO-CR"},
1019 {0x6487, "Custom T6225-CR"},
1022 {0x7400, "Chelsio T72200-DBG"}, /* 2 x 200G, debug */
1029 {0x7407, "Chelsio T72200-FH"}, /* 2 x 40/100/200G, 2 mem */
1031 {0x7409, "Chelsio S7210-BT"}, /* 2 x 10GBASE-T, nomem */
1032 {0x740a, "Chelsio T7450-RC"}, /* 4 x 10/25/50G, 1 mem, RC */
1033 {0x740b, "Chelsio T72200-RC"}, /* 2 x 40/100/200G, 1 mem, RC */
1034 {0x740c, "Chelsio T72200-FH-RC"}, /* 2 x 40/100/200G, 2 mem, RC */
1035 {0x740d, "Chelsio S72200-OCP3"}, /* 2 x 40/100/200G OCP3 */
1036 {0x740e, "Chelsio S7450-OCP3"}, /* 4 x 1/20/25/50G OCP3 */
1037 {0x740f, "Chelsio S7410-BT-OCP3"}, /* 4 x 10GBASE-T OCP3 */
1038 {0x7410, "Chelsio S7210-BT-A"}, /* 2 x 10GBASE-T */
1216 device_printf(sc->dev, "chip id %d is not supported.\n", id); in t4_init_devnames()
1217 sc->names = NULL; in t4_init_devnames()
1218 } else if (id - CHELSIO_T4 < nitems(devnames)) in t4_init_devnames()
1219 sc->names = &devnames[id - CHELSIO_T4]; in t4_init_devnames()
1221 sc->names = &devnames[nitems(devnames) - 1]; in t4_init_devnames()
1232 parent = device_get_nameunit(sc->dev); in t4_ifnet_unit()
1233 name = sc->names->ifnet_name; in t4_ifnet_unit()
1236 value == pi->port_id) in t4_ifnet_unit()
1239 return (-1); in t4_ifnet_unit()
1257 cur = &sc->cal_info[sc->cal_current]; in t4_calibration()
1258 next_up = (sc->cal_current + 1) % CNT_CAL_INFO; in t4_calibration()
1259 nex = &sc->cal_info[next_up]; in t4_calibration()
1260 if (__predict_false(sc->cal_count == 0)) { in t4_calibration()
1262 cur->hw_cur = hw; in t4_calibration()
1263 cur->sbt_cur = sbt; in t4_calibration()
1264 sc->cal_count++; in t4_calibration()
1268 if (cur->hw_cur == hw) { in t4_calibration()
1270 sc->cal_count = 0; in t4_calibration()
1271 atomic_store_rel_int(&cur->gen, 0); in t4_calibration()
1275 seqc_write_begin(&nex->gen); in t4_calibration()
1276 nex->hw_prev = cur->hw_cur; in t4_calibration()
1277 nex->sbt_prev = cur->sbt_cur; in t4_calibration()
1278 nex->hw_cur = hw; in t4_calibration()
1279 nex->sbt_cur = sbt; in t4_calibration()
1280 seqc_write_end(&nex->gen); in t4_calibration()
1281 sc->cal_current = next_up; in t4_calibration()
1283 callout_reset_sbt_curcpu(&sc->cal_callout, SBT_1S, 0, t4_calibration, in t4_calibration()
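t4_calibration() above publishes each new (hardware timestamp, sbinuptime) pair under a seqc generation counter, so a consumer is expected to retry until it observes a consistent generation. A sketch of that read side, reusing the field names from the writer (field types are assumed, the driver's actual reader may differ, and a gen of 0 marks a not-yet-valid entry):

	static void
	read_calibration(struct adapter *sc, uint64_t *hw, sbintime_t *sbt)
	{
		seqc_t gen;
		int idx;

		do {
			idx = sc->cal_current;
			gen = seqc_read(&sc->cal_info[idx].gen);	/* waits out an in-progress write */
			*hw = sc->cal_info[idx].hw_cur;
			*sbt = sc->cal_info[idx].sbt_cur;
		} while (!seqc_consistent(&sc->cal_info[idx].gen, gen));
	}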
1298 sc->cal_info[i].gen = 0; in t4_calibration_start()
1300 sc->cal_current = 0; in t4_calibration_start()
1301 sc->cal_count = 0; in t4_calibration_start()
1302 sc->cal_gen = 0; in t4_calibration_start()
1327 sc->dev = dev; in t4_attach()
1328 sysctl_ctx_init(&sc->ctx); in t4_attach()
1329 TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags); in t4_attach()
1339 sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5); in t4_attach()
1351 sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS); in t4_attach()
1352 sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL); in t4_attach()
1353 sc->traceq = -1; in t4_attach()
1354 mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF); in t4_attach()
1355 snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer", in t4_attach()
1358 snprintf(sc->lockname, sizeof(sc->lockname), "%s", in t4_attach()
1360 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF); in t4_attach()
1363 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF); in t4_attach()
1364 TAILQ_INIT(&sc->sfl); in t4_attach()
1365 callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0); in t4_attach()
1367 mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF); in t4_attach()
1369 sc->policy = NULL; in t4_attach()
1370 rw_init(&sc->policy_lock, "connection offload policy"); in t4_attach()
1372 callout_init(&sc->ktls_tick, 1); in t4_attach()
1374 callout_init(&sc->cal_callout, 1); in t4_attach()
1376 refcount_init(&sc->vxlan_refcount, 0); in t4_attach()
1378 TASK_INIT(&sc->reset_task, 0, reset_adapter_task, sc); in t4_attach()
1379 TASK_INIT(&sc->fatal_error_task, 0, fatal_error_task, sc); in t4_attach()
1381 sc->ctrlq_oid = SYSCTL_ADD_NODE(&sc->ctx, in t4_attach()
1382 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "ctrlq", in t4_attach()
1384 sc->fwq_oid = SYSCTL_ADD_NODE(&sc->ctx, in t4_attach()
1385 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "fwq", in t4_attach()
1392 memset(sc->chan_map, 0xff, sizeof(sc->chan_map)); in t4_attach()
1393 memset(sc->port_map, 0xff, sizeof(sc->port_map)); in t4_attach()
1397 rc = -t4_prep_adapter(sc, buf); in t4_attach()
1411 sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j); in t4_attach()
1412 sc->mbox = sc->pf; in t4_attach()
1415 if (sc->names == NULL) { in t4_attach()
1434 rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev)); in t4_attach()
1457 MPASS(sc->flags & FW_OK); in t4_attach()
1463 if (sc->flags & MASTER_PF) { in t4_attach()
1490 * First pass over all the ports - allocate VIs and initialize some in t4_attach()
1497 sc->port[i] = pi; in t4_attach()
1500 pi->adapter = sc; in t4_attach()
1501 pi->port_id = i; in t4_attach()
1504 * pi->nvi's final value is known. in t4_attach()
1506 pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE, in t4_attach()
1513 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i); in t4_attach()
1517 free(pi->vi, M_CXGBE); in t4_attach()
1519 sc->port[i] = NULL; in t4_attach()
1523 if (is_bt(pi->port_type)) in t4_attach()
1524 setbit(&sc->bt_map, pi->hw_port); in t4_attach()
1526 MPASS(!isset(&sc->bt_map, pi->hw_port)); in t4_attach()
1528 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d", in t4_attach()
1530 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF); in t4_attach()
1531 for (j = 0; j < sc->params.tp.lb_nchan; j++) in t4_attach()
1532 sc->chan_map[pi->tx_chan + j] = i; in t4_attach()
1533 sc->port_map[pi->hw_port] = i; in t4_attach()
1542 pi->fcs_reg = -1; in t4_attach()
1544 pi->fcs_reg = A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L; in t4_attach()
1545 pi->fcs_base = 0; in t4_attach()
1548 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change, in t4_attach()
1556 pi->flags |= FIXED_IFMEDIA; in t4_attach()
1559 pi->dev = device_add_child(dev, sc->names->ifnet_name, in t4_attach()
1561 if (pi->dev == NULL) { in t4_attach()
1567 pi->vi[0].dev = pi->dev; in t4_attach()
1568 device_set_softc(pi->dev, pi); in t4_attach()
1574 nports = sc->params.nports; in t4_attach()
1580 sc->intr_type = iaq.intr_type; in t4_attach()
1581 sc->intr_count = iaq.nirq; in t4_attach()
1583 s = &sc->sge; in t4_attach()
1584 s->nctrlq = max(sc->params.nports, sc->params.ncores); in t4_attach()
1585 s->nrxq = nports * iaq.nrxq; in t4_attach()
1586 s->ntxq = nports * iaq.ntxq; in t4_attach()
1588 s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi; in t4_attach()
1589 s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi; in t4_attach()
1591 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */ in t4_attach()
1592 s->neq += nports; /* ctrl queues: 1 per port */ in t4_attach()
1593 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */ in t4_attach()
1596 s->nofldtxq = nports * iaq.nofldtxq; in t4_attach()
1598 s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi; in t4_attach()
1599 s->neq += s->nofldtxq; in t4_attach()
1601 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_ofld_txq), in t4_attach()
1607 s->nofldrxq = nports * iaq.nofldrxq; in t4_attach()
1609 s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi; in t4_attach()
1610 s->neq += s->nofldrxq; /* free list */ in t4_attach()
1611 s->niq += s->nofldrxq; in t4_attach()
1613 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq), in t4_attach()
1618 s->nnmrxq = 0; in t4_attach()
1619 s->nnmtxq = 0; in t4_attach()
1621 s->nnmrxq += nports * iaq.nnmrxq; in t4_attach()
1622 s->nnmtxq += nports * iaq.nnmtxq; in t4_attach()
1625 s->nnmrxq += nports * (num_vis - 1) * iaq.nnmrxq_vi; in t4_attach()
1626 s->nnmtxq += nports * (num_vis - 1) * iaq.nnmtxq_vi; in t4_attach()
1628 s->neq += s->nnmtxq + s->nnmrxq; in t4_attach()
1629 s->niq += s->nnmrxq; in t4_attach()
1631 s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq), in t4_attach()
1633 s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq), in t4_attach()
1636 MPASS(s->niq <= s->iqmap_sz); in t4_attach()
1637 MPASS(s->neq <= s->eqmap_sz); in t4_attach()
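A worked example of the queue bookkeeping above, with illustrative NIC-only numbers (nports = 2, iaq.ntxq = iaq.nrxq = 4, one VI per port, no offload or netmap queues):

	/*
	 * s->nrxq = 2 * 4 = 8        s->ntxq = 2 * 4 = 8
	 * s->neq  = 8 + 8 + 2 = 18   (each rxq free list is an eq; +1 ctrl queue per port)
	 * s->niq  = 8 + 1 = 9        (the extra iq is the firmware event queue)
	 */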
1639 s->ctrlq = malloc(s->nctrlq * sizeof(struct sge_wrq), M_CXGBE, in t4_attach()
1641 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE, in t4_attach()
1643 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE, in t4_attach()
1645 s->iqmap = malloc(s->iqmap_sz * sizeof(struct sge_iq *), M_CXGBE, in t4_attach()
1647 s->eqmap = malloc(s->eqmap_sz * sizeof(struct sge_eq *), M_CXGBE, in t4_attach()
1650 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE, in t4_attach()
1663 if (sc->vres.key.size != 0) in t4_attach()
1664 sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start, in t4_attach()
1665 sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK); in t4_attach()
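The vmem arena created above manages offsets inside the adapter's TLS key region; a hedged sketch of an allocation from it (the key-context size constant is illustrative):

	vmem_addr_t keyaddr;

	/* vmem_alloc() returns 0 on success and fills in the offset. */
	if (vmem_alloc(sc->key_map, TLS_KEY_CONTEXT_SZ, M_NOWAIT | M_FIRSTFIT, &keyaddr) == 0) {
		/* ... write the key material at keyaddr (e.g. via a memory window) ... */
		vmem_free(sc->key_map, keyaddr, TLS_KEY_CONTEXT_SZ);
	}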
1683 struct port_info *pi = sc->port[i]; in t4_attach()
1689 pi->nvi = num_vis; in t4_attach()
1691 vi->pi = pi; in t4_attach()
1692 vi->adapter = sc; in t4_attach()
1693 vi->first_intr = -1; in t4_attach()
1694 vi->qsize_rxq = t4_qsize_rxq; in t4_attach()
1695 vi->qsize_txq = t4_qsize_txq; in t4_attach()
1697 vi->first_rxq = rqidx; in t4_attach()
1698 vi->first_txq = tqidx; in t4_attach()
1699 vi->tmr_idx = t4_tmr_idx; in t4_attach()
1700 vi->pktc_idx = t4_pktc_idx; in t4_attach()
1701 vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi; in t4_attach()
1702 vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi; in t4_attach()
1704 rqidx += vi->nrxq; in t4_attach()
1705 tqidx += vi->ntxq; in t4_attach()
1707 if (j == 0 && vi->ntxq > 1) in t4_attach()
1708 vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0; in t4_attach()
1710 vi->rsrv_noflowq = 0; in t4_attach()
1713 vi->first_ofld_txq = ofld_tqidx; in t4_attach()
1714 vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi; in t4_attach()
1715 ofld_tqidx += vi->nofldtxq; in t4_attach()
1718 vi->ofld_tmr_idx = t4_tmr_idx_ofld; in t4_attach()
1719 vi->ofld_pktc_idx = t4_pktc_idx_ofld; in t4_attach()
1720 vi->first_ofld_rxq = ofld_rqidx; in t4_attach()
1721 vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi; in t4_attach()
1723 ofld_rqidx += vi->nofldrxq; in t4_attach()
1726 vi->first_nm_rxq = nm_rqidx; in t4_attach()
1727 vi->first_nm_txq = nm_tqidx; in t4_attach()
1729 vi->nnmrxq = iaq.nnmrxq; in t4_attach()
1730 vi->nnmtxq = iaq.nnmtxq; in t4_attach()
1732 vi->nnmrxq = iaq.nnmrxq_vi; in t4_attach()
1733 vi->nnmtxq = iaq.nnmtxq_vi; in t4_attach()
1735 nm_rqidx += vi->nnmrxq; in t4_attach()
1736 nm_tqidx += vi->nnmtxq; in t4_attach()
1751 * Ensure thread-safe mailbox access (in debug builds). in t4_attach()
1757 sc->flags |= CHK_MBOX_ACCESS; in t4_attach()
1764 sc->params.pci.speed, sc->params.pci.width, sc->params.nports, in t4_attach()
1765 sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" : in t4_attach()
1766 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), in t4_attach()
1767 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq); in t4_attach()
1774 if (rc != 0 && sc->cdev) { in t4_attach()
1798 pi = sc->port[i]; in t4_child_location()
1799 if (pi != NULL && pi->dev == dev) { in t4_child_location()
1800 sbuf_printf(sb, "port=%d", pi->port_id); in t4_child_location()
1813 if (sc->flags & FW_OK) in t4_ready()
1827 pi = sc->port[port]; in t4_read_port_device()
1828 if (pi == NULL || pi->dev == NULL) in t4_read_port_device()
1830 *child = pi->dev; in t4_read_port_device()
1894 if (sc->cdev) { in t4_detach_common()
1895 destroy_dev(sc->cdev); in t4_detach_common()
1896 sc->cdev = NULL; in t4_detach_common()
1903 sc->flags &= ~CHK_MBOX_ACCESS; in t4_detach_common()
1904 if (sc->flags & FULL_INIT_DONE) { in t4_detach_common()
1905 if (!(sc->flags & IS_VF)) in t4_detach_common()
1918 for (i = 0; i < sc->intr_count; i++) in t4_detach_common()
1919 t4_free_irq(sc, &sc->irq[i]); in t4_detach_common()
1921 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) in t4_detach_common()
1925 pi = sc->port[i]; in t4_detach_common()
1927 t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid); in t4_detach_common()
1929 mtx_destroy(&pi->pi_lock); in t4_detach_common()
1930 free(pi->vi, M_CXGBE); in t4_detach_common()
1934 callout_stop(&sc->cal_callout); in t4_detach_common()
1935 callout_drain(&sc->cal_callout); in t4_detach_common()
1937 sysctl_ctx_free(&sc->ctx); in t4_detach_common()
1940 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) in t4_detach_common()
1941 t4_fw_bye(sc, sc->mbox); in t4_detach_common()
1943 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX) in t4_detach_common()
1946 if (sc->regs_res) in t4_detach_common()
1947 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid, in t4_detach_common()
1948 sc->regs_res); in t4_detach_common()
1950 if (sc->udbs_res) in t4_detach_common()
1951 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid, in t4_detach_common()
1952 sc->udbs_res); in t4_detach_common()
1954 if (sc->msix_res) in t4_detach_common()
1955 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid, in t4_detach_common()
1956 sc->msix_res); in t4_detach_common()
1958 if (sc->l2t) in t4_detach_common()
1960 if (sc->smt) in t4_detach_common()
1961 t4_free_smt(sc->smt); in t4_detach_common()
1966 if (sc->key_map) in t4_detach_common()
1967 vmem_destroy(sc->key_map); in t4_detach_common()
1974 free(sc->sge.ofld_txq, M_CXGBE); in t4_detach_common()
1977 free(sc->sge.ofld_rxq, M_CXGBE); in t4_detach_common()
1980 free(sc->sge.nm_rxq, M_CXGBE); in t4_detach_common()
1981 free(sc->sge.nm_txq, M_CXGBE); in t4_detach_common()
1983 free(sc->irq, M_CXGBE); in t4_detach_common()
1984 free(sc->sge.rxq, M_CXGBE); in t4_detach_common()
1985 free(sc->sge.txq, M_CXGBE); in t4_detach_common()
1986 free(sc->sge.ctrlq, M_CXGBE); in t4_detach_common()
1987 free(sc->sge.iqmap, M_CXGBE); in t4_detach_common()
1988 free(sc->sge.eqmap, M_CXGBE); in t4_detach_common()
1989 free(sc->tids.ftid_tab, M_CXGBE); in t4_detach_common()
1990 free(sc->tids.hpftid_tab, M_CXGBE); in t4_detach_common()
1991 free_hftid_hash(&sc->tids); in t4_detach_common()
1992 free(sc->tids.tid_tab, M_CXGBE); in t4_detach_common()
1995 callout_drain(&sc->ktls_tick); in t4_detach_common()
1996 callout_drain(&sc->sfl_callout); in t4_detach_common()
1997 if (mtx_initialized(&sc->tids.ftid_lock)) { in t4_detach_common()
1998 mtx_destroy(&sc->tids.ftid_lock); in t4_detach_common()
1999 cv_destroy(&sc->tids.ftid_cv); in t4_detach_common()
2001 if (mtx_initialized(&sc->tids.atid_lock)) in t4_detach_common()
2002 mtx_destroy(&sc->tids.atid_lock); in t4_detach_common()
2003 if (mtx_initialized(&sc->ifp_lock)) in t4_detach_common()
2004 mtx_destroy(&sc->ifp_lock); in t4_detach_common()
2006 if (rw_initialized(&sc->policy_lock)) { in t4_detach_common()
2007 rw_destroy(&sc->policy_lock); in t4_detach_common()
2009 if (sc->policy != NULL) in t4_detach_common()
2010 free_offload_policy(sc->policy); in t4_detach_common()
2015 struct memwin *mw = &sc->memwin[i]; in t4_detach_common()
2017 if (rw_initialized(&mw->mw_lock)) in t4_detach_common()
2018 rw_destroy(&mw->mw_lock); in t4_detach_common()
2021 mtx_destroy(&sc->sfl_lock); in t4_detach_common()
2022 mtx_destroy(&sc->reg_lock); in t4_detach_common()
2023 mtx_destroy(&sc->sc_lock); in t4_detach_common()
2036 if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_STOPPED))) { in stop_adapter()
2038 __func__, curthread, sc->flags, sc->error_flags); in stop_adapter()
2042 sc->flags, sc->error_flags); in stop_adapter()
2045 pi = sc->port[i]; in stop_adapter()
2049 if (pi->up_vis > 0 && pi->link_cfg.link_ok) { in stop_adapter()
2056 pi->link_cfg.link_ok = false; in stop_adapter()
2070 if (!atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_STOPPED))) { in restart_adapter()
2072 __func__, curthread, sc->flags, sc->error_flags); in restart_adapter()
2076 sc->flags, sc->error_flags); in restart_adapter()
2079 MPASS((sc->flags & FW_OK) == 0); in restart_adapter()
2080 MPASS((sc->flags & MASTER_PF) == 0); in restart_adapter()
2081 MPASS(sc->reset_thread == NULL); in restart_adapter()
2088 sc->reset_thread = curthread; in restart_adapter()
2092 sc->reset_thread = NULL; in restart_adapter()
2093 atomic_set_int(&sc->error_flags, ADAP_STOPPED); in restart_adapter()
2096 atomic_clear_int(&sc->error_flags, ADAP_FATAL_ERR); in restart_adapter()
2097 atomic_add_int(&sc->incarnation, 1); in restart_adapter()
2098 atomic_add_int(&sc->num_resets, 1); in restart_adapter()
2109 MPASS(sc->reset_thread == curthread); in set_adapter_hwstatus()
2110 mtx_lock(&sc->reg_lock); in set_adapter_hwstatus()
2111 atomic_clear_int(&sc->error_flags, HW_OFF_LIMITS); in set_adapter_hwstatus()
2112 mtx_unlock(&sc->reg_lock); in set_adapter_hwstatus()
2116 mtx_lock(&sc->reg_lock); in set_adapter_hwstatus()
2117 atomic_set_int(&sc->error_flags, HW_OFF_LIMITS); in set_adapter_hwstatus()
2118 mtx_unlock(&sc->reg_lock); in set_adapter_hwstatus()
2119 sc->flags &= ~(FW_OK | MASTER_PF); in set_adapter_hwstatus()
2120 sc->reset_thread = NULL; in set_adapter_hwstatus()
2158 pi = sc->port[i]; in stop_lld()
2161 pi->vxlan_tcam_entry = false; in stop_lld()
2163 vi->xact_addr_filt = -1; in stop_lld()
2164 mtx_lock(&vi->tick_mtx); in stop_lld()
2165 vi->flags |= VI_SKIP_STATS; in stop_lld()
2166 mtx_unlock(&vi->tick_mtx); in stop_lld()
2167 if (!(vi->flags & VI_INIT_DONE)) in stop_lld()
2170 ifp = vi->ifp; in stop_lld()
2172 mtx_lock(&vi->tick_mtx); in stop_lld()
2173 callout_stop(&vi->tick); in stop_lld()
2174 mtx_unlock(&vi->tick_mtx); in stop_lld()
2175 callout_drain(&vi->tick); in stop_lld()
2183 txq->eq.flags &= ~(EQ_ENABLED | EQ_HW_ALLOCATED); in stop_lld()
2188 TXQ_LOCK(&ofld_txq->wrq); in stop_lld()
2189 ofld_txq->wrq.eq.flags &= ~EQ_HW_ALLOCATED; in stop_lld()
2190 TXQ_UNLOCK(&ofld_txq->wrq); in stop_lld()
2194 rxq->iq.flags &= ~IQ_HW_ALLOCATED; in stop_lld()
2198 ofld_rxq->iq.flags &= ~IQ_HW_ALLOCATED; in stop_lld()
2205 if (sc->flags & FULL_INIT_DONE) { in stop_lld()
2207 wrq = &sc->sge.ctrlq[i]; in stop_lld()
2209 wrq->eq.flags &= ~EQ_HW_ALLOCATED; in stop_lld()
2214 if (pi->flags & HAS_TRACEQ) { in stop_lld()
2215 pi->flags &= ~HAS_TRACEQ; in stop_lld()
2216 sc->traceq = -1; in stop_lld()
2217 sc->tracer_valid = 0; in stop_lld()
2218 sc->tracer_enabled = 0; in stop_lld()
2221 if (sc->flags & FULL_INIT_DONE) { in stop_lld()
2223 sc->sge.fwq.flags &= ~IQ_HW_ALLOCATED; in stop_lld()
2224 quiesce_iq_fl(sc, &sc->sge.fwq, NULL); in stop_lld()
2228 callout_stop(&sc->cal_callout); in stop_lld()
2229 callout_drain(&sc->cal_callout); in stop_lld()
2287 struct adapter_params params; member
2303 o->flags = sc->flags; in save_caps_and_params()
2305 o->nbmcaps = sc->nbmcaps; in save_caps_and_params()
2306 o->linkcaps = sc->linkcaps; in save_caps_and_params()
2307 o->switchcaps = sc->switchcaps; in save_caps_and_params()
2308 o->nvmecaps = sc->nvmecaps; in save_caps_and_params()
2309 o->niccaps = sc->niccaps; in save_caps_and_params()
2310 o->toecaps = sc->toecaps; in save_caps_and_params()
2311 o->rdmacaps = sc->rdmacaps; in save_caps_and_params()
2312 o->cryptocaps = sc->cryptocaps; in save_caps_and_params()
2313 o->iscsicaps = sc->iscsicaps; in save_caps_and_params()
2314 o->fcoecaps = sc->fcoecaps; in save_caps_and_params()
2316 o->cfcsum = sc->cfcsum; in save_caps_and_params()
2317 MPASS(sizeof(o->cfg_file) == sizeof(sc->cfg_file)); in save_caps_and_params()
2318 memcpy(o->cfg_file, sc->cfg_file, sizeof(o->cfg_file)); in save_caps_and_params()
2320 o->params = sc->params; in save_caps_and_params()
2321 o->vres = sc->vres; in save_caps_and_params()
2322 o->tids = sc->tids; in save_caps_and_params()
2323 o->sge = sc->sge; in save_caps_and_params()
2325 o->rawf_base = sc->rawf_base; in save_caps_and_params()
2326 o->nrawf = sc->nrawf; in save_caps_and_params()
2338 if (o->c##caps != sc->c##caps) { \ in compare_caps_and_params()
2339 CH_ERR(sc, "%scaps 0x%04x -> 0x%04x.\n", #c, o->c##caps, \ in compare_caps_and_params()
2340 sc->c##caps); \ in compare_caps_and_params()
2357 if (o->cfcsum != sc->cfcsum) { in compare_caps_and_params()
2358 CH_ERR(sc, "config file %s (0x%x) -> %s (0x%x)\n", o->cfg_file, in compare_caps_and_params()
2359 o->cfcsum, sc->cfg_file, sc->cfcsum); in compare_caps_and_params()
2364 if (o->p != sc->p) { \ in compare_caps_and_params()
2365 CH_ERR(sc, #name " %d -> %d\n", o->p, sc->p); \ in compare_caps_and_params()
2384 COMPARE_PARAM(params.mps_bg_map, mps_bg_map); in compare_caps_and_params()
2385 COMPARE_PARAM(params.filter2_wr_support, filter2_wr_support); in compare_caps_and_params()
2386 COMPARE_PARAM(params.ulptx_memwrite_dsgl, ulptx_memwrite_dsgl); in compare_caps_and_params()
2387 COMPARE_PARAM(params.fr_nsmr_tpte_wr_support, fr_nsmr_tpte_wr_support); in compare_caps_and_params()
2388 COMPARE_PARAM(params.max_pkts_per_eth_tx_pkts_wr, max_pkts_per_eth_tx_pkts_wr); in compare_caps_and_params()
2393 COMPARE_PARAM(params.eo_wr_cred, eo_wr_cred); in compare_caps_and_params()
2394 COMPARE_PARAM(params.ethoffload, ethoffload); in compare_caps_and_params()
2399 COMPARE_PARAM(params.ofldq_wr_cred, ofldq_wr_cred); in compare_caps_and_params()
2414 COMPARE_PARAM(params.max_ordird_qp, max_ordird_qp); in compare_caps_and_params()
2415 COMPARE_PARAM(params.max_ird_adapter, max_ird_adapter); in compare_caps_and_params()
2457 MPASS(sc->flags & FW_OK); in restart_lld()
2459 if (sc->flags & MASTER_PF) { in restart_lld()
2478 pi = sc->port[i]; in restart_lld()
2480 MPASS(pi->vi != NULL); in restart_lld()
2481 MPASS(pi->vi[0].dev == pi->dev); in restart_lld()
2483 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i); in restart_lld()
2486 "failed to re-initialize port %d: %d\n", i, rc); in restart_lld()
2489 MPASS(sc->chan_map[pi->tx_chan] == i); in restart_lld()
2501 "failed to re-allocate extra VI: %d\n", rc); in restart_lld()
2514 if (sc->flags & FULL_INIT_DONE) { in restart_lld()
2517 CH_ERR(sc, "failed to re-initialize adapter: %d\n", rc); in restart_lld()
2521 if (sc->vxlan_refcount > 0) in restart_lld()
2525 pi = sc->port[i]; in restart_lld()
2527 mtx_lock(&vi->tick_mtx); in restart_lld()
2528 vi->flags &= ~VI_SKIP_STATS; in restart_lld()
2529 mtx_unlock(&vi->tick_mtx); in restart_lld()
2530 if (!(vi->flags & VI_INIT_DONE)) in restart_lld()
2534 CH_ERR(vi, "failed to re-initialize " in restart_lld()
2538 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { in restart_lld()
2539 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; in restart_lld()
2540 t4_set_trace_rss_control(sc, pi->tx_chan, sc->traceq); in restart_lld()
2541 pi->flags |= HAS_TRACEQ; in restart_lld()
2544 ifp = vi->ifp; in restart_lld()
2556 CH_ERR(vi, "failed to re-configure MAC: %d\n", rc); in restart_lld()
2559 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, in restart_lld()
2562 CH_ERR(vi, "failed to re-enable VI: %d\n", rc); in restart_lld()
2567 txq->eq.flags |= EQ_ENABLED; in restart_lld()
2570 mtx_lock(&vi->tick_mtx); in restart_lld()
2571 callout_schedule(&vi->tick, hz); in restart_lld()
2572 mtx_unlock(&vi->tick_mtx); in restart_lld()
2575 if (pi->up_vis > 0) { in restart_lld()
2580 if (pi->link_cfg.link_ok) in restart_lld()
2588 pi = sc->port[i]; in restart_lld()
2590 if (!(vi->flags & VI_INIT_DONE)) in restart_lld()
2592 ifp = vi->ifp; in restart_lld()
2597 CH_ERR(vi, "failed to re-configure MCAST MACs: %d\n", rc); in restart_lld()
2662 MPASS(sc->error_flags & HW_OFF_LIMITS); in reset_adapter_with_pl_rst()
2663 bus_space_write_4(sc->bt, sc->bh, A_PL_RST, in reset_adapter_with_pl_rst()
2672 device_t pdev = device_get_parent(sc->dev); in reset_adapter_with_pcie_sbr()
2718 device_t pdev = device_get_parent(sc->dev); in reset_adapter_with_pcie_link_bounce()
2793 const int flags = sc->flags; in reset_adapter_task()
2794 const int eflags = sc->error_flags; in reset_adapter_task()
2802 "flags 0x%08x -> 0x%08x, err_flags 0x%08x -> 0x%08x.\n", in reset_adapter_task()
2803 rc, flags, sc->flags, eflags, sc->error_flags); in reset_adapter_task()
2812 device_set_descf(dev, "port %d", pi->port_id); in cxgbe_probe()
2828 struct sysctl_ctx_list *ctx = &vi->ctx; in cxgbe_vi_attach()
2831 struct adapter *sc = vi->adapter; in cxgbe_vi_attach()
2834 children = SYSCTL_CHILDREN(device_get_sysctl_tree(vi->dev)); in cxgbe_vi_attach()
2835 vi->rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rxq", in cxgbe_vi_attach()
2837 vi->txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "txq", in cxgbe_vi_attach()
2840 vi->nm_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_rxq", in cxgbe_vi_attach()
2842 vi->nm_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_txq", in cxgbe_vi_attach()
2846 vi->ofld_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_rxq", in cxgbe_vi_attach()
2850 vi->ofld_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_txq", in cxgbe_vi_attach()
2854 vi->xact_addr_filt = -1; in cxgbe_vi_attach()
2855 mtx_init(&vi->tick_mtx, "vi tick", NULL, MTX_DEF); in cxgbe_vi_attach()
2856 callout_init_mtx(&vi->tick, &vi->tick_mtx, 0); in cxgbe_vi_attach()
2857 if (sc->flags & IS_VF || t4_tx_vm_wr != 0) in cxgbe_vi_attach()
2858 vi->flags |= TX_USES_VM_WR; in cxgbe_vi_attach()
2862 vi->ifp = ifp; in cxgbe_vi_attach()
2872 if (vi->pi->nvi > 1 || sc->flags & IS_VF) in cxgbe_vi_attach()
2896 if (vi->nofldrxq != 0) in cxgbe_vi_attach()
2900 if (is_ethoffload(sc) && vi->nofldtxq != 0) { in cxgbe_vi_attach()
2907 if (vi->flags & TX_USES_VM_WR) in cxgbe_vi_attach()
2912 if (is_ethoffload(sc) && vi->nofldtxq != 0) in cxgbe_vi_attach()
2919 if (sc->flags & KERN_TLS_ON || !is_t6(sc)) in cxgbe_vi_attach()
2924 ether_ifattach(ifp, vi->hw_addr); in cxgbe_vi_attach()
2926 if (vi->nnmrxq != 0) in cxgbe_vi_attach()
2930 sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq); in cxgbe_vi_attach()
2934 sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq); in cxgbe_vi_attach()
2937 sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq); in cxgbe_vi_attach()
2940 sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq); in cxgbe_vi_attach()
2946 sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq); in cxgbe_vi_attach()
2951 vi->nnmtxq, vi->nnmrxq); in cxgbe_vi_attach()
2963 vi->pfil = pfil_head_register(&pa); in cxgbe_vi_attach()
2970 struct adapter *sc = pi->adapter; in cxgbe_attach()
2974 sysctl_ctx_init(&pi->ctx); in cxgbe_attach()
2976 cxgbe_vi_attach(dev, &pi->vi[0]); in cxgbe_attach()
2981 vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, DEVICE_UNIT_ANY); in cxgbe_attach()
2982 if (vi->dev == NULL) { in cxgbe_attach()
2986 device_set_softc(vi->dev, vi); in cxgbe_attach()
2999 if_t ifp = vi->ifp; in cxgbe_vi_detach()
3001 if (vi->pfil != NULL) { in cxgbe_vi_detach()
3002 pfil_head_unregister(vi->pfil); in cxgbe_vi_detach()
3003 vi->pfil = NULL; in cxgbe_vi_detach()
3014 callout_drain(&vi->tick); in cxgbe_vi_detach()
3015 mtx_destroy(&vi->tick_mtx); in cxgbe_vi_detach()
3016 sysctl_ctx_free(&vi->ctx); in cxgbe_vi_detach()
3019 if_free(vi->ifp); in cxgbe_vi_detach()
3020 vi->ifp = NULL; in cxgbe_vi_detach()
3027 struct adapter *sc = pi->adapter; in cxgbe_detach()
3035 sysctl_ctx_free(&pi->ctx); in cxgbe_detach()
3036 begin_vi_detach(sc, &pi->vi[0]); in cxgbe_detach()
3037 if (pi->flags & HAS_TRACEQ) { in cxgbe_detach()
3038 sc->traceq = -1; /* cloner should not create ifnet */ in cxgbe_detach()
3041 cxgbe_vi_detach(&pi->vi[0]); in cxgbe_detach()
3042 ifmedia_removeall(&pi->media); in cxgbe_detach()
3043 end_vi_detach(sc, &pi->vi[0]); in cxgbe_detach()
3052 struct adapter *sc = vi->adapter; in cxgbe_init()
3065 struct port_info *pi = vi->pi; in cxgbe_ioctl()
3066 struct adapter *sc = pi->adapter; in cxgbe_ioctl()
3072 mtu = ifr->ifr_mtu; in cxgbe_ioctl()
3080 if (vi->flags & VI_INIT_DONE) { in cxgbe_ioctl()
3101 flags = vi->if_flags; in cxgbe_ioctl()
3110 vi->if_flags = if_getflags(ifp); in cxgbe_ioctl()
3132 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); in cxgbe_ioctl()
3142 "tso4 disabled due to -txcsum.\n"); in cxgbe_ioctl()
3154 "tso6 disabled due to -txcsum6.\n"); in cxgbe_ioctl()
3194 rxq->iq.flags |= IQ_LRO_ENABLED; in cxgbe_ioctl()
3196 rxq->iq.flags &= ~IQ_LRO_ENABLED; in cxgbe_ioctl()
3219 /* Need to find out how to disable auto-mtu-inflation */ in cxgbe_ioctl()
3236 rxq->iq.flags |= IQ_RX_TIMESTAMP; in cxgbe_ioctl()
3238 rxq->iq.flags &= ~IQ_RX_TIMESTAMP; in cxgbe_ioctl()
3277 rc = ifmedia_ioctl(ifp, ifr, &pi->media, cmd); in cxgbe_ioctl()
3300 rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr, in cxgbe_ioctl()
3319 struct port_info *pi = vi->pi; in cxgbe_transmit()
3326 MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */ in cxgbe_transmit()
3328 if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) in cxgbe_transmit()
3329 MPASS(m->m_pkthdr.snd_tag->ifp == ifp); in cxgbe_transmit()
3332 if (__predict_false(pi->link_cfg.link_ok == false)) { in cxgbe_transmit()
3337 rc = parse_pkt(&m, vi->flags & TX_USES_VM_WR); in cxgbe_transmit()
3346 atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */ in cxgbe_transmit()
3351 sc = vi->adapter; in cxgbe_transmit()
3352 txq = &sc->sge.txq[vi->first_txq]; in cxgbe_transmit()
3354 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) + in cxgbe_transmit()
3355 vi->rsrv_noflowq); in cxgbe_transmit()
3358 rc = mp_ring_enqueue(txq->r, items, 1, 256); in cxgbe_transmit()
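The txq selection a few lines up spreads flowid-hashed traffic over the non-reserved queues. Worked example with illustrative values:

	/*
	 * vi->ntxq = 8, vi->rsrv_noflowq = 1:
	 *   flowid-hashed packets map to txq 1..7 (flowid % 7, then + 1);
	 *   packets without a flowid stay on the base txq (vi->first_txq).
	 */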
3373 if (vi->flags & VI_INIT_DONE) { in cxgbe_qflush()
3376 txq->eq.flags |= EQ_QFLUSH; in cxgbe_qflush()
3378 while (!mp_ring_is_idle(txq->r)) { in cxgbe_qflush()
3379 mp_ring_check_drainage(txq->r, 4096); in cxgbe_qflush()
3383 txq->eq.flags &= ~EQ_QFLUSH; in cxgbe_qflush()
3394 struct fw_vi_stats_vf *s = &vi->stats; in vi_get_counter()
3396 mtx_lock(&vi->tick_mtx); in vi_get_counter()
3398 mtx_unlock(&vi->tick_mtx); in vi_get_counter()
3402 return (s->rx_bcast_frames + s->rx_mcast_frames + in vi_get_counter()
3403 s->rx_ucast_frames); in vi_get_counter()
3405 return (s->rx_err_frames); in vi_get_counter()
3407 return (s->tx_bcast_frames + s->tx_mcast_frames + in vi_get_counter()
3408 s->tx_ucast_frames + s->tx_offload_frames); in vi_get_counter()
3410 return (s->tx_drop_frames); in vi_get_counter()
3412 return (s->rx_bcast_bytes + s->rx_mcast_bytes + in vi_get_counter()
3413 s->rx_ucast_bytes); in vi_get_counter()
3415 return (s->tx_bcast_bytes + s->tx_mcast_bytes + in vi_get_counter()
3416 s->tx_ucast_bytes + s->tx_offload_bytes); in vi_get_counter()
3418 return (s->rx_mcast_frames); in vi_get_counter()
3420 return (s->tx_mcast_frames); in vi_get_counter()
3425 if (vi->flags & VI_INIT_DONE) { in vi_get_counter()
3430 drops += counter_u64_fetch(txq->r->dropped); in vi_get_counter()
3446 struct port_info *pi = vi->pi; in cxgbe_get_counter()
3447 struct port_stats *s = &pi->stats; in cxgbe_get_counter()
3449 mtx_lock(&vi->tick_mtx); in cxgbe_get_counter()
3451 mtx_unlock(&vi->tick_mtx); in cxgbe_get_counter()
3455 return (s->rx_frames); in cxgbe_get_counter()
3458 return (s->rx_jabber + s->rx_runt + s->rx_too_long + in cxgbe_get_counter()
3459 s->rx_fcs_err + s->rx_len_err); in cxgbe_get_counter()
3462 return (s->tx_frames); in cxgbe_get_counter()
3465 return (s->tx_error_frames); in cxgbe_get_counter()
3468 return (s->rx_octets); in cxgbe_get_counter()
3471 return (s->tx_octets); in cxgbe_get_counter()
3474 return (s->rx_mcast_frames); in cxgbe_get_counter()
3477 return (s->tx_mcast_frames); in cxgbe_get_counter()
3480 return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + in cxgbe_get_counter()
3481 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 + in cxgbe_get_counter()
3482 s->rx_trunc3 + pi->tnl_cong_drops); in cxgbe_get_counter()
3487 drops = s->tx_drop; in cxgbe_get_counter()
3488 if (vi->flags & VI_INIT_DONE) { in cxgbe_get_counter()
3493 drops += counter_u64_fetch(txq->r->dropped); in cxgbe_get_counter()
3507 cxgbe_snd_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params, in cxgbe_snd_tag_alloc() argument
3512 switch (params->hdr.type) { in cxgbe_snd_tag_alloc()
3515 error = cxgbe_rate_tag_alloc(ifp, params, pt); in cxgbe_snd_tag_alloc()
3523 if (is_t6(vi->pi->adapter)) in cxgbe_snd_tag_alloc()
3524 error = t6_tls_tag_alloc(ifp, params, pt); in cxgbe_snd_tag_alloc()
3526 error = t7_tls_tag_alloc(ifp, params, pt); in cxgbe_snd_tag_alloc()
3545 struct port_info *pi = vi->pi; in cxgbe_media_change()
3546 struct ifmedia *ifm = &pi->media; in cxgbe_media_change()
3547 struct link_config *lc = &pi->link_cfg; in cxgbe_media_change()
3548 struct adapter *sc = pi->adapter; in cxgbe_media_change()
3555 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { in cxgbe_media_change()
3557 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { in cxgbe_media_change()
3561 lc->requested_aneg = AUTONEG_ENABLE; in cxgbe_media_change()
3562 lc->requested_speed = 0; in cxgbe_media_change()
3563 lc->requested_fc |= PAUSE_AUTONEG; in cxgbe_media_change()
3565 lc->requested_aneg = AUTONEG_DISABLE; in cxgbe_media_change()
3566 lc->requested_speed = in cxgbe_media_change()
3567 ifmedia_baudrate(ifm->ifm_media) / 1000000; in cxgbe_media_change()
3568 lc->requested_fc = 0; in cxgbe_media_change()
3569 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE) in cxgbe_media_change()
3570 lc->requested_fc |= PAUSE_RX; in cxgbe_media_change()
3571 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE) in cxgbe_media_change()
3572 lc->requested_fc |= PAUSE_TX; in cxgbe_media_change()
3574 if (pi->up_vis > 0 && hw_all_ok(sc)) { in cxgbe_media_change()
3595 switch(pi->port_type) { in port_mword()
3657 switch (pi->mod_type) { in port_mword()
3748 if (chip_id(pi->adapter) >= CHELSIO_T7) in port_mword()
3762 struct port_info *pi = vi->pi; in cxgbe_media_status()
3763 struct adapter *sc = pi->adapter; in cxgbe_media_status()
3764 struct link_config *lc = &pi->link_cfg; in cxgbe_media_status()
3770 if (pi->up_vis == 0 && hw_all_ok(sc)) { in cxgbe_media_status()
3783 ifmr->ifm_status = IFM_AVALID; in cxgbe_media_status()
3784 if (lc->link_ok == false) in cxgbe_media_status()
3786 ifmr->ifm_status |= IFM_ACTIVE; in cxgbe_media_status()
3789 ifmr->ifm_active = IFM_ETHER | IFM_FDX; in cxgbe_media_status()
3790 ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE); in cxgbe_media_status()
3791 if (lc->fc & PAUSE_RX) in cxgbe_media_status()
3792 ifmr->ifm_active |= IFM_ETH_RXPAUSE; in cxgbe_media_status()
3793 if (lc->fc & PAUSE_TX) in cxgbe_media_status()
3794 ifmr->ifm_active |= IFM_ETH_TXPAUSE; in cxgbe_media_status()
3795 ifmr->ifm_active |= port_mword(pi, speed_to_fwcap(lc->speed)); in cxgbe_media_status()
3806 device_set_descf(dev, "port %d vi %td", vi->pi->port_id, in vcxgbe_probe()
3807 vi - vi->pi->vi); in vcxgbe_probe()
3820 index = vi - pi->vi; in alloc_extra_vi()
3824 device_get_nameunit(vi->dev))); in alloc_extra_vi()
3826 rc = t4_alloc_vi_func(sc, sc->mbox, pi->hw_port, sc->pf, 0, 1, in alloc_extra_vi()
3827 vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0); in alloc_extra_vi()
3830 "for port %d: %d\n", index, pi->port_id, -rc); in alloc_extra_vi()
3831 return (-rc); in alloc_extra_vi()
3833 vi->viid = rc; in alloc_extra_vi()
3835 if (vi->rss_size == 1) { in alloc_extra_vi()
3842 device_printf(vi->dev, "RSS table not available.\n"); in alloc_extra_vi()
3843 vi->rss_base = 0xffff; in alloc_extra_vi()
3850 V_FW_PARAMS_PARAM_YZ(vi->viid); in alloc_extra_vi()
3851 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in alloc_extra_vi()
3853 vi->rss_base = 0xffff; in alloc_extra_vi()
3855 MPASS((val >> 16) == vi->rss_size); in alloc_extra_vi()
3856 vi->rss_base = val & 0xffff; in alloc_extra_vi()
3871 pi = vi->pi; in vcxgbe_attach()
3872 sc = pi->adapter; in vcxgbe_attach()
3894 sc = vi->adapter; in vcxgbe_detach()
3898 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); in vcxgbe_detach()
3912 panic("%s: panic on fatal error", device_get_nameunit(sc->dev)); in delayed_panic()
3921 if (atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_CIM_ERR))) { in fatal_error_task()
3946 const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0; in t4_fatal_err()
3949 if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_FATAL_ERR))) in t4_fatal_err()
3961 atomic_set_int(&sc->error_flags, ADAP_CIM_ERR); in t4_fatal_err()
3965 device_get_nameunit(sc->dev), fw_error); in t4_fatal_err()
3966 taskqueue_enqueue(reset_tq, &sc->fatal_error_task); in t4_fatal_err()
3980 sc->regs_rid = PCIR_BAR(0); in t4_map_bars_0_and_4()
3981 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, in t4_map_bars_0_and_4()
3982 &sc->regs_rid, RF_ACTIVE); in t4_map_bars_0_and_4()
3983 if (sc->regs_res == NULL) { in t4_map_bars_0_and_4()
3984 device_printf(sc->dev, "cannot map registers.\n"); in t4_map_bars_0_and_4()
3987 sc->bt = rman_get_bustag(sc->regs_res); in t4_map_bars_0_and_4()
3988 sc->bh = rman_get_bushandle(sc->regs_res); in t4_map_bars_0_and_4()
3989 sc->mmio_len = rman_get_size(sc->regs_res); in t4_map_bars_0_and_4()
3990 setbit(&sc->doorbells, DOORBELL_KDB); in t4_map_bars_0_and_4()
3992 sc->msix_rid = PCIR_BAR(4); in t4_map_bars_0_and_4()
3993 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, in t4_map_bars_0_and_4()
3994 &sc->msix_rid, RF_ACTIVE); in t4_map_bars_0_and_4()
3995 if (sc->msix_res == NULL) { in t4_map_bars_0_and_4()
3996 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); in t4_map_bars_0_and_4()
4011 if (is_t4(sc) && sc->rdmacaps == 0) in t4_map_bar_2()
4014 sc->udbs_rid = PCIR_BAR(2); in t4_map_bar_2()
4015 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, in t4_map_bar_2()
4016 &sc->udbs_rid, RF_ACTIVE); in t4_map_bar_2()
4017 if (sc->udbs_res == NULL) { in t4_map_bar_2()
4018 device_printf(sc->dev, "cannot map doorbell BAR.\n"); in t4_map_bar_2()
4021 sc->udbs_base = rman_get_virtual(sc->udbs_res); in t4_map_bar_2()
4024 setbit(&sc->doorbells, DOORBELL_UDB); in t4_map_bar_2()
4038 rc = pmap_change_attr((vm_offset_t)sc->udbs_base, in t4_map_bar_2()
4039 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); in t4_map_bar_2()
4041 clrbit(&sc->doorbells, DOORBELL_UDB); in t4_map_bar_2()
4042 setbit(&sc->doorbells, DOORBELL_WCWR); in t4_map_bar_2()
4043 setbit(&sc->doorbells, DOORBELL_UDBWC); in t4_map_bar_2()
4045 device_printf(sc->dev, in t4_map_bar_2()
4056 sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0; in t4_map_bar_2()
4064 if ((sc->doorbells & t4_doorbells_allowed) != 0) { in t4_adj_doorbells()
4065 sc->doorbells &= t4_doorbells_allowed; in t4_adj_doorbells()
4069 sc->doorbells, t4_doorbells_allowed); in t4_adj_doorbells()
4117 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { in setup_memwin()
4118 if (!rw_initialized(&mw->mw_lock)) { in setup_memwin()
4119 rw_init(&mw->mw_lock, "memory window access"); in setup_memwin()
4120 mw->mw_base = mw_init->base; in setup_memwin()
4121 mw->mw_aperture = mw_init->aperture; in setup_memwin()
4122 mw->mw_curpos = 0; in setup_memwin()
4127 t4_write_reg(sc, reg, (mw->mw_base + bar0) | V_BIR(0) | in setup_memwin()
4128 V_WINDOW(ilog2(mw->mw_aperture) - 10)); in setup_memwin()
4129 rw_wlock(&mw->mw_lock); in setup_memwin()
4130 position_memwin(sc, i, mw->mw_curpos); in setup_memwin()
4131 rw_wunlock(&mw->mw_lock); in setup_memwin()
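The V_WINDOW field programmed above appears to encode the aperture as a power of two relative to 1KB, which is what the ilog2(aperture) - 10 term suggests. Illustrative values:

	/*
	 * aperture =   4 KB -> ilog2(4096)   - 10 = 2
	 * aperture = 128 KB -> ilog2(131072) - 10 = 7
	 * i.e. the field presumably holds log2(aperture / 1KB).
	 */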
4141 * address prior to the requested address. mw->mw_curpos always has the actual
4151 mw = &sc->memwin[idx]; in position_memwin()
4152 rw_assert(&mw->mw_lock, RA_WLOCKED); in position_memwin()
4156 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ in position_memwin()
4158 pf = V_PFNUM(sc->pf); in position_memwin()
4159 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ in position_memwin()
4163 val = (mw->mw_curpos >> X_T7_MEMOFST_SHIFT) | pf; in position_memwin()
4166 val = mw->mw_curpos | pf; in position_memwin()
4185 mw = &sc->memwin[idx]; in rw_via_memwin()
4187 rw_rlock(&mw->mw_lock); in rw_via_memwin()
4188 mw_end = mw->mw_curpos + mw->mw_aperture; in rw_via_memwin()
4189 if (addr >= mw_end || addr < mw->mw_curpos) { in rw_via_memwin()
4191 if (!rw_try_upgrade(&mw->mw_lock)) { in rw_via_memwin()
4192 rw_runlock(&mw->mw_lock); in rw_via_memwin()
4193 rw_wlock(&mw->mw_lock); in rw_via_memwin()
4195 rw_assert(&mw->mw_lock, RA_WLOCKED); in rw_via_memwin()
4197 rw_downgrade(&mw->mw_lock); in rw_via_memwin()
4198 mw_end = mw->mw_curpos + mw->mw_aperture; in rw_via_memwin()
4200 rw_assert(&mw->mw_lock, RA_RLOCKED); in rw_via_memwin()
4203 v = t4_read_reg(sc, mw->mw_base + addr - in rw_via_memwin()
4204 mw->mw_curpos); in rw_via_memwin()
4208 t4_write_reg(sc, mw->mw_base + addr - in rw_via_memwin()
4209 mw->mw_curpos, htole32(v)); in rw_via_memwin()
4212 len -= 4; in rw_via_memwin()
4214 rw_runlock(&mw->mw_lock); in rw_via_memwin()
4229 t = &sc->tids; in t4_init_atid_table()
4230 if (t->natids == 0) in t4_init_atid_table()
4233 MPASS(t->atid_tab == NULL); in t4_init_atid_table()
4235 t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE, in t4_init_atid_table()
4237 mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF); in t4_init_atid_table()
4238 t->afree = t->atid_tab; in t4_init_atid_table()
4239 t->atids_in_use = 0; in t4_init_atid_table()
4240 t->atid_alloc_stopped = false; in t4_init_atid_table()
4241 for (i = 1; i < t->natids; i++) in t4_init_atid_table()
4242 t->atid_tab[i - 1].next = &t->atid_tab[i]; in t4_init_atid_table()
4243 t->atid_tab[t->natids - 1].next = NULL; in t4_init_atid_table()
4251 t = &sc->tids; in t4_free_atid_table()
4253 KASSERT(t->atids_in_use == 0, in t4_free_atid_table()
4254 ("%s: %d atids still in use.", __func__, t->atids_in_use)); in t4_free_atid_table()
4256 if (mtx_initialized(&t->atid_lock)) in t4_free_atid_table()
4257 mtx_destroy(&t->atid_lock); in t4_free_atid_table()
4258 free(t->atid_tab, M_CXGBE); in t4_free_atid_table()
4259 t->atid_tab = NULL; in t4_free_atid_table()
4265 struct tid_info *t = &sc->tids; in stop_atid_allocator()
4267 if (t->natids == 0) in stop_atid_allocator()
4269 mtx_lock(&t->atid_lock); in stop_atid_allocator()
4270 t->atid_alloc_stopped = true; in stop_atid_allocator()
4271 mtx_unlock(&t->atid_lock); in stop_atid_allocator()
4277 struct tid_info *t = &sc->tids; in restart_atid_allocator()
4279 if (t->natids == 0) in restart_atid_allocator()
4281 mtx_lock(&t->atid_lock); in restart_atid_allocator()
4282 KASSERT(t->atids_in_use == 0, in restart_atid_allocator()
4283 ("%s: %d atids still in use.", __func__, t->atids_in_use)); in restart_atid_allocator()
4284 t->atid_alloc_stopped = false; in restart_atid_allocator()
4285 mtx_unlock(&t->atid_lock); in restart_atid_allocator()
4291 struct tid_info *t = &sc->tids; in alloc_atid()
4292 int atid = -1; in alloc_atid()
4294 mtx_lock(&t->atid_lock); in alloc_atid()
4295 if (t->afree && !t->atid_alloc_stopped) { in alloc_atid()
4296 union aopen_entry *p = t->afree; in alloc_atid()
4298 atid = p - t->atid_tab; in alloc_atid()
4300 t->afree = p->next; in alloc_atid()
4301 p->data = ctx; in alloc_atid()
4302 t->atids_in_use++; in alloc_atid()
4304 mtx_unlock(&t->atid_lock); in alloc_atid()
4311 struct tid_info *t = &sc->tids; in lookup_atid()
4313 return (t->atid_tab[atid].data); in lookup_atid()
4319 struct tid_info *t = &sc->tids; in free_atid()
4320 union aopen_entry *p = &t->atid_tab[atid]; in free_atid()
4322 mtx_lock(&t->atid_lock); in free_atid()
4323 p->next = t->afree; in free_atid()
4324 t->afree = p; in free_atid()
4325 t->atids_in_use--; in free_atid()
4326 mtx_unlock(&t->atid_lock); in free_atid()
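Typical life cycle of an atid from the allocator above (the caller-supplied context is illustrative):

	void *ctx = NULL;	/* per-connection state owned by the caller */
	int atid;

	atid = alloc_atid(sc, ctx);		/* -1 if the free list is empty or allocation is stopped */
	if (atid >= 0) {
		/* tag the active-open work request with atid; when the reply arrives: */
		ctx = lookup_atid(sc, atid);
		free_atid(sc, atid);		/* return the entry to the free list */
	}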
4357 return ((const struct t4_range *)a)->start - in t4_range_cmp()
4358 ((const struct t4_range *)b)->start; in t4_range_cmp()
4384 r->size = G_EDRAM0_SIZE(addr_len) << 20; in validate_mem_range()
4385 if (r->size > 0) { in validate_mem_range()
4386 r->start = G_EDRAM0_BASE(addr_len) << 20; in validate_mem_range()
4387 if (addr >= r->start && in validate_mem_range()
4388 addr + len <= r->start + r->size) in validate_mem_range()
4396 r->size = G_EDRAM1_SIZE(addr_len) << 20; in validate_mem_range()
4397 if (r->size > 0) { in validate_mem_range()
4398 r->start = G_EDRAM1_BASE(addr_len) << 20; in validate_mem_range()
4399 if (addr >= r->start && in validate_mem_range()
4400 addr + len <= r->start + r->size) in validate_mem_range()
4408 r->size = G_EXT_MEM_SIZE(addr_len) << 20; in validate_mem_range()
4409 if (r->size > 0) { in validate_mem_range()
4410 r->start = G_EXT_MEM_BASE(addr_len) << 20; in validate_mem_range()
4411 if (addr >= r->start && in validate_mem_range()
4412 addr + len <= r->start + r->size) in validate_mem_range()
4420 r->size = G_EXT_MEM1_SIZE(addr_len) << 20; in validate_mem_range()
4421 if (r->size > 0) { in validate_mem_range()
4422 r->start = G_EXT_MEM1_BASE(addr_len) << 20; in validate_mem_range()
4423 if (addr >= r->start && in validate_mem_range()
4424 addr + len <= r->start + r->size) in validate_mem_range()
4436 /* Start from index 0 and examine the next n - 1 entries. */ in validate_mem_range()
4438 for (remaining = n - 1; remaining > 0; remaining--, r++) { in validate_mem_range()
4440 MPASS(r->size > 0); /* r is a valid entry. */ in validate_mem_range()
4442 MPASS(next->size > 0); /* and so is the next one. */ in validate_mem_range()
4444 while (r->start + r->size >= next->start) { in validate_mem_range()
4446 r->size = max(r->start + r->size, in validate_mem_range()
4447 next->start + next->size) - r->start; in validate_mem_range()
4448 n--; /* One fewer entry in total. */ in validate_mem_range()
4449 if (--remaining == 0) in validate_mem_range()
4459 MPASS(next->size > 0); /* must be valid */ in validate_mem_range()
4463 * This is so that the foo->size assertion in the in validate_mem_range()
4469 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * in validate_mem_range()
4479 if (addr >= r->start && in validate_mem_range()
4480 addr + len <= r->start + r->size) in validate_mem_range()
4558 struct devlog_params *dparams = &sc->params.devlog; in fixup_devlog_params()
4561 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, in fixup_devlog_params()
4562 dparams->size, &dparams->addr); in fixup_devlog_params()
4571 iaq->nirq = T4_EXTRA_INTR; in update_nirq()
4572 iaq->nirq += nports * max(iaq->nrxq, iaq->nnmrxq); in update_nirq()
4573 iaq->nirq += nports * iaq->nofldrxq; in update_nirq()
4574 iaq->nirq += nports * (iaq->num_vis - 1) * in update_nirq()
4575 max(iaq->nrxq_vi, iaq->nnmrxq_vi); in update_nirq()
4576 iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi; in update_nirq()
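update_nirq() above charges one vector per rx queue (NIC and netmap queues share vectors, offload queues get their own) on top of T4_EXTRA_INTR. A worked example with illustrative counts:

	/*
	 * nports = 2, nrxq = 8, nnmrxq = 2, nofldrxq = 2, num_vis = 1:
	 *   nirq = T4_EXTRA_INTR
	 *        + 2 * max(8, 2) = 16   (NIC/netmap rxq)
	 *        + 2 * 2         =  4   (offload rxq)
	 *        + 0                    (no extra VIs)
	 */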
4587 const int nports = sc->params.nports; in calculate_iaq()
4593 iaq->intr_type = itype; in calculate_iaq()
4594 iaq->num_vis = t4_num_vis; in calculate_iaq()
4595 iaq->ntxq = t4_ntxq; in calculate_iaq()
4596 iaq->ntxq_vi = t4_ntxq_vi; in calculate_iaq()
4597 iaq->nrxq = t4_nrxq; in calculate_iaq()
4598 iaq->nrxq_vi = t4_nrxq_vi; in calculate_iaq()
4601 if (sc->params.tid_qid_sel_mask == 0) { in calculate_iaq()
4602 iaq->nofldtxq = t4_nofldtxq; in calculate_iaq()
4603 iaq->nofldtxq_vi = t4_nofldtxq_vi; in calculate_iaq()
4605 iaq->nofldtxq = roundup(t4_nofldtxq, sc->params.ncores); in calculate_iaq()
4606 iaq->nofldtxq_vi = roundup(t4_nofldtxq_vi, in calculate_iaq()
4607 sc->params.ncores); in calculate_iaq()
4608 if (iaq->nofldtxq != t4_nofldtxq) in calculate_iaq()
4609 device_printf(sc->dev, in calculate_iaq()
4610 "nofldtxq updated (%d -> %d) for correct" in calculate_iaq()
4612 t4_nofldtxq, iaq->nofldtxq, in calculate_iaq()
4613 sc->params.ncores); in calculate_iaq()
4614 if (iaq->num_vis > 1 && in calculate_iaq()
4615 iaq->nofldtxq_vi != t4_nofldtxq_vi) in calculate_iaq()
4616 device_printf(sc->dev, in calculate_iaq()
4617 "nofldtxq_vi updated (%d -> %d) for correct" in calculate_iaq()
4619 t4_nofldtxq_vi, iaq->nofldtxq_vi, in calculate_iaq()
4620 sc->params.ncores); in calculate_iaq()
4626 iaq->nofldrxq = t4_nofldrxq; in calculate_iaq()
4627 iaq->nofldrxq_vi = t4_nofldrxq_vi; in calculate_iaq()
4632 iaq->nnmtxq = t4_nnmtxq; in calculate_iaq()
4633 iaq->nnmrxq = t4_nnmrxq; in calculate_iaq()
4636 iaq->nnmtxq_vi = t4_nnmtxq_vi; in calculate_iaq()
4637 iaq->nnmrxq_vi = t4_nnmrxq_vi; in calculate_iaq()
4642 if (iaq->nirq <= navail && in calculate_iaq()
4643 (itype != INTR_MSI || powerof2(iaq->nirq))) { in calculate_iaq()
4645 * This is the normal case -- there are enough interrupts for in calculate_iaq()
4655 while (iaq->num_vis > 1) { in calculate_iaq()
4656 iaq->num_vis--; in calculate_iaq()
4658 if (iaq->nirq <= navail && in calculate_iaq()
4659 (itype != INTR_MSI || powerof2(iaq->nirq))) { in calculate_iaq()
4660 device_printf(sc->dev, "virtual interfaces per port " in calculate_iaq()
4664 iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq, in calculate_iaq()
4665 iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi, in calculate_iaq()
4666 itype, navail, iaq->nirq); in calculate_iaq()
4674 MPASS(iaq->num_vis == 1); in calculate_iaq()
4675 iaq->ntxq_vi = iaq->nrxq_vi = 0; in calculate_iaq()
4676 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; in calculate_iaq()
4677 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; in calculate_iaq()
4678 if (iaq->num_vis != t4_num_vis) { in calculate_iaq()
4679 device_printf(sc->dev, "extra virtual interfaces disabled. " in calculate_iaq()
4682 iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi, in calculate_iaq()
4683 iaq->nnmrxq_vi, itype, navail, iaq->nirq); in calculate_iaq()
4692 if (iaq->nrxq > 1) { in calculate_iaq()
4693 iaq->nrxq = rounddown_pow_of_two(iaq->nrxq - 1); in calculate_iaq()
4694 if (iaq->nnmrxq > iaq->nrxq) in calculate_iaq()
4695 iaq->nnmrxq = iaq->nrxq; in calculate_iaq()
4697 if (iaq->nofldrxq > 1) in calculate_iaq()
4698 iaq->nofldrxq >>= 1; in calculate_iaq()
4700 old_nirq = iaq->nirq; in calculate_iaq()
4702 if (iaq->nirq <= navail && in calculate_iaq()
4703 (itype != INTR_MSI || powerof2(iaq->nirq))) { in calculate_iaq()
4704 device_printf(sc->dev, "running with reduced number of " in calculate_iaq()
4707 "itype %d, navail %u, nirq %d.\n", iaq->nrxq, in calculate_iaq()
4708 iaq->nofldrxq, itype, navail, iaq->nirq); in calculate_iaq()
4711 } while (old_nirq != iaq->nirq); in calculate_iaq()
4714 device_printf(sc->dev, "running with minimal number of queues. " in calculate_iaq()
4716 iaq->nirq = 1; in calculate_iaq()
4717 iaq->nrxq = 1; in calculate_iaq()
4718 iaq->ntxq = 1; in calculate_iaq()
4719 if (iaq->nofldrxq > 0) { in calculate_iaq()
4720 iaq->nofldrxq = 1; in calculate_iaq()
4721 iaq->nofldtxq = 1; in calculate_iaq()
4722 if (sc->params.tid_qid_sel_mask == 0) in calculate_iaq()
4723 iaq->nofldtxq = 1; in calculate_iaq()
4725 iaq->nofldtxq = sc->params.ncores; in calculate_iaq()
4727 iaq->nnmtxq = 0; in calculate_iaq()
4728 iaq->nnmrxq = 0; in calculate_iaq()
4730 MPASS(iaq->num_vis > 0); in calculate_iaq()
4731 if (iaq->num_vis > 1) { in calculate_iaq()
4732 MPASS(iaq->nrxq_vi > 0); in calculate_iaq()
4733 MPASS(iaq->ntxq_vi > 0); in calculate_iaq()
4735 MPASS(iaq->nirq > 0); in calculate_iaq()
4736 MPASS(iaq->nrxq > 0); in calculate_iaq()
4737 MPASS(iaq->ntxq > 0); in calculate_iaq()
4739 MPASS(powerof2(iaq->nirq)); in calculate_iaq()
4740 if (sc->params.tid_qid_sel_mask != 0) in calculate_iaq()
4741 MPASS(iaq->nofldtxq % sc->params.ncores == 0); in calculate_iaq()
4755 navail = pci_msix_count(sc->dev); in cfg_itype_and_nqueues()
4757 navail = pci_msi_count(sc->dev); in cfg_itype_and_nqueues()
4765 nalloc = iaq->nirq; in cfg_itype_and_nqueues()
4768 rc = pci_alloc_msix(sc->dev, &nalloc); in cfg_itype_and_nqueues()
4770 rc = pci_alloc_msi(sc->dev, &nalloc); in cfg_itype_and_nqueues()
4773 if (nalloc == iaq->nirq) in cfg_itype_and_nqueues()
4780 device_printf(sc->dev, "fewer vectors than requested, " in cfg_itype_and_nqueues()
4782 itype, iaq->nirq, nalloc); in cfg_itype_and_nqueues()
4783 pci_release_msi(sc->dev); in cfg_itype_and_nqueues()
4788 device_printf(sc->dev, in cfg_itype_and_nqueues()
4790 itype, rc, iaq->nirq, nalloc); in cfg_itype_and_nqueues()
4793 device_printf(sc->dev, in cfg_itype_and_nqueues()
4795 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, in cfg_itype_and_nqueues()
4796 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); in cfg_itype_and_nqueues()
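
[Editor's note] cfg_itype_and_nqueues() asks the bus for iaq->nirq vectors and, when fewer are granted, releases them and recomputes the queue layout for the smaller count before trying again. The sketch below shows only that negotiate-release-retry shape; bus_alloc_vectors() and bus_release_vectors() are hypothetical stand-ins for pci_alloc_msix()/pci_alloc_msi() and pci_release_msi(), and the "recompute" step is collapsed to a single assignment.

#include <stdio.h>

/* Hypothetical stand-in for the PCI KPI: the "bus" grants at most 5 vectors. */
static int
bus_alloc_vectors(int wanted, int *granted)
{
	*granted = wanted < 5 ? wanted : 5;
	return (0);			/* 0 = success, like the real KPI */
}

static void
bus_release_vectors(void)
{
	/* Stand-in for pci_release_msi(). */
}

int
main(void)
{
	int wanted = 9, granted = 0;

	for (;;) {
		if (bus_alloc_vectors(wanted, &granted) != 0)
			return (1);
		if (granted == wanted)
			break;		/* got everything we asked for */
		/* Fewer vectors than requested: release and shrink. */
		printf("wanted %d, granted %d; shrinking the layout\n",
		    wanted, granted);
		bus_release_vectors();
		wanted = granted;	/* recompute queues for this count */
	}
	printf("running with %d vectors\n", granted);
	return (0);
}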
4923 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) in fw_compatible()
4930 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) in fw_compatible()
4931 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && in fw_compatible()
4952 device_printf(sc->dev, in load_fw_module()
4958 *dcfg = firmware_get(fw_info->kld_name); in load_fw_module()
4961 *fw = firmware_get(fw_info->fw_mod_name); in load_fw_module()
4990 const uint32_t c = be32toh(card_fw->fw_ver); in install_kld_firmware()
4998 fw_install = t4_fw_install < 0 ? -t4_fw_install : t4_fw_install; in install_kld_firmware()
5004 device_printf(sc->dev, in install_kld_firmware()
5006 " will use compiled-in firmware version for" in install_kld_firmware()
5010 memcpy(&bundled_fw, fw->data, sizeof(bundled_fw)); in install_kld_firmware()
5019 if ((sc->flags & FW_OK) == 0) { in install_kld_firmware()
5055 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " in install_kld_firmware()
5071 device_printf(sc->dev, in install_kld_firmware()
5078 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " in install_kld_firmware()
5083 rc = sc->flags & FW_OK ? 0 : ENOENT; in install_kld_firmware()
5086 k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver); in install_kld_firmware()
5089 device_printf(sc->dev, in install_kld_firmware()
5096 rc = sc->flags & FW_OK ? 0 : EINVAL; in install_kld_firmware()
5100 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " in install_kld_firmware()
5107 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); in install_kld_firmware()
5109 device_printf(sc->dev, "failed to install firmware: %d\n", rc); in install_kld_firmware()
5113 memcpy(card_fw, fw->data, sizeof(*card_fw)); in install_kld_firmware()
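
[Editor's note] install_kld_firmware() compares 32-bit packed firmware versions, c = be32toh(card_fw->fw_ver) for the card and k for the KLD image, and only flashes when the bundled image should win. Assuming the usual layout of four 8-bit fields packed major.minor.micro.build from the high byte down (an assumption here, mirrored from the G_FW_HDR_FW_VER_* accessors used later in the file), a tiny standalone sketch of packing, unpacking, and comparing:

#include <stdint.h>
#include <stdio.h>

#define FW_VER(a, b, c, d) \
	(((uint32_t)(a) << 24) | ((uint32_t)(b) << 16) | \
	 ((uint32_t)(c) << 8) | (uint32_t)(d))
#define FW_VER_MAJOR(v)	(((v) >> 24) & 0xff)
#define FW_VER_MINOR(v)	(((v) >> 16) & 0xff)
#define FW_VER_MICRO(v)	(((v) >> 8) & 0xff)
#define FW_VER_BUILD(v)	((v) & 0xff)

int
main(void)
{
	uint32_t card = FW_VER(1, 20, 1, 0);	/* what's on the card */
	uint32_t kld = FW_VER(1, 25, 0, 0);	/* what the module bundles */

	printf("card %u.%u.%u.%u, module %u.%u.%u.%u: %s\n",
	    FW_VER_MAJOR(card), FW_VER_MINOR(card), FW_VER_MICRO(card),
	    FW_VER_BUILD(card),
	    FW_VER_MAJOR(kld), FW_VER_MINOR(kld), FW_VER_MICRO(kld),
	    FW_VER_BUILD(kld),
	    kld > card ? "module is newer" : "card is up to date");
	return (0);
}

Because the most significant field is the major number, a plain unsigned comparison of the packed words orders versions correctly.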
5138 device_printf(sc->dev, in contact_firmware()
5143 drv_fw = &fw_info->fw_h; in contact_firmware()
5148 rc = -t4_get_fw_hdr(sc, card_fw); in contact_firmware()
5150 device_printf(sc->dev, in contact_firmware()
5163 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); in contact_firmware()
5165 rc = -rc; in contact_firmware()
5166 device_printf(sc->dev, in contact_firmware()
5176 MPASS(be32toh(card_fw->flags) & FW_HDR_FLAGS_RESET_HALT); in contact_firmware()
5177 sc->flags |= FW_OK; /* The firmware responded to the FW_HELLO. */ in contact_firmware()
5179 if (rc == sc->pf) { in contact_firmware()
5180 sc->flags |= MASTER_PF; in contact_firmware()
5193 device_printf(sc->dev, "couldn't be master(%d), " in contact_firmware()
5203 device_printf(sc->dev, "PF%d is master, device state %d. " in contact_firmware()
5205 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", rc); in contact_firmware()
5206 sc->cfcsum = 0; in contact_firmware()
5210 if (rc != 0 && sc->flags & FW_OK) { in contact_firmware()
5211 t4_fw_bye(sc, sc->mbox); in contact_firmware()
5212 sc->flags &= ~FW_OK; in contact_firmware()
5232 if (pci_get_device(sc->dev) == 0x440a) in copy_cfg_file_to_card()
5240 device_printf(sc->dev, in copy_cfg_file_to_card()
5245 cfdata = dcfg->data; in copy_cfg_file_to_card()
5246 cflen = dcfg->datasize & ~3; in copy_cfg_file_to_card()
5252 device_printf(sc->dev, in copy_cfg_file_to_card()
5258 snprintf(s, sizeof(s), "%s_%s", fw_info->kld_name, cfg_file); in copy_cfg_file_to_card()
5262 device_printf(sc->dev, in copy_cfg_file_to_card()
5268 cfdata = rcfg->data; in copy_cfg_file_to_card()
5269 cflen = rcfg->datasize & ~3; in copy_cfg_file_to_card()
5273 device_printf(sc->dev, in copy_cfg_file_to_card()
5282 device_printf(sc->dev, in copy_cfg_file_to_card()
5331 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST); in apply_cfg_and_initialize()
5333 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); in apply_cfg_and_initialize()
5356 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in apply_cfg_and_initialize()
5359 device_printf(sc->dev, in apply_cfg_and_initialize()
5372 device_printf(sc->dev, in apply_cfg_and_initialize()
5377 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); in apply_cfg_and_initialize()
5379 device_printf(sc->dev, "failed to pre-process config file: %d " in apply_cfg_and_initialize()
5387 device_printf(sc->dev, in apply_cfg_and_initialize()
5391 sc->cfcsum = cfcsum; in apply_cfg_and_initialize()
5392 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", cfg_file); in apply_cfg_and_initialize()
5399 caps.x##caps &= htobe16(caps_allowed->x##caps); \ in apply_cfg_and_initialize()
5416 * to cope with the situation in non-debug builds by disabling in apply_cfg_and_initialize()
5430 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); in apply_cfg_and_initialize()
5432 device_printf(sc->dev, in apply_cfg_and_initialize()
5441 rc = -t4_fw_initialize(sc, sc->mbox); in apply_cfg_and_initialize()
5443 device_printf(sc->dev, "fw_initialize failed: %d.\n", rc); in apply_cfg_and_initialize()
5462 MPASS(sc->flags & MASTER_PF); in partition_resources()
5478 fallback = sc->debug_flags & DF_DISABLE_CFG_RETRY ? false : true; in partition_resources()
5480 retry: in partition_resources()
5484 device_printf(sc->dev, in partition_resources()
5486 "will fall back to a basic configuration and retry.\n", in partition_resources()
5493 goto retry; in partition_resources()
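
[Editor's note] partition_resources() applies the chosen configuration file and, unless DF_DISABLE_CFG_RETRY is set, falls back to the basic built-in configuration exactly once and retries via the retry: label shown above. A self-contained sketch of that one-shot fallback; apply_config() is a hypothetical stand-in for apply_cfg_and_initialize() and the config names are placeholders.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in: pretend only the "default" config applies cleanly. */
static int
apply_config(const char *cfg)
{
	return (strcmp(cfg, "default") == 0 ? 0 : -1);
}

int
main(void)
{
	char cfg[32] = "uwire";
	bool fallback = true;		/* cleared by DF_DISABLE_CFG_RETRY */
	int rc;

retry:
	rc = apply_config(cfg);
	if (rc != 0 && fallback) {
		printf("config \"%s\" failed (%d); "
		    "falling back to the basic configuration and retrying.\n",
		    cfg, rc);
		snprintf(cfg, sizeof(cfg), "%s", "default");
		fallback = false;	/* only one retry */
		goto retry;
	}
	printf("rc %d with config \"%s\"\n", rc, cfg);
	return (rc == 0 ? 0 : 1);
}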
5510 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", in get_params__pre_init()
5511 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), in get_params__pre_init()
5512 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), in get_params__pre_init()
5513 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), in get_params__pre_init()
5514 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); in get_params__pre_init()
5516 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u", in get_params__pre_init()
5517 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers), in get_params__pre_init()
5518 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers), in get_params__pre_init()
5519 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers), in get_params__pre_init()
5520 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers)); in get_params__pre_init()
5522 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", in get_params__pre_init()
5523 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), in get_params__pre_init()
5524 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), in get_params__pre_init()
5525 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), in get_params__pre_init()
5526 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); in get_params__pre_init()
5528 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u", in get_params__pre_init()
5529 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers), in get_params__pre_init()
5530 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers), in get_params__pre_init()
5531 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers), in get_params__pre_init()
5532 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers)); in get_params__pre_init()
5536 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); in get_params__pre_init()
5538 device_printf(sc->dev, in get_params__pre_init()
5543 sc->params.portvec = val[0]; in get_params__pre_init()
5544 sc->params.nports = bitcount32(val[0]); in get_params__pre_init()
5545 sc->params.vpd.cclk = val[1]; in get_params__pre_init()
5548 rc = -t4_init_devlog_ncores_params(sc, 1); in get_params__pre_init()
5552 device_printf(sc->dev, in get_params__pre_init()
5561 * Any params that need to be set before FW_INITIALIZE.
5572 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in set_params__pre_init()
5575 sc->params.fw_vers < FW_VERSION32(1, 20, 1, 0)) { in set_params__pre_init()
5579 device_printf(sc->dev, in set_params__pre_init()
5585 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in set_params__pre_init()
5587 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, in set_params__pre_init()
5590 device_printf(sc->dev, in set_params__pre_init()
5599 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in set_params__pre_init()
5601 sc->params.viid_smt_extn_support = true; in set_params__pre_init()
5603 sc->params.viid_smt_extn_support = false; in set_params__pre_init()
5628 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val); in get_params__post_init()
5630 device_printf(sc->dev, in get_params__post_init()
5635 sc->sge.iq_start = val[0]; in get_params__post_init()
5636 sc->sge.eq_start = val[1]; in get_params__post_init()
5638 sc->tids.ftid_base = val[2]; in get_params__post_init()
5639 sc->tids.ftid_end = val[3]; in get_params__post_init()
5640 sc->tids.nftids = val[3] - val[2] + 1; in get_params__post_init()
5642 sc->vres.l2t.start = val[4]; in get_params__post_init()
5643 sc->vres.l2t.size = val[5] - val[4] + 1; in get_params__post_init()
5645 if (sc->vres.l2t.size > 0) in get_params__post_init()
5647 sc->params.core_vdd = val[6]; in get_params__post_init()
5651 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); in get_params__post_init()
5653 device_printf(sc->dev, in get_params__post_init()
5657 MPASS((int)val[0] >= sc->sge.iq_start); in get_params__post_init()
5658 sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1; in get_params__post_init()
5659 MPASS((int)val[1] >= sc->sge.eq_start); in get_params__post_init()
5660 sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1; in get_params__post_init()
5664 sc->tids.tid_base = t4_read_reg(sc, in get_params__post_init()
5669 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); in get_params__post_init()
5671 device_printf(sc->dev, in get_params__post_init()
5676 sc->tids.hpftid_base = val[0]; in get_params__post_init()
5677 sc->tids.hpftid_end = val[1]; in get_params__post_init()
5678 sc->tids.nhpftids = val[1] - val[0] + 1; in get_params__post_init()
5684 MPASS(sc->tids.hpftid_base == 0); in get_params__post_init()
5685 MPASS(sc->tids.tid_base == sc->tids.nhpftids); in get_params__post_init()
5690 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); in get_params__post_init()
5692 device_printf(sc->dev, in get_params__post_init()
5697 sc->rawf_base = val[0]; in get_params__post_init()
5698 sc->nrawf = val[1] - val[0] + 1; in get_params__post_init()
5702 if (sc->params.ncores > 1) { in get_params__post_init()
5706 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5707 sc->params.tid_qid_sel_mask = rc == 0 ? val[0] : 0; in get_params__post_init()
5721 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5723 sc->params.mps_bg_map = val[0]; in get_params__post_init()
5725 sc->params.mps_bg_map = UINT32_MAX; /* Not a legal value. */ in get_params__post_init()
5729 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5731 sc->params.tp_ch_map = val[0]; in get_params__post_init()
5733 sc->params.tp_ch_map = UINT32_MAX; /* Not a legal value. */ in get_params__post_init()
5737 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5739 sc->params.tx_tp_ch_map = val[0]; in get_params__post_init()
5741 sc->params.tx_tp_ch_map = UINT32_MAX; /* Not a legal value. */ in get_params__post_init()
5748 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5750 sc->params.filter2_wr_support = val[0] != 0; in get_params__post_init()
5752 sc->params.filter2_wr_support = 0; in get_params__post_init()
5759 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5761 sc->params.ulptx_memwrite_dsgl = val[0] != 0; in get_params__post_init()
5763 sc->params.ulptx_memwrite_dsgl = false; in get_params__post_init()
5767 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5769 sc->params.fr_nsmr_tpte_wr_support = val[0] != 0; in get_params__post_init()
5771 sc->params.fr_nsmr_tpte_wr_support = false; in get_params__post_init()
5775 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5777 sc->params.dev_512sgl_mr = val[0] != 0; in get_params__post_init()
5779 sc->params.dev_512sgl_mr = false; in get_params__post_init()
5782 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5784 sc->params.max_pkts_per_eth_tx_pkts_wr = val[0]; in get_params__post_init()
5786 sc->params.max_pkts_per_eth_tx_pkts_wr = 15; in get_params__post_init()
5789 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5792 sc->params.nsched_cls = val[0]; in get_params__post_init()
5794 sc->params.nsched_cls = sc->chip_params->nsched_cls; in get_params__post_init()
5801 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); in get_params__post_init()
5803 device_printf(sc->dev, in get_params__post_init()
5809 sc->x = htobe16(caps.x); \ in get_params__post_init()
5822 if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) { in get_params__post_init()
5824 MPASS(sc->toecaps == 0); in get_params__post_init()
5825 sc->toecaps = 0; in get_params__post_init()
5828 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5830 device_printf(sc->dev, in get_params__post_init()
5834 sc->tids.ntids = val[0]; in get_params__post_init()
5835 if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) { in get_params__post_init()
5836 MPASS(sc->tids.ntids >= sc->tids.nhpftids); in get_params__post_init()
5837 sc->tids.ntids -= sc->tids.nhpftids; in get_params__post_init()
5839 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); in get_params__post_init()
5840 sc->params.hash_filter = 1; in get_params__post_init()
5842 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { in get_params__post_init()
5846 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); in get_params__post_init()
5848 device_printf(sc->dev, in get_params__post_init()
5853 sc->tids.etid_base = val[0]; in get_params__post_init()
5854 sc->tids.etid_end = val[1]; in get_params__post_init()
5855 sc->tids.netids = val[1] - val[0] + 1; in get_params__post_init()
5856 sc->params.eo_wr_cred = val[2]; in get_params__post_init()
5857 sc->params.ethoffload = 1; in get_params__post_init()
5860 if (sc->toecaps) { in get_params__post_init()
5861 /* query offload-related parameters */ in get_params__post_init()
5868 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); in get_params__post_init()
5870 device_printf(sc->dev, in get_params__post_init()
5874 sc->tids.ntids = val[0]; in get_params__post_init()
5875 if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) { in get_params__post_init()
5876 MPASS(sc->tids.ntids >= sc->tids.nhpftids); in get_params__post_init()
5877 sc->tids.ntids -= sc->tids.nhpftids; in get_params__post_init()
5879 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); in get_params__post_init()
5881 sc->tids.stid_base = val[1]; in get_params__post_init()
5882 sc->tids.nstids = val[2] - val[1] + 1; in get_params__post_init()
5884 sc->vres.ddp.start = val[3]; in get_params__post_init()
5885 sc->vres.ddp.size = val[4] - val[3] + 1; in get_params__post_init()
5886 sc->params.ofldq_wr_cred = val[5]; in get_params__post_init()
5887 sc->params.offload = 1; in get_params__post_init()
5890 * The firmware attempts memfree TOE configuration for -SO cards in get_params__post_init()
5897 sc->iscsicaps = 0; in get_params__post_init()
5898 sc->nvmecaps = 0; in get_params__post_init()
5899 sc->rdmacaps = 0; in get_params__post_init()
5901 if (sc->nvmecaps || sc->rdmacaps) { in get_params__post_init()
5906 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val); in get_params__post_init()
5908 device_printf(sc->dev, in get_params__post_init()
5912 sc->vres.stag.start = val[0]; in get_params__post_init()
5913 sc->vres.stag.size = val[1] - val[0] + 1; in get_params__post_init()
5914 sc->vres.pbl.start = val[2]; in get_params__post_init()
5915 sc->vres.pbl.size = val[3] - val[2] + 1; in get_params__post_init()
5917 if (sc->rdmacaps) { in get_params__post_init()
5924 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); in get_params__post_init()
5926 device_printf(sc->dev, in get_params__post_init()
5930 sc->vres.rq.start = val[0]; in get_params__post_init()
5931 sc->vres.rq.size = val[1] - val[0] + 1; in get_params__post_init()
5932 sc->vres.qp.start = val[2]; in get_params__post_init()
5933 sc->vres.qp.size = val[3] - val[2] + 1; in get_params__post_init()
5934 sc->vres.cq.start = val[4]; in get_params__post_init()
5935 sc->vres.cq.size = val[5] - val[4] + 1; in get_params__post_init()
5943 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); in get_params__post_init()
5945 device_printf(sc->dev, in get_params__post_init()
5949 sc->vres.ocq.start = val[0]; in get_params__post_init()
5950 sc->vres.ocq.size = val[1] - val[0] + 1; in get_params__post_init()
5951 sc->vres.srq.start = val[2]; in get_params__post_init()
5952 sc->vres.srq.size = val[3] - val[2] + 1; in get_params__post_init()
5953 sc->params.max_ordird_qp = val[4]; in get_params__post_init()
5954 sc->params.max_ird_adapter = val[5]; in get_params__post_init()
5956 if (sc->iscsicaps) { in get_params__post_init()
5959 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); in get_params__post_init()
5961 device_printf(sc->dev, in get_params__post_init()
5965 sc->vres.iscsi.start = val[0]; in get_params__post_init()
5966 sc->vres.iscsi.size = val[1] - val[0] + 1; in get_params__post_init()
5968 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) { in get_params__post_init()
5971 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); in get_params__post_init()
5973 device_printf(sc->dev, in get_params__post_init()
5977 sc->vres.key.start = val[0]; in get_params__post_init()
5978 sc->vres.key.size = val[1] - val[0] + 1; in get_params__post_init()
5982 * We've got the params we wanted to query directly from the firmware. in get_params__post_init()
5987 t4_read_mtu_tbl(sc, sc->params.mtus, NULL); in get_params__post_init()
5988 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd); in get_params__post_init()
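
[Editor's note] Most of get_params__post_init() above repeats one pattern: query a single firmware parameter and either take the firmware's answer or fall back to a driver default (max_pkts_per_eth_tx_pkts_wr, nsched_cls, the various *_wr_support flags, and so on). The sketch below folds that pattern into a helper; query_param() is a hypothetical stand-in for t4_query_params() and its success rule is invented purely so the example runs.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for t4_query_params(): even "params" succeed. */
static int
query_param(uint32_t param, uint32_t *val)
{
	if (param % 2 == 0) {
		*val = param + 100;
		return (0);
	}
	return (-1);		/* firmware doesn't know this parameter */
}

/* Return the firmware's value, or def if the query fails. */
static uint32_t
query_u32_with_default(uint32_t param, uint32_t def)
{
	uint32_t val;

	return (query_param(param, &val) == 0 ? val : def);
}

int
main(void)
{
	uint32_t max_pkts = query_u32_with_default(2, 15);	/* answered */
	uint32_t nsched_cls = query_u32_with_default(3, 16);	/* default */

	printf("max_pkts_per_wr %u, nsched_cls %u\n", max_pkts, nsched_cls);
	return (0);
}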
6009 callout_schedule_sbt(&sc->ktls_tick, SBT_1MS, 0, C_HARDCLOCK); in ktls_tick()
6021 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &param); in t6_config_kern_tls()
6029 sc->flags |= KERN_TLS_ON; in t6_config_kern_tls()
6030 callout_reset_sbt(&sc->ktls_tick, SBT_1MS, 0, ktls_tick, sc, in t6_config_kern_tls()
6033 sc->flags &= ~KERN_TLS_ON; in t6_config_kern_tls()
6034 callout_stop(&sc->ktls_tick); in t6_config_kern_tls()
6052 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in set_params__post_init()
6057 if (t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val) == 0) in set_params__post_init()
6058 sc->params.port_caps32 = 1; in set_params__post_init()
6061 val = 1 << (G_MASKSIZE(t4_read_reg(sc, A_TP_RSS_CONFIG_TNL)) - 1); in set_params__post_init()
6063 V_MASKFILTER(val - 1)); in set_params__post_init()
6139 if (t4_toe_rexmt_backoff[i] != -1) { in set_params__post_init()
6158 sc->tlst.inline_keys = t4_tls_inline_keys; in set_params__post_init()
6160 sc->tlst.combo_wrs = t4_tls_combo_wrs; in set_params__post_init()
6163 sc->tlst.short_records = t4_tls_short_records; in set_params__post_init()
6164 sc->tlst.partial_ghash = t4_tls_partial_ghash; in set_params__post_init()
6177 struct adapter_params *p = &sc->params; in t4_set_desc()
6179 device_set_descf(sc->dev, "Chelsio %s", p->vpd.id); in t4_set_desc()
6208 ifm = &pi->media; in set_current_media()
6209 if (ifm->ifm_cur != NULL && in set_current_media()
6210 IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE) in set_current_media()
6213 lc = &pi->link_cfg; in set_current_media()
6214 if (lc->requested_aneg != AUTONEG_DISABLE && in set_current_media()
6215 lc->pcaps & FW_PORT_CAP32_ANEG) { in set_current_media()
6220 if (lc->requested_fc & PAUSE_TX) in set_current_media()
6222 if (lc->requested_fc & PAUSE_RX) in set_current_media()
6224 if (lc->requested_speed == 0) in set_current_media()
6225 speed = port_top_speed(pi) * 1000; /* Gbps -> Mbps */ in set_current_media()
6227 speed = lc->requested_speed; in set_current_media()
6239 return (pi->port_type == FW_PORT_TYPE_BT_SGMII || in fixed_ifmedia()
6240 pi->port_type == FW_PORT_TYPE_BT_XFI || in fixed_ifmedia()
6241 pi->port_type == FW_PORT_TYPE_BT_XAUI || in fixed_ifmedia()
6242 pi->port_type == FW_PORT_TYPE_KX4 || in fixed_ifmedia()
6243 pi->port_type == FW_PORT_TYPE_KX || in fixed_ifmedia()
6244 pi->port_type == FW_PORT_TYPE_KR || in fixed_ifmedia()
6245 pi->port_type == FW_PORT_TYPE_BP_AP || in fixed_ifmedia()
6246 pi->port_type == FW_PORT_TYPE_BP4_AP || in fixed_ifmedia()
6247 pi->port_type == FW_PORT_TYPE_BP40_BA || in fixed_ifmedia()
6248 pi->port_type == FW_PORT_TYPE_KR4_100G || in fixed_ifmedia()
6249 pi->port_type == FW_PORT_TYPE_KR_SFP28 || in fixed_ifmedia()
6250 pi->port_type == FW_PORT_TYPE_KR_XLAUI); in fixed_ifmedia()
6263 if (pi->flags & FIXED_IFMEDIA) in build_medialist()
6269 ifm = &pi->media; in build_medialist()
6271 lc = &pi->link_cfg; in build_medialist()
6272 ss = G_FW_PORT_CAP32_SPEED(lc->pcaps); /* Supported Speeds */ in build_medialist()
6276 MPASS(LIST_EMPTY(&ifm->ifm_list)); in build_medialist()
6298 if (lc->pcaps & FW_PORT_CAP32_ANEG) in build_medialist()
6310 struct link_config *lc = &pi->link_cfg; in init_link_config()
6314 lc->requested_caps = 0; in init_link_config()
6315 lc->requested_speed = 0; in init_link_config()
6318 lc->requested_aneg = AUTONEG_DISABLE; in init_link_config()
6320 lc->requested_aneg = AUTONEG_ENABLE; in init_link_config()
6322 lc->requested_aneg = AUTONEG_AUTO; in init_link_config()
6324 lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX | in init_link_config()
6328 lc->requested_fec = FEC_AUTO; in init_link_config()
6330 lc->requested_fec = FEC_NONE; in init_link_config()
6332 /* -1 is handled by the FEC_AUTO block above and not here. */ in init_link_config()
6333 lc->requested_fec = t4_fec & in init_link_config()
6335 if (lc->requested_fec == 0) in init_link_config()
6336 lc->requested_fec = FEC_AUTO; in init_link_config()
6339 lc->force_fec = -1; in init_link_config()
6341 lc->force_fec = 1; in init_link_config()
6343 lc->force_fec = 0; in init_link_config()
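
[Editor's note] init_link_config() maps the t4_fec tunable onto lc->requested_fec: a negative value means FEC_AUTO, zero means FEC_NONE, and anything else is masked down to the valid FEC bits, falling back to FEC_AUTO if nothing valid remains. A standalone sketch of that mapping; the FEC_* bit values below are illustrative only, the real constants live in the driver's headers.

#include <stdio.h>

/* Illustrative values; the driver defines the real FEC_* flags elsewhere. */
#define FEC_RS		0x01
#define FEC_BASER_RS	0x02
#define FEC_NONE	0x04
#define FEC_AUTO	0x08
#define FEC_MODULE	0x10
#define FEC_VALID_BITS	(FEC_RS | FEC_BASER_RS | FEC_NONE | FEC_MODULE)

static int
requested_fec_from_tunable(int t4_fec)
{
	int fec;

	if (t4_fec < 0)
		return (FEC_AUTO);	/* run with the firmware default */
	if (t4_fec == 0)
		return (FEC_NONE);
	fec = t4_fec & FEC_VALID_BITS;	/* keep only meaningful bits */
	return (fec != 0 ? fec : FEC_AUTO);
}

int
main(void)
{
	printf("-1 -> %#x, 0 -> %#x, 3 -> %#x, 0x40 -> %#x\n",
	    requested_fec_from_tunable(-1), requested_fec_from_tunable(0),
	    requested_fec_from_tunable(3), requested_fec_from_tunable(0x40));
	return (0);
}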
6354 struct link_config *lc = &pi->link_cfg; in fixup_link_config()
6360 if (lc->requested_speed != 0) { in fixup_link_config()
6361 fwspeed = speed_to_fwcap(lc->requested_speed); in fixup_link_config()
6362 if ((fwspeed & lc->pcaps) == 0) { in fixup_link_config()
6364 lc->requested_speed = 0; in fixup_link_config()
6369 MPASS(lc->requested_aneg == AUTONEG_ENABLE || in fixup_link_config()
6370 lc->requested_aneg == AUTONEG_DISABLE || in fixup_link_config()
6371 lc->requested_aneg == AUTONEG_AUTO); in fixup_link_config()
6372 if (lc->requested_aneg == AUTONEG_ENABLE && in fixup_link_config()
6373 !(lc->pcaps & FW_PORT_CAP32_ANEG)) { in fixup_link_config()
6375 lc->requested_aneg = AUTONEG_AUTO; in fixup_link_config()
6379 MPASS((lc->requested_fc & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) == 0); in fixup_link_config()
6380 if (lc->requested_fc & PAUSE_TX && in fixup_link_config()
6381 !(lc->pcaps & FW_PORT_CAP32_FC_TX)) { in fixup_link_config()
6383 lc->requested_fc &= ~PAUSE_TX; in fixup_link_config()
6385 if (lc->requested_fc & PAUSE_RX && in fixup_link_config()
6386 !(lc->pcaps & FW_PORT_CAP32_FC_RX)) { in fixup_link_config()
6388 lc->requested_fc &= ~PAUSE_RX; in fixup_link_config()
6390 if (!(lc->requested_fc & PAUSE_AUTONEG) && in fixup_link_config()
6391 !(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)) { in fixup_link_config()
6393 lc->requested_fc |= PAUSE_AUTONEG; in fixup_link_config()
6397 if ((lc->requested_fec & FEC_RS && in fixup_link_config()
6398 !(lc->pcaps & FW_PORT_CAP32_FEC_RS)) || in fixup_link_config()
6399 (lc->requested_fec & FEC_BASER_RS && in fixup_link_config()
6400 !(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS))) { in fixup_link_config()
6402 lc->requested_fec = FEC_AUTO; in fixup_link_config()
6415 struct adapter *sc = pi->adapter; in apply_link_config()
6416 struct link_config *lc = &pi->link_cfg; in apply_link_config()
6423 if (lc->requested_aneg == AUTONEG_ENABLE) in apply_link_config()
6424 MPASS(lc->pcaps & FW_PORT_CAP32_ANEG); in apply_link_config()
6425 if (!(lc->requested_fc & PAUSE_AUTONEG)) in apply_link_config()
6426 MPASS(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE); in apply_link_config()
6427 if (lc->requested_fc & PAUSE_TX) in apply_link_config()
6428 MPASS(lc->pcaps & FW_PORT_CAP32_FC_TX); in apply_link_config()
6429 if (lc->requested_fc & PAUSE_RX) in apply_link_config()
6430 MPASS(lc->pcaps & FW_PORT_CAP32_FC_RX); in apply_link_config()
6431 if (lc->requested_fec & FEC_RS) in apply_link_config()
6432 MPASS(lc->pcaps & FW_PORT_CAP32_FEC_RS); in apply_link_config()
6433 if (lc->requested_fec & FEC_BASER_RS) in apply_link_config()
6434 MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS); in apply_link_config()
6436 if (!(sc->flags & IS_VF)) { in apply_link_config()
6437 rc = -t4_link_l1cfg(sc, sc->mbox, pi->hw_port, lc); in apply_link_config()
6439 device_printf(pi->dev, "l1cfg failed: %d\n", rc); in apply_link_config()
6445 * An L1_CFG will almost always result in a link-change event if the in apply_link_config()
6453 if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG)) in apply_link_config()
6454 lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX); in apply_link_config()
6473 struct vi_info *vi = if_getsoftc(ctx->ifp); in add_maddr()
6474 struct port_info *pi = vi->pi; in add_maddr()
6475 struct adapter *sc = pi->adapter; in add_maddr()
6477 if (ctx->rc < 0) in add_maddr()
6480 ctx->mcaddr[ctx->i] = LLADDR(sdl); in add_maddr()
6481 MPASS(ETHER_IS_MULTICAST(ctx->mcaddr[ctx->i])); in add_maddr()
6482 ctx->i++; in add_maddr()
6484 if (ctx->i == FW_MAC_EXACT_CHUNK) { in add_maddr()
6485 ctx->rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, ctx->del, in add_maddr()
6486 ctx->i, ctx->mcaddr, NULL, &ctx->hash, 0); in add_maddr()
6487 if (ctx->rc < 0) { in add_maddr()
6490 for (j = 0; j < ctx->i; j++) { in add_maddr()
6491 if_printf(ctx->ifp, in add_maddr()
6495 ctx->mcaddr[j][0], ctx->mcaddr[j][1], in add_maddr()
6496 ctx->mcaddr[j][2], ctx->mcaddr[j][3], in add_maddr()
6497 ctx->mcaddr[j][4], ctx->mcaddr[j][5], in add_maddr()
6498 -ctx->rc); in add_maddr()
6502 ctx->del = 0; in add_maddr()
6503 ctx->i = 0; in add_maddr()
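
[Editor's note] add_maddr() accumulates multicast addresses into a small array and flushes them to the firmware one chunk at a time, clearing any pre-existing filters only on the first flush and leaving the remainder for the caller. A self-contained sketch of that chunk-and-flush accumulation; program_chunk() is a hypothetical stand-in for t4_alloc_mac_filt(), and CHUNK is an arbitrary small stand-in for FW_MAC_EXACT_CHUNK.

#include <stdio.h>

#define CHUNK		4	/* stand-in for FW_MAC_EXACT_CHUNK */
#define ETHER_ADDR_LEN	6

struct mcaddr_ctx {
	const unsigned char *mcaddr[CHUNK];
	int i;
	int del;		/* clear existing filters on the first flush */
};

/* Hypothetical stand-in for t4_alloc_mac_filt(). */
static int
program_chunk(const unsigned char **addrs, int n, int del)
{
	(void)addrs;
	printf("programming %d exact-match filters (del=%d)\n", n, del);
	return (0);
}

static int
add_maddr_sketch(struct mcaddr_ctx *ctx, const unsigned char *lladdr)
{
	ctx->mcaddr[ctx->i++] = lladdr;
	if (ctx->i == CHUNK) {
		if (program_chunk(ctx->mcaddr, ctx->i, ctx->del) != 0)
			return (-1);
		ctx->del = 0;	/* only wipe the old filters once */
		ctx->i = 0;
	}
	return (0);
}

int
main(void)
{
	static const unsigned char addr[ETHER_ADDR_LEN] =
	    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	struct mcaddr_ctx ctx = { .i = 0, .del = 1 };

	for (int k = 0; k < 9; k++)
		add_maddr_sketch(&ctx, addr);
	if (ctx.i > 0)		/* the caller flushes the remainder */
		program_chunk(ctx.mcaddr, ctx.i, ctx.del);
	return (0);
}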
6518 struct port_info *pi = vi->pi; in update_mac_settings()
6519 struct adapter *sc = pi->adapter; in update_mac_settings()
6520 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; in update_mac_settings()
6539 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, in update_mac_settings()
6552 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, in update_mac_settings()
6553 ucaddr, true, &vi->smt_idx); in update_mac_settings()
6555 rc = -rc; in update_mac_settings()
6559 vi->xact_addr_filt = rc; in update_mac_settings()
6584 rc = -ctx.rc; in update_mac_settings()
6588 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, in update_mac_settings()
6592 rc = -rc; in update_mac_settings()
6609 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, ctx.hash, 0); in update_mac_settings()
6615 pi->vxlan_tcam_entry = false; in update_mac_settings()
6619 if (IS_MAIN_VI(vi) && sc->vxlan_refcount > 0 && in update_mac_settings()
6620 pi->vxlan_tcam_entry == false) { in update_mac_settings()
6621 rc = t4_alloc_raw_mac_filt(sc, vi->viid, match_all_mac, in update_mac_settings()
6622 match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id, in update_mac_settings()
6625 rc = -rc; in update_mac_settings()
6629 MPASS(rc == sc->rawf_base + pi->port_id); in update_mac_settings()
6631 pi->vxlan_tcam_entry = true; in update_mac_settings()
6670 if (mtx_sleep(&sc->flags, &sc->sc_lock, in begin_synchronized_op()
6680 sc->last_op = wmesg; in begin_synchronized_op()
6681 sc->last_op_thr = curthread; in begin_synchronized_op()
6682 sc->last_op_flags = flags; in begin_synchronized_op()
6702 wakeup(&sc->flags); in begin_vi_detach()
6704 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); in begin_vi_detach()
6707 sc->last_op = "t4detach"; in begin_vi_detach()
6708 sc->last_op_thr = curthread; in begin_vi_detach()
6709 sc->last_op_flags = 0; in begin_vi_detach()
6721 wakeup(&sc->flags); in end_vi_detach()
6739 wakeup(&sc->flags); in end_synchronized_op()
6746 struct port_info *pi = vi->pi; in cxgbe_init_synchronized()
6747 struct adapter *sc = pi->adapter; in cxgbe_init_synchronized()
6748 if_t ifp = vi->ifp; in cxgbe_init_synchronized()
6757 if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_init(sc)) != 0)) in cxgbe_init_synchronized()
6760 if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0)) in cxgbe_init_synchronized()
6768 if (pi->up_vis == 0) { in cxgbe_init_synchronized()
6775 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); in cxgbe_init_synchronized()
6789 txq->eq.flags |= EQ_ENABLED; in cxgbe_init_synchronized()
6796 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { in cxgbe_init_synchronized()
6797 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; in cxgbe_init_synchronized()
6798 t4_set_trace_rss_control(sc, pi->tx_chan, sc->traceq); in cxgbe_init_synchronized()
6799 pi->flags |= HAS_TRACEQ; in cxgbe_init_synchronized()
6803 pi->up_vis++; in cxgbe_init_synchronized()
6805 if (pi->link_cfg.link_ok) in cxgbe_init_synchronized()
6809 mtx_lock(&vi->tick_mtx); in cxgbe_init_synchronized()
6810 if (vi->pi->nvi > 1 || sc->flags & IS_VF) in cxgbe_init_synchronized()
6811 callout_reset(&vi->tick, hz, vi_tick, vi); in cxgbe_init_synchronized()
6813 callout_reset(&vi->tick, hz, cxgbe_tick, vi); in cxgbe_init_synchronized()
6814 mtx_unlock(&vi->tick_mtx); in cxgbe_init_synchronized()
6828 struct port_info *pi = vi->pi; in cxgbe_uninit_synchronized()
6829 struct adapter *sc = pi->adapter; in cxgbe_uninit_synchronized()
6830 if_t ifp = vi->ifp; in cxgbe_uninit_synchronized()
6836 if (!(vi->flags & VI_INIT_DONE)) { in cxgbe_uninit_synchronized()
6840 "vi->flags 0x%016lx, if_flags 0x%08x, " in cxgbe_uninit_synchronized()
6841 "if_drv_flags 0x%08x\n", vi->flags, if_getflags(ifp), in cxgbe_uninit_synchronized()
6854 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); in cxgbe_uninit_synchronized()
6862 txq->eq.flags &= ~EQ_ENABLED; in cxgbe_uninit_synchronized()
6866 mtx_lock(&vi->tick_mtx); in cxgbe_uninit_synchronized()
6867 callout_stop(&vi->tick); in cxgbe_uninit_synchronized()
6868 mtx_unlock(&vi->tick_mtx); in cxgbe_uninit_synchronized()
6876 pi->up_vis--; in cxgbe_uninit_synchronized()
6877 if (pi->up_vis > 0) { in cxgbe_uninit_synchronized()
6882 pi->link_cfg.link_ok = false; in cxgbe_uninit_synchronized()
6883 pi->link_cfg.speed = 0; in cxgbe_uninit_synchronized()
6884 pi->link_cfg.link_down_rc = 255; in cxgbe_uninit_synchronized()
6893 * will walk the entire sc->irq list and clean up whatever is valid.
6903 struct sge *sge = &sc->sge; in t4_setup_intr_handlers()
6918 irq = &sc->irq[0]; in t4_setup_intr_handlers()
6919 rid = sc->intr_type == INTR_INTX ? 0 : 1; in t4_setup_intr_handlers()
6924 if (sc->flags & IS_VF) in t4_setup_intr_handlers()
6925 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports, in t4_setup_intr_handlers()
6928 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, in t4_setup_intr_handlers()
6932 if (!(sc->flags & IS_VF)) { in t4_setup_intr_handlers()
6941 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); in t4_setup_intr_handlers()
6948 pi = sc->port[p]; in t4_setup_intr_handlers()
6950 vi->first_intr = rid - 1; in t4_setup_intr_handlers()
6952 if (vi->nnmrxq > 0) { in t4_setup_intr_handlers()
6953 int n = max(vi->nrxq, vi->nnmrxq); in t4_setup_intr_handlers()
6955 rxq = &sge->rxq[vi->first_rxq]; in t4_setup_intr_handlers()
6957 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; in t4_setup_intr_handlers()
6962 if (q < vi->nrxq) in t4_setup_intr_handlers()
6963 irq->rxq = rxq++; in t4_setup_intr_handlers()
6965 if (q < vi->nnmrxq) in t4_setup_intr_handlers()
6966 irq->nm_rxq = nm_rxq++; in t4_setup_intr_handlers()
6968 if (irq->nm_rxq != NULL && in t4_setup_intr_handlers()
6969 irq->rxq == NULL) { in t4_setup_intr_handlers()
6972 t4_nm_intr, irq->nm_rxq, s); in t4_setup_intr_handlers()
6974 if (irq->nm_rxq != NULL && in t4_setup_intr_handlers()
6975 irq->rxq != NULL) { in t4_setup_intr_handlers()
6981 if (irq->rxq != NULL && in t4_setup_intr_handlers()
6982 irq->nm_rxq == NULL) { in t4_setup_intr_handlers()
6985 t4_intr, irq->rxq, s); in t4_setup_intr_handlers()
6990 if (q < vi->nrxq) { in t4_setup_intr_handlers()
6991 bus_bind_intr(sc->dev, irq->res, in t4_setup_intr_handlers()
6997 vi->nintr++; in t4_setup_intr_handlers()
7008 bus_bind_intr(sc->dev, irq->res, in t4_setup_intr_handlers()
7013 vi->nintr++; in t4_setup_intr_handlers()
7025 vi->nintr++; in t4_setup_intr_handlers()
7030 MPASS(irq == &sc->irq[sc->intr_count]); in t4_setup_intr_handlers()
7047 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); in write_global_rss_key()
7049 t4_write_rss_key(sc, &rss_key[0], -1, 1); in write_global_rss_key()
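
[Editor's note] write_global_rss_key() loads the 40-byte global RSS key with the 32-bit words in reverse order and byte-swapped to big-endian, presumably matching how the hardware reads the key registers. A tiny standalone sketch of just the word reversal; the placeholder key contents and the omission of the htobe32() step (dropped to keep the sketch portable) are assumptions.

#include <stdint.h>
#include <stdio.h>

#define RSS_KEY_WORDS	10	/* 40-byte RSS key = 10 x 32-bit words */

int
main(void)
{
	uint32_t raw[RSS_KEY_WORDS], out[RSS_KEY_WORDS];
	int i;

	for (i = 0; i < RSS_KEY_WORDS; i++)
		raw[i] = 0x01010101u * (uint32_t)i;	/* placeholder key */

	/* Mirror of: rss_key[i] = htobe32(raw_rss_key[n - 1 - i]); */
	for (i = 0; i < RSS_KEY_WORDS; i++)
		out[i] = raw[RSS_KEY_WORDS - 1 - i];

	for (i = 0; i < RSS_KEY_WORDS; i++)
		printf("out[%d] = 0x%08x\n", i, out[i]);
	return (0);
}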
7070 MPASS(sc->params.nports <= nitems(sc->tq)); in adapter_full_init()
7071 for (i = 0; i < sc->params.nports; i++) { in adapter_full_init()
7072 if (sc->tq[i] != NULL) in adapter_full_init()
7074 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, in adapter_full_init()
7075 taskqueue_thread_enqueue, &sc->tq[i]); in adapter_full_init()
7076 if (sc->tq[i] == NULL) { in adapter_full_init()
7080 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", in adapter_full_init()
7081 device_get_nameunit(sc->dev), i); in adapter_full_init()
7084 if (!(sc->flags & IS_VF)) { in adapter_full_init()
7098 KASSERT((sc->flags & FULL_INIT_DONE) == 0, in adapter_init()
7105 sc->flags |= FULL_INIT_DONE; in adapter_init()
7120 for (i = 0; i < nitems(sc->tq); i++) { in adapter_full_uninit()
7121 if (sc->tq[i] == NULL) in adapter_full_uninit()
7123 taskqueue_free(sc->tq[i]); in adapter_full_uninit()
7124 sc->tq[i] = NULL; in adapter_full_uninit()
7127 sc->flags &= ~FULL_INIT_DONE; in adapter_full_uninit()
7172 * enabling any 4-tuple hash is nonsense configuration. in hashen_to_hashconfig()
7201 struct adapter *sc = vi->adapter; in vi_full_init()
7222 if (vi->nrxq > vi->rss_size) { in vi_full_init()
7224 "some queues will never receive traffic.\n", vi->nrxq, in vi_full_init()
7225 vi->rss_size); in vi_full_init()
7226 } else if (vi->rss_size % vi->nrxq) { in vi_full_init()
7228 "expect uneven traffic distribution.\n", vi->nrxq, in vi_full_init()
7229 vi->rss_size); in vi_full_init()
7232 if (vi->nrxq != nbuckets) { in vi_full_init()
7234 "performance will be impacted.\n", vi->nrxq, nbuckets); in vi_full_init()
7237 if (vi->rss == NULL) in vi_full_init()
7238 vi->rss = malloc(vi->rss_size * sizeof (*vi->rss), M_CXGBE, in vi_full_init()
7240 for (i = 0; i < vi->rss_size;) { in vi_full_init()
7243 j %= vi->nrxq; in vi_full_init()
7244 rxq = &sc->sge.rxq[vi->first_rxq + j]; in vi_full_init()
7245 vi->rss[i++] = rxq->iq.abs_id; in vi_full_init()
7248 vi->rss[i++] = rxq->iq.abs_id; in vi_full_init()
7249 if (i == vi->rss_size) in vi_full_init()
7255 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, in vi_full_init()
7256 vi->rss, vi->rss_size); in vi_full_init()
7263 vi->hashen = hashconfig_to_hashen(hashconfig); in vi_full_init()
7270 extra = hashen_to_hashconfig(vi->hashen) ^ hashconfig; in vi_full_init()
7287 CH_ALERT(vi, "IPv4 2-tuple hashing forced on.\n"); in vi_full_init()
7289 CH_ALERT(vi, "TCP/IPv4 4-tuple hashing forced on.\n"); in vi_full_init()
7291 CH_ALERT(vi, "IPv6 2-tuple hashing forced on.\n"); in vi_full_init()
7293 CH_ALERT(vi, "TCP/IPv6 4-tuple hashing forced on.\n"); in vi_full_init()
7295 CH_ALERT(vi, "UDP/IPv4 4-tuple hashing forced on.\n"); in vi_full_init()
7297 CH_ALERT(vi, "UDP/IPv6 4-tuple hashing forced on.\n"); in vi_full_init()
7299 vi->hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | in vi_full_init()
7304 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, vi->rss[0], in vi_full_init()
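
[Editor's note] vi_full_init() fills the RSS indirection table by walking the VI's rx queues round-robin until every rss_size slot holds an absolute queue id, which is also why an rss_size that is not a multiple of nrxq produces the "uneven traffic distribution" warning shown above. The standalone sketch below captures that effect with a simple modulo walk (the driver's loop restarts the queue list each pass, which amounts to the same distribution); the queue ids and sizes are placeholders.

#include <stdio.h>

#define NRXQ		3	/* rx queues owned by the VI */
#define RSS_SIZE	8	/* slots in the indirection table */

int
main(void)
{
	int abs_id[NRXQ], rss[RSS_SIZE], hits[NRXQ] = { 0 };
	int i, q;

	for (q = 0; q < NRXQ; q++)
		abs_id[q] = 100 + q;		/* placeholder absolute iq ids */

	/* Walk the queues round-robin until every slot is filled. */
	for (i = 0; i < RSS_SIZE; i++) {
		rss[i] = abs_id[i % NRXQ];
		hits[i % NRXQ]++;
	}

	for (i = 0; i < RSS_SIZE; i++)
		printf("rss[%d] = %d\n", i, rss[i]);
	for (q = 0; q < NRXQ; q++)
		printf("queue %d (abs id %d) fills %d of %d slots\n",
		    q, abs_id[q], hits[q], RSS_SIZE);
	return (0);
}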
7319 ASSERT_SYNCHRONIZED_OP(vi->adapter); in vi_init()
7320 KASSERT((vi->flags & VI_INIT_DONE) == 0, in vi_init()
7327 vi->flags |= VI_INIT_DONE; in vi_init()
7339 if (vi->flags & VI_INIT_DONE) { in vi_full_uninit()
7341 free(vi->rss, M_CXGBE); in vi_full_uninit()
7342 free(vi->nm_rss, M_CXGBE); in vi_full_uninit()
7346 vi->flags &= ~VI_INIT_DONE; in vi_full_uninit()
7352 struct sge_eq *eq = &txq->eq; in quiesce_txq()
7353 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; in quiesce_txq()
7355 MPASS(eq->flags & EQ_SW_ALLOCATED); in quiesce_txq()
7356 MPASS(!(eq->flags & EQ_ENABLED)); in quiesce_txq()
7359 while (!mp_ring_is_idle(txq->r)) { in quiesce_txq()
7360 mp_ring_check_drainage(txq->r, 4096); in quiesce_txq()
7363 MPASS(txq->txp.npkt == 0); in quiesce_txq()
7365 if (eq->flags & EQ_HW_ALLOCATED) { in quiesce_txq()
7371 while (spg->cidx != htobe16(eq->pidx)) in quiesce_txq()
7373 while (eq->cidx != eq->pidx) in quiesce_txq()
7381 while (eq->cidx != eq->pidx) { in quiesce_txq()
7385 txsd = &txq->sdesc[eq->cidx]; in quiesce_txq()
7386 for (m = txsd->m; m != NULL; m = nextpkt) { in quiesce_txq()
7387 nextpkt = m->m_nextpkt; in quiesce_txq()
7388 m->m_nextpkt = NULL; in quiesce_txq()
7391 IDXINCR(eq->cidx, txsd->desc_used, eq->sidx); in quiesce_txq()
7393 spg->pidx = spg->cidx = htobe16(eq->cidx); in quiesce_txq()
7404 while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL) { in quiesce_wrq()
7405 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); in quiesce_wrq()
7407 wrq->nwr_pending--; in quiesce_wrq()
7408 wrq->ndesc_needed -= howmany(wr->wr_len, EQ_ESIZE); in quiesce_wrq()
7412 MPASS(wrq->nwr_pending == 0); in quiesce_wrq()
7413 MPASS(wrq->ndesc_needed == 0); in quiesce_wrq()
7414 wrq->nwr_pending = 0; in quiesce_wrq()
7415 wrq->ndesc_needed = 0; in quiesce_wrq()
7423 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) in quiesce_iq_fl()
7427 MPASS(iq->flags & IQ_HAS_FL); in quiesce_iq_fl()
7429 mtx_lock(&sc->sfl_lock); in quiesce_iq_fl()
7431 fl->flags |= FL_DOOMED; in quiesce_iq_fl()
7433 callout_stop(&sc->sfl_callout); in quiesce_iq_fl()
7434 mtx_unlock(&sc->sfl_lock); in quiesce_iq_fl()
7436 KASSERT((fl->flags & FL_STARVING) == 0, in quiesce_iq_fl()
7440 if (!(iq->flags & IQ_HW_ALLOCATED)) in quiesce_iq_fl()
7454 struct adapter *sc = vi->adapter; in quiesce_vi()
7464 if (!(vi->flags & VI_INIT_DONE)) in quiesce_vi()
7473 quiesce_wrq(&ofld_txq->wrq); in quiesce_vi()
7478 quiesce_iq_fl(sc, &rxq->iq, &rxq->fl); in quiesce_vi()
7483 quiesce_iq_fl(sc, &ofld_rxq->iq, &ofld_rxq->fl); in quiesce_vi()
7494 irq->rid = rid; in t4_alloc_irq()
7495 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, in t4_alloc_irq()
7497 if (irq->res == NULL) { in t4_alloc_irq()
7498 device_printf(sc->dev, in t4_alloc_irq()
7503 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, in t4_alloc_irq()
7504 NULL, handler, arg, &irq->tag); in t4_alloc_irq()
7506 device_printf(sc->dev, in t4_alloc_irq()
7510 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name); in t4_alloc_irq()
7518 if (irq->tag) in t4_free_irq()
7519 bus_teardown_intr(sc->dev, irq->res, irq->tag); in t4_free_irq()
7520 if (irq->res) in t4_free_irq()
7521 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); in t4_free_irq()
7532 regs->version = chip_id(sc) | chip_rev(sc) << 10; in get_regs()
7533 t4_get_regs(sc, buf, regs->len); in get_regs()
7560 if (sc->flags & IS_VF) { in read_vf_stat()
7564 mtx_assert(&sc->reg_lock, MA_OWNED); in read_vf_stat()
7580 if (!(sc->flags & IS_VF)) in t4_get_vi_stats()
7581 mtx_lock(&sc->reg_lock); in t4_get_vi_stats()
7582 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); in t4_get_vi_stats()
7583 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); in t4_get_vi_stats()
7584 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); in t4_get_vi_stats()
7585 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); in t4_get_vi_stats()
7586 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); in t4_get_vi_stats()
7587 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); in t4_get_vi_stats()
7588 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); in t4_get_vi_stats()
7589 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); in t4_get_vi_stats()
7590 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); in t4_get_vi_stats()
7591 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); in t4_get_vi_stats()
7592 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); in t4_get_vi_stats()
7593 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); in t4_get_vi_stats()
7594 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); in t4_get_vi_stats()
7595 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); in t4_get_vi_stats()
7596 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); in t4_get_vi_stats()
7597 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); in t4_get_vi_stats()
7598 if (!(sc->flags & IS_VF)) in t4_get_vi_stats()
7599 mtx_unlock(&sc->reg_lock); in t4_get_vi_stats()
7622 mtx_assert(&vi->tick_mtx, MA_OWNED); in vi_refresh_stats()
7624 if (vi->flags & VI_SKIP_STATS) in vi_refresh_stats()
7629 if (timevalcmp(&tv, &vi->last_refreshed, <)) in vi_refresh_stats()
7632 t4_get_vi_stats(vi->adapter, vi->vin, &vi->stats); in vi_refresh_stats()
7633 getmicrotime(&vi->last_refreshed); in vi_refresh_stats()
7645 mtx_assert(&vi->tick_mtx, MA_OWNED); in cxgbe_refresh_stats()
7647 if (vi->flags & VI_SKIP_STATS) in cxgbe_refresh_stats()
7652 if (timevalcmp(&tv, &vi->last_refreshed, <)) in cxgbe_refresh_stats()
7655 pi = vi->pi; in cxgbe_refresh_stats()
7656 sc = vi->adapter; in cxgbe_refresh_stats()
7658 t4_get_port_stats(sc, pi->hw_port, &pi->stats); in cxgbe_refresh_stats()
7659 chan_map = pi->rx_e_chan_map; in cxgbe_refresh_stats()
7661 i = ffs(chan_map) - 1; in cxgbe_refresh_stats()
7662 mtx_lock(&sc->reg_lock); in cxgbe_refresh_stats()
7665 mtx_unlock(&sc->reg_lock); in cxgbe_refresh_stats()
7669 pi->tnl_cong_drops = tnl_cong_drops; in cxgbe_refresh_stats()
7670 getmicrotime(&vi->last_refreshed); in cxgbe_refresh_stats()
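
[Editor's note] Both vi_refresh_stats() and cxgbe_refresh_stats() skip the expensive counter reads when the last refresh happened within a short interval (defined elsewhere in the file), by comparing "now" against last_refreshed. A userspace sketch of the same rate-limit pattern, using clock_gettime() in place of the kernel's getmicrotime()/timevalcmp() and a whole-second threshold picked only for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static struct timespec last_refreshed;

/* Refresh at most once per second; return true if we actually refreshed. */
static bool
maybe_refresh(void)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	if (now.tv_sec - last_refreshed.tv_sec < 1)
		return (false);		/* too soon, keep the cached stats */
	/* ... read the hardware counters here ... */
	last_refreshed = now;
	return (true);
}

int
main(void)
{
	for (int i = 0; i < 5; i++) {
		printf("tick %d: %s\n", i,
		    maybe_refresh() ? "refreshed" : "cached");
		usleep(400 * 1000);
	}
	return (0);
}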
7679 mtx_assert(&vi->tick_mtx, MA_OWNED); in cxgbe_tick()
7682 callout_schedule(&vi->tick, hz); in cxgbe_tick()
7690 mtx_assert(&vi->tick_mtx, MA_OWNED); in vi_tick()
7693 callout_schedule(&vi->tick, hz); in vi_tick()
7740 sc->params.ncores, "# of active CIM cores"); in cim_sysctls()
7742 for (i = 0; i < sc->params.ncores; i++) { in cim_sysctls()
7798 MPASS(qcount <= sc->chip_params->cim_num_ibq); in cim_sysctls()
7829 MPASS(qcount <= sc->chip_params->cim_num_obq); in cim_sysctls()
7866 struct sysctl_ctx_list *ctx = &sc->ctx; in t4_sysctls()
7874 oid = device_get_sysctl_tree(sc->dev); in t4_sysctls()
7877 sc->sc_do_rxcopy = 1; in t4_sysctls()
7879 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); in t4_sysctls()
7882 sc->params.nports, "# of ports"); in t4_sysctls()
7886 (uintptr_t)&sc->doorbells, sysctl_bitfield_8b, "A", in t4_sysctls()
7890 sc->params.vpd.cclk, "core clock frequency (in KHz)"); in t4_sysctls()
7894 sc->params.sge.timer_val, sizeof(sc->params.sge.timer_val), in t4_sysctls()
7899 sc->params.sge.counter_val, sizeof(sc->params.sge.counter_val), in t4_sysctls()
7904 sc->lro_timeout = 100; in t4_sysctls()
7906 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); in t4_sysctls()
7909 &sc->debug_flags, 0, "flags to enable runtime debugging"); in t4_sysctls()
7912 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); in t4_sysctls()
7915 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); in t4_sysctls()
7917 if (sc->flags & IS_VF) in t4_sysctls()
7924 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number"); in t4_sysctls()
7927 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number"); in t4_sysctls()
7930 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change"); in t4_sysctls()
7933 CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version"); in t4_sysctls()
7936 CTLFLAG_RD, sc->params.vpd.na, 0, "network address"); in t4_sysctls()
7939 sc->er_version, 0, "expansion ROM version"); in t4_sysctls()
7942 sc->bs_version, 0, "bootstrap firmware version"); in t4_sysctls()
7945 NULL, sc->params.scfg_vers, "serial config version"); in t4_sysctls()
7948 NULL, sc->params.vpd_vers, "VPD version"); in t4_sysctls()
7951 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); in t4_sysctls()
7954 sc->cfcsum, "config file checksum"); in t4_sysctls()
7959 (uintptr_t)&sc->name, sysctl_bitfield_16b, "A", \ in t4_sysctls()
7975 NULL, sc->tids.nftids, "number of filters"); in t4_sysctls()
7997 &sc->swintr, 0, "software triggered interrupts"); in t4_sysctls()
8023 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); in t4_sysctls()
8030 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, -1, in t4_sysctls()
8129 CTLFLAG_RW, &sc->tlst.inline_keys, 0, "Always pass TLS " in t4_sysctls()
8135 CTLFLAG_RW, &sc->tlst.combo_wrs, 0, "Attempt to " in t4_sysctls()
8140 CTLFLAG_RW, &sc->tlst.short_records, 0, in t4_sysctls()
8141 "Use cipher-only mode for short records."); in t4_sysctls()
8143 CTLFLAG_RW, &sc->tlst.partial_ghash, 0, in t4_sysctls()
8144 "Use partial GHASH for AES-GCM records."); in t4_sysctls()
8161 sc->tt.cong_algorithm = -1; in t4_sysctls()
8163 CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control " in t4_sysctls()
8164 "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, " in t4_sysctls()
8167 sc->tt.sndbuf = -1; in t4_sysctls()
8169 &sc->tt.sndbuf, 0, "hardware send buffer"); in t4_sysctls()
8171 sc->tt.ddp = 0; in t4_sysctls()
8173 CTLFLAG_RW | CTLFLAG_SKIP, &sc->tt.ddp, 0, ""); in t4_sysctls()
8175 &sc->tt.ddp, 0, "Enable zero-copy aio_read(2)"); in t4_sysctls()
8177 sc->tt.rx_coalesce = -1; in t4_sysctls()
8179 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); in t4_sysctls()
8181 sc->tt.tls = 1; in t4_sysctls()
8186 sc->tt.tx_align = -1; in t4_sysctls()
8188 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); in t4_sysctls()
8190 sc->tt.tx_zcopy = 0; in t4_sysctls()
8192 CTLFLAG_RW, &sc->tt.tx_zcopy, 0, in t4_sysctls()
8193 "Enable zero-copy aio_write(2)"); in t4_sysctls()
8195 sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading; in t4_sysctls()
8198 &sc->tt.cop_managed_offloading, 0, in t4_sysctls()
8201 sc->tt.autorcvbuf_inc = 16 * 1024; in t4_sysctls()
8203 CTLFLAG_RW, &sc->tt.autorcvbuf_inc, 0, in t4_sysctls()
8206 sc->tt.update_hc_on_pmtu_change = 1; in t4_sysctls()
8209 &sc->tt.update_hc_on_pmtu_change, 0, in t4_sysctls()
8212 sc->tt.iso = 1; in t4_sysctls()
8214 &sc->tt.iso, 0, "Enable iSCSI segmentation offload"); in t4_sysctls()
8304 struct sysctl_ctx_list *ctx = &vi->ctx; in vi_sysctls()
8311 oid = device_get_sysctl_tree(vi->dev); in vi_sysctls()
8315 vi->viid, "VI identifier"); in vi_sysctls()

8317 &vi->nrxq, 0, "# of rx queues"); in vi_sysctls()
8319 &vi->ntxq, 0, "# of tx queues"); in vi_sysctls()
8321 &vi->first_rxq, 0, "index of first rx queue"); in vi_sysctls()
8323 &vi->first_txq, 0, "index of first tx queue"); in vi_sysctls()
8325 vi->rss_base, "start of RSS indirection table"); in vi_sysctls()
8327 vi->rss_size, "size of RSS indirection table"); in vi_sysctls()
8333 "Reserve queue 0 for non-flowid packets"); in vi_sysctls()
8336 if (vi->adapter->flags & IS_VF) { in vi_sysctls()
8337 MPASS(vi->flags & TX_USES_VM_WR); in vi_sysctls()
8347 if (vi->nofldrxq != 0) { in vi_sysctls()
8349 &vi->nofldrxq, 0, in vi_sysctls()
8352 CTLFLAG_RD, &vi->first_ofld_rxq, 0, in vi_sysctls()
8365 if (vi->nofldtxq != 0) { in vi_sysctls()
8367 &vi->nofldtxq, 0, in vi_sysctls()
8370 CTLFLAG_RD, &vi->first_ofld_txq, 0, in vi_sysctls()
8375 if (vi->nnmrxq != 0) { in vi_sysctls()
8377 &vi->nnmrxq, 0, "# of netmap rx queues"); in vi_sysctls()
8379 &vi->nnmtxq, 0, "# of netmap tx queues"); in vi_sysctls()
8381 CTLFLAG_RD, &vi->first_nm_rxq, 0, in vi_sysctls()
8384 CTLFLAG_RD, &vi->first_nm_txq, 0, in vi_sysctls()
8407 struct sysctl_ctx_list *ctx = &pi->ctx; in cxgbe_sysctls()
8410 struct adapter *sc = pi->adapter; in cxgbe_sysctls()
8418 oid = device_get_sysctl_tree(pi->dev); in cxgbe_sysctls()
8424 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { in cxgbe_sysctls()
8450 "autonegotiation (-1 = not supported)"); in cxgbe_sysctls()
8456 &pi->link_cfg.requested_caps, 0, "L1 config requested by driver"); in cxgbe_sysctls()
8458 &pi->link_cfg.pcaps, 0, "port capabilities"); in cxgbe_sysctls()
8460 &pi->link_cfg.acaps, 0, "advertised capabilities"); in cxgbe_sysctls()
8462 &pi->link_cfg.lpacaps, 0, "link partner advertised capabilities"); in cxgbe_sysctls()
8467 pi->mps_bg_map, "MPS buffer group map"); in cxgbe_sysctls()
8469 NULL, pi->rx_e_chan_map, "TP rx e-channel map"); in cxgbe_sysctls()
8471 pi->tx_chan, "TP tx c-channel"); in cxgbe_sysctls()
8473 pi->rx_chan, "TP rx c-channel"); in cxgbe_sysctls()
8475 if (sc->flags & IS_VF) in cxgbe_sysctls()
8486 CTLFLAG_RW, &pi->sched_params->pktsize, 0, in cxgbe_sysctls()
8487 "pktsize for per-flow cl-rl (0 means up to the driver )"); in cxgbe_sysctls()
8489 CTLFLAG_RW, &pi->sched_params->burstsize, 0, in cxgbe_sysctls()
8490 "burstsize for per-flow cl-rl (0 means up to the driver)"); in cxgbe_sysctls()
8491 for (i = 0; i < sc->params.nsched_cls; i++) { in cxgbe_sysctls()
8492 struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i]; in cxgbe_sysctls()
8499 CTLFLAG_RD, &tc->state, 0, "current state"); in cxgbe_sysctls()
8502 (uintptr_t)&tc->flags, sysctl_bitfield_8b, "A", "flags"); in cxgbe_sysctls()
8504 CTLFLAG_RD, &tc->refcount, 0, "references to this class"); in cxgbe_sysctls()
8505 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params", in cxgbe_sysctls()
8507 (pi->port_id << 16) | i, sysctl_tc_params, "A", in cxgbe_sysctls()
8518 &pi->tx_parse_error, 0, in cxgbe_sysctls()
8522 if (sc->params.tp.lb_mode) { \ in cxgbe_sysctls()
8530 t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_##stat##_L), \ in cxgbe_sysctls()
8569 CTLFLAG_RD, &pi->stats.rx_fcs_err, in cxgbe_sysctls()
8602 if (pi->mps_bg_map & 1) { in cxgbe_sysctls()
8604 "# drops due to buffer-group 0 overflows"); in cxgbe_sysctls()
8606 "# of buffer-group 0 truncated packets"); in cxgbe_sysctls()
8608 if (pi->mps_bg_map & 2) { in cxgbe_sysctls()
8610 "# drops due to buffer-group 1 overflows"); in cxgbe_sysctls()
8612 "# of buffer-group 1 truncated packets"); in cxgbe_sysctls()
8614 if (pi->mps_bg_map & 4) { in cxgbe_sysctls()
8616 "# drops due to buffer-group 2 overflows"); in cxgbe_sysctls()
8618 "# of buffer-group 2 truncated packets"); in cxgbe_sysctls()
8620 if (pi->mps_bg_map & 8) { in cxgbe_sysctls()
8622 "# drops due to buffer-group 3 overflows"); in cxgbe_sysctls()
8624 "# of buffer-group 3 truncated packets"); in cxgbe_sysctls()
8636 for (i = arg1; arg2; arg2 -= sizeof(int), i++) { in sysctl_int_array()
8686 struct adapter *sc = pi->adapter; in sysctl_btphy()
8690 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); in sysctl_btphy()
8697 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, in sysctl_btphy()
8716 val = vi->rsrv_noflowq; in sysctl_noflowq()
8718 if (rc != 0 || req->newptr == NULL) in sysctl_noflowq()
8721 if ((val >= 1) && (vi->ntxq > 1)) in sysctl_noflowq()
8722 vi->rsrv_noflowq = 1; in sysctl_noflowq()
8724 vi->rsrv_noflowq = 0; in sysctl_noflowq()
8733 struct adapter *sc = vi->adapter; in sysctl_tx_vm_wr()
8736 MPASS(!(sc->flags & IS_VF)); in sysctl_tx_vm_wr()
8738 val = vi->flags & TX_USES_VM_WR ? 1 : 0; in sysctl_tx_vm_wr()
8740 if (rc != 0 || req->newptr == NULL) in sysctl_tx_vm_wr()
8752 else if (if_getdrvflags(vi->ifp) & IFF_DRV_RUNNING) { in sysctl_tx_vm_wr()
8760 struct port_info *pi = vi->pi; in sysctl_tx_vm_wr()
8763 uint8_t npkt = sc->params.max_pkts_per_eth_tx_pkts_wr; in sysctl_tx_vm_wr()
8766 vi->flags |= TX_USES_VM_WR; in sysctl_tx_vm_wr()
8767 if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_VM_TSO); in sysctl_tx_vm_wr()
8769 V_TXPKT_INTF(pi->hw_port)); in sysctl_tx_vm_wr()
8770 if (!(sc->flags & IS_VF)) in sysctl_tx_vm_wr()
8771 npkt--; in sysctl_tx_vm_wr()
8773 vi->flags &= ~TX_USES_VM_WR; in sysctl_tx_vm_wr()
8774 if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_TSO); in sysctl_tx_vm_wr()
8776 V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) | in sysctl_tx_vm_wr()
8777 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); in sysctl_tx_vm_wr()
8780 txq->cpl_ctrl0 = ctrl0; in sysctl_tx_vm_wr()
8781 txq->txp.max_npkt = npkt; in sysctl_tx_vm_wr()
8792 struct adapter *sc = vi->adapter; in sysctl_holdoff_tmr_idx()
8797 idx = vi->tmr_idx; in sysctl_holdoff_tmr_idx()
8800 if (rc != 0 || req->newptr == NULL) in sysctl_holdoff_tmr_idx()
8811 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); in sysctl_holdoff_tmr_idx()
8814 atomic_store_rel_8(&rxq->iq.intr_params, v); in sysctl_holdoff_tmr_idx()
8816 rxq->iq.intr_params = v; in sysctl_holdoff_tmr_idx()
8819 vi->tmr_idx = idx; in sysctl_holdoff_tmr_idx()
8829 struct adapter *sc = vi->adapter; in sysctl_holdoff_pktc_idx()
8832 idx = vi->pktc_idx; in sysctl_holdoff_pktc_idx()
8835 if (rc != 0 || req->newptr == NULL) in sysctl_holdoff_pktc_idx()
8838 if (idx < -1 || idx >= SGE_NCOUNTERS) in sysctl_holdoff_pktc_idx()
8846 if (vi->flags & VI_INIT_DONE) in sysctl_holdoff_pktc_idx()
8849 vi->pktc_idx = idx; in sysctl_holdoff_pktc_idx()
8859 struct adapter *sc = vi->adapter; in sysctl_qsize_rxq()
8862 qsize = vi->qsize_rxq; in sysctl_qsize_rxq()
8865 if (rc != 0 || req->newptr == NULL) in sysctl_qsize_rxq()
8876 if (vi->flags & VI_INIT_DONE) in sysctl_qsize_rxq()
8879 vi->qsize_rxq = qsize; in sysctl_qsize_rxq()
8889 struct adapter *sc = vi->adapter; in sysctl_qsize_txq()
8892 qsize = vi->qsize_txq; in sysctl_qsize_txq()
8895 if (rc != 0 || req->newptr == NULL) in sysctl_qsize_txq()
8906 if (vi->flags & VI_INIT_DONE) in sysctl_qsize_txq()
8909 vi->qsize_txq = qsize; in sysctl_qsize_txq()
8919 struct adapter *sc = pi->adapter; in sysctl_pause_settings()
8920 struct link_config *lc = &pi->link_cfg; in sysctl_pause_settings()
8923 if (req->newptr == NULL) { in sysctl_pause_settings()
8931 if (lc->link_ok) { in sysctl_pause_settings()
8932 sbuf_printf(sb, "%b", (lc->fc & (PAUSE_TX | PAUSE_RX)) | in sysctl_pause_settings()
8933 (lc->requested_fc & PAUSE_AUTONEG), bits); in sysctl_pause_settings()
8935 sbuf_printf(sb, "%b", lc->requested_fc & (PAUSE_TX | in sysctl_pause_settings()
8944 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX | in sysctl_pause_settings()
8956 n = s[0] - '0'; in sysctl_pause_settings()
8960 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, in sysctl_pause_settings()
8966 lc->requested_fc = n; in sysctl_pause_settings()
8968 if (pi->up_vis > 0) in sysctl_pause_settings()
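
[Editor's note] sysctl_pause_settings() round-trips the requested pause configuration through a single character: reads emit '0' plus the OR of the PAUSE_* flags, and writes decode a digit back into flags, rejecting anything outside the valid mask. A sketch of that encode/decode pair; the PAUSE_* values here are illustrative only, the real ones are in the driver's headers.

#include <stdio.h>

/* Illustrative values; the driver defines the real PAUSE_* flags. */
#define PAUSE_RX	0x1
#define PAUSE_TX	0x2
#define PAUSE_AUTONEG	0x4
#define PAUSE_MASK	(PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG)

static char
encode_pause(int fc)
{
	return ('0' + (fc & PAUSE_MASK));
}

static int
decode_pause(char c, int *fc)
{
	int n = c - '0';

	if (n & ~PAUSE_MASK)
		return (-1);	/* the sysctl handler returns EINVAL here */
	*fc = n;
	return (0);
}

int
main(void)
{
	int fc;

	printf("encode(PAUSE_RX|PAUSE_TX) = '%c'\n",
	    encode_pause(PAUSE_RX | PAUSE_TX));
	if (decode_pause('7', &fc) == 0)
		printf("decode('7') = %#x\n", fc);
	if (decode_pause('9', &fc) != 0)
		printf("decode('9') rejected\n");
	return (0);
}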
8983 struct link_config *lc = &pi->link_cfg; in sysctl_link_fec()
8990 if (lc->link_ok) in sysctl_link_fec()
8991 sbuf_printf(sb, "%b", lc->fec, t4_fec_bits); in sysctl_link_fec()
9004 struct adapter *sc = pi->adapter; in sysctl_requested_fec()
9005 struct link_config *lc = &pi->link_cfg; in sysctl_requested_fec()
9009 if (req->newptr == NULL) { in sysctl_requested_fec()
9016 sbuf_printf(sb, "%b", lc->requested_fec, t4_fec_bits); in sysctl_requested_fec()
9024 lc->requested_fec == FEC_AUTO ? -1 : in sysctl_requested_fec()
9025 lc->requested_fec & (M_FW_PORT_CAP32_FEC | FEC_MODULE)); in sysctl_requested_fec()
9037 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, in sysctl_requested_fec()
9042 old = lc->requested_fec; in sysctl_requested_fec()
9044 lc->requested_fec = FEC_AUTO; in sysctl_requested_fec()
9046 lc->requested_fec = FEC_NONE; in sysctl_requested_fec()
9048 if ((lc->pcaps | in sysctl_requested_fec()
9050 lc->pcaps) { in sysctl_requested_fec()
9054 lc->requested_fec = n & (M_FW_PORT_CAP32_FEC | in sysctl_requested_fec()
9059 if (pi->up_vis > 0) { in sysctl_requested_fec()
9062 lc->requested_fec = old; in sysctl_requested_fec()
9080 struct adapter *sc = pi->adapter; in sysctl_module_fec()
9081 struct link_config *lc = &pi->link_cfg; in sysctl_module_fec()
9099 if (pi->up_vis == 0) { in sysctl_module_fec()
9109 fec = lc->fec_hint; in sysctl_module_fec()
9110 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE || in sysctl_module_fec()
9111 !fec_supported(lc->pcaps)) { in sysctl_module_fec()
9132 struct adapter *sc = pi->adapter; in sysctl_autoneg()
9133 struct link_config *lc = &pi->link_cfg; in sysctl_autoneg()
9136 if (lc->pcaps & FW_PORT_CAP32_ANEG) in sysctl_autoneg()
9137 val = lc->requested_aneg == AUTONEG_DISABLE ? 0 : 1; in sysctl_autoneg()
9139 val = -1; in sysctl_autoneg()
9141 if (rc != 0 || req->newptr == NULL) in sysctl_autoneg()
9150 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, in sysctl_autoneg()
9155 if (val == AUTONEG_ENABLE && !(lc->pcaps & FW_PORT_CAP32_ANEG)) { in sysctl_autoneg()
9159 lc->requested_aneg = val; in sysctl_autoneg()
9162 if (pi->up_vis > 0) in sysctl_autoneg()
9176 struct adapter *sc = pi->adapter; in sysctl_force_fec()
9177 struct link_config *lc = &pi->link_cfg; in sysctl_force_fec()
9180 val = lc->force_fec; in sysctl_force_fec()
9181 MPASS(val >= -1 && val <= 1); in sysctl_force_fec()
9183 if (rc != 0 || req->newptr == NULL) in sysctl_force_fec()
9185 if (!(lc->pcaps & FW_PORT_CAP32_FORCE_FEC)) in sysctl_force_fec()
9187 if (val < -1 || val > 1) in sysctl_force_fec()
9190 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4ff"); in sysctl_force_fec()
9194 lc->force_fec = val; in sysctl_force_fec()
9197 if (pi->up_vis > 0) in sysctl_force_fec()
9212 mtx_lock(&sc->reg_lock); in sysctl_handle_t4_reg64()
9219 mtx_unlock(&sc->reg_lock); in sysctl_handle_t4_reg64()
9229 struct adapter *sc = pi->adapter; in sysctl_handle_t4_portstat64()
9233 mtx_lock(&sc->reg_lock); in sysctl_handle_t4_portstat64()
9238 for (i = 0; i < sc->params.tp.lb_nchan; i++) { in sysctl_handle_t4_portstat64()
9240 t4_port_reg(sc, pi->tx_chan + i, reg)); in sysctl_handle_t4_portstat64()
9244 mtx_unlock(&sc->reg_lock); in sysctl_handle_t4_portstat64()
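/*
 * Illustrative sketch of the register-access discipline used throughout
 * this block: every raw register read or write is bracketed by
 * sc->reg_lock and skipped when the hardware is off limits (e.g. during a
 * reset).  read_example_reg() and A_EXAMPLE_REG are hypothetical;
 * hw_off_limits() and t4_read_reg() are the driver helpers seen nearby.
 */
static int
read_example_reg(struct adapter *sc, uint32_t *val)
{
	int rc = 0;

	mtx_lock(&sc->reg_lock);
	if (hw_off_limits(sc))
		rc = ENXIO;		/* adapter must not be touched now */
	else
		*val = t4_read_reg(sc, A_EXAMPLE_REG);
	mtx_unlock(&sc->reg_lock);
	return (rc);
}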
9266 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in sysctl_temperature()
9272 /* unknown is returned as 0 but we display -1 in that case */ in sysctl_temperature()
9273 t = val == 0 ? -1 : val; in sysctl_temperature()
9286 if (sc->params.core_vdd == 0) { in sysctl_vdd()
9297 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, in sysctl_vdd()
9303 sc->params.core_vdd = val; in sysctl_vdd()
9306 return (sysctl_handle_int(oidp, &sc->params.core_vdd, 0, req)); in sysctl_vdd()
9316 v = sc->sensor_resets; in sysctl_reset_sensor()
9318 if (rc != 0 || req->newptr == NULL || v <= 0) in sysctl_reset_sensor()
9321 if (sc->params.fw_vers < FW_VERSION32(1, 24, 7, 0) || in sysctl_reset_sensor()
9335 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in sysctl_reset_sensor()
9339 sc->sensor_resets++; in sysctl_reset_sensor()
9352 KASSERT(coreid < sc->params.ncores, in sysctl_loadavg()
9364 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in sysctl_loadavg()
9404 mtx_lock(&sc->reg_lock); in sysctl_cctrl()
9409 mtx_unlock(&sc->reg_lock); in sysctl_cctrl()
9420 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); in sysctl_cctrl()
9440 KASSERT(qid >= 0 && qid < sc->chip_params->cim_num_ibq, in sysctl_cim_ibq()
9442 KASSERT(coreid >= 0 && coreid < sc->params.ncores, in sysctl_cim_ibq()
9447 mtx_lock(&sc->reg_lock); in sysctl_cim_ibq()
9449 rc = -ENXIO; in sysctl_cim_ibq()
9452 mtx_unlock(&sc->reg_lock); in sysctl_cim_ibq()
9454 rc = -rc; in sysctl_cim_ibq()
9485 KASSERT(qid >= 0 && qid < sc->chip_params->cim_num_obq, in sysctl_cim_obq()
9487 KASSERT(coreid >= 0 && coreid < sc->params.ncores, in sysctl_cim_obq()
9492 mtx_lock(&sc->reg_lock); in sysctl_cim_obq()
9494 rc = -ENXIO; in sysctl_cim_obq()
9497 mtx_unlock(&sc->reg_lock); in sysctl_cim_obq()
9499 rc = -rc; in sysctl_cim_obq()
9532 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { in sbuf_cim_la4()
9562 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { in sbuf_cim_la6()
9592 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, in sbuf_cim_la()
9597 mtx_lock(&sc->reg_lock); in sbuf_cim_la()
9601 rc = -t4_cim_read_core(sc, 1, coreid, A_UP_UP_DBG_LA_CFG, 1, in sbuf_cim_la()
9604 rc = -t4_cim_read_la_core(sc, coreid, buf, NULL); in sbuf_cim_la()
9606 mtx_unlock(&sc->reg_lock); in sbuf_cim_la()
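/*
 * Illustrative sketch: the dump-style sysctls in this file build their
 * output in an sbuf bound to the sysctl request and flush it with
 * sbuf_finish().  The handler below is a hypothetical minimal example of
 * that plumbing (assumes the usual <sys/sbuf.h> and <sys/sysctl.h> headers).
 */
static int
sysctl_example_dump(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "example output line\n");

	rc = sbuf_finish(sb);		/* drains into the sysctl request */
	sbuf_delete(sb);
	return (rc);
}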
9640 device_get_nameunit(sc->dev), in dump_cim_regs()
9647 device_get_nameunit(sc->dev), in dump_cim_regs()
9663 device_get_nameunit(sc->dev)); in dump_cimla()
9671 device_get_nameunit(sc->dev), sbuf_data(&sb)); in dump_cimla()
9680 atomic_set_int(&sc->error_flags, ADAP_CIM_ERR); in t4_os_cim_err()
9700 mtx_lock(&sc->reg_lock); in sysctl_cim_ma_la()
9705 mtx_unlock(&sc->reg_lock); in sysctl_cim_ma_la()
9748 mtx_lock(&sc->reg_lock); in sysctl_cim_pif_la()
9753 mtx_unlock(&sc->reg_lock); in sysctl_cim_pif_la()
9791 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ in sysctl_cim_qcfg()
9792 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ in sysctl_cim_qcfg()
9793 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ in sysctl_cim_qcfg()
9798 cim_num_obq = sc->chip_params->cim_num_obq; in sysctl_cim_qcfg()
9808 mtx_lock(&sc->reg_lock); in sysctl_cim_qcfg()
9812 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); in sysctl_cim_qcfg()
9814 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, in sysctl_cim_qcfg()
9820 mtx_unlock(&sc->reg_lock); in sysctl_cim_qcfg()
9839 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), in sysctl_cim_qcfg()
9862 "TP0", "TP1", "TP2", "TP3", "ULP", "SGE0", "SGE1", "NC-SI", in sysctl_cim_qcfg_t7()
9866 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", "SGE0-RX", in sysctl_cim_qcfg_t7()
9875 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "RSVD", "SGE0-RX", in sysctl_cim_qcfg_t7()
9882 mtx_lock(&sc->reg_lock); in sysctl_cim_qcfg_t7()
9886 rc = -t4_cim_read_core(sc, 1, coreid, in sysctl_cim_qcfg_t7()
9891 rc = -t4_cim_read_core(sc, 1, coreid, in sysctl_cim_qcfg_t7()
9899 rc = -t4_cim_read_core(sc, 1, coreid, addr, 1, in sysctl_cim_qcfg_t7()
9907 mtx_unlock(&sc->reg_lock); in sysctl_cim_qcfg_t7()
9934 coreid == 0 ? qname_obq_t7[i - CIM_NUM_IBQ_T7] : in sysctl_cim_qcfg_t7()
9935 qname_obq_sec_t7[i - CIM_NUM_IBQ_T7], in sysctl_cim_qcfg_t7()
9959 mtx_lock(&sc->reg_lock); in sysctl_cpl_stats()
9964 mtx_unlock(&sc->reg_lock); in sysctl_cpl_stats()
9968 if (sc->chip_params->nchan > 2) { in sysctl_cpl_stats()
10002 mtx_lock(&sc->reg_lock); in sysctl_ddp_stats()
10007 mtx_unlock(&sc->reg_lock); in sysctl_ddp_stats()
10032 mtx_lock(&sc->reg_lock); in sysctl_tid_stats()
10037 mtx_unlock(&sc->reg_lock); in sysctl_tid_stats()
10091 struct devlog_params *dparams = &sc->params.devlog; in sbuf_devlog()
10096 KASSERT(coreid >= 0 && coreid < sc->params.ncores, in sbuf_devlog()
10099 if (dparams->addr == 0) in sbuf_devlog()
10102 size = dparams->size / sc->params.ncores; in sbuf_devlog()
10103 addr = dparams->addr + coreid * size; in sbuf_devlog()
10110 mtx_lock(&sc->reg_lock); in sbuf_devlog()
10115 mtx_unlock(&sc->reg_lock); in sbuf_devlog()
10123 if (e->timestamp == 0) in sbuf_devlog()
10126 e->timestamp = be64toh(e->timestamp); in sbuf_devlog()
10127 e->seqno = be32toh(e->seqno); in sbuf_devlog()
10129 e->params[j] = be32toh(e->params[j]); in sbuf_devlog()
10131 if (e->timestamp < ftstamp) { in sbuf_devlog()
10132 ftstamp = e->timestamp; in sbuf_devlog()
10146 if (e->timestamp == 0) in sbuf_devlog()
10150 e->seqno, e->timestamp, in sbuf_devlog()
10151 (e->level < nitems(devlog_level_strings) ? in sbuf_devlog()
10152 devlog_level_strings[e->level] : "UNKNOWN"), in sbuf_devlog()
10153 (e->facility < nitems(devlog_facility_strings) ? in sbuf_devlog()
10154 devlog_facility_strings[e->facility] : "UNKNOWN")); in sbuf_devlog()
10155 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], in sbuf_devlog()
10156 e->params[2], e->params[3], e->params[4], in sbuf_devlog()
10157 e->params[5], e->params[6], e->params[7]); in sbuf_devlog()
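/*
 * Illustrative note: firmware devlog entries are read from card memory in
 * big-endian (wire) order and converted in place, as above, before being
 * formatted.  Standalone sketch with an invented wire structure:
 */
#include <sys/types.h>
#include <sys/endian.h>

struct example_wire_entry {
	uint64_t timestamp;	/* big-endian as read from the card */
	uint32_t seqno;		/* big-endian as read from the card */
};

static void
example_wire_fixup(struct example_wire_entry *e)
{
	e->timestamp = be64toh(e->timestamp);
	e->seqno = be32toh(e->seqno);
}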
10177 if (coreid == -1) { in sysctl_devlog()
10178 /* -1 means all cores */ in sysctl_devlog()
10179 for (i = rc = 0; i < sc->params.ncores && rc == 0; i++) { in sysctl_devlog()
10180 if (sc->params.ncores > 1) in sysctl_devlog()
10185 KASSERT(coreid >= 0 && coreid < sc->params.ncores, in sysctl_devlog()
10203 device_get_nameunit(sc->dev)); in dump_devlog()
10206 for (i = rc = 0; i < sc->params.ncores && rc == 0; i++) { in dump_devlog()
10207 if (sc->params.ncores > 1) in dump_devlog()
10214 device_get_nameunit(sc->dev), sbuf_data(&sb)); in dump_devlog()
10226 int i, nchan = sc->chip_params->nchan; in sysctl_fcoe_stats()
10229 mtx_lock(&sc->reg_lock); in sysctl_fcoe_stats()
10236 mtx_unlock(&sc->reg_lock); in sysctl_fcoe_stats()
10285 mtx_lock(&sc->reg_lock); in sysctl_hw_sched()
10287 mtx_unlock(&sc->reg_lock); in sysctl_hw_sched()
10295 mtx_unlock(&sc->reg_lock); in sysctl_hw_sched()
10302 sbuf_printf(sb, "\n %u %-5s %u ", i, in sysctl_hw_sched()
10331 uint64_t *p0, *p1; in sysctl_lb_stats() local
10350 for (i = 0; i < sc->chip_params->nchan; i += 2) { in sysctl_lb_stats()
10351 mtx_lock(&sc->reg_lock); in sysctl_lb_stats()
10358 mtx_unlock(&sc->reg_lock); in sysctl_lb_stats()
10362 p0 = &s[0].octets; in sysctl_lb_stats()
10368 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], in sysctl_lb_stats()
10369 *p0++, *p1++); in sysctl_lb_stats()
10384 struct link_config *lc = &pi->link_cfg; in sysctl_linkdnrc()
10391 if (lc->link_ok || lc->link_down_rc == 255) in sysctl_linkdnrc()
10394 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc)); in sysctl_linkdnrc()
10411 const uint64_t v1 = ((const struct mem_desc *)a)->base; in mem_desc_cmp()
10412 const uint64_t v2 = ((const struct mem_desc *)b)->base; in mem_desc_cmp()
10415 return (-1); in mem_desc_cmp()
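/*
 * Illustrative note: mem_desc_cmp() above is the comparator handed to
 * qsort(9) so that memory regions are reported in ascending base-address
 * order.  A standalone comparator of the same shape (struct example_region
 * is invented):
 */
#include <sys/types.h>

struct example_region {
	uint64_t base;
};

static int
example_region_cmp(const void *a, const void *b)
{
	const uint64_t v1 = ((const struct example_region *)a)->base;
	const uint64_t v2 = ((const struct example_region *)b)->base;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);
	return (0);
}

/* usage: qsort(regions, nregions, sizeof(regions[0]), example_region_cmp); */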
10430 size = to - from + 1; in mem_region_show()
10435 sbuf_printf(sb, "%-18s 0x%012jx-0x%012jx [%ju]\n", name, in mem_region_show()
10438 sbuf_printf(sb, "%-18s 0x%08jx-0x%08jx [%ju]\n", name, in mem_region_show()
10461 "ULPTX state:", "RoCE RRQ region:", "On-chip queues:", in sysctl_meminfo()
10480 mtx_lock(&sc->reg_lock); in sysctl_meminfo()
10585 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); in sysctl_meminfo()
10586 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); in sysctl_meminfo()
10587 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); in sysctl_meminfo()
10588 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); in sysctl_meminfo()
10589 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); in sysctl_meminfo()
10590 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); in sysctl_meminfo()
10591 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); in sysctl_meminfo()
10592 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); in sysctl_meminfo()
10593 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); in sysctl_meminfo()
10596 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); in sysctl_meminfo()
10597 md->limit = md->base - 1 + in sysctl_meminfo()
10602 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); in sysctl_meminfo()
10603 md->limit = md->base - 1 + in sysctl_meminfo()
10610 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); in sysctl_meminfo()
10612 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); in sysctl_meminfo()
10613 md->limit = 0; in sysctl_meminfo()
10615 md->base = 0; in sysctl_meminfo()
10616 md->idx = nitems(region); /* hide it */ in sysctl_meminfo()
10622 md->base = (uint64_t)t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT) << shift; \ in sysctl_meminfo()
10623 md->limit = (uint64_t)t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) << shift; \ in sysctl_meminfo()
10624 md->limit += (1 << shift) - 1; \ in sysctl_meminfo()
10629 md->base = 0; \ in sysctl_meminfo()
10630 md->idx = nitems(region); \ in sysctl_meminfo()
10667 md->base = 0; in sysctl_meminfo()
10669 md->idx = nitems(region); in sysctl_meminfo()
10682 md->base = t4_read_reg(sc, A_SGE_DBVFIFO_BADDR); in sysctl_meminfo()
10683 md->limit = md->base + size - 1; in sysctl_meminfo()
10685 md->idx = nitems(region); in sysctl_meminfo()
10689 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); in sysctl_meminfo()
10690 md->limit = 0; in sysctl_meminfo()
10692 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); in sysctl_meminfo()
10693 md->limit = 0; in sysctl_meminfo()
10698 md->base = lo; in sysctl_meminfo()
10700 md->base = 0; in sysctl_meminfo()
10701 md->idx = nitems(region); in sysctl_meminfo()
10705 md->base = sc->vres.ocq.start; in sysctl_meminfo()
10706 if (sc->vres.ocq.size) in sysctl_meminfo()
10707 md->limit = md->base + sc->vres.ocq.size - 1; in sysctl_meminfo()
10709 md->idx = nitems(region); /* hide it */ in sysctl_meminfo()
10712 /* add any address-space holes, there can be up to 3 */ in sysctl_meminfo()
10713 for (n = 0; n < i - 1; n++) in sysctl_meminfo()
10715 (md++)->base = avail[n].limit; in sysctl_meminfo()
10717 (md++)->base = avail[n].limit; in sysctl_meminfo()
10719 n = md - mem; in sysctl_meminfo()
10725 avail[lo].limit - 1); in sysctl_meminfo()
10732 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; in sysctl_meminfo()
10738 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; in sysctl_meminfo()
10739 if (hi != lo - 1) { in sysctl_meminfo()
10745 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; in sysctl_meminfo()
10746 if (hi != lo - 1) in sysctl_meminfo()
10772 sbuf_printf(sb, "%u p-structs (%u free)\n", in sysctl_meminfo()
10792 for (i = 0; i < sc->chip_params->nchan; i++) { in sysctl_meminfo()
10810 mtx_unlock(&sc->reg_lock); in sysctl_meminfo()
10840 " VF Replication P0 P1 P2 P3 ML"); in sysctl_mps_tcam()
10842 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { in sysctl_mps_tcam()
10847 mtx_lock(&sc->reg_lock); in sysctl_mps_tcam()
10854 mtx_unlock(&sc->reg_lock); in sysctl_mps_tcam()
10860 mtx_lock(&sc->reg_lock); in sysctl_mps_tcam()
10867 mtx_unlock(&sc->reg_lock); in sysctl_mps_tcam()
10875 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1); in sysctl_mps_tcam()
10897 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, in sysctl_mps_tcam()
10942 " P0 P1 P2 P3 ML"); in sysctl_mps_tcam_t6()
10945 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { in sysctl_mps_tcam_t6()
10956 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); in sysctl_mps_tcam_t6()
10957 mtx_lock(&sc->reg_lock); in sysctl_mps_tcam_t6()
10967 mtx_unlock(&sc->reg_lock); in sysctl_mps_tcam_t6()
10987 mtx_lock(&sc->reg_lock); in sysctl_mps_tcam_t6()
10997 mtx_unlock(&sc->reg_lock); in sysctl_mps_tcam_t6()
11012 mtx_lock(&sc->reg_lock); in sysctl_mps_tcam_t6()
11019 mtx_unlock(&sc->reg_lock); in sysctl_mps_tcam_t6()
11025 "%012jx %06x %06x - - %3c" in sysctl_mps_tcam_t6()
11031 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); in sysctl_mps_tcam_t6()
11034 "%012jx - - ", i, addr[0], addr[1], in sysctl_mps_tcam_t6()
11041 sbuf_printf(sb, " - N "); in sysctl_mps_tcam_t6()
11043 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", in sysctl_mps_tcam_t6()
11047 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); in sysctl_mps_tcam_t6()
11071 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, in sysctl_mps_tcam_t6()
11122 " P0 P1 P2 P3 ML"); in sysctl_mps_tcam_t7()
11125 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { in sysctl_mps_tcam_t7()
11138 ctl |= V_CTLTCAMINDEX(i - 256) | V_T7_CTLTCAMSEL(1); in sysctl_mps_tcam_t7()
11146 ctl |= V_CTLTCAMINDEX(i - 512) | V_T7_CTLTCAMSEL(1); in sysctl_mps_tcam_t7()
11148 ctl |= V_CTLTCAMINDEX(i - 1024) | V_T7_CTLTCAMSEL(2); in sysctl_mps_tcam_t7()
11151 mtx_lock(&sc->reg_lock); in sysctl_mps_tcam_t7()
11161 mtx_unlock(&sc->reg_lock); in sysctl_mps_tcam_t7()
11181 mtx_lock(&sc->reg_lock); in sysctl_mps_tcam_t7()
11191 mtx_unlock(&sc->reg_lock); in sysctl_mps_tcam_t7()
11206 mtx_lock(&sc->reg_lock); in sysctl_mps_tcam_t7()
11220 mtx_unlock(&sc->reg_lock); in sysctl_mps_tcam_t7()
11226 "%012jx %06x %06x - - %3c" in sysctl_mps_tcam_t7()
11232 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); in sysctl_mps_tcam_t7()
11235 "%012jx - - ", i, addr[0], addr[1], in sysctl_mps_tcam_t7()
11242 sbuf_printf(sb, " - N "); in sysctl_mps_tcam_t7()
11244 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", in sysctl_mps_tcam_t7()
11248 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); in sysctl_mps_tcam_t7()
11271 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, in sysctl_mps_tcam_t7()
11315 mtx_lock(&sc->reg_lock); in sysctl_path_mtus()
11320 mtx_unlock(&sc->reg_lock); in sysctl_path_mtus()
11358 mtx_lock(&sc->reg_lock); in sysctl_pm_stats()
11367 mtx_unlock(&sc->reg_lock); in sysctl_pm_stats()
11377 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], in sysctl_pm_stats()
11383 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], in sysctl_pm_stats()
11390 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], in sysctl_pm_stats()
11392 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], in sysctl_pm_stats()
11400 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], in sysctl_pm_stats()
11402 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], in sysctl_pm_stats()
11409 sbuf_printf(sb, "%-40s %u\n", "ReqWrite", stats[i++]); in sysctl_pm_stats()
11410 sbuf_printf(sb, "%-40s %u\n", "ReqReadInv", stats[i++]); in sysctl_pm_stats()
11411 sbuf_printf(sb, "%-40s %u\n", "ReqReadNoInv", stats[i++]); in sysctl_pm_stats()
11412 sbuf_printf(sb, "%-40s %u\n", "Write Split Request", in sysctl_pm_stats()
11414 sbuf_printf(sb, "%-40s %u\n", in sysctl_pm_stats()
11416 sbuf_printf(sb, "%-40s %u\n", in sysctl_pm_stats()
11419 sbuf_printf(sb, "%-40s %u\n", "Write Hit", stats[i++]); in sysctl_pm_stats()
11420 sbuf_printf(sb, "%-40s %u\n", "Normal Read Hit", in sysctl_pm_stats()
11422 sbuf_printf(sb, "%-40s %u\n", "Feedback Read Hit", in sysctl_pm_stats()
11424 sbuf_printf(sb, "%-40s %u\n", "Normal Read Hit Full Avail", in sysctl_pm_stats()
11426 sbuf_printf(sb, "%-40s %u\n", "Normal Read Hit Full UnAvail", in sysctl_pm_stats()
11428 sbuf_printf(sb, "%-40s %u\n", in sysctl_pm_stats()
11431 sbuf_printf(sb, "%-40s %u\n", "FB Read Hit Full Avail", in sysctl_pm_stats()
11433 sbuf_printf(sb, "%-40s %u\n", "FB Read Hit Full UnAvail", in sysctl_pm_stats()
11435 sbuf_printf(sb, "%-40s %u\n", "FB Read Hit Partial Avail", in sysctl_pm_stats()
11437 sbuf_printf(sb, "%-40s %u\n", "Normal Read Full Free", in sysctl_pm_stats()
11439 sbuf_printf(sb, "%-40s %u\n", in sysctl_pm_stats()
11440 "Normal Read Part-avail Mul-Regions", in sysctl_pm_stats()
11442 sbuf_printf(sb, "%-40s %u\n", in sysctl_pm_stats()
11443 "FB Read Part-avail Mul-Regions", in sysctl_pm_stats()
11445 sbuf_printf(sb, "%-40s %u\n", "Write Miss FL Used", in sysctl_pm_stats()
11447 sbuf_printf(sb, "%-40s %u\n", "Write Miss LRU Used", in sysctl_pm_stats()
11449 sbuf_printf(sb, "%-40s %u\n", in sysctl_pm_stats()
11450 "Write Miss LRU-Multiple Evict", stats[i++]); in sysctl_pm_stats()
11451 sbuf_printf(sb, "%-40s %u\n", in sysctl_pm_stats()
11453 sbuf_printf(sb, "%-40s %u\n", in sysctl_pm_stats()
11455 sbuf_printf(sb, "%-40s %u\n", "Write Overflow Eviction", in sysctl_pm_stats()
11457 sbuf_printf(sb, "%-40s %u", "Read Overflow Eviction", in sysctl_pm_stats()
11476 mtx_lock(&sc->reg_lock); in sysctl_rdma_stats()
11481 mtx_unlock(&sc->reg_lock); in sysctl_rdma_stats()
11507 mtx_lock(&sc->reg_lock); in sysctl_tcp_stats()
11512 mtx_unlock(&sc->reg_lock); in sysctl_tcp_stats()
11544 struct tid_info *t = &sc->tids; in sysctl_tids()
11551 if (t->natids) { in sysctl_tids()
11552 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, in sysctl_tids()
11553 t->atids_in_use); in sysctl_tids()
11556 if (t->nhpftids) { in sysctl_tids()
11557 sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n", in sysctl_tids()
11558 t->hpftid_base, t->hpftid_end, t->hpftids_in_use); in sysctl_tids()
11561 if (t->ntids) { in sysctl_tids()
11564 mtx_lock(&sc->reg_lock); in sysctl_tids()
11577 mtx_unlock(&sc->reg_lock); in sysctl_tids()
11584 sbuf_printf(sb, "%u-%u, ", t->tid_base, x - 1); in sysctl_tids()
11585 sbuf_printf(sb, "%u-%u", y, t->ntids - 1); in sysctl_tids()
11587 sbuf_printf(sb, "%u-%u", t->tid_base, t->tid_base + in sysctl_tids()
11588 t->ntids - 1); in sysctl_tids()
11591 atomic_load_acq_int(&t->tids_in_use)); in sysctl_tids()
11594 if (t->nstids) { in sysctl_tids()
11595 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, in sysctl_tids()
11596 t->stid_base + t->nstids - 1, t->stids_in_use); in sysctl_tids()
11599 if (t->nftids) { in sysctl_tids()
11600 sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base, in sysctl_tids()
11601 t->ftid_end, t->ftids_in_use); in sysctl_tids()
11604 if (t->netids) { in sysctl_tids()
11605 sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base, in sysctl_tids()
11606 t->etid_base + t->netids - 1, t->etids_in_use); in sysctl_tids()
11609 mtx_lock(&sc->reg_lock); in sysctl_tids()
11616 mtx_unlock(&sc->reg_lock); in sysctl_tids()
11639 mtx_lock(&sc->reg_lock); in sysctl_tp_err_stats()
11644 mtx_unlock(&sc->reg_lock); in sysctl_tp_err_stats()
11652 if (sc->chip_params->nchan > 2) { in sysctl_tp_err_stats()
11717 mtx_lock(&sc->reg_lock); in sysctl_tnl_stats()
11722 mtx_unlock(&sc->reg_lock); in sysctl_tnl_stats()
11730 if (sc->chip_params->nchan > 2) { in sysctl_tnl_stats()
11757 struct tp_params *tpp = &sc->params.tp; in sysctl_tp_la_mask()
11761 mask = tpp->la_mask >> 16; in sysctl_tp_la_mask()
11763 if (rc != 0 || req->newptr == NULL) in sysctl_tp_la_mask()
11767 mtx_lock(&sc->reg_lock); in sysctl_tp_la_mask()
11771 tpp->la_mask = mask << 16; in sysctl_tp_la_mask()
11773 tpp->la_mask); in sysctl_tp_la_mask()
11775 mtx_unlock(&sc->reg_lock); in sysctl_tp_la_mask()
11792 while (f->name) { in field_desc_show()
11793 uint64_t mask = (1ULL << f->width) - 1; in field_desc_show()
11794 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, in field_desc_show()
11795 ((uintmax_t)v >> f->start) & mask); in field_desc_show()
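/*
 * Illustrative sketch: field_desc_show() above walks a table of
 * {name, start bit, width} descriptors and extracts each field from a
 * 64-bit word with ((v >> start) & ((1 << width) - 1)).  A compact
 * standalone version of that decoding (the descriptor type and names are
 * invented):
 */
#include <sys/param.h>
#include <sys/systm.h>

struct example_field_desc {
	const char *name;	/* table is terminated by a NULL name */
	unsigned int start;	/* least significant bit of the field */
	unsigned int width;	/* field width in bits (< 64) */
};

static void
example_decode_fields(uint64_t v, const struct example_field_desc *f)
{
	for (; f->name != NULL; f++) {
		uint64_t mask = (1ULL << f->width) - 1;

		printf("%s: %ju\n", f->name,
		    (uintmax_t)((v >> f->start) & mask));
	}
}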
11955 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) in tp_la_show2()
11966 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) in tp_la_show3()
11987 mtx_lock(&sc->reg_lock); in sysctl_tp_la()
12006 mtx_unlock(&sc->reg_lock); in sysctl_tp_la()
12029 mtx_lock(&sc->reg_lock); in sysctl_tx_rate()
12034 mtx_unlock(&sc->reg_lock); in sysctl_tx_rate()
12042 if (sc->chip_params->nchan > 2) { in sysctl_tx_rate()
12079 mtx_lock(&sc->reg_lock); in sysctl_ulprx_la()
12084 mtx_unlock(&sc->reg_lock); in sysctl_ulprx_la()
12113 mtx_lock(&sc->reg_lock); in sysctl_wcwr_stats()
12121 mtx_unlock(&sc->reg_lock); in sysctl_wcwr_stats()
12158 rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset); in sysctl_cpus()
12181 val = atomic_load_int(&sc->num_resets); in sysctl_reset()
12183 if (rc != 0 || req->newptr == NULL) in sysctl_reset()
12188 atomic_store_int(&sc->num_resets, 0); in sysctl_reset()
12198 taskqueue_enqueue(reset_tq, &sc->reset_task); in sysctl_reset()
12210 v = sc->tt.tls; in sysctl_tls()
12212 if (rc != 0 || req->newptr == NULL) in sysctl_tls()
12215 if (v != 0 && !(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS)) in sysctl_tls()
12224 sc->tt.tls = !!v; in sysctl_tls()
12226 for_each_vi(sc->port[i], j, vi) { in sysctl_tls()
12227 if (vi->flags & VI_INIT_DONE) in sysctl_tls()
12228 t4_update_fl_bufsize(vi->ifp); in sysctl_tls()
12258 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; in sysctl_tp_tick()
12260 mtx_lock(&sc->reg_lock); in sysctl_tp_tick()
12262 res = (u_int)-1; in sysctl_tp_tick()
12265 mtx_unlock(&sc->reg_lock); in sysctl_tp_tick()
12266 if (res == (u_int)-1) in sysctl_tp_tick()
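/*
 * Worked example for the conversion used by the TP timer sysctls above:
 * sc->params.vpd.cclk is the core clock in kHz, so 1000000000 / cclk is
 * the core-clock period in picoseconds.  Assuming a hypothetical 250 MHz
 * core clock:
 *
 *	cclk    = 250000 kHz
 *	cclk_ps = 1000000000 / 250000 = 4000 ps  (4 ns per core-clock tick)
 *
 * A timer programmed to N core-clock ticks therefore corresponds to
 * N * cclk_ps / 1000000 microseconds.
 */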
12297 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; in sysctl_tp_dack_timer()
12299 mtx_lock(&sc->reg_lock); in sysctl_tp_dack_timer()
12308 mtx_unlock(&sc->reg_lock); in sysctl_tp_dack_timer()
12324 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; in sysctl_tp_timer()
12331 mtx_lock(&sc->reg_lock); in sysctl_tp_timer()
12343 mtx_unlock(&sc->reg_lock); in sysctl_tp_timer()
12363 mtx_lock(&sc->reg_lock); in sysctl_tp_shift_cnt()
12370 mtx_unlock(&sc->reg_lock); in sysctl_tp_shift_cnt()
12388 mtx_lock(&sc->reg_lock); in sysctl_tp_backoff()
12395 mtx_unlock(&sc->reg_lock); in sysctl_tp_backoff()
12406 struct adapter *sc = vi->adapter; in sysctl_holdoff_tmr_idx_ofld()
12411 idx = vi->ofld_tmr_idx; in sysctl_holdoff_tmr_idx_ofld()
12414 if (rc != 0 || req->newptr == NULL) in sysctl_holdoff_tmr_idx_ofld()
12425 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1); in sysctl_holdoff_tmr_idx_ofld()
12428 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); in sysctl_holdoff_tmr_idx_ofld()
12430 ofld_rxq->iq.intr_params = v; in sysctl_holdoff_tmr_idx_ofld()
12433 vi->ofld_tmr_idx = idx; in sysctl_holdoff_tmr_idx_ofld()
12443 struct adapter *sc = vi->adapter; in sysctl_holdoff_pktc_idx_ofld()
12446 idx = vi->ofld_pktc_idx; in sysctl_holdoff_pktc_idx_ofld()
12449 if (rc != 0 || req->newptr == NULL) in sysctl_holdoff_pktc_idx_ofld()
12452 if (idx < -1 || idx >= SGE_NCOUNTERS) in sysctl_holdoff_pktc_idx_ofld()
12460 if (vi->flags & VI_INIT_DONE) in sysctl_holdoff_pktc_idx_ofld()
12463 vi->ofld_pktc_idx = idx; in sysctl_holdoff_pktc_idx_ofld()
12476 if (len < sc->chip_params->sge_ctxt_size) in get_sge_context()
12493 if (sc->flags & FW_OK) { in get_sge_context()
12494 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cid, mem_id, data); in get_sge_context()
12503 rc = -t4_sge_ctxt_rd_bd(sc, cid, mem_id, data); in get_sge_context()
12530 if (sc->flags & FULL_INIT_DONE && in load_fw()
12531 (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) { in load_fw()
12536 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); in load_fw()
12538 rc = copyin(fw->data, fw_data, fw->len); in load_fw()
12540 rc = -t4_load_fw(sc, fw_data, fw->len); in load_fw()
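/*
 * Illustrative sketch of the pattern shared by load_fw()/load_cfg()/
 * load_boot()/load_bootcfg() above: bound the request, copy the payload
 * from userspace into a kernel buffer, hand it to the t4_load_*() routine
 * (whose negative return value is flipped into a positive errno), and free
 * the buffer.  example_load() and its 1 MB cap are invented.
 */
static int
example_load(struct adapter *sc, const void *udata, size_t len)
{
	void *buf;
	int rc;

	if (len == 0 || len > 1024 * 1024)
		return (EINVAL);
	buf = malloc(len, M_CXGBE, M_WAITOK);
	rc = copyin(udata, buf, len);
	if (rc == 0)
		rc = -t4_load_fw(sc, buf, len);	/* t4_* returns -errno */
	free(buf, M_CXGBE);
	return (rc);
}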
12563 if (cfg->len == 0) { in load_cfg()
12565 rc = -t4_load_cfg(sc, NULL, 0); in load_cfg()
12569 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK); in load_cfg()
12571 rc = copyin(cfg->data, cfg_data, cfg->len); in load_cfg()
12573 rc = -t4_load_cfg(sc, cfg_data, cfg->len); in load_cfg()
12588 if (br->len > 1024 * 1024) in load_boot()
12591 if (br->pf_offset == 0) { in load_boot()
12593 if (br->pfidx_addr > 7) in load_boot()
12595 offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr, in load_boot()
12597 } else if (br->pf_offset == 1) { in load_boot()
12599 offset = G_OFFSET(br->pfidx_addr); in load_boot()
12613 if (br->len == 0) { in load_boot()
12615 rc = -t4_load_boot(sc, NULL, offset, 0); in load_boot()
12619 br_data = malloc(br->len, M_CXGBE, M_WAITOK); in load_boot()
12621 rc = copyin(br->data, br_data, br->len); in load_boot()
12623 rc = -t4_load_boot(sc, br_data, offset, br->len); in load_boot()
12646 if (bc->len == 0) { in load_bootcfg()
12648 rc = -t4_load_bootcfg(sc, NULL, 0); in load_bootcfg()
12652 bc_data = malloc(bc->len, M_CXGBE, M_WAITOK); in load_bootcfg()
12654 rc = copyin(bc->data, bc_data, bc->len); in load_bootcfg()
12656 rc = -t4_load_bootcfg(sc, bc_data, bc->len); in load_bootcfg()
12672 buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO); in cudbg_dump()
12683 cudbg->adap = sc; in cudbg_dump()
12684 cudbg->print = (cudbg_print_cb)printf; in cudbg_dump()
12687 device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n", in cudbg_dump()
12688 __func__, dump->wr_flash, dump->len, dump->data); in cudbg_dump()
12691 if (dump->wr_flash) in cudbg_dump()
12692 cudbg->use_flash = 1; in cudbg_dump()
12693 MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap)); in cudbg_dump()
12694 memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap)); in cudbg_dump()
12696 rc = cudbg_collect(handle, buf, &dump->len); in cudbg_dump()
12700 rc = copyout(buf, dump->data, dump->len); in cudbg_dump()
12716 r = &op->rule[0]; in free_offload_policy()
12717 for (i = 0; i < op->nrules; i++, r++) { in free_offload_policy()
12718 free(r->bpf_prog.bf_insns, M_CXGBE); in free_offload_policy()
12720 free(op->rule, M_CXGBE); in free_offload_policy()
12737 if (uop->nrules == 0) { in set_offload_policy()
12741 } else if (uop->nrules > 256) { /* arbitrary */ in set_offload_policy()
12747 op->nrules = uop->nrules; in set_offload_policy()
12748 len = op->nrules * sizeof(struct offload_rule); in set_offload_policy()
12749 op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK); in set_offload_policy()
12750 rc = copyin(uop->rule, op->rule, len); in set_offload_policy()
12752 free(op->rule, M_CXGBE); in set_offload_policy()
12757 r = &op->rule[0]; in set_offload_policy()
12758 for (i = 0; i < op->nrules; i++, r++) { in set_offload_policy()
12761 if (r->open_type != OPEN_TYPE_LISTEN && in set_offload_policy()
12762 r->open_type != OPEN_TYPE_ACTIVE && in set_offload_policy()
12763 r->open_type != OPEN_TYPE_PASSIVE && in set_offload_policy()
12764 r->open_type != OPEN_TYPE_DONTCARE) { in set_offload_policy()
12771 op->nrules = i; in set_offload_policy()
12777 s = &r->settings; in set_offload_policy()
12778 if ((s->offload != 0 && s->offload != 1) || in set_offload_policy()
12779 s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED || in set_offload_policy()
12780 s->sched_class < -1 || in set_offload_policy()
12781 s->sched_class >= sc->params.nsched_cls) { in set_offload_policy()
12786 bf = &r->bpf_prog; in set_offload_policy()
12787 u = bf->bf_insns; /* userspace ptr */ in set_offload_policy()
12788 bf->bf_insns = NULL; in set_offload_policy()
12789 if (bf->bf_len == 0) { in set_offload_policy()
12793 len = bf->bf_len * sizeof(*bf->bf_insns); in set_offload_policy()
12794 bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK); in set_offload_policy()
12795 rc = copyin(u, bf->bf_insns, len); in set_offload_policy()
12799 if (!bpf_validate(bf->bf_insns, bf->bf_len)) { in set_offload_policy()
12805 rw_wlock(&sc->policy_lock); in set_offload_policy()
12806 old = sc->policy; in set_offload_policy()
12807 sc->policy = op; in set_offload_policy()
12808 rw_wunlock(&sc->policy_lock); in set_offload_policy()
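/*
 * Illustrative note: the install above ends in a plain pointer swap under
 * the writer lock.  Readers look up sc->policy while holding
 * sc->policy_lock shared, so once the new pointer is published and the
 * write lock dropped, the old table can be freed safely.  Sketch of that
 * publish step; install_policy() and the exact policy type name are
 * assumptions here.
 */
static void
install_policy(struct adapter *sc, struct t4_offload_policy *op)
{
	struct t4_offload_policy *old;

	rw_wlock(&sc->policy_lock);
	old = sc->policy;
	sc->policy = op;		/* publish the new rule table */
	rw_wunlock(&sc->policy_lock);

	free_offload_policy(old);	/* no reader can still reference it */
}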
12823 mtx_lock(&sc->reg_lock); in read_card_mem()
12827 rc = validate_mem_range(sc, mr->addr, mr->len); in read_card_mem()
12828 mtx_unlock(&sc->reg_lock); in read_card_mem()
12832 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); in read_card_mem()
12833 addr = mr->addr; in read_card_mem()
12834 remaining = mr->len; in read_card_mem()
12835 dst = (void *)mr->data; in read_card_mem()
12839 mtx_lock(&sc->reg_lock); in read_card_mem()
12844 mtx_unlock(&sc->reg_lock); in read_card_mem()
12853 remaining -= n; in read_card_mem()
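/*
 * Illustrative sketch of the loop in read_card_mem() above: a large region
 * is streamed to userspace through a bounded bounce buffer -- read a chunk
 * under reg_lock, copyout(), advance, repeat.  read_chunk() stands in for
 * the locked adapter-memory read; its prototype is invented.
 */
static int
example_stream_out(struct adapter *sc, uint32_t addr, uint32_t remaining,
    void *udst)
{
	void *buf;
	uint32_t n;
	int rc = 0;

	buf = malloc(min(remaining, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
	while (remaining > 0 && rc == 0) {
		n = min(remaining, MAX_READ_BUF_SIZE);
		rc = read_chunk(sc, addr, buf, n);	/* locked HW read */
		if (rc == 0)
			rc = copyout(buf, udst, n);
		addr += n;
		udst = (char *)udst + n;
		remaining -= n;
	}
	free(buf, M_CXGBE);
	return (rc);
}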
12867 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) in read_i2c()
12870 if (i2cd->len > sizeof(i2cd->data)) in read_i2c()
12879 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, in read_i2c()
12880 i2cd->offset, i2cd->len, &i2cd->data[0]); in read_i2c()
12902 if (port_id >= sc->params.nports) in clear_stats()
12904 pi = sc->port[port_id]; in clear_stats()
12908 mtx_lock(&sc->reg_lock); in clear_stats()
12911 t4_clr_port_stats(sc, pi->hw_port); in clear_stats()
12913 if (pi->fcs_reg != -1) in clear_stats()
12914 pi->fcs_base = t4_read_reg64(sc, in clear_stats()
12915 t4_port_reg(sc, pi->tx_chan, pi->fcs_reg)); in clear_stats()
12917 pi->stats.rx_fcs_err = 0; in clear_stats()
12920 if (vi->flags & VI_INIT_DONE) in clear_stats()
12921 t4_clr_vi_stats(sc, vi->vin); in clear_stats()
12923 chan_map = pi->rx_e_chan_map; in clear_stats()
12926 i = ffs(chan_map) - 1; in clear_stats()
12932 mtx_unlock(&sc->reg_lock); in clear_stats()
12933 pi->tx_parse_error = 0; in clear_stats()
12934 pi->tnl_cong_drops = 0; in clear_stats()
12941 if (vi->flags & VI_INIT_DONE) { in clear_stats()
12945 rxq->lro.lro_queued = 0; in clear_stats()
12946 rxq->lro.lro_flushed = 0; in clear_stats()
12948 rxq->rxcsum = 0; in clear_stats()
12949 rxq->vlan_extraction = 0; in clear_stats()
12950 rxq->vxlan_rxcsum = 0; in clear_stats()
12952 rxq->fl.cl_allocated = 0; in clear_stats()
12953 rxq->fl.cl_recycled = 0; in clear_stats()
12954 rxq->fl.cl_fast_recycled = 0; in clear_stats()
12958 txq->txcsum = 0; in clear_stats()
12959 txq->tso_wrs = 0; in clear_stats()
12960 txq->vlan_insertion = 0; in clear_stats()
12961 txq->imm_wrs = 0; in clear_stats()
12962 txq->sgl_wrs = 0; in clear_stats()
12963 txq->txpkt_wrs = 0; in clear_stats()
12964 txq->txpkts0_wrs = 0; in clear_stats()
12965 txq->txpkts1_wrs = 0; in clear_stats()
12966 txq->txpkts0_pkts = 0; in clear_stats()
12967 txq->txpkts1_pkts = 0; in clear_stats()
12968 txq->txpkts_flush = 0; in clear_stats()
12969 txq->raw_wrs = 0; in clear_stats()
12970 txq->vxlan_tso_wrs = 0; in clear_stats()
12971 txq->vxlan_txcsum = 0; in clear_stats()
12972 txq->kern_tls_records = 0; in clear_stats()
12973 txq->kern_tls_short = 0; in clear_stats()
12974 txq->kern_tls_partial = 0; in clear_stats()
12975 txq->kern_tls_full = 0; in clear_stats()
12976 txq->kern_tls_octets = 0; in clear_stats()
12977 txq->kern_tls_waste = 0; in clear_stats()
12978 txq->kern_tls_header = 0; in clear_stats()
12979 txq->kern_tls_fin_short = 0; in clear_stats()
12980 txq->kern_tls_cbc = 0; in clear_stats()
12981 txq->kern_tls_gcm = 0; in clear_stats()
12983 txq->kern_tls_options = 0; in clear_stats()
12984 txq->kern_tls_fin = 0; in clear_stats()
12986 txq->kern_tls_ghash_received = 0; in clear_stats()
12987 txq->kern_tls_ghash_requested = 0; in clear_stats()
12988 txq->kern_tls_lso = 0; in clear_stats()
12989 txq->kern_tls_partial_ghash = 0; in clear_stats()
12990 txq->kern_tls_splitmode = 0; in clear_stats()
12991 txq->kern_tls_trailer = 0; in clear_stats()
12993 mp_ring_reset_stats(txq->r); in clear_stats()
12998 ofld_txq->wrq.tx_wrs_direct = 0; in clear_stats()
12999 ofld_txq->wrq.tx_wrs_copied = 0; in clear_stats()
13000 counter_u64_zero(ofld_txq->tx_iscsi_pdus); in clear_stats()
13001 counter_u64_zero(ofld_txq->tx_iscsi_octets); in clear_stats()
13002 counter_u64_zero(ofld_txq->tx_iscsi_iso_wrs); in clear_stats()
13003 counter_u64_zero(ofld_txq->tx_nvme_pdus); in clear_stats()
13004 counter_u64_zero(ofld_txq->tx_nvme_octets); in clear_stats()
13005 counter_u64_zero(ofld_txq->tx_nvme_iso_wrs); in clear_stats()
13006 counter_u64_zero(ofld_txq->tx_aio_jobs); in clear_stats()
13007 counter_u64_zero(ofld_txq->tx_aio_octets); in clear_stats()
13008 counter_u64_zero(ofld_txq->tx_toe_tls_records); in clear_stats()
13009 counter_u64_zero(ofld_txq->tx_toe_tls_octets); in clear_stats()
13014 ofld_rxq->fl.cl_allocated = 0; in clear_stats()
13015 ofld_rxq->fl.cl_recycled = 0; in clear_stats()
13016 ofld_rxq->fl.cl_fast_recycled = 0; in clear_stats()
13018 ofld_rxq->rx_iscsi_ddp_setup_ok); in clear_stats()
13020 ofld_rxq->rx_iscsi_ddp_setup_error); in clear_stats()
13021 ofld_rxq->rx_iscsi_ddp_pdus = 0; in clear_stats()
13022 ofld_rxq->rx_iscsi_ddp_octets = 0; in clear_stats()
13023 ofld_rxq->rx_iscsi_fl_pdus = 0; in clear_stats()
13024 ofld_rxq->rx_iscsi_fl_octets = 0; in clear_stats()
13026 ofld_rxq->rx_nvme_ddp_setup_ok); in clear_stats()
13028 ofld_rxq->rx_nvme_ddp_setup_no_stag); in clear_stats()
13030 ofld_rxq->rx_nvme_ddp_setup_error); in clear_stats()
13031 counter_u64_zero(ofld_rxq->rx_nvme_ddp_pdus); in clear_stats()
13032 counter_u64_zero(ofld_rxq->rx_nvme_ddp_octets); in clear_stats()
13033 counter_u64_zero(ofld_rxq->rx_nvme_fl_pdus); in clear_stats()
13034 counter_u64_zero(ofld_rxq->rx_nvme_fl_octets); in clear_stats()
13036 ofld_rxq->rx_nvme_invalid_headers); in clear_stats()
13038 ofld_rxq->rx_nvme_header_digest_errors); in clear_stats()
13040 ofld_rxq->rx_nvme_data_digest_errors); in clear_stats()
13041 ofld_rxq->rx_aio_ddp_jobs = 0; in clear_stats()
13042 ofld_rxq->rx_aio_ddp_octets = 0; in clear_stats()
13043 ofld_rxq->rx_toe_tls_records = 0; in clear_stats()
13044 ofld_rxq->rx_toe_tls_octets = 0; in clear_stats()
13045 ofld_rxq->rx_toe_ddp_octets = 0; in clear_stats()
13046 counter_u64_zero(ofld_rxq->ddp_buffer_alloc); in clear_stats()
13047 counter_u64_zero(ofld_rxq->ddp_buffer_reuse); in clear_stats()
13048 counter_u64_zero(ofld_rxq->ddp_buffer_free); in clear_stats()
13053 wrq = &sc->sge.ctrlq[pi->port_id]; in clear_stats()
13054 wrq->tx_wrs_direct = 0; in clear_stats()
13055 wrq->tx_wrs_copied = 0; in clear_stats()
13069 bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr)); in hold_clip_addr()
13085 bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr)); in release_clip_addr()
13097 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0); in t4_os_find_pci_capability()
13103 struct adapter *sc = pi->adapter; in t4_os_portmod_changed()
13111 KASSERT((pi->flags & FIXED_IFMEDIA) == 0, in t4_os_portmod_changed()
13112 ("%s: port_type %u", __func__, pi->port_type)); in t4_os_portmod_changed()
13114 vi = &pi->vi[0]; in t4_os_portmod_changed()
13118 if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) { in t4_os_portmod_changed()
13126 ifp = vi->ifp; in t4_os_portmod_changed()
13127 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) in t4_os_portmod_changed()
13129 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) in t4_os_portmod_changed()
13131 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) in t4_os_portmod_changed()
13133 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { in t4_os_portmod_changed()
13135 port_top_speed(pi), mod_str[pi->mod_type]); in t4_os_portmod_changed()
13138 pi->mod_type); in t4_os_portmod_changed()
13147 struct link_config *lc = &pi->link_cfg; in t4_os_link_changed()
13148 struct adapter *sc = pi->adapter; in t4_os_link_changed()
13154 if (lc->link_ok) { in t4_os_link_changed()
13155 if (lc->speed > 25000 || in t4_os_link_changed()
13156 (lc->speed == 25000 && lc->fec == FEC_RS)) in t4_os_link_changed()
13157 pi->fcs_reg = A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS; in t4_os_link_changed()
13159 pi->fcs_reg = A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS; in t4_os_link_changed()
13160 pi->fcs_base = t4_read_reg64(sc, in t4_os_link_changed()
13161 t4_port_reg(sc, pi->tx_chan, pi->fcs_reg)); in t4_os_link_changed()
13162 pi->stats.rx_fcs_err = 0; in t4_os_link_changed()
13164 pi->fcs_reg = -1; in t4_os_link_changed()
13167 MPASS(pi->fcs_reg != -1); in t4_os_link_changed()
13168 MPASS(pi->fcs_base == 0); in t4_os_link_changed()
13172 ifp = vi->ifp; in t4_os_link_changed()
13176 if (lc->link_ok) { in t4_os_link_changed()
13177 if_setbaudrate(ifp, IF_Mbps(lc->speed)); in t4_os_link_changed()
13194 * in - the only guarantee is that sc->sc_lock is a valid lock. in t4_iterate()
13206 struct adapter *sc = dev->si_drv1; in t4_ioctl()
13216 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) in t4_ioctl()
13219 mtx_lock(&sc->reg_lock); in t4_ioctl()
13222 else if (edata->size == 4) in t4_ioctl()
13223 edata->val = t4_read_reg(sc, edata->addr); in t4_ioctl()
13224 else if (edata->size == 8) in t4_ioctl()
13225 edata->val = t4_read_reg64(sc, edata->addr); in t4_ioctl()
13228 mtx_unlock(&sc->reg_lock); in t4_ioctl()
13235 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) in t4_ioctl()
13238 mtx_lock(&sc->reg_lock); in t4_ioctl()
13241 else if (edata->size == 4) { in t4_ioctl()
13242 if (edata->val & 0xffffffff00000000) in t4_ioctl()
13244 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); in t4_ioctl()
13245 } else if (edata->size == 8) in t4_ioctl()
13246 t4_write_reg64(sc, edata->addr, edata->val); in t4_ioctl()
13249 mtx_unlock(&sc->reg_lock); in t4_ioctl()
13258 if (regs->len < reglen) { in t4_ioctl()
13259 regs->len = reglen; /* hint to the caller */ in t4_ioctl()
13263 regs->len = reglen; in t4_ioctl()
13265 mtx_lock(&sc->reg_lock); in t4_ioctl()
13270 mtx_unlock(&sc->reg_lock); in t4_ioctl()
13272 rc = copyout(buf, regs->data, reglen); in t4_ioctl()
13297 rc = get_sge_context(sc, ctxt->mem_id, ctxt->cid, in t4_ioctl()
13298 sizeof(ctxt->data), &ctxt->data[0]); in t4_ioctl()
13349 rc = get_sge_context(sc, ctxt->mem_id, ctxt->cid, in t4_ioctl()
13350 sizeof(ctxt->data), &ctxt->data[0]); in t4_ioctl()
13365 struct port_info *pi = vi->pi; in toe_capability()
13366 struct adapter *sc = pi->adapter; in toe_capability()
13377 if (sc->flags & KERN_TLS_ON && is_t6(sc)) { in toe_capability()
13388 p = sc->port[i]; in toe_capability()
13390 if (if_getcapenable(v->ifp) & IFCAP_TXTLS) { in toe_capability()
13393 device_get_nameunit(v->dev)); in toe_capability()
13409 if ((if_getcapenable(vi->ifp) & IFCAP_TOE) != 0) { in toe_capability()
13419 if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0)) in toe_capability()
13421 if (!(pi->vi[0].flags & VI_INIT_DONE) && in toe_capability()
13422 ((rc = vi_init(&pi->vi[0])) != 0)) in toe_capability()
13425 if (isset(&sc->offload_map, pi->port_id)) { in toe_capability()
13427 MPASS(pi->uld_vis > 0); in toe_capability()
13428 pi->uld_vis++; in toe_capability()
13441 KASSERT(sc->tom_softc != NULL, in toe_capability()
13458 if (pi->uld_vis++ == 0) in toe_capability()
13459 setbit(&sc->offload_map, pi->port_id); in toe_capability()
13461 if ((if_getcapenable(vi->ifp) & IFCAP_TOE) == 0) { in toe_capability()
13465 MPASS(isset(&sc->offload_map, pi->port_id)); in toe_capability()
13466 MPASS(pi->uld_vis > 0); in toe_capability()
13467 if (--pi->uld_vis == 0) in toe_capability()
13468 clrbit(&sc->offload_map, pi->port_id); in toe_capability()
13519 if (!(sc->flags & FULL_INIT_DONE)) { in t4_activate_uld()
13529 rc = t4_uld_list[id]->uld_activate(sc); in t4_activate_uld()
13531 setbit(&sc->active_ulds, id); in t4_activate_uld()
13552 rc = t4_uld_list[id]->uld_deactivate(sc); in t4_deactivate_uld()
13554 clrbit(&sc->active_ulds, id); in t4_deactivate_uld()
13573 rc = t4_uld_list[i]->uld_deactivate(sc); in deactivate_all_uld()
13576 clrbit(&sc->active_ulds, i); in deactivate_all_uld()
13594 t4_uld_list[i]->uld_stop == NULL) in stop_all_uld()
13596 (void) t4_uld_list[i]->uld_stop(sc); in stop_all_uld()
13612 t4_uld_list[i]->uld_restart == NULL) in restart_all_uld()
13614 (void) t4_uld_list[i]->uld_restart(sc); in restart_all_uld()
13626 return (isset(&sc->active_ulds, id)); in uld_active()
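/*
 * Illustrative note: active upper-layer drivers (ULDs) are tracked with
 * the classic setbit()/clrbit()/isset() bitmap macros from <sys/param.h>,
 * as in uld_active() above.  Standalone sketch with a small made-up map:
 */
#include <sys/param.h>

static uint8_t example_active_map[howmany(32, NBBY)];	/* room for 32 ids */

static void
example_set_active(int id, int on)
{
	if (on)
		setbit(example_active_map, id);
	else
		clrbit(example_active_map, id);
}

static int
example_is_active(int id)
{
	return (isset(example_active_map, id) != 0);
}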
13644 if (sc->flags & KERN_TLS_ON) in ktls_capability()
13646 if (sc->offload_map != 0) { in ktls_capability()
13675 nq = *t < 0 ? -*t : c; in calculate_nqueues()
13718 if (t4_toecaps_allowed == -1) in tweak_tunables()
13721 if (t4_toecaps_allowed == -1) in tweak_tunables()
13726 if (t4_rdmacaps_allowed == -1) { in tweak_tunables()
13731 if (t4_iscsicaps_allowed == -1) { in tweak_tunables()
13737 if (t4_nvmecaps_allowed == -1) in tweak_tunables()
13743 if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS) in tweak_tunables()
13746 if (t4_rdmacaps_allowed == -1) in tweak_tunables()
13749 if (t4_iscsicaps_allowed == -1) in tweak_tunables()
13752 if (t4_nvmecaps_allowed == -1) in tweak_tunables()
13766 if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS) in tweak_tunables()
13780 * Number of VIs to create per-port. The first VI is the "main" regular in tweak_tunables()
13814 base = sc->memwin[2].mw_base; in t4_dump_mem()
13820 pf = V_PFNUM(sc->pf); in t4_dump_mem()
13823 off = addr - win_pos; in t4_dump_mem()
13840 len -= sizeof(buf); in t4_dump_mem()
13861 struct devlog_params *dparams = &sc->params.devlog; in t4_dump_devlog()
13866 if (dparams->start == 0) { in t4_dump_devlog()
13867 db_printf("devlog params not valid\n"); in t4_dump_devlog()
13871 nentries = dparams->size / sizeof(struct fw_devlog_e); in t4_dump_devlog()
13872 m = fwmtype_to_hwmtype(dparams->memtype); in t4_dump_devlog()
13875 first = -1; in t4_dump_devlog()
13877 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), in t4_dump_devlog()
13892 if (first == -1) in t4_dump_devlog()
13897 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), in t4_dump_devlog()
13908 e.params[j] = be32toh(e.params[j]); in t4_dump_devlog()
13916 db_printf(e.fmt, e.params[0], e.params[1], e.params[2], in t4_dump_devlog()
13917 e.params[3], e.params[4], e.params[5], e.params[6], in t4_dump_devlog()
13918 e.params[7]); in t4_dump_devlog()
14053 t4_write_reg(sc, A_MPS_RX_VXLAN_TYPE, V_VXLAN(sc->vxlan_port) | in enable_vxlan_rx()
14056 pi = sc->port[i]; in enable_vxlan_rx()
14057 if (pi->vxlan_tcam_entry == true) in enable_vxlan_rx()
14059 rc = t4_alloc_raw_mac_filt(sc, pi->vi[0].viid, match_all_mac, in enable_vxlan_rx()
14060 match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id, in enable_vxlan_rx()
14063 rc = -rc; in enable_vxlan_rx()
14064 CH_ERR(&pi->vi[0], in enable_vxlan_rx()
14067 MPASS(rc == sc->rawf_base + pi->port_id); in enable_vxlan_rx()
14068 pi->vxlan_tcam_entry = true; in enable_vxlan_rx()
14078 if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5) in t4_vxlan_start()
14083 if (sc->vxlan_refcount == 0) { in t4_vxlan_start()
14084 sc->vxlan_port = v->port; in t4_vxlan_start()
14085 sc->vxlan_refcount = 1; in t4_vxlan_start()
14088 } else if (sc->vxlan_port == v->port) { in t4_vxlan_start()
14089 sc->vxlan_refcount++; in t4_vxlan_start()
14093 sc->vxlan_port, v->port); in t4_vxlan_start()
14103 if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5) in t4_vxlan_stop()
14113 if (sc->vxlan_port != v->port) in t4_vxlan_stop()
14115 if (sc->vxlan_refcount == 0) { in t4_vxlan_stop()
14117 "ignoring attempt to stop it again.\n", sc->vxlan_port); in t4_vxlan_stop()
14118 } else if (--sc->vxlan_refcount == 0 && !hw_off_limits(sc)) in t4_vxlan_stop()
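/*
 * Illustrative sketch of the bookkeeping in t4_vxlan_start()/_stop() above:
 * the adapter handles one offloaded VXLAN UDP port at a time, so the first
 * start latches the port and programs the hardware, repeat starts for the
 * same port only bump a refcount, and the hardware state is torn down when
 * the count returns to zero.  program_hw()/clear_hw() are placeholders for
 * the register and raw MAC filter work done above; locking is omitted.
 */
static void
example_vxlan_start(struct adapter *sc, uint16_t port)
{
	if (sc->vxlan_refcount == 0) {
		sc->vxlan_port = port;
		sc->vxlan_refcount = 1;
		program_hw(sc, port);
	} else if (sc->vxlan_port == port)
		sc->vxlan_refcount++;
	/* a different port while one is already active is not programmed */
}

static void
example_vxlan_stop(struct adapter *sc, uint16_t port)
{
	if (sc->vxlan_port != port)
		return;
	if (sc->vxlan_refcount > 0 && --sc->vxlan_refcount == 0)
		clear_hw(sc);
}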
14212 if (--loaded == 0) { in mod_event()