Lines Matching +full:x +full:- +full:rc

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
270 * Each tunable is set to a default value here if it's known at compile-time.
271 * Otherwise it is set to -n as an indication to tweak_tunables() that it should
288 int t4_ntxq = -NTXQ;
294 int t4_nrxq = -NRXQ;
300 static int t4_ntxq_vi = -NTXQ_VI;
305 static int t4_nrxq_vi = -NRXQ_VI;
311 0, "Reserve TX queue 0 of each VI for non-flowid packets");
315 static int t4_nofldtxq = -NOFLDTXQ;
320 static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
327 static int t4_nofldrxq = -NOFLDRXQ;
332 static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
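The negative values above are sentinels rather than real queue counts; a minimal sketch, assuming tweak_tunables() resolves such a sentinel by clamping to the number of CPUs (only the variable and macro names are taken from the fragments above, the clamping policy itself is an assumption):

static void
tweak_tunables_sketch(void)
{
	int nc = mp_ncpus;		/* <sys/smp.h>: CPUs in the system */

	if (t4_ntxq < 1)		/* still at its -NTXQ sentinel */
		t4_ntxq = imin(nc, NTXQ);	/* assumed clamp; imin() is from <sys/libkern.h> */
	if (t4_nrxq < 1)		/* still at its -NRXQ sentinel */
		t4_nrxq = imin(nc, NRXQ);
}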
341 #define PKTC_IDX_OFLD (-1)
346 /* 0 means chip/fw default, non-zero number is value in microseconds */
351 /* 0 means chip/fw default, non-zero number is value in microseconds */
356 /* 0 means chip/fw default, non-zero number is # of keepalives before abort */
361 /* 0 means chip/fw default, non-zero number is value in microseconds */
366 /* 0 means chip/fw default, non-zero number is value in microseconds */
371 /* 0 means chip/fw default, non-zero number is # of rexmt before abort */
376 /* -1 means chip/fw default, other values are raw backoff values to use */
378 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
434 static int t4_nnmtxq = -NNMTXQ;
439 static int t4_nnmrxq = -NNMRXQ;
444 static int t4_nnmtxq_vi = -NNMTXQ_VI;
449 static int t4_nnmrxq_vi = -NNMRXQ_VI;
463 #define PKTC_IDX (-1)
481 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
485 0, "Interrupt types allowed (bit 0 = INTx, 1 = MSI, 2 = MSI-X)");
491 #define BUILTIN_CF "built-in"
515 * -1 to run with the firmware default. Same as FEC_AUTO (bit 5)
518 static int t4_fec = -1;
526 * -1 to set FORCE_FEC iff requested_fec != AUTO. Multiple FEC bits are okay.
533 static int t4_force_fec = -1;
539 * -1 to run with the firmware default.
543 static int t4_autoneg = -1;
548 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
549 * encouraged respectively). '-n' is the same as 'n' except the firmware
554 "Firmware auto-install (0 = prohibited, 1 = allowed, 2 = encouraged)");
583 static int t4_toecaps_allowed = -1;
587 static int t4_rdmacaps_allowed = -1;
591 static int t4_cryptocaps_allowed = -1;
595 static int t4_iscsicaps_allowed = -1;
618 * -1: driver should figure out a good value.
623 static int pcie_relaxed_ordering = -1;
649 * Set to non-zero to enable the attack filter. A packet that matches any of
654 * 4) IP && source address is loopback (127.x.y.z).
655 * 5) IP && destination address is loopback (127.x.y.z).
733 uint16_t intr_type; /* INTx, MSI, or MSI-X */
900 {0x4400, "Chelsio T440-dbg"},
901 {0x4401, "Chelsio T420-CR"},
902 {0x4402, "Chelsio T422-CR"},
903 {0x4403, "Chelsio T440-CR"},
904 {0x4404, "Chelsio T420-BCH"},
905 {0x4405, "Chelsio T440-BCH"},
906 {0x4406, "Chelsio T440-CH"},
907 {0x4407, "Chelsio T420-SO"},
908 {0x4408, "Chelsio T420-CX"},
909 {0x4409, "Chelsio T420-BT"},
910 {0x440a, "Chelsio T404-BT"},
911 {0x440e, "Chelsio T440-LP-CR"},
914 {0x5400, "Chelsio T580-dbg"},
915 {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */
916 {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 x 1G */
917 {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */
918 {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */
919 {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */
920 {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */
921 {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */
922 {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */
923 {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */
924 {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */
925 {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */
926 {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */
927 {0x5415, "Chelsio T502-BT"}, /* 2 x 1G */
928 {0x5418, "Chelsio T540-BT"}, /* 4 x 10GBaseT */
929 {0x5419, "Chelsio T540-LP-BT"}, /* 4 x 10GBaseT */
930 {0x541a, "Chelsio T540-SO-BT"}, /* 4 x 10GBaseT, nomem */
931 {0x541b, "Chelsio T540-SO-CR"}, /* 4 x 10G, nomem */
934 {0x5483, "Custom T540-CR"},
935 {0x5484, "Custom T540-BT"},
938 {0x6400, "Chelsio T6-DBG-25"}, /* 2 x 10/25G, debug */
939 {0x6401, "Chelsio T6225-CR"}, /* 2 x 10/25G */
940 {0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */
941 {0x6403, "Chelsio T6425-CR"}, /* 4 x 10/25G */
942 {0x6404, "Chelsio T6425-SO-CR"}, /* 4 x 10/25G, nomem */
943 {0x6405, "Chelsio T6225-SO-OCP3"}, /* 2 x 10/25G, nomem */
944 {0x6406, "Chelsio T6225-OCP3"}, /* 2 x 10/25G */
945 {0x6407, "Chelsio T62100-LP-CR"}, /* 2 x 40/50/100G */
946 {0x6408, "Chelsio T62100-SO-CR"}, /* 2 x 40/50/100G, nomem */
947 {0x6409, "Chelsio T6210-BT"}, /* 2 x 10GBASE-T */
948 {0x640d, "Chelsio T62100-CR"}, /* 2 x 40/50/100G */
949 {0x6410, "Chelsio T6-DBG-100"}, /* 2 x 40/50/100G, debug */
950 {0x6411, "Chelsio T6225-LL-CR"}, /* 2 x 10/25G */
951 {0x6414, "Chelsio T62100-SO-OCP3"}, /* 2 x 40/50/100G, nomem */
952 {0x6415, "Chelsio T6201-BT"}, /* 2 x 1000BASE-T */
955 {0x6480, "Custom T6225-CR"},
956 {0x6481, "Custom T62100-CR"},
957 {0x6482, "Custom T6225-CR"},
958 {0x6483, "Custom T62100-CR"},
959 {0x6484, "Custom T64100-CR"},
960 {0x6485, "Custom T6240-SO"},
961 {0x6486, "Custom T6225-SO-CR"},
962 {0x6487, "Custom T6225-CR"},
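A device table like the one above is what a PCI probe routine walks at attach time; a minimal sketch, assuming a table named t6_pciids[] with .device and .desc fields (table and field names are assumptions; 0x1425 is Chelsio's PCI vendor ID):

static int
probe_sketch(device_t dev)
{
	uint16_t d;
	size_t i;

	if (pci_get_vendor(dev) != 0x1425)	/* not a Chelsio device */
		return (ENXIO);
	d = pci_get_device(dev);
	for (i = 0; i < nitems(t6_pciids); i++) {
		if (d == t6_pciids[i].device) {
			device_set_desc(dev, t6_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}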
1103 if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames)) in t4_init_devnames()
1104 sc->names = &devnames[id - CHELSIO_T4]; in t4_init_devnames()
1106 device_printf(sc->dev, "chip id %d is not supported.\n", id); in t4_init_devnames()
1107 sc->names = NULL; in t4_init_devnames()
1119 parent = device_get_nameunit(sc->dev); in t4_ifnet_unit()
1120 name = sc->names->ifnet_name; in t4_ifnet_unit()
1123 value == pi->port_id) in t4_ifnet_unit()
1126 return (-1); in t4_ifnet_unit()
1144 cur = &sc->cal_info[sc->cal_current]; in t4_calibration()
1145 next_up = (sc->cal_current + 1) % CNT_CAL_INFO; in t4_calibration()
1146 nex = &sc->cal_info[next_up]; in t4_calibration()
1147 if (__predict_false(sc->cal_count == 0)) { in t4_calibration()
1149 cur->hw_cur = hw; in t4_calibration()
1150 cur->sbt_cur = sbt; in t4_calibration()
1151 sc->cal_count++; in t4_calibration()
1155 if (cur->hw_cur == hw) { in t4_calibration()
1157 sc->cal_count = 0; in t4_calibration()
1158 atomic_store_rel_int(&cur->gen, 0); in t4_calibration()
1162 seqc_write_begin(&nex->gen); in t4_calibration()
1163 nex->hw_prev = cur->hw_cur; in t4_calibration()
1164 nex->sbt_prev = cur->sbt_cur; in t4_calibration()
1165 nex->hw_cur = hw; in t4_calibration()
1166 nex->sbt_cur = sbt; in t4_calibration()
1167 seqc_write_end(&nex->gen); in t4_calibration()
1168 sc->cal_current = next_up; in t4_calibration()
1170 callout_reset_sbt_curcpu(&sc->cal_callout, SBT_1S, 0, t4_calibration, in t4_calibration()
1185 sc->cal_info[i].gen = 0; in t4_calibration_start()
1187 sc->cal_current = 0; in t4_calibration_start()
1188 sc->cal_count = 0; in t4_calibration_start()
1189 sc->cal_gen = 0; in t4_calibration_start()
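t4_calibration() publishes each (hw, sbt) sample pair under the seqc generation counter in gen, so a reader can take a consistent snapshot without locks. A minimal sketch of such a reader, assuming the element type of sc->cal_info[] is struct clock_sync (the field names come from the fragments above; the struct name, the uint64_t hardware timestamp, and the consumer itself are assumptions):

static bool
read_cal_sample_sketch(struct adapter *sc, uint64_t *hw, sbintime_t *sbt)
{
	const struct clock_sync *cal = &sc->cal_info[sc->cal_current];
	seqc_t gen;

	gen = seqc_read(&cal->gen);	/* <sys/seqc.h>: waits out an in-progress update */
	if (gen == 0)
		return (false);		/* no valid sample published yet */
	*hw = cal->hw_cur;
	*sbt = cal->sbt_cur;
	return (seqc_consistent(&cal->gen, gen));	/* false if updated meanwhile */
}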
1197 int rc = 0, i, j, rqidx, tqidx, nports; in t4_attach() local
1214 sc->dev = dev; in t4_attach()
1215 sysctl_ctx_init(&sc->ctx); in t4_attach()
1216 TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags); in t4_attach()
1226 sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5); in t4_attach()
1238 sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS); in t4_attach()
1239 sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL); in t4_attach()
1240 sc->traceq = -1; in t4_attach()
1241 mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF); in t4_attach()
1242 snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer", in t4_attach()
1245 snprintf(sc->lockname, sizeof(sc->lockname), "%s", in t4_attach()
1247 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF); in t4_attach()
1250 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF); in t4_attach()
1251 TAILQ_INIT(&sc->sfl); in t4_attach()
1252 callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0); in t4_attach()
1254 mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF); in t4_attach()
1256 sc->policy = NULL; in t4_attach()
1257 rw_init(&sc->policy_lock, "connection offload policy"); in t4_attach()
1259 callout_init(&sc->ktls_tick, 1); in t4_attach()
1261 callout_init(&sc->cal_callout, 1); in t4_attach()
1263 refcount_init(&sc->vxlan_refcount, 0); in t4_attach()
1265 TASK_INIT(&sc->reset_task, 0, reset_adapter_task, sc); in t4_attach()
1266 TASK_INIT(&sc->fatal_error_task, 0, fatal_error_task, sc); in t4_attach()
1268 sc->ctrlq_oid = SYSCTL_ADD_NODE(&sc->ctx, in t4_attach()
1269 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "ctrlq", in t4_attach()
1271 sc->fwq_oid = SYSCTL_ADD_NODE(&sc->ctx, in t4_attach()
1272 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "fwq", in t4_attach()
1275 rc = t4_map_bars_0_and_4(sc); in t4_attach()
1276 if (rc != 0) in t4_attach()
1279 memset(sc->chan_map, 0xff, sizeof(sc->chan_map)); in t4_attach()
1283 rc = -t4_prep_adapter(sc, buf); in t4_attach()
1285 if (rc != 0) { in t4_attach()
1286 device_printf(dev, "failed to prepare adapter: %d.\n", rc); in t4_attach()
1297 sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j); in t4_attach()
1298 sc->mbox = sc->pf; in t4_attach()
1301 if (sc->names == NULL) { in t4_attach()
1302 rc = ENOTSUP; in t4_attach()
1320 rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev)); in t4_attach()
1321 if (rc != 0) in t4_attach()
1323 rc); in t4_attach()
1334 rc = ENOTSUP; in t4_attach()
1340 rc = contact_firmware(sc); in t4_attach()
1341 if (rc != 0) in t4_attach()
1343 MPASS(sc->flags & FW_OK); in t4_attach()
1345 rc = get_params__pre_init(sc); in t4_attach()
1346 if (rc != 0) in t4_attach()
1349 if (sc->flags & MASTER_PF) { in t4_attach()
1350 rc = partition_resources(sc); in t4_attach()
1351 if (rc != 0) in t4_attach()
1355 rc = get_params__post_init(sc); in t4_attach()
1356 if (rc != 0) in t4_attach()
1359 rc = set_params__post_init(sc); in t4_attach()
1360 if (rc != 0) in t4_attach()
1363 rc = t4_map_bar_2(sc); in t4_attach()
1364 if (rc != 0) in t4_attach()
1367 rc = t4_adj_doorbells(sc); in t4_attach()
1368 if (rc != 0) in t4_attach()
1371 rc = t4_create_dma_tag(sc); in t4_attach()
1372 if (rc != 0) in t4_attach()
1376 * First pass over all the ports - allocate VIs and initialize some in t4_attach()
1383 sc->port[i] = pi; in t4_attach()
1386 pi->adapter = sc; in t4_attach()
1387 pi->port_id = i; in t4_attach()
1390 * pi->nvi's final value is known. in t4_attach()
1392 pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE, in t4_attach()
1399 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i); in t4_attach()
1400 if (rc != 0) { in t4_attach()
1402 i, rc); in t4_attach()
1403 free(pi->vi, M_CXGBE); in t4_attach()
1405 sc->port[i] = NULL; in t4_attach()
1409 if (is_bt(pi->port_type)) in t4_attach()
1410 setbit(&sc->bt_map, pi->tx_chan); in t4_attach()
1412 MPASS(!isset(&sc->bt_map, pi->tx_chan)); in t4_attach()
1414 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d", in t4_attach()
1416 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF); in t4_attach()
1417 sc->chan_map[pi->tx_chan] = i; in t4_attach()
1426 pi->fcs_reg = -1; in t4_attach()
1428 pi->fcs_reg = t4_port_reg(sc, pi->tx_chan, in t4_attach()
1431 pi->fcs_base = 0; in t4_attach()
1434 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change, in t4_attach()
1442 pi->flags |= FIXED_IFMEDIA; in t4_attach()
1445 pi->dev = device_add_child(dev, sc->names->ifnet_name, in t4_attach()
1447 if (pi->dev == NULL) { in t4_attach()
1450 rc = ENXIO; in t4_attach()
1453 pi->vi[0].dev = pi->dev; in t4_attach()
1454 device_set_softc(pi->dev, pi); in t4_attach()
1460 nports = sc->params.nports; in t4_attach()
1461 rc = cfg_itype_and_nqueues(sc, &iaq); in t4_attach()
1462 if (rc != 0) in t4_attach()
1466 sc->intr_type = iaq.intr_type; in t4_attach()
1467 sc->intr_count = iaq.nirq; in t4_attach()
1469 s = &sc->sge; in t4_attach()
1470 s->nrxq = nports * iaq.nrxq; in t4_attach()
1471 s->ntxq = nports * iaq.ntxq; in t4_attach()
1473 s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi; in t4_attach()
1474 s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi; in t4_attach()
1476 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */ in t4_attach()
1477 s->neq += nports; /* ctrl queues: 1 per port */ in t4_attach()
1478 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */ in t4_attach()
1481 s->nofldtxq = nports * iaq.nofldtxq; in t4_attach()
1483 s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi; in t4_attach()
1484 s->neq += s->nofldtxq; in t4_attach()
1486 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_ofld_txq), in t4_attach()
1492 s->nofldrxq = nports * iaq.nofldrxq; in t4_attach()
1494 s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi; in t4_attach()
1495 s->neq += s->nofldrxq; /* free list */ in t4_attach()
1496 s->niq += s->nofldrxq; in t4_attach()
1498 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq), in t4_attach()
1503 s->nnmrxq = 0; in t4_attach()
1504 s->nnmtxq = 0; in t4_attach()
1506 s->nnmrxq += nports * iaq.nnmrxq; in t4_attach()
1507 s->nnmtxq += nports * iaq.nnmtxq; in t4_attach()
1510 s->nnmrxq += nports * (num_vis - 1) * iaq.nnmrxq_vi; in t4_attach()
1511 s->nnmtxq += nports * (num_vis - 1) * iaq.nnmtxq_vi; in t4_attach()
1513 s->neq += s->nnmtxq + s->nnmrxq; in t4_attach()
1514 s->niq += s->nnmrxq; in t4_attach()
1516 s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq), in t4_attach()
1518 s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq), in t4_attach()
1521 MPASS(s->niq <= s->iqmap_sz); in t4_attach()
1522 MPASS(s->neq <= s->eqmap_sz); in t4_attach()
1524 s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE, in t4_attach()
1526 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE, in t4_attach()
1528 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE, in t4_attach()
1530 s->iqmap = malloc(s->iqmap_sz * sizeof(struct sge_iq *), M_CXGBE, in t4_attach()
1532 s->eqmap = malloc(s->eqmap_sz * sizeof(struct sge_eq *), M_CXGBE, in t4_attach()
1535 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE, in t4_attach()
1548 if (sc->vres.key.size != 0) in t4_attach()
1549 sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start, in t4_attach()
1550 sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK); in t4_attach()
1567 struct port_info *pi = sc->port[i]; in t4_attach()
1573 pi->nvi = num_vis; in t4_attach()
1575 vi->pi = pi; in t4_attach()
1576 vi->adapter = sc; in t4_attach()
1577 vi->first_intr = -1; in t4_attach()
1578 vi->qsize_rxq = t4_qsize_rxq; in t4_attach()
1579 vi->qsize_txq = t4_qsize_txq; in t4_attach()
1581 vi->first_rxq = rqidx; in t4_attach()
1582 vi->first_txq = tqidx; in t4_attach()
1583 vi->tmr_idx = t4_tmr_idx; in t4_attach()
1584 vi->pktc_idx = t4_pktc_idx; in t4_attach()
1585 vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi; in t4_attach()
1586 vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi; in t4_attach()
1588 rqidx += vi->nrxq; in t4_attach()
1589 tqidx += vi->ntxq; in t4_attach()
1591 if (j == 0 && vi->ntxq > 1) in t4_attach()
1592 vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0; in t4_attach()
1594 vi->rsrv_noflowq = 0; in t4_attach()
1597 vi->first_ofld_txq = ofld_tqidx; in t4_attach()
1598 vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi; in t4_attach()
1599 ofld_tqidx += vi->nofldtxq; in t4_attach()
1602 vi->ofld_tmr_idx = t4_tmr_idx_ofld; in t4_attach()
1603 vi->ofld_pktc_idx = t4_pktc_idx_ofld; in t4_attach()
1604 vi->first_ofld_rxq = ofld_rqidx; in t4_attach()
1605 vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi; in t4_attach()
1607 ofld_rqidx += vi->nofldrxq; in t4_attach()
1610 vi->first_nm_rxq = nm_rqidx; in t4_attach()
1611 vi->first_nm_txq = nm_tqidx; in t4_attach()
1613 vi->nnmrxq = iaq.nnmrxq; in t4_attach()
1614 vi->nnmtxq = iaq.nnmtxq; in t4_attach()
1616 vi->nnmrxq = iaq.nnmrxq_vi; in t4_attach()
1617 vi->nnmtxq = iaq.nnmtxq_vi; in t4_attach()
1619 nm_rqidx += vi->nnmrxq; in t4_attach()
1620 nm_tqidx += vi->nnmtxq; in t4_attach()
1625 rc = t4_setup_intr_handlers(sc); in t4_attach()
1626 if (rc != 0) { in t4_attach()
1628 "failed to setup interrupt handlers: %d\n", rc); in t4_attach()
1635 * Ensure thread-safe mailbox access (in debug builds). in t4_attach()
1641 sc->flags |= CHK_MBOX_ACCESS; in t4_attach()
1647 "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n", in t4_attach()
1648 sc->params.pci.speed, sc->params.pci.width, sc->params.nports, in t4_attach()
1649 sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" : in t4_attach()
1650 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), in t4_attach()
1651 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq); in t4_attach()
1658 if (rc != 0 && sc->cdev) { in t4_attach()
1662 rc = 0; in t4_attach()
1665 if (rc != 0) in t4_attach()
1670 return (rc); in t4_attach()
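To make the SGE queue accounting in t4_attach() concrete, a worked example with assumed inputs: a 2-port adapter with num_vis = 1, iaq.nrxq = iaq.ntxq = 8, and no TOE or netmap queues ends up with s->nrxq = s->ntxq = 16, s->neq = 16 + 16 + 2 = 34 (each rx free list is counted as an eq, plus one ctrl queue per port), and s->niq = 16 + 1 = 17 (the extra iq is the firmware event queue); these totals are what the MPASS checks compare against iqmap_sz and eqmap_sz.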
1682 pi = sc->port[i]; in t4_child_location()
1683 if (pi != NULL && pi->dev == dev) { in t4_child_location()
1684 sbuf_printf(sb, "port=%d", pi->port_id); in t4_child_location()
1697 if (sc->flags & FW_OK) in t4_ready()
1711 pi = sc->port[port]; in t4_read_port_device()
1712 if (pi == NULL || pi->dev == NULL) in t4_read_port_device()
1714 *child = pi->dev; in t4_read_port_device()
1748 int rc; in t4_detach() local
1750 rc = notify_siblings(dev, 1); in t4_detach()
1751 if (rc) { in t4_detach()
1753 "failed to detach sibling devices: %d\n", rc); in t4_detach()
1754 return (rc); in t4_detach()
1765 int i, rc; in t4_detach_common() local
1770 rc = deactivate_all_uld(sc); in t4_detach_common()
1771 if (rc) { in t4_detach_common()
1773 "failed to detach upper layer drivers: %d\n", rc); in t4_detach_common()
1774 return (rc); in t4_detach_common()
1778 if (sc->cdev) { in t4_detach_common()
1779 destroy_dev(sc->cdev); in t4_detach_common()
1780 sc->cdev = NULL; in t4_detach_common()
1787 sc->flags &= ~CHK_MBOX_ACCESS; in t4_detach_common()
1788 if (sc->flags & FULL_INIT_DONE) { in t4_detach_common()
1789 if (!(sc->flags & IS_VF)) in t4_detach_common()
1794 rc = bus_detach_children(dev); in t4_detach_common()
1795 if (rc) { in t4_detach_common()
1797 "failed to detach child devices: %d\n", rc); in t4_detach_common()
1798 return (rc); in t4_detach_common()
1802 for (i = 0; i < sc->intr_count; i++) in t4_detach_common()
1803 t4_free_irq(sc, &sc->irq[i]); in t4_detach_common()
1805 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) in t4_detach_common()
1809 pi = sc->port[i]; in t4_detach_common()
1811 t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid); in t4_detach_common()
1813 mtx_destroy(&pi->pi_lock); in t4_detach_common()
1814 free(pi->vi, M_CXGBE); in t4_detach_common()
1818 callout_stop(&sc->cal_callout); in t4_detach_common()
1819 callout_drain(&sc->cal_callout); in t4_detach_common()
1821 sysctl_ctx_free(&sc->ctx); in t4_detach_common()
1824 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) in t4_detach_common()
1825 t4_fw_bye(sc, sc->mbox); in t4_detach_common()
1827 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX) in t4_detach_common()
1830 if (sc->regs_res) in t4_detach_common()
1831 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid, in t4_detach_common()
1832 sc->regs_res); in t4_detach_common()
1834 if (sc->udbs_res) in t4_detach_common()
1835 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid, in t4_detach_common()
1836 sc->udbs_res); in t4_detach_common()
1838 if (sc->msix_res) in t4_detach_common()
1839 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid, in t4_detach_common()
1840 sc->msix_res); in t4_detach_common()
1842 if (sc->l2t) in t4_detach_common()
1844 if (sc->smt) in t4_detach_common()
1845 t4_free_smt(sc->smt); in t4_detach_common()
1850 if (sc->key_map) in t4_detach_common()
1851 vmem_destroy(sc->key_map); in t4_detach_common()
1857 free(sc->sge.ofld_txq, M_CXGBE); in t4_detach_common()
1860 free(sc->sge.ofld_rxq, M_CXGBE); in t4_detach_common()
1863 free(sc->sge.nm_rxq, M_CXGBE); in t4_detach_common()
1864 free(sc->sge.nm_txq, M_CXGBE); in t4_detach_common()
1866 free(sc->irq, M_CXGBE); in t4_detach_common()
1867 free(sc->sge.rxq, M_CXGBE); in t4_detach_common()
1868 free(sc->sge.txq, M_CXGBE); in t4_detach_common()
1869 free(sc->sge.ctrlq, M_CXGBE); in t4_detach_common()
1870 free(sc->sge.iqmap, M_CXGBE); in t4_detach_common()
1871 free(sc->sge.eqmap, M_CXGBE); in t4_detach_common()
1872 free(sc->tids.ftid_tab, M_CXGBE); in t4_detach_common()
1873 free(sc->tids.hpftid_tab, M_CXGBE); in t4_detach_common()
1874 free_hftid_hash(&sc->tids); in t4_detach_common()
1875 free(sc->tids.tid_tab, M_CXGBE); in t4_detach_common()
1878 callout_drain(&sc->ktls_tick); in t4_detach_common()
1879 callout_drain(&sc->sfl_callout); in t4_detach_common()
1880 if (mtx_initialized(&sc->tids.ftid_lock)) { in t4_detach_common()
1881 mtx_destroy(&sc->tids.ftid_lock); in t4_detach_common()
1882 cv_destroy(&sc->tids.ftid_cv); in t4_detach_common()
1884 if (mtx_initialized(&sc->tids.atid_lock)) in t4_detach_common()
1885 mtx_destroy(&sc->tids.atid_lock); in t4_detach_common()
1886 if (mtx_initialized(&sc->ifp_lock)) in t4_detach_common()
1887 mtx_destroy(&sc->ifp_lock); in t4_detach_common()
1889 if (rw_initialized(&sc->policy_lock)) { in t4_detach_common()
1890 rw_destroy(&sc->policy_lock); in t4_detach_common()
1892 if (sc->policy != NULL) in t4_detach_common()
1893 free_offload_policy(sc->policy); in t4_detach_common()
1898 struct memwin *mw = &sc->memwin[i]; in t4_detach_common()
1900 if (rw_initialized(&mw->mw_lock)) in t4_detach_common()
1901 rw_destroy(&mw->mw_lock); in t4_detach_common()
1904 mtx_destroy(&sc->sfl_lock); in t4_detach_common()
1905 mtx_destroy(&sc->reg_lock); in t4_detach_common()
1906 mtx_destroy(&sc->sc_lock); in t4_detach_common()
1919 if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_STOPPED))) { in stop_adapter()
1920 CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x, EALREADY\n", in stop_adapter()
1921 __func__, curthread, sc->flags, sc->error_flags); in stop_adapter()
1924 CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x\n", __func__, curthread, in stop_adapter()
1925 sc->flags, sc->error_flags); in stop_adapter()
1928 pi = sc->port[i]; in stop_adapter()
1932 if (pi->up_vis > 0 && pi->link_cfg.link_ok) { in stop_adapter()
1939 pi->link_cfg.link_ok = false; in stop_adapter()
1953 if (!atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_STOPPED))) { in restart_adapter()
1954 CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x, EALREADY\n", in restart_adapter()
1955 __func__, curthread, sc->flags, sc->error_flags); in restart_adapter()
1958 CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x\n", __func__, curthread, in restart_adapter()
1959 sc->flags, sc->error_flags); in restart_adapter()
1962 MPASS((sc->flags & FW_OK) == 0); in restart_adapter()
1963 MPASS((sc->flags & MASTER_PF) == 0); in restart_adapter()
1964 MPASS(sc->reset_thread == NULL); in restart_adapter()
1971 sc->reset_thread = curthread; in restart_adapter()
1975 sc->reset_thread = NULL; in restart_adapter()
1976 atomic_set_int(&sc->error_flags, ADAP_STOPPED); in restart_adapter()
1979 atomic_clear_int(&sc->error_flags, ADAP_FATAL_ERR); in restart_adapter()
1980 atomic_add_int(&sc->incarnation, 1); in restart_adapter()
1981 atomic_add_int(&sc->num_resets, 1); in restart_adapter()
1992 MPASS(sc->reset_thread == curthread); in set_adapter_hwstatus()
1993 mtx_lock(&sc->reg_lock); in set_adapter_hwstatus()
1994 atomic_clear_int(&sc->error_flags, HW_OFF_LIMITS); in set_adapter_hwstatus()
1995 mtx_unlock(&sc->reg_lock); in set_adapter_hwstatus()
1999 mtx_lock(&sc->reg_lock); in set_adapter_hwstatus()
2000 atomic_set_int(&sc->error_flags, HW_OFF_LIMITS); in set_adapter_hwstatus()
2001 mtx_unlock(&sc->reg_lock); in set_adapter_hwstatus()
2002 sc->flags &= ~(FW_OK | MASTER_PF); in set_adapter_hwstatus()
2003 sc->reset_thread = NULL; in set_adapter_hwstatus()
2023 int rc, i, j, k; in stop_lld() local
2035 rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4slld"); in stop_lld()
2036 if (rc != 0) in stop_lld()
2041 pi = sc->port[i]; in stop_lld()
2044 pi->vxlan_tcam_entry = false; in stop_lld()
2046 vi->xact_addr_filt = -1; in stop_lld()
2047 mtx_lock(&vi->tick_mtx); in stop_lld()
2048 vi->flags |= VI_SKIP_STATS; in stop_lld()
2049 mtx_unlock(&vi->tick_mtx); in stop_lld()
2050 if (!(vi->flags & VI_INIT_DONE)) in stop_lld()
2053 ifp = vi->ifp; in stop_lld()
2055 mtx_lock(&vi->tick_mtx); in stop_lld()
2056 callout_stop(&vi->tick); in stop_lld()
2057 mtx_unlock(&vi->tick_mtx); in stop_lld()
2058 callout_drain(&vi->tick); in stop_lld()
2066 txq->eq.flags &= ~(EQ_ENABLED | EQ_HW_ALLOCATED); in stop_lld()
2071 TXQ_LOCK(&ofld_txq->wrq); in stop_lld()
2072 ofld_txq->wrq.eq.flags &= ~EQ_HW_ALLOCATED; in stop_lld()
2073 TXQ_UNLOCK(&ofld_txq->wrq); in stop_lld()
2077 rxq->iq.flags &= ~IQ_HW_ALLOCATED; in stop_lld()
2081 ofld_rxq->iq.flags &= ~IQ_HW_ALLOCATED; in stop_lld()
2088 if (sc->flags & FULL_INIT_DONE) { in stop_lld()
2090 wrq = &sc->sge.ctrlq[i]; in stop_lld()
2092 wrq->eq.flags &= ~EQ_HW_ALLOCATED; in stop_lld()
2097 if (pi->flags & HAS_TRACEQ) { in stop_lld()
2098 pi->flags &= ~HAS_TRACEQ; in stop_lld()
2099 sc->traceq = -1; in stop_lld()
2100 sc->tracer_valid = 0; in stop_lld()
2101 sc->tracer_enabled = 0; in stop_lld()
2104 if (sc->flags & FULL_INIT_DONE) { in stop_lld()
2106 sc->sge.fwq.flags &= ~IQ_HW_ALLOCATED; in stop_lld()
2107 quiesce_iq_fl(sc, &sc->sge.fwq, NULL); in stop_lld()
2111 callout_stop(&sc->cal_callout); in stop_lld()
2112 callout_drain(&sc->cal_callout); in stop_lld()
2125 return (rc); in stop_lld()
2145 int rc; in t4_suspend() local
2148 rc = suspend_adapter(sc); in t4_suspend()
2151 return (rc); in t4_suspend()
2185 o->flags = sc->flags; in save_caps_and_params()
2187 o->nbmcaps = sc->nbmcaps; in save_caps_and_params()
2188 o->linkcaps = sc->linkcaps; in save_caps_and_params()
2189 o->switchcaps = sc->switchcaps; in save_caps_and_params()
2190 o->niccaps = sc->niccaps; in save_caps_and_params()
2191 o->toecaps = sc->toecaps; in save_caps_and_params()
2192 o->rdmacaps = sc->rdmacaps; in save_caps_and_params()
2193 o->cryptocaps = sc->cryptocaps; in save_caps_and_params()
2194 o->iscsicaps = sc->iscsicaps; in save_caps_and_params()
2195 o->fcoecaps = sc->fcoecaps; in save_caps_and_params()
2197 o->cfcsum = sc->cfcsum; in save_caps_and_params()
2198 MPASS(sizeof(o->cfg_file) == sizeof(sc->cfg_file)); in save_caps_and_params()
2199 memcpy(o->cfg_file, sc->cfg_file, sizeof(o->cfg_file)); in save_caps_and_params()
2201 o->params = sc->params; in save_caps_and_params()
2202 o->vres = sc->vres; in save_caps_and_params()
2203 o->tids = sc->tids; in save_caps_and_params()
2204 o->sge = sc->sge; in save_caps_and_params()
2206 o->rawf_base = sc->rawf_base; in save_caps_and_params()
2207 o->nrawf = sc->nrawf; in save_caps_and_params()
2213 int rc = 0; in compare_caps_and_params() local
2219 if (o->c##caps != sc->c##caps) { \ in compare_caps_and_params()
2220 CH_ERR(sc, "%scaps 0x%04x -> 0x%04x.\n", #c, o->c##caps, \ in compare_caps_and_params()
2221 sc->c##caps); \ in compare_caps_and_params()
2222 rc = EINVAL; \ in compare_caps_and_params()
2237 if (o->cfcsum != sc->cfcsum) { in compare_caps_and_params()
2238 CH_ERR(sc, "config file %s (0x%x) -> %s (0x%x)\n", o->cfg_file, in compare_caps_and_params()
2239 o->cfcsum, sc->cfg_file, sc->cfcsum); in compare_caps_and_params()
2240 rc = EINVAL; in compare_caps_and_params()
2244 if (o->p != sc->p) { \ in compare_caps_and_params()
2245 CH_ERR(sc, #name " %d -> %d\n", o->p, sc->p); \ in compare_caps_and_params()
2246 rc = EINVAL; \ in compare_caps_and_params()
2302 return (rc); in compare_caps_and_params()
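The o->c##caps and #c fragments above are the body of a token-pasting comparison helper; a sketch of the likely wrapper and one invocation (the macro name COMPARE_CAPS is an assumption, the body is copied from the fragments, and o, sc, rc are the locals of compare_caps_and_params()):

#define COMPARE_CAPS(c) do { \
	if (o->c##caps != sc->c##caps) { \
		CH_ERR(sc, "%scaps 0x%04x -> 0x%04x.\n", #c, o->c##caps, \
		    sc->c##caps); \
		rc = EINVAL; \
	} \
} while (0)

	COMPARE_CAPS(nic);	/* expands to a check of o->niccaps against sc->niccaps */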
2313 int rc, i, j, k; in restart_lld() local
2315 rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rlld"); in restart_lld()
2316 if (rc != 0) in restart_lld()
2325 rc = 0; in restart_lld()
2334 rc = contact_firmware(sc); in restart_lld()
2335 if (rc != 0) in restart_lld()
2337 MPASS(sc->flags & FW_OK); in restart_lld()
2339 if (sc->flags & MASTER_PF) { in restart_lld()
2340 rc = partition_resources(sc); in restart_lld()
2341 if (rc != 0) in restart_lld()
2345 rc = get_params__post_init(sc); in restart_lld()
2346 if (rc != 0) in restart_lld()
2349 rc = set_params__post_init(sc); in restart_lld()
2350 if (rc != 0) in restart_lld()
2353 rc = compare_caps_and_params(sc, old_state); in restart_lld()
2354 if (rc != 0) in restart_lld()
2358 pi = sc->port[i]; in restart_lld()
2360 MPASS(pi->vi != NULL); in restart_lld()
2361 MPASS(pi->vi[0].dev == pi->dev); in restart_lld()
2363 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i); in restart_lld()
2364 if (rc != 0) { in restart_lld()
2366 "failed to re-initialize port %d: %d\n", i, rc); in restart_lld()
2369 MPASS(sc->chan_map[pi->tx_chan] == i); in restart_lld()
2378 rc = alloc_extra_vi(sc, pi, vi); in restart_lld()
2379 if (rc != 0) { in restart_lld()
2381 "failed to re-allocate extra VI: %d\n", rc); in restart_lld()
2394 if (sc->flags & FULL_INIT_DONE) { in restart_lld()
2395 rc = adapter_full_init(sc); in restart_lld()
2396 if (rc != 0) { in restart_lld()
2397 CH_ERR(sc, "failed to re-initialize adapter: %d\n", rc); in restart_lld()
2401 if (sc->vxlan_refcount > 0) in restart_lld()
2405 pi = sc->port[i]; in restart_lld()
2407 mtx_lock(&vi->tick_mtx); in restart_lld()
2408 vi->flags &= ~VI_SKIP_STATS; in restart_lld()
2409 mtx_unlock(&vi->tick_mtx); in restart_lld()
2410 if (!(vi->flags & VI_INIT_DONE)) in restart_lld()
2412 rc = vi_full_init(vi); in restart_lld()
2413 if (rc != 0) { in restart_lld()
2414 CH_ERR(vi, "failed to re-initialize " in restart_lld()
2415 "interface: %d\n", rc); in restart_lld()
2418 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { in restart_lld()
2419 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; in restart_lld()
2423 V_RSSCONTROL(pi->tx_chan) | in restart_lld()
2424 V_QUEUENUMBER(sc->traceq)); in restart_lld()
2425 pi->flags |= HAS_TRACEQ; in restart_lld()
2428 ifp = vi->ifp; in restart_lld()
2437 rc = update_mac_settings(ifp, XGMAC_ALL & in restart_lld()
2439 if (rc != 0) { in restart_lld()
2440 CH_ERR(vi, "failed to re-configure MAC: %d\n", rc); in restart_lld()
2443 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, in restart_lld()
2445 if (rc != 0) { in restart_lld()
2446 CH_ERR(vi, "failed to re-enable VI: %d\n", rc); in restart_lld()
2451 txq->eq.flags |= EQ_ENABLED; in restart_lld()
2454 mtx_lock(&vi->tick_mtx); in restart_lld()
2455 callout_schedule(&vi->tick, hz); in restart_lld()
2456 mtx_unlock(&vi->tick_mtx); in restart_lld()
2459 if (pi->up_vis > 0) { in restart_lld()
2464 if (pi->link_cfg.link_ok) in restart_lld()
2472 pi = sc->port[i]; in restart_lld()
2474 if (!(vi->flags & VI_INIT_DONE)) in restart_lld()
2476 ifp = vi->ifp; in restart_lld()
2479 rc = update_mac_settings(ifp, XGMAC_MCADDRS); in restart_lld()
2480 if (rc != 0) { in restart_lld()
2481 CH_ERR(vi, "failed to re-configure MCAST MACs: %d\n", rc); in restart_lld()
2482 rc = 0; /* carry on */ in restart_lld()
2497 return (rc); in restart_lld()
2515 int rc; in t4_resume() local
2518 rc = resume_adapter(sc); in t4_resume()
2521 return (rc); in t4_resume()
2546 MPASS(sc->error_flags & HW_OFF_LIMITS); in reset_adapter_with_pl_rst()
2547 bus_space_write_4(sc->bt, sc->bh, A_PL_RST, in reset_adapter_with_pl_rst()
2556 device_t pdev = device_get_parent(sc->dev); in reset_adapter_with_pcie_sbr()
2559 int rc, i, lcap, lsta, nchildren; in reset_adapter_with_pcie_sbr() local
2562 rc = pci_find_cap(gpdev, PCIY_EXPRESS, &v); in reset_adapter_with_pcie_sbr()
2563 if (rc != 0) { in reset_adapter_with_pcie_sbr()
2565 device_get_nameunit(gpdev), rc); in reset_adapter_with_pcie_sbr()
2582 rc = v & PCIEM_LINK_STA_DL_ACTIVE ? 0 : ETIMEDOUT; in reset_adapter_with_pcie_sbr()
2584 rc = ETIMEDOUT; in reset_adapter_with_pcie_sbr()
2586 rc = 0; in reset_adapter_with_pcie_sbr()
2587 if (rc != 0) in reset_adapter_with_pcie_sbr()
2588 CH_ERR(sc, "%s: PCIe link is down after reset, LINK_STA 0x%x\n", in reset_adapter_with_pcie_sbr()
2596 return (rc); in reset_adapter_with_pcie_sbr()
2602 device_t pdev = device_get_parent(sc->dev); in reset_adapter_with_pcie_link_bounce()
2605 int rc, i, lcap, lctl, lsta, nchildren; in reset_adapter_with_pcie_link_bounce() local
2608 rc = pci_find_cap(gpdev, PCIY_EXPRESS, &v); in reset_adapter_with_pcie_link_bounce()
2609 if (rc != 0) { in reset_adapter_with_pcie_link_bounce()
2611 device_get_nameunit(gpdev), rc); in reset_adapter_with_pcie_link_bounce()
2629 rc = v & PCIEM_LINK_STA_DL_ACTIVE ? 0 : ETIMEDOUT; in reset_adapter_with_pcie_link_bounce()
2631 rc = ETIMEDOUT; in reset_adapter_with_pcie_link_bounce()
2633 rc = 0; in reset_adapter_with_pcie_link_bounce()
2634 if (rc != 0) in reset_adapter_with_pcie_link_bounce()
2635 CH_ERR(sc, "%s: PCIe link is down after reset, LINK_STA 0x%x\n", in reset_adapter_with_pcie_link_bounce()
2643 return (rc); in reset_adapter_with_pcie_link_bounce()
2649 int rc; in reset_adapter() local
2652 rc = suspend_adapter(sc); in reset_adapter()
2653 if (rc != 0) in reset_adapter()
2654 return (rc); in reset_adapter()
2658 rc = reset_adapter_with_pcie_sbr(sc); in reset_adapter()
2661 rc = reset_adapter_with_pcie_link_bounce(sc); in reset_adapter()
2665 rc = reset_adapter_with_pl_rst(sc); in reset_adapter()
2668 if (rc == 0) in reset_adapter()
2669 rc = resume_adapter(sc); in reset_adapter()
2670 return (rc); in reset_adapter()
2677 const int flags = sc->flags; in reset_adapter_task()
2678 const int eflags = sc->error_flags; in reset_adapter_task()
2679 int rc; in reset_adapter_task() local
2683 rc = reset_adapter(sc); in reset_adapter_task()
2684 if (rc != 0) { in reset_adapter_task()
2685 CH_ERR(sc, "adapter did not reset properly, rc = %d, " in reset_adapter_task()
2686 "flags 0x%08x -> 0x%08x, err_flags 0x%08x -> 0x%08x.\n", in reset_adapter_task()
2687 rc, flags, sc->flags, eflags, sc->error_flags); in reset_adapter_task()
2696 device_set_descf(dev, "port %d", pi->port_id); in cxgbe_probe()
2712 struct sysctl_ctx_list *ctx = &vi->ctx; in cxgbe_vi_attach()
2715 struct adapter *sc = vi->adapter; in cxgbe_vi_attach()
2718 children = SYSCTL_CHILDREN(device_get_sysctl_tree(vi->dev)); in cxgbe_vi_attach()
2719 vi->rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rxq", in cxgbe_vi_attach()
2721 vi->txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "txq", in cxgbe_vi_attach()
2724 vi->nm_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_rxq", in cxgbe_vi_attach()
2726 vi->nm_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_txq", in cxgbe_vi_attach()
2730 vi->ofld_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_rxq", in cxgbe_vi_attach()
2734 vi->ofld_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_txq", in cxgbe_vi_attach()
2738 vi->xact_addr_filt = -1; in cxgbe_vi_attach()
2739 mtx_init(&vi->tick_mtx, "vi tick", NULL, MTX_DEF); in cxgbe_vi_attach()
2740 callout_init_mtx(&vi->tick, &vi->tick_mtx, 0); in cxgbe_vi_attach()
2741 if (sc->flags & IS_VF || t4_tx_vm_wr != 0) in cxgbe_vi_attach()
2742 vi->flags |= TX_USES_VM_WR; in cxgbe_vi_attach()
2746 vi->ifp = ifp; in cxgbe_vi_attach()
2756 if (vi->pi->nvi > 1 || sc->flags & IS_VF) in cxgbe_vi_attach()
2780 if (vi->nofldrxq != 0) in cxgbe_vi_attach()
2784 if (is_ethoffload(sc) && vi->nofldtxq != 0) { in cxgbe_vi_attach()
2791 if (vi->flags & TX_USES_VM_WR) in cxgbe_vi_attach()
2796 if (is_ethoffload(sc) && vi->nofldtxq != 0) in cxgbe_vi_attach()
2803 if (sc->flags & KERN_TLS_ON || !is_t6(sc)) in cxgbe_vi_attach()
2808 ether_ifattach(ifp, vi->hw_addr); in cxgbe_vi_attach()
2810 if (vi->nnmrxq != 0) in cxgbe_vi_attach()
2814 sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq); in cxgbe_vi_attach()
2818 sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq); in cxgbe_vi_attach()
2821 sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq); in cxgbe_vi_attach()
2824 sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq); in cxgbe_vi_attach()
2830 sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq); in cxgbe_vi_attach()
2835 vi->nnmtxq, vi->nnmrxq); in cxgbe_vi_attach()
2847 vi->pfil = pfil_head_register(&pa); in cxgbe_vi_attach()
2854 struct adapter *sc = pi->adapter; in cxgbe_attach()
2858 sysctl_ctx_init(&pi->ctx); in cxgbe_attach()
2860 cxgbe_vi_attach(dev, &pi->vi[0]); in cxgbe_attach()
2865 vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, DEVICE_UNIT_ANY); in cxgbe_attach()
2866 if (vi->dev == NULL) { in cxgbe_attach()
2870 device_set_softc(vi->dev, vi); in cxgbe_attach()
2883 if_t ifp = vi->ifp; in cxgbe_vi_detach()
2885 if (vi->pfil != NULL) { in cxgbe_vi_detach()
2886 pfil_head_unregister(vi->pfil); in cxgbe_vi_detach()
2887 vi->pfil = NULL; in cxgbe_vi_detach()
2898 callout_drain(&vi->tick); in cxgbe_vi_detach()
2899 mtx_destroy(&vi->tick_mtx); in cxgbe_vi_detach()
2900 sysctl_ctx_free(&vi->ctx); in cxgbe_vi_detach()
2903 if_free(vi->ifp); in cxgbe_vi_detach()
2904 vi->ifp = NULL; in cxgbe_vi_detach()
2911 struct adapter *sc = pi->adapter; in cxgbe_detach()
2912 int rc; in cxgbe_detach() local
2915 rc = bus_generic_detach(dev); in cxgbe_detach()
2916 if (rc) in cxgbe_detach()
2917 return (rc); in cxgbe_detach()
2919 sysctl_ctx_free(&pi->ctx); in cxgbe_detach()
2920 begin_vi_detach(sc, &pi->vi[0]); in cxgbe_detach()
2921 if (pi->flags & HAS_TRACEQ) { in cxgbe_detach()
2922 sc->traceq = -1; /* cloner should not create ifnet */ in cxgbe_detach()
2925 cxgbe_vi_detach(&pi->vi[0]); in cxgbe_detach()
2926 ifmedia_removeall(&pi->media); in cxgbe_detach()
2927 end_vi_detach(sc, &pi->vi[0]); in cxgbe_detach()
2936 struct adapter *sc = vi->adapter; in cxgbe_init()
2947 int rc = 0, mtu, flags; in cxgbe_ioctl() local
2949 struct port_info *pi = vi->pi; in cxgbe_ioctl()
2950 struct adapter *sc = pi->adapter; in cxgbe_ioctl()
2956 mtu = ifr->ifr_mtu; in cxgbe_ioctl()
2960 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu"); in cxgbe_ioctl()
2961 if (rc) in cxgbe_ioctl()
2962 return (rc); in cxgbe_ioctl()
2964 if (vi->flags & VI_INIT_DONE) { in cxgbe_ioctl()
2968 rc = update_mac_settings(ifp, XGMAC_MTU); in cxgbe_ioctl()
2974 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg"); in cxgbe_ioctl()
2975 if (rc) in cxgbe_ioctl()
2976 return (rc); in cxgbe_ioctl()
2979 rc = ENXIO; in cxgbe_ioctl()
2985 flags = vi->if_flags; in cxgbe_ioctl()
2988 rc = update_mac_settings(ifp, in cxgbe_ioctl()
2992 rc = cxgbe_init_synchronized(vi); in cxgbe_ioctl()
2994 vi->if_flags = if_getflags(ifp); in cxgbe_ioctl()
2996 rc = cxgbe_uninit_synchronized(vi); in cxgbe_ioctl()
3003 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi"); in cxgbe_ioctl()
3004 if (rc) in cxgbe_ioctl()
3005 return (rc); in cxgbe_ioctl()
3007 rc = update_mac_settings(ifp, XGMAC_MCADDRS); in cxgbe_ioctl()
3012 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap"); in cxgbe_ioctl()
3013 if (rc) in cxgbe_ioctl()
3014 return (rc); in cxgbe_ioctl()
3016 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); in cxgbe_ioctl()
3026 "tso4 disabled due to -txcsum.\n"); in cxgbe_ioctl()
3038 "tso6 disabled due to -txcsum6.\n"); in cxgbe_ioctl()
3056 rc = EAGAIN; in cxgbe_ioctl()
3065 rc = EAGAIN; in cxgbe_ioctl()
3078 rxq->iq.flags |= IQ_LRO_ENABLED; in cxgbe_ioctl()
3080 rxq->iq.flags &= ~IQ_LRO_ENABLED; in cxgbe_ioctl()
3088 rc = toe_capability(vi, enable); in cxgbe_ioctl()
3089 if (rc != 0) in cxgbe_ioctl()
3098 rc = update_mac_settings(ifp, XGMAC_VLANEX); in cxgbe_ioctl()
3103 /* Need to find out how to disable auto-mtu-inflation */ in cxgbe_ioctl()
3120 rxq->iq.flags |= IQ_RX_TIMESTAMP; in cxgbe_ioctl()
3122 rxq->iq.flags &= ~IQ_RX_TIMESTAMP; in cxgbe_ioctl()
3132 rc = ktls_capability(sc, enable); in cxgbe_ioctl()
3133 if (rc != 0) in cxgbe_ioctl()
3161 rc = ifmedia_ioctl(ifp, ifr, &pi->media, cmd); in cxgbe_ioctl()
3167 rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); in cxgbe_ioctl()
3168 if (rc != 0) in cxgbe_ioctl()
3171 rc = EPERM; in cxgbe_ioctl()
3175 rc = EINVAL; in cxgbe_ioctl()
3178 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c"); in cxgbe_ioctl()
3179 if (rc) in cxgbe_ioctl()
3180 return (rc); in cxgbe_ioctl()
3182 rc = ENXIO; in cxgbe_ioctl()
3184 rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr, in cxgbe_ioctl()
3187 if (rc == 0) in cxgbe_ioctl()
3188 rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); in cxgbe_ioctl()
3193 rc = ether_ioctl(ifp, cmd, data); in cxgbe_ioctl()
3196 return (rc); in cxgbe_ioctl()
3203 struct port_info *pi = vi->pi; in cxgbe_transmit()
3207 int rc; in cxgbe_transmit() local
3210 MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */ in cxgbe_transmit()
3212 if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) in cxgbe_transmit()
3213 MPASS(m->m_pkthdr.snd_tag->ifp == ifp); in cxgbe_transmit()
3216 if (__predict_false(pi->link_cfg.link_ok == false)) { in cxgbe_transmit()
3221 rc = parse_pkt(&m, vi->flags & TX_USES_VM_WR); in cxgbe_transmit()
3222 if (__predict_false(rc != 0)) { in cxgbe_transmit()
3223 if (__predict_true(rc == EINPROGRESS)) { in cxgbe_transmit()
3230 atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */ in cxgbe_transmit()
3231 return (rc); in cxgbe_transmit()
3235 sc = vi->adapter; in cxgbe_transmit()
3236 txq = &sc->sge.txq[vi->first_txq]; in cxgbe_transmit()
3238 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) + in cxgbe_transmit()
3239 vi->rsrv_noflowq); in cxgbe_transmit()
3242 rc = mp_ring_enqueue(txq->r, items, 1, 256); in cxgbe_transmit()
3243 if (__predict_false(rc != 0)) in cxgbe_transmit()
3246 return (rc); in cxgbe_transmit()
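As a concrete example of the flowid-based txq selection above, with assumed values vi->ntxq = 8 and vi->rsrv_noflowq = 1 (see the rsrv_noflowq tunable near the top): a packet that carries a flowid lands on txq[1 + (flowid % 7)] relative to vi->first_txq, so txq 0 is left exclusively for packets without a flowid.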
3257 if (vi->flags & VI_INIT_DONE) { in cxgbe_qflush()
3260 txq->eq.flags |= EQ_QFLUSH; in cxgbe_qflush()
3262 while (!mp_ring_is_idle(txq->r)) { in cxgbe_qflush()
3263 mp_ring_check_drainage(txq->r, 4096); in cxgbe_qflush()
3267 txq->eq.flags &= ~EQ_QFLUSH; in cxgbe_qflush()
3278 struct fw_vi_stats_vf *s = &vi->stats; in vi_get_counter()
3280 mtx_lock(&vi->tick_mtx); in vi_get_counter()
3282 mtx_unlock(&vi->tick_mtx); in vi_get_counter()
3286 return (s->rx_bcast_frames + s->rx_mcast_frames + in vi_get_counter()
3287 s->rx_ucast_frames); in vi_get_counter()
3289 return (s->rx_err_frames); in vi_get_counter()
3291 return (s->tx_bcast_frames + s->tx_mcast_frames + in vi_get_counter()
3292 s->tx_ucast_frames + s->tx_offload_frames); in vi_get_counter()
3294 return (s->tx_drop_frames); in vi_get_counter()
3296 return (s->rx_bcast_bytes + s->rx_mcast_bytes + in vi_get_counter()
3297 s->rx_ucast_bytes); in vi_get_counter()
3299 return (s->tx_bcast_bytes + s->tx_mcast_bytes + in vi_get_counter()
3300 s->tx_ucast_bytes + s->tx_offload_bytes); in vi_get_counter()
3302 return (s->rx_mcast_frames); in vi_get_counter()
3304 return (s->tx_mcast_frames); in vi_get_counter()
3309 if (vi->flags & VI_INIT_DONE) { in vi_get_counter()
3314 drops += counter_u64_fetch(txq->r->dropped); in vi_get_counter()
3330 struct port_info *pi = vi->pi; in cxgbe_get_counter()
3331 struct port_stats *s = &pi->stats; in cxgbe_get_counter()
3333 mtx_lock(&vi->tick_mtx); in cxgbe_get_counter()
3335 mtx_unlock(&vi->tick_mtx); in cxgbe_get_counter()
3339 return (s->rx_frames); in cxgbe_get_counter()
3342 return (s->rx_jabber + s->rx_runt + s->rx_too_long + in cxgbe_get_counter()
3343 s->rx_fcs_err + s->rx_len_err); in cxgbe_get_counter()
3346 return (s->tx_frames); in cxgbe_get_counter()
3349 return (s->tx_error_frames); in cxgbe_get_counter()
3352 return (s->rx_octets); in cxgbe_get_counter()
3355 return (s->tx_octets); in cxgbe_get_counter()
3358 return (s->rx_mcast_frames); in cxgbe_get_counter()
3361 return (s->tx_mcast_frames); in cxgbe_get_counter()
3364 return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + in cxgbe_get_counter()
3365 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 + in cxgbe_get_counter()
3366 s->rx_trunc3 + pi->tnl_cong_drops); in cxgbe_get_counter()
3371 drops = s->tx_drop; in cxgbe_get_counter()
3372 if (vi->flags & VI_INIT_DONE) { in cxgbe_get_counter()
3377 drops += counter_u64_fetch(txq->r->dropped); in cxgbe_get_counter()
3396 switch (params->hdr.type) { in cxgbe_snd_tag_alloc()
3407 if (is_t6(vi->pi->adapter)) in cxgbe_snd_tag_alloc()
3429 struct port_info *pi = vi->pi; in cxgbe_media_change()
3430 struct ifmedia *ifm = &pi->media; in cxgbe_media_change()
3431 struct link_config *lc = &pi->link_cfg; in cxgbe_media_change()
3432 struct adapter *sc = pi->adapter; in cxgbe_media_change()
3433 int rc; in cxgbe_media_change() local
3435 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec"); in cxgbe_media_change()
3436 if (rc != 0) in cxgbe_media_change()
3437 return (rc); in cxgbe_media_change()
3439 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { in cxgbe_media_change()
3441 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { in cxgbe_media_change()
3442 rc = ENOTSUP; /* AN not supported by transceiver */ in cxgbe_media_change()
3445 lc->requested_aneg = AUTONEG_ENABLE; in cxgbe_media_change()
3446 lc->requested_speed = 0; in cxgbe_media_change()
3447 lc->requested_fc |= PAUSE_AUTONEG; in cxgbe_media_change()
3449 lc->requested_aneg = AUTONEG_DISABLE; in cxgbe_media_change()
3450 lc->requested_speed = in cxgbe_media_change()
3451 ifmedia_baudrate(ifm->ifm_media) / 1000000; in cxgbe_media_change()
3452 lc->requested_fc = 0; in cxgbe_media_change()
3453 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE) in cxgbe_media_change()
3454 lc->requested_fc |= PAUSE_RX; in cxgbe_media_change()
3455 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE) in cxgbe_media_change()
3456 lc->requested_fc |= PAUSE_TX; in cxgbe_media_change()
3458 if (pi->up_vis > 0 && hw_all_ok(sc)) { in cxgbe_media_change()
3460 rc = apply_link_config(pi); in cxgbe_media_change()
3465 return (rc); in cxgbe_media_change()
3479 switch(pi->port_type) { in port_mword()
3538 switch (pi->mod_type) { in port_mword()
3623 struct port_info *pi = vi->pi; in cxgbe_media_status()
3624 struct adapter *sc = pi->adapter; in cxgbe_media_status()
3625 struct link_config *lc = &pi->link_cfg; in cxgbe_media_status()
3631 if (pi->up_vis == 0 && hw_all_ok(sc)) { in cxgbe_media_status()
3644 ifmr->ifm_status = IFM_AVALID; in cxgbe_media_status()
3645 if (lc->link_ok == false) in cxgbe_media_status()
3647 ifmr->ifm_status |= IFM_ACTIVE; in cxgbe_media_status()
3650 ifmr->ifm_active = IFM_ETHER | IFM_FDX; in cxgbe_media_status()
3651 ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE); in cxgbe_media_status()
3652 if (lc->fc & PAUSE_RX) in cxgbe_media_status()
3653 ifmr->ifm_active |= IFM_ETH_RXPAUSE; in cxgbe_media_status()
3654 if (lc->fc & PAUSE_TX) in cxgbe_media_status()
3655 ifmr->ifm_active |= IFM_ETH_TXPAUSE; in cxgbe_media_status()
3656 ifmr->ifm_active |= port_mword(pi, speed_to_fwcap(lc->speed)); in cxgbe_media_status()
3667 device_set_descf(dev, "port %d vi %td", vi->pi->port_id, in vcxgbe_probe()
3668 vi - vi->pi->vi); in vcxgbe_probe()
3676 int func, index, rc; in alloc_extra_vi() local
3681 index = vi - pi->vi; in alloc_extra_vi()
3685 device_get_nameunit(vi->dev))); in alloc_extra_vi()
3687 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, in alloc_extra_vi()
3688 vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0); in alloc_extra_vi()
3689 if (rc < 0) { in alloc_extra_vi()
3691 "for port %d: %d\n", index, pi->port_id, -rc); in alloc_extra_vi()
3692 return (-rc); in alloc_extra_vi()
3694 vi->viid = rc; in alloc_extra_vi()
3696 if (vi->rss_size == 1) { in alloc_extra_vi()
3703 device_printf(vi->dev, "RSS table not available.\n"); in alloc_extra_vi()
3704 vi->rss_base = 0xffff; in alloc_extra_vi()
3711 V_FW_PARAMS_PARAM_YZ(vi->viid); in alloc_extra_vi()
3712 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in alloc_extra_vi()
3713 if (rc) in alloc_extra_vi()
3714 vi->rss_base = 0xffff; in alloc_extra_vi()
3716 MPASS((val >> 16) == vi->rss_size); in alloc_extra_vi()
3717 vi->rss_base = val & 0xffff; in alloc_extra_vi()
3729 int rc; in vcxgbe_attach() local
3732 pi = vi->pi; in vcxgbe_attach()
3733 sc = pi->adapter; in vcxgbe_attach()
3735 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via"); in vcxgbe_attach()
3736 if (rc) in vcxgbe_attach()
3737 return (rc); in vcxgbe_attach()
3738 rc = alloc_extra_vi(sc, pi, vi); in vcxgbe_attach()
3740 if (rc) in vcxgbe_attach()
3741 return (rc); in vcxgbe_attach()
3755 sc = vi->adapter; in vcxgbe_detach()
3759 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); in vcxgbe_detach()
3773 panic("%s: panic on fatal error", device_get_nameunit(sc->dev)); in delayed_panic()
3780 int rc; in fatal_error_task() local
3782 if (atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_CIM_ERR))) { in fatal_error_task()
3790 rc = reset_adapter(sc); in fatal_error_task()
3791 if (rc == 0 && t4_panic_on_fatal_err) { in fatal_error_task()
3807 const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0; in t4_fatal_err()
3810 if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_FATAL_ERR))) in t4_fatal_err()
3822 atomic_set_int(&sc->error_flags, ADAP_CIM_ERR); in t4_fatal_err()
3826 device_get_nameunit(sc->dev), fw_error); in t4_fatal_err()
3827 taskqueue_enqueue(reset_tq, &sc->fatal_error_task); in t4_fatal_err()
3841 sc->regs_rid = PCIR_BAR(0); in t4_map_bars_0_and_4()
3842 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, in t4_map_bars_0_and_4()
3843 &sc->regs_rid, RF_ACTIVE); in t4_map_bars_0_and_4()
3844 if (sc->regs_res == NULL) { in t4_map_bars_0_and_4()
3845 device_printf(sc->dev, "cannot map registers.\n"); in t4_map_bars_0_and_4()
3848 sc->bt = rman_get_bustag(sc->regs_res); in t4_map_bars_0_and_4()
3849 sc->bh = rman_get_bushandle(sc->regs_res); in t4_map_bars_0_and_4()
3850 sc->mmio_len = rman_get_size(sc->regs_res); in t4_map_bars_0_and_4()
3851 setbit(&sc->doorbells, DOORBELL_KDB); in t4_map_bars_0_and_4()
3853 sc->msix_rid = PCIR_BAR(4); in t4_map_bars_0_and_4()
3854 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, in t4_map_bars_0_and_4()
3855 &sc->msix_rid, RF_ACTIVE); in t4_map_bars_0_and_4()
3856 if (sc->msix_res == NULL) { in t4_map_bars_0_and_4()
3857 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); in t4_map_bars_0_and_4()
3872 if (is_t4(sc) && sc->rdmacaps == 0) in t4_map_bar_2()
3875 sc->udbs_rid = PCIR_BAR(2); in t4_map_bar_2()
3876 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, in t4_map_bar_2()
3877 &sc->udbs_rid, RF_ACTIVE); in t4_map_bar_2()
3878 if (sc->udbs_res == NULL) { in t4_map_bar_2()
3879 device_printf(sc->dev, "cannot map doorbell BAR.\n"); in t4_map_bar_2()
3882 sc->udbs_base = rman_get_virtual(sc->udbs_res); in t4_map_bar_2()
3885 setbit(&sc->doorbells, DOORBELL_UDB); in t4_map_bar_2()
3888 int rc, mode; in t4_map_bar_2() local
3899 rc = pmap_change_attr((vm_offset_t)sc->udbs_base, in t4_map_bar_2()
3900 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); in t4_map_bar_2()
3901 if (rc == 0) { in t4_map_bar_2()
3902 clrbit(&sc->doorbells, DOORBELL_UDB); in t4_map_bar_2()
3903 setbit(&sc->doorbells, DOORBELL_WCWR); in t4_map_bar_2()
3904 setbit(&sc->doorbells, DOORBELL_UDBWC); in t4_map_bar_2()
3906 device_printf(sc->dev, in t4_map_bar_2()
3908 rc); in t4_map_bar_2()
3917 sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0; in t4_map_bar_2()
3925 if ((sc->doorbells & t4_doorbells_allowed) != 0) { in t4_adj_doorbells()
3926 sc->doorbells &= t4_doorbells_allowed; in t4_adj_doorbells()
3929 CH_ERR(sc, "No usable doorbell (available = 0x%x, allowed = 0x%x).\n", in t4_adj_doorbells()
3930 sc->doorbells, t4_doorbells_allowed); in t4_adj_doorbells()
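For instance (values assumed): if the BAR mappings yielded DOORBELL_KDB | DOORBELL_UDB | DOORBELL_WCWR | DOORBELL_UDBWC but t4_doorbells_allowed only permits DOORBELL_KDB, the intersection is non-empty and sc->doorbells is narrowed to DOORBELL_KDB; an allowed mask that shares no bits with what the hardware offers trips the "No usable doorbell" error above.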
3978 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { in setup_memwin()
3979 if (!rw_initialized(&mw->mw_lock)) { in setup_memwin()
3980 rw_init(&mw->mw_lock, "memory window access"); in setup_memwin()
3981 mw->mw_base = mw_init->base; in setup_memwin()
3982 mw->mw_aperture = mw_init->aperture; in setup_memwin()
3983 mw->mw_curpos = 0; in setup_memwin()
3987 (mw->mw_base + bar0) | V_BIR(0) | in setup_memwin()
3988 V_WINDOW(ilog2(mw->mw_aperture) - 10)); in setup_memwin()
3989 rw_wlock(&mw->mw_lock); in setup_memwin()
3990 position_memwin(sc, i, mw->mw_curpos); in setup_memwin()
3991 rw_wunlock(&mw->mw_lock); in setup_memwin()
4001 * address prior to the requested address. mw->mw_curpos always has the actual
4012 mw = &sc->memwin[idx]; in position_memwin()
4013 rw_assert(&mw->mw_lock, RA_WLOCKED); in position_memwin()
4017 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ in position_memwin()
4019 pf = V_PFNUM(sc->pf); in position_memwin()
4020 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ in position_memwin()
4023 t4_write_reg(sc, reg, mw->mw_curpos | pf); in position_memwin()
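Concretely, positioning a window at addr = 0x12345 rounds mw_curpos down to 0x12340 in the 16-byte-aligned case and to 0x12300 in the 128-byte-aligned case (the one that also ORs in V_PFNUM(sc->pf)); callers must therefore add (addr - mw->mw_curpos) when they touch the aperture, which is exactly what rw_via_memwin() does below.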
4040 mw = &sc->memwin[idx]; in rw_via_memwin()
4042 rw_rlock(&mw->mw_lock); in rw_via_memwin()
4043 mw_end = mw->mw_curpos + mw->mw_aperture; in rw_via_memwin()
4044 if (addr >= mw_end || addr < mw->mw_curpos) { in rw_via_memwin()
4046 if (!rw_try_upgrade(&mw->mw_lock)) { in rw_via_memwin()
4047 rw_runlock(&mw->mw_lock); in rw_via_memwin()
4048 rw_wlock(&mw->mw_lock); in rw_via_memwin()
4050 rw_assert(&mw->mw_lock, RA_WLOCKED); in rw_via_memwin()
4052 rw_downgrade(&mw->mw_lock); in rw_via_memwin()
4053 mw_end = mw->mw_curpos + mw->mw_aperture; in rw_via_memwin()
4055 rw_assert(&mw->mw_lock, RA_RLOCKED); in rw_via_memwin()
4058 v = t4_read_reg(sc, mw->mw_base + addr - in rw_via_memwin()
4059 mw->mw_curpos); in rw_via_memwin()
4063 t4_write_reg(sc, mw->mw_base + addr - in rw_via_memwin()
4064 mw->mw_curpos, htole32(v)); in rw_via_memwin()
4067 len -= 4; in rw_via_memwin()
4069 rw_runlock(&mw->mw_lock); in rw_via_memwin()
4084 t = &sc->tids; in t4_init_atid_table()
4085 if (t->natids == 0) in t4_init_atid_table()
4088 MPASS(t->atid_tab == NULL); in t4_init_atid_table()
4090 t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE, in t4_init_atid_table()
4092 mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF); in t4_init_atid_table()
4093 t->afree = t->atid_tab; in t4_init_atid_table()
4094 t->atids_in_use = 0; in t4_init_atid_table()
4095 t->atid_alloc_stopped = false; in t4_init_atid_table()
4096 for (i = 1; i < t->natids; i++) in t4_init_atid_table()
4097 t->atid_tab[i - 1].next = &t->atid_tab[i]; in t4_init_atid_table()
4098 t->atid_tab[t->natids - 1].next = NULL; in t4_init_atid_table()
4106 t = &sc->tids; in t4_free_atid_table()
4108 KASSERT(t->atids_in_use == 0, in t4_free_atid_table()
4109 ("%s: %d atids still in use.", __func__, t->atids_in_use)); in t4_free_atid_table()
4111 if (mtx_initialized(&t->atid_lock)) in t4_free_atid_table()
4112 mtx_destroy(&t->atid_lock); in t4_free_atid_table()
4113 free(t->atid_tab, M_CXGBE); in t4_free_atid_table()
4114 t->atid_tab = NULL; in t4_free_atid_table()
4120 struct tid_info *t = &sc->tids; in stop_atid_allocator()
4122 if (t->natids == 0) in stop_atid_allocator()
4124 mtx_lock(&t->atid_lock); in stop_atid_allocator()
4125 t->atid_alloc_stopped = true; in stop_atid_allocator()
4126 mtx_unlock(&t->atid_lock); in stop_atid_allocator()
4132 struct tid_info *t = &sc->tids; in restart_atid_allocator()
4134 if (t->natids == 0) in restart_atid_allocator()
4136 mtx_lock(&t->atid_lock); in restart_atid_allocator()
4137 KASSERT(t->atids_in_use == 0, in restart_atid_allocator()
4138 ("%s: %d atids still in use.", __func__, t->atids_in_use)); in restart_atid_allocator()
4139 t->atid_alloc_stopped = false; in restart_atid_allocator()
4140 mtx_unlock(&t->atid_lock); in restart_atid_allocator()
4146 struct tid_info *t = &sc->tids; in alloc_atid()
4147 int atid = -1; in alloc_atid()
4149 mtx_lock(&t->atid_lock); in alloc_atid()
4150 if (t->afree && !t->atid_alloc_stopped) { in alloc_atid()
4151 union aopen_entry *p = t->afree; in alloc_atid()
4153 atid = p - t->atid_tab; in alloc_atid()
4155 t->afree = p->next; in alloc_atid()
4156 p->data = ctx; in alloc_atid()
4157 t->atids_in_use++; in alloc_atid()
4159 mtx_unlock(&t->atid_lock); in alloc_atid()
4166 struct tid_info *t = &sc->tids; in lookup_atid()
4168 return (t->atid_tab[atid].data); in lookup_atid()
4174 struct tid_info *t = &sc->tids; in free_atid()
4175 union aopen_entry *p = &t->atid_tab[atid]; in free_atid()
4177 mtx_lock(&t->atid_lock); in free_atid()
4178 p->next = t->afree; in free_atid()
4179 t->afree = p; in free_atid()
4180 t->atids_in_use--; in free_atid()
4181 mtx_unlock(&t->atid_lock); in free_atid()
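A short usage sketch of the atid allocator above, assuming the signatures implied by the fragments (alloc_atid() stores ctx in the entry and returns -1 when the free list is empty or the allocator has been stopped):

static void
atid_usage_sketch(struct adapter *sc, void *ctx)
{
	int atid;

	atid = alloc_atid(sc, ctx);
	if (atid < 0)
		return;			/* table exhausted or allocator stopped */
	MPASS(lookup_atid(sc, atid) == ctx);
	free_atid(sc, atid);		/* entry goes back on the LIFO free list */
}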
4212 return ((const struct t4_range *)a)->start - in t4_range_cmp()
4213 ((const struct t4_range *)b)->start; in t4_range_cmp()
4239 r->size = G_EDRAM0_SIZE(addr_len) << 20; in validate_mem_range()
4240 if (r->size > 0) { in validate_mem_range()
4241 r->start = G_EDRAM0_BASE(addr_len) << 20; in validate_mem_range()
4242 if (addr >= r->start && in validate_mem_range()
4243 addr + len <= r->start + r->size) in validate_mem_range()
4251 r->size = G_EDRAM1_SIZE(addr_len) << 20; in validate_mem_range()
4252 if (r->size > 0) { in validate_mem_range()
4253 r->start = G_EDRAM1_BASE(addr_len) << 20; in validate_mem_range()
4254 if (addr >= r->start && in validate_mem_range()
4255 addr + len <= r->start + r->size) in validate_mem_range()
4263 r->size = G_EXT_MEM_SIZE(addr_len) << 20; in validate_mem_range()
4264 if (r->size > 0) { in validate_mem_range()
4265 r->start = G_EXT_MEM_BASE(addr_len) << 20; in validate_mem_range()
4266 if (addr >= r->start && in validate_mem_range()
4267 addr + len <= r->start + r->size) in validate_mem_range()
4275 r->size = G_EXT_MEM1_SIZE(addr_len) << 20; in validate_mem_range()
4276 if (r->size > 0) { in validate_mem_range()
4277 r->start = G_EXT_MEM1_BASE(addr_len) << 20; in validate_mem_range()
4278 if (addr >= r->start && in validate_mem_range()
4279 addr + len <= r->start + r->size) in validate_mem_range()
4291 /* Start from index 0 and examine the next n - 1 entries. */ in validate_mem_range()
4293 for (remaining = n - 1; remaining > 0; remaining--, r++) { in validate_mem_range()
4295 MPASS(r->size > 0); /* r is a valid entry. */ in validate_mem_range()
4297 MPASS(next->size > 0); /* and so is the next one. */ in validate_mem_range()
4299 while (r->start + r->size >= next->start) { in validate_mem_range()
4301 r->size = max(r->start + r->size, in validate_mem_range()
4302 next->start + next->size) - r->start; in validate_mem_range()
4303 n--; /* One fewer entry in total. */ in validate_mem_range()
4304 if (--remaining == 0) in validate_mem_range()
4314 MPASS(next->size > 0); /* must be valid */ in validate_mem_range()
4318 		 * This is so that the foo->size assertion in the in validate_mem_range()
4324 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * in validate_mem_range()
4334 if (addr >= r->start && in validate_mem_range()
4335 addr + len <= r->start + r->size) in validate_mem_range()
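
/*
 * Editor's illustrative sketch, not driver code: validate_mem_range() above
 * sorts the populated memory ranges by start address and then folds any
 * overlapping or adjacent neighbours together before checking that the
 * requested window lies entirely inside one merged range.  A standalone
 * version of that merge-then-check idea (hypothetical types, fixed array):
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct range {
	uint64_t start;
	uint64_t size;
};

static int
range_cmp(const void *a, const void *b)
{
	uint64_t sa = ((const struct range *)a)->start;
	uint64_t sb = ((const struct range *)b)->start;

	return (sa < sb ? -1 : sa > sb ? 1 : 0);
}

/* Merge overlapping/adjacent ranges in place; returns the new count. */
static int
merge_ranges(struct range *r, int n)
{
	int i, m;

	qsort(r, n, sizeof(*r), range_cmp);
	m = 0;
	for (i = 1; i < n; i++) {
		if (r[i].start <= r[m].start + r[m].size) {
			uint64_t end = r[i].start + r[i].size;

			if (end > r[m].start + r[m].size)
				r[m].size = end - r[m].start;
		} else
			r[++m] = r[i];
	}
	return (m + 1);
}

static bool
range_contains(const struct range *r, int n, uint64_t addr, uint64_t len)
{
	int i;

	for (i = 0; i < n; i++)
		if (addr >= r[i].start && addr + len <= r[i].start + r[i].size)
			return (true);
	return (false);
}

int
main(void)
{
	struct range mem[] = {
		{ .start = 0x40000000, .size = 0x10000000 },
		{ .start = 0x00000000, .size = 0x20000000 },
		{ .start = 0x20000000, .size = 0x20000000 },	/* adjacent */
	};
	int n = merge_ranges(mem, 3);

	printf("merged to %d range(s); lookup %s\n", n,
	    range_contains(mem, n, 0x1ff00000, 0x200000) ? "ok" : "fail");
	return (0);
}
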
4413 struct devlog_params *dparams = &sc->params.devlog; in fixup_devlog_params()
4414 int rc; in fixup_devlog_params() local
4416 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, in fixup_devlog_params()
4417 dparams->size, &dparams->addr); in fixup_devlog_params()
4419 return (rc); in fixup_devlog_params()
4426 iaq->nirq = T4_EXTRA_INTR; in update_nirq()
4427 iaq->nirq += nports * max(iaq->nrxq, iaq->nnmrxq); in update_nirq()
4428 iaq->nirq += nports * iaq->nofldrxq; in update_nirq()
4429 iaq->nirq += nports * (iaq->num_vis - 1) * in update_nirq()
4430 max(iaq->nrxq_vi, iaq->nnmrxq_vi); in update_nirq()
4431 iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi; in update_nirq()
4442 const int nports = sc->params.nports; in calculate_iaq()
4448 iaq->intr_type = itype; in calculate_iaq()
4449 iaq->num_vis = t4_num_vis; in calculate_iaq()
4450 iaq->ntxq = t4_ntxq; in calculate_iaq()
4451 iaq->ntxq_vi = t4_ntxq_vi; in calculate_iaq()
4452 iaq->nrxq = t4_nrxq; in calculate_iaq()
4453 iaq->nrxq_vi = t4_nrxq_vi; in calculate_iaq()
4456 iaq->nofldtxq = t4_nofldtxq; in calculate_iaq()
4457 iaq->nofldtxq_vi = t4_nofldtxq_vi; in calculate_iaq()
4462 iaq->nofldrxq = t4_nofldrxq; in calculate_iaq()
4463 iaq->nofldrxq_vi = t4_nofldrxq_vi; in calculate_iaq()
4468 iaq->nnmtxq = t4_nnmtxq; in calculate_iaq()
4469 iaq->nnmrxq = t4_nnmrxq; in calculate_iaq()
4472 iaq->nnmtxq_vi = t4_nnmtxq_vi; in calculate_iaq()
4473 iaq->nnmrxq_vi = t4_nnmrxq_vi; in calculate_iaq()
4478 if (iaq->nirq <= navail && in calculate_iaq()
4479 (itype != INTR_MSI || powerof2(iaq->nirq))) { in calculate_iaq()
4481 * This is the normal case -- there are enough interrupts for in calculate_iaq()
4491 while (iaq->num_vis > 1) { in calculate_iaq()
4492 iaq->num_vis--; in calculate_iaq()
4494 if (iaq->nirq <= navail && in calculate_iaq()
4495 (itype != INTR_MSI || powerof2(iaq->nirq))) { in calculate_iaq()
4496 device_printf(sc->dev, "virtual interfaces per port " in calculate_iaq()
4500 iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq, in calculate_iaq()
4501 iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi, in calculate_iaq()
4502 itype, navail, iaq->nirq); in calculate_iaq()
4510 MPASS(iaq->num_vis == 1); in calculate_iaq()
4511 iaq->ntxq_vi = iaq->nrxq_vi = 0; in calculate_iaq()
4512 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; in calculate_iaq()
4513 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; in calculate_iaq()
4514 if (iaq->num_vis != t4_num_vis) { in calculate_iaq()
4515 device_printf(sc->dev, "extra virtual interfaces disabled. " in calculate_iaq()
4518 iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi, in calculate_iaq()
4519 iaq->nnmrxq_vi, itype, navail, iaq->nirq); in calculate_iaq()
4528 if (iaq->nrxq > 1) { in calculate_iaq()
4529 iaq->nrxq = rounddown_pow_of_two(iaq->nrxq - 1); in calculate_iaq()
4530 if (iaq->nnmrxq > iaq->nrxq) in calculate_iaq()
4531 iaq->nnmrxq = iaq->nrxq; in calculate_iaq()
4533 if (iaq->nofldrxq > 1) in calculate_iaq()
4534 iaq->nofldrxq >>= 1; in calculate_iaq()
4536 old_nirq = iaq->nirq; in calculate_iaq()
4538 if (iaq->nirq <= navail && in calculate_iaq()
4539 (itype != INTR_MSI || powerof2(iaq->nirq))) { in calculate_iaq()
4540 device_printf(sc->dev, "running with reduced number of " in calculate_iaq()
4543 "itype %d, navail %u, nirq %d.\n", iaq->nrxq, in calculate_iaq()
4544 iaq->nofldrxq, itype, navail, iaq->nirq); in calculate_iaq()
4547 } while (old_nirq != iaq->nirq); in calculate_iaq()
4550 device_printf(sc->dev, "running with minimal number of queues. " in calculate_iaq()
4552 iaq->nirq = 1; in calculate_iaq()
4553 iaq->nrxq = 1; in calculate_iaq()
4554 iaq->ntxq = 1; in calculate_iaq()
4555 if (iaq->nofldrxq > 0) { in calculate_iaq()
4556 iaq->nofldrxq = 1; in calculate_iaq()
4557 iaq->nofldtxq = 1; in calculate_iaq()
4559 iaq->nnmtxq = 0; in calculate_iaq()
4560 iaq->nnmrxq = 0; in calculate_iaq()
4562 MPASS(iaq->num_vis > 0); in calculate_iaq()
4563 if (iaq->num_vis > 1) { in calculate_iaq()
4564 MPASS(iaq->nrxq_vi > 0); in calculate_iaq()
4565 MPASS(iaq->ntxq_vi > 0); in calculate_iaq()
4567 MPASS(iaq->nirq > 0); in calculate_iaq()
4568 MPASS(iaq->nrxq > 0); in calculate_iaq()
4569 MPASS(iaq->ntxq > 0); in calculate_iaq()
4571 MPASS(powerof2(iaq->nirq)); in calculate_iaq()
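
/*
 * Editor's illustrative sketch, not driver code: calculate_iaq() above keeps
 * shrinking the per-port rx queue counts until the total vector demand fits
 * what the bus can provide, and for MSI it additionally requires the total to
 * be a power of two.  A toy version of that fitting loop (hypothetical
 * parameters; a single "extra" vector stands in for the firmware event queue):
 */
#include <stdbool.h>
#include <stdio.h>

static bool
powerof2(int x)
{
	return (x > 0 && (x & (x - 1)) == 0);
}

static int
fit_queues(int nports, int nrxq, int navail, bool msi)
{
	int nirq;

	for (;;) {
		nirq = 1 + nports * nrxq;	/* 1 extra + per-queue vectors */
		if (nirq <= navail && (!msi || powerof2(nirq)))
			return (nrxq);
		if (nrxq == 1)
			return (-1);	/* caller falls back to one shared vector */
		nrxq /= 2;		/* halve and retry */
	}
}

int
main(void)
{
	printf("MSI-X, 4 ports, want 8 rxq, 24 vectors -> %d rxq/port\n",
	    fit_queues(4, 8, 24, false));
	printf("MSI,   1 port,  want 8 rxq, 16 vectors -> %d rxq/port\n",
	    fit_queues(1, 8, 16, true));
	return (0);
}
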
4578 int rc, itype, navail, nalloc; in cfg_itype_and_nqueues() local
4586 navail = pci_msix_count(sc->dev); in cfg_itype_and_nqueues()
4588 navail = pci_msi_count(sc->dev); in cfg_itype_and_nqueues()
4596 nalloc = iaq->nirq; in cfg_itype_and_nqueues()
4597 rc = 0; in cfg_itype_and_nqueues()
4599 rc = pci_alloc_msix(sc->dev, &nalloc); in cfg_itype_and_nqueues()
4601 rc = pci_alloc_msi(sc->dev, &nalloc); in cfg_itype_and_nqueues()
4603 if (rc == 0 && nalloc > 0) { in cfg_itype_and_nqueues()
4604 if (nalloc == iaq->nirq) in cfg_itype_and_nqueues()
4611 device_printf(sc->dev, "fewer vectors than requested, " in cfg_itype_and_nqueues()
4613 itype, iaq->nirq, nalloc); in cfg_itype_and_nqueues()
4614 pci_release_msi(sc->dev); in cfg_itype_and_nqueues()
4619 device_printf(sc->dev, in cfg_itype_and_nqueues()
4621 itype, rc, iaq->nirq, nalloc); in cfg_itype_and_nqueues()
4624 device_printf(sc->dev, in cfg_itype_and_nqueues()
4626 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, in cfg_itype_and_nqueues()
4627 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); in cfg_itype_and_nqueues()
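
/*
 * Editor's illustrative sketch, not driver code: cfg_itype_and_nqueues()
 * above walks the interrupt types in order of preference (MSI-X, then MSI,
 * then INTx), skipping any type that the t4_intr_types bitmask disallows or
 * for which the bus reports no vectors.  A standalone selection loop
 * (hypothetical vector counts):
 */
#include <stdio.h>

#define INTR_INTX	(1 << 0)
#define INTR_MSI	(1 << 1)
#define INTR_MSIX	(1 << 2)

int
main(void)
{
	int allowed = INTR_MSIX | INTR_MSI | INTR_INTX;
	int msix_avail = 0, msi_avail = 8;	/* pretend MSI-X is exhausted */
	int itype;

	for (itype = INTR_MSIX; itype != 0; itype >>= 1) {
		int navail;

		if ((itype & allowed) == 0)
			continue;	/* administratively disabled */
		if (itype == INTR_MSIX)
			navail = msix_avail;
		else if (itype == INTR_MSI)
			navail = msi_avail;
		else
			navail = 1;	/* INTx always has one line */
		if (navail == 0)
			continue;
		printf("using itype %d with %d vector(s)\n", itype, navail);
		break;
	}
	return (0);
}
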
4738 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) in fw_compatible()
4745 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) in fw_compatible() argument
4746 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && in fw_compatible()
4767 device_printf(sc->dev, in load_fw_module()
4773 *dcfg = firmware_get(fw_info->kld_name); in load_fw_module()
4776 *fw = firmware_get(fw_info->fw_mod_name); in load_fw_module()
4805 const uint32_t c = be32toh(card_fw->fw_ver); in install_kld_firmware()
4807 int rc, fw_install; in install_kld_firmware() local
4813 fw_install = t4_fw_install < 0 ? -t4_fw_install : t4_fw_install; in install_kld_firmware()
4817 rc = load_fw_module(sc, &cfg, &fw); in install_kld_firmware()
4818 if (rc != 0 || fw == NULL) { in install_kld_firmware()
4819 device_printf(sc->dev, in install_kld_firmware()
4821 " will use compiled-in firmware version for" in install_kld_firmware()
4823 rc, cfg, fw); in install_kld_firmware()
4825 memcpy(&bundled_fw, fw->data, sizeof(bundled_fw)); in install_kld_firmware()
4834 if ((sc->flags & FW_OK) == 0) { in install_kld_firmware()
4841 rc = 0; in install_kld_firmware()
4861 rc = 0; in install_kld_firmware()
4865 rc = 0; in install_kld_firmware()
4870 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " in install_kld_firmware()
4884 rc = load_fw_module(sc, &cfg, &fw); in install_kld_firmware()
4885 if (rc != 0 || fw == NULL) { in install_kld_firmware()
4886 device_printf(sc->dev, in install_kld_firmware()
4888 rc, cfg, fw); in install_kld_firmware()
4893 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " in install_kld_firmware()
4898 rc = sc->flags & FW_OK ? 0 : ENOENT; in install_kld_firmware()
4901 k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver); in install_kld_firmware()
4904 device_printf(sc->dev, in install_kld_firmware()
4911 rc = sc->flags & FW_OK ? 0 : EINVAL; in install_kld_firmware()
4915 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " in install_kld_firmware()
4922 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); in install_kld_firmware()
4923 if (rc != 0) { in install_kld_firmware()
4924 device_printf(sc->dev, "failed to install firmware: %d\n", rc); in install_kld_firmware()
4927 rc = ERESTART; in install_kld_firmware()
4928 memcpy(card_fw, fw->data, sizeof(*card_fw)); in install_kld_firmware()
4933 return (rc); in install_kld_firmware()
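
/*
 * Editor's illustrative sketch, not driver code: the install decision above
 * compares 32-bit packed firmware versions (8 bits each of major, minor,
 * micro, build).  A standalone illustration of that packing and of a
 * simplified "install only if the bundled image is strictly newer" policy
 * (hypothetical macro names; the real decision also honours t4_fw_install
 * and interface compatibility):
 */
#include <stdint.h>
#include <stdio.h>

#define FW_VER(a, b, c, d) \
	(((uint32_t)(a) << 24) | ((uint32_t)(b) << 16) | \
	 ((uint32_t)(c) << 8) | (uint32_t)(d))
#define FW_VER_MAJOR(v)	(((v) >> 24) & 0xff)
#define FW_VER_MINOR(v)	(((v) >> 16) & 0xff)
#define FW_VER_MICRO(v)	(((v) >> 8) & 0xff)
#define FW_VER_BUILD(v)	((v) & 0xff)

int
main(void)
{
	uint32_t card = FW_VER(1, 25, 0, 0);	/* version on the card */
	uint32_t kld = FW_VER(1, 27, 5, 0);	/* version bundled with the KLD */

	printf("card %u.%u.%u.%u, bundled %u.%u.%u.%u: %s\n",
	    FW_VER_MAJOR(card), FW_VER_MINOR(card), FW_VER_MICRO(card),
	    FW_VER_BUILD(card),
	    FW_VER_MAJOR(kld), FW_VER_MINOR(kld), FW_VER_MICRO(kld),
	    FW_VER_BUILD(kld),
	    kld > card ? "install bundled firmware" : "keep card firmware");
	return (0);
}
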
4945 int rc, already = 0; in contact_firmware() local
4953 device_printf(sc->dev, in contact_firmware()
4958 drv_fw = &fw_info->fw_h; in contact_firmware()
4963 rc = -t4_get_fw_hdr(sc, card_fw); in contact_firmware()
4964 if (rc != 0) { in contact_firmware()
4965 device_printf(sc->dev, in contact_firmware()
4967 rc); in contact_firmware()
4971 rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL, in contact_firmware()
4973 if (rc == ERESTART) in contact_firmware()
4975 if (rc != 0) in contact_firmware()
4978 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); in contact_firmware()
4979 if (rc < 0 || state == DEV_STATE_ERR) { in contact_firmware()
4980 rc = -rc; in contact_firmware()
4981 device_printf(sc->dev, in contact_firmware()
4983 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); in contact_firmware()
4991 MPASS(be32toh(card_fw->flags) & FW_HDR_FLAGS_RESET_HALT); in contact_firmware()
4992 sc->flags |= FW_OK; /* The firmware responded to the FW_HELLO. */ in contact_firmware()
4994 if (rc == sc->pf) { in contact_firmware()
4995 sc->flags |= MASTER_PF; in contact_firmware()
4996 rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, in contact_firmware()
4998 if (rc == ERESTART) in contact_firmware()
4999 rc = 0; in contact_firmware()
5000 else if (rc != 0) in contact_firmware()
5008 device_printf(sc->dev, "couldn't be master(%d), " in contact_firmware()
5010 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); in contact_firmware()
5011 rc = EPROTO; in contact_firmware()
5018 device_printf(sc->dev, "PF%d is master, device state %d. " in contact_firmware()
5019 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); in contact_firmware()
5020 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", rc); in contact_firmware()
5021 sc->cfcsum = 0; in contact_firmware()
5022 rc = 0; in contact_firmware()
5025 if (rc != 0 && sc->flags & FW_OK) { in contact_firmware()
5026 t4_fw_bye(sc, sc->mbox); in contact_firmware()
5027 sc->flags &= ~FW_OK; in contact_firmware()
5030 return (rc); in contact_firmware()
5041 int rc; in copy_cfg_file_to_card() local
5047 if (pci_get_device(sc->dev) == 0x440a) in copy_cfg_file_to_card()
5055 device_printf(sc->dev, in copy_cfg_file_to_card()
5057 rc = ENOENT; in copy_cfg_file_to_card()
5060 cfdata = dcfg->data; in copy_cfg_file_to_card()
5061 cflen = dcfg->datasize & ~3; in copy_cfg_file_to_card()
5067 device_printf(sc->dev, in copy_cfg_file_to_card()
5070 rc = EINVAL; in copy_cfg_file_to_card()
5073 snprintf(s, sizeof(s), "%s_%s", fw_info->kld_name, cfg_file); in copy_cfg_file_to_card()
5077 device_printf(sc->dev, in copy_cfg_file_to_card()
5080 rc = ENOENT; in copy_cfg_file_to_card()
5083 cfdata = rcfg->data; in copy_cfg_file_to_card()
5084 cflen = rcfg->datasize & ~3; in copy_cfg_file_to_card()
5088 device_printf(sc->dev, in copy_cfg_file_to_card()
5091 rc = EINVAL; in copy_cfg_file_to_card()
5095 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); in copy_cfg_file_to_card()
5096 if (rc != 0) { in copy_cfg_file_to_card()
5097 device_printf(sc->dev, in copy_cfg_file_to_card()
5098 "%s: addr (%d/0x%x) or len %d is not valid: %d.\n", in copy_cfg_file_to_card()
5099 __func__, mtype, moff, cflen, rc); in copy_cfg_file_to_card()
5100 rc = EINVAL; in copy_cfg_file_to_card()
5108 return (rc); in copy_cfg_file_to_card()
5139 int rc; in apply_cfg_and_initialize() local
5143 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST); in apply_cfg_and_initialize()
5144 if (rc != 0) { in apply_cfg_and_initialize()
5145 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); in apply_cfg_and_initialize()
5146 return (rc); in apply_cfg_and_initialize()
5168 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in apply_cfg_and_initialize()
5169 if (rc != 0) { in apply_cfg_and_initialize()
5171 device_printf(sc->dev, in apply_cfg_and_initialize()
5172 "failed to query config file location: %d.\n", rc); in apply_cfg_and_initialize()
5182 rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff); in apply_cfg_and_initialize()
5183 if (rc != 0) { in apply_cfg_and_initialize()
5184 device_printf(sc->dev, in apply_cfg_and_initialize()
5185 "failed to upload config file to card: %d.\n", rc); in apply_cfg_and_initialize()
5189 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); in apply_cfg_and_initialize()
5190 if (rc != 0) { in apply_cfg_and_initialize()
5191 device_printf(sc->dev, "failed to pre-process config file: %d " in apply_cfg_and_initialize()
5192 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); in apply_cfg_and_initialize()
5199 device_printf(sc->dev, in apply_cfg_and_initialize()
5200 "WARNING: config file checksum mismatch: %08x %08x\n", in apply_cfg_and_initialize()
5203 sc->cfcsum = cfcsum; in apply_cfg_and_initialize()
5204 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", cfg_file); in apply_cfg_and_initialize()
5210 #define LIMIT_CAPS(x) do { \ in apply_cfg_and_initialize() argument
5211 caps.x##caps &= htobe16(caps_allowed->x##caps); \ in apply_cfg_and_initialize()
5227 * to cope with the situation in non-debug builds by disabling in apply_cfg_and_initialize()
5240 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); in apply_cfg_and_initialize()
5241 if (rc != 0) { in apply_cfg_and_initialize()
5242 device_printf(sc->dev, in apply_cfg_and_initialize()
5243 "failed to process config file: %d.\n", rc); in apply_cfg_and_initialize()
5251 rc = -t4_fw_initialize(sc, sc->mbox); in apply_cfg_and_initialize()
5252 if (rc != 0) { in apply_cfg_and_initialize()
5253 device_printf(sc->dev, "fw_initialize failed: %d.\n", rc); in apply_cfg_and_initialize()
5257 return (rc); in apply_cfg_and_initialize()
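
/*
 * Editor's illustrative sketch, not driver code: LIMIT_CAPS() above masks
 * each capability word the firmware advertises against the corresponding
 * "allowed" word chosen by the administrator, so a capability survives only
 * if both sides agree.  The same pattern in standalone form (host byte order
 * here; the driver keeps the words big-endian as they come off the wire):
 */
#include <stdint.h>
#include <stdio.h>

struct caps {
	uint16_t niccaps;
	uint16_t toecaps;
	uint16_t rdmacaps;
};

#define LIMIT_CAPS(field) do {						\
	caps.field &= allowed.field;					\
} while (0)

int
main(void)
{
	struct caps caps = { .niccaps = 0x3f, .toecaps = 0x7, .rdmacaps = 0x3 };
	struct caps allowed = { .niccaps = 0x3f, .toecaps = 0x0, .rdmacaps = 0x3 };

	LIMIT_CAPS(niccaps);
	LIMIT_CAPS(toecaps);	/* TOE removed by the allowed mask */
	LIMIT_CAPS(rdmacaps);
	printf("nic 0x%x toe 0x%x rdma 0x%x\n",
	    caps.niccaps, caps.toecaps, caps.rdmacaps);
	return (0);
}
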
5268 int rc; in partition_resources() local
5272 MPASS(sc->flags & MASTER_PF); in partition_resources()
5274 #define COPY_CAPS(x) do { \ in partition_resources() argument
5275 caps_allowed.x##caps = t4_##x##caps_allowed; \ in partition_resources()
5287 fallback = sc->debug_flags & DF_DISABLE_CFG_RETRY ? false : true; in partition_resources()
5290 rc = apply_cfg_and_initialize(sc, cfg_file, &caps_allowed); in partition_resources()
5291 if (rc != 0 && fallback) { in partition_resources()
5293 device_printf(sc->dev, in partition_resources()
5296 rc, cfg_file); in partition_resources()
5305 return (rc); in partition_resources()
5314 int rc; in get_params__pre_init() local
5319 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", in get_params__pre_init()
5320 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), in get_params__pre_init()
5321 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), in get_params__pre_init()
5322 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), in get_params__pre_init()
5323 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); in get_params__pre_init()
5325 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u", in get_params__pre_init()
5326 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers), in get_params__pre_init()
5327 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers), in get_params__pre_init()
5328 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers), in get_params__pre_init()
5329 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers)); in get_params__pre_init()
5331 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", in get_params__pre_init()
5332 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), in get_params__pre_init()
5333 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), in get_params__pre_init()
5334 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), in get_params__pre_init()
5335 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); in get_params__pre_init()
5337 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u", in get_params__pre_init()
5338 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers), in get_params__pre_init()
5339 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers), in get_params__pre_init()
5340 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers), in get_params__pre_init()
5341 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers)); in get_params__pre_init()
5345 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); in get_params__pre_init()
5346 if (rc != 0) { in get_params__pre_init()
5347 device_printf(sc->dev, in get_params__pre_init()
5348 "failed to query parameters (pre_init): %d.\n", rc); in get_params__pre_init()
5349 return (rc); in get_params__pre_init()
5352 sc->params.portvec = val[0]; in get_params__pre_init()
5353 sc->params.nports = bitcount32(val[0]); in get_params__pre_init()
5354 sc->params.vpd.cclk = val[1]; in get_params__pre_init()
5357 rc = -t4_init_devlog_params(sc, 1); in get_params__pre_init()
5358 if (rc == 0) in get_params__pre_init()
5361 device_printf(sc->dev, in get_params__pre_init()
5362 "failed to get devlog parameters: %d.\n", rc); in get_params__pre_init()
5363 rc = 0; /* devlog isn't critical for device operation */ in get_params__pre_init()
5366 return (rc); in get_params__pre_init()
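
/*
 * Editor's illustrative sketch, not driver code: the PORTVEC parameter read
 * above is a bitmap of the physical ports present, and the driver derives
 * nports with a population count.  A standalone view of that bitmap handling
 * (hypothetical value):
 */
#include <stdint.h>
#include <stdio.h>

static int
bitcount32(uint32_t x)
{
	int n;

	for (n = 0; x != 0; x &= x - 1)	/* clear the lowest set bit */
		n++;
	return (n);
}

int
main(void)
{
	uint32_t portvec = 0x5;		/* ports 0 and 2 present */
	int i;

	printf("nports = %d, ports:", bitcount32(portvec));
	for (i = 0; i < 32; i++)
		if (portvec & (1u << i))
			printf(" %d", i);
	printf("\n");
	return (0);
}
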
5375 int rc = 0; in set_params__pre_init() local
5381 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in set_params__pre_init()
5383 if (rc == FW_EINVAL && in set_params__pre_init()
5384 sc->params.fw_vers < FW_VERSION32(1, 20, 1, 0)) { in set_params__pre_init()
5385 rc = 0; in set_params__pre_init()
5387 if (rc != 0) { in set_params__pre_init()
5388 device_printf(sc->dev, in set_params__pre_init()
5390 rc); in set_params__pre_init()
5394 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in set_params__pre_init()
5395 if (rc == 0 && val == 1) { in set_params__pre_init()
5396 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, in set_params__pre_init()
5398 if (rc != 0) { in set_params__pre_init()
5399 device_printf(sc->dev, in set_params__pre_init()
5400 "failed to set PPOD_EDRAM: %d.\n", rc); in set_params__pre_init()
5408 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in set_params__pre_init()
5409 if (rc == 0 && val == 1) in set_params__pre_init()
5410 sc->params.viid_smt_extn_support = true; in set_params__pre_init()
5412 sc->params.viid_smt_extn_support = false; in set_params__pre_init()
5414 return (rc); in set_params__pre_init()
5424 int rc; in get_params__post_init() local
5437 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val); in get_params__post_init()
5438 if (rc != 0) { in get_params__post_init()
5439 device_printf(sc->dev, in get_params__post_init()
5440 "failed to query parameters (post_init): %d.\n", rc); in get_params__post_init()
5441 return (rc); in get_params__post_init()
5444 sc->sge.iq_start = val[0]; in get_params__post_init()
5445 sc->sge.eq_start = val[1]; in get_params__post_init()
5447 sc->tids.ftid_base = val[2]; in get_params__post_init()
5448 sc->tids.ftid_end = val[3]; in get_params__post_init()
5449 sc->tids.nftids = val[3] - val[2] + 1; in get_params__post_init()
5451 sc->vres.l2t.start = val[4]; in get_params__post_init()
5452 sc->vres.l2t.size = val[5] - val[4] + 1; in get_params__post_init()
5454 if (sc->vres.l2t.size > 0) in get_params__post_init()
5456 sc->params.core_vdd = val[6]; in get_params__post_init()
5460 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); in get_params__post_init()
5461 if (rc != 0) { in get_params__post_init()
5462 device_printf(sc->dev, in get_params__post_init()
5463 "failed to query parameters (post_init2): %d.\n", rc); in get_params__post_init()
5464 return (rc); in get_params__post_init()
5466 MPASS((int)val[0] >= sc->sge.iq_start); in get_params__post_init()
5467 sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1; in get_params__post_init()
5468 MPASS((int)val[1] >= sc->sge.eq_start); in get_params__post_init()
5469 sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1; in get_params__post_init()
5473 sc->tids.tid_base = t4_read_reg(sc, in get_params__post_init()
5478 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); in get_params__post_init()
5479 if (rc != 0) { in get_params__post_init()
5480 device_printf(sc->dev, in get_params__post_init()
5481 "failed to query hpfilter parameters: %d.\n", rc); in get_params__post_init()
5482 return (rc); in get_params__post_init()
5485 sc->tids.hpftid_base = val[0]; in get_params__post_init()
5486 sc->tids.hpftid_end = val[1]; in get_params__post_init()
5487 sc->tids.nhpftids = val[1] - val[0] + 1; in get_params__post_init()
5493 MPASS(sc->tids.hpftid_base == 0); in get_params__post_init()
5494 MPASS(sc->tids.tid_base == sc->tids.nhpftids); in get_params__post_init()
5499 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); in get_params__post_init()
5500 if (rc != 0) { in get_params__post_init()
5501 device_printf(sc->dev, in get_params__post_init()
5502 "failed to query rawf parameters: %d.\n", rc); in get_params__post_init()
5503 return (rc); in get_params__post_init()
5506 sc->rawf_base = val[0]; in get_params__post_init()
5507 sc->nrawf = val[1] - val[0] + 1; in get_params__post_init()
5522 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5523 if (rc == 0) in get_params__post_init()
5524 sc->params.mps_bg_map = val[0]; in get_params__post_init()
5526 sc->params.mps_bg_map = UINT32_MAX; /* Not a legal value. */ in get_params__post_init()
5530 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5531 if (rc == 0) in get_params__post_init()
5532 sc->params.tp_ch_map = val[0]; in get_params__post_init()
5534 sc->params.tp_ch_map = UINT32_MAX; /* Not a legal value. */ in get_params__post_init()
5541 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5542 if (rc == 0) in get_params__post_init()
5543 sc->params.filter2_wr_support = val[0] != 0; in get_params__post_init()
5545 sc->params.filter2_wr_support = 0; in get_params__post_init()
5552 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5553 if (rc == 0) in get_params__post_init()
5554 sc->params.ulptx_memwrite_dsgl = val[0] != 0; in get_params__post_init()
5556 sc->params.ulptx_memwrite_dsgl = false; in get_params__post_init()
5560 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5561 if (rc == 0) in get_params__post_init()
5562 sc->params.fr_nsmr_tpte_wr_support = val[0] != 0; in get_params__post_init()
5564 sc->params.fr_nsmr_tpte_wr_support = false; in get_params__post_init()
5568 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5569 if (rc == 0) in get_params__post_init()
5570 sc->params.dev_512sgl_mr = val[0] != 0; in get_params__post_init()
5572 sc->params.dev_512sgl_mr = false; in get_params__post_init()
5575 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5576 if (rc == 0) in get_params__post_init()
5577 sc->params.max_pkts_per_eth_tx_pkts_wr = val[0]; in get_params__post_init()
5579 sc->params.max_pkts_per_eth_tx_pkts_wr = 15; in get_params__post_init()
5582 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5583 if (rc == 0) { in get_params__post_init()
5585 sc->params.nsched_cls = val[0]; in get_params__post_init()
5587 sc->params.nsched_cls = sc->chip_params->nsched_cls; in get_params__post_init()
5594 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); in get_params__post_init()
5595 if (rc != 0) { in get_params__post_init()
5596 device_printf(sc->dev, in get_params__post_init()
5597 "failed to get card capabilities: %d.\n", rc); in get_params__post_init()
5598 return (rc); in get_params__post_init()
5601 #define READ_CAPS(x) do { \ in get_params__post_init() argument
5602 sc->x = htobe16(caps.x); \ in get_params__post_init()
5614 if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) { in get_params__post_init()
5616 MPASS(sc->toecaps == 0); in get_params__post_init()
5617 sc->toecaps = 0; in get_params__post_init()
5620 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); in get_params__post_init()
5621 if (rc != 0) { in get_params__post_init()
5622 device_printf(sc->dev, in get_params__post_init()
5623 "failed to query HASHFILTER parameters: %d.\n", rc); in get_params__post_init()
5624 return (rc); in get_params__post_init()
5626 sc->tids.ntids = val[0]; in get_params__post_init()
5627 if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) { in get_params__post_init()
5628 MPASS(sc->tids.ntids >= sc->tids.nhpftids); in get_params__post_init()
5629 sc->tids.ntids -= sc->tids.nhpftids; in get_params__post_init()
5631 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); in get_params__post_init()
5632 sc->params.hash_filter = 1; in get_params__post_init()
5634 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { in get_params__post_init()
5638 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); in get_params__post_init()
5639 if (rc != 0) { in get_params__post_init()
5640 device_printf(sc->dev, in get_params__post_init()
5641 "failed to query NIC parameters: %d.\n", rc); in get_params__post_init()
5642 return (rc); in get_params__post_init()
5645 sc->tids.etid_base = val[0]; in get_params__post_init()
5646 sc->tids.etid_end = val[1]; in get_params__post_init()
5647 sc->tids.netids = val[1] - val[0] + 1; in get_params__post_init()
5648 sc->params.eo_wr_cred = val[2]; in get_params__post_init()
5649 sc->params.ethoffload = 1; in get_params__post_init()
5652 if (sc->toecaps) { in get_params__post_init()
5653 /* query offload-related parameters */ in get_params__post_init()
5660 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); in get_params__post_init()
5661 if (rc != 0) { in get_params__post_init()
5662 device_printf(sc->dev, in get_params__post_init()
5663 "failed to query TOE parameters: %d.\n", rc); in get_params__post_init()
5664 return (rc); in get_params__post_init()
5666 sc->tids.ntids = val[0]; in get_params__post_init()
5667 if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) { in get_params__post_init()
5668 MPASS(sc->tids.ntids >= sc->tids.nhpftids); in get_params__post_init()
5669 sc->tids.ntids -= sc->tids.nhpftids; in get_params__post_init()
5671 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); in get_params__post_init()
5673 sc->tids.stid_base = val[1]; in get_params__post_init()
5674 sc->tids.nstids = val[2] - val[1] + 1; in get_params__post_init()
5676 sc->vres.ddp.start = val[3]; in get_params__post_init()
5677 sc->vres.ddp.size = val[4] - val[3] + 1; in get_params__post_init()
5678 sc->params.ofldq_wr_cred = val[5]; in get_params__post_init()
5679 sc->params.offload = 1; in get_params__post_init()
5682 * The firmware attempts memfree TOE configuration for -SO cards in get_params__post_init()
5689 sc->iscsicaps = 0; in get_params__post_init()
5690 sc->rdmacaps = 0; in get_params__post_init()
5692 if (sc->rdmacaps) { in get_params__post_init()
5699 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); in get_params__post_init()
5700 if (rc != 0) { in get_params__post_init()
5701 device_printf(sc->dev, in get_params__post_init()
5702 "failed to query RDMA parameters(1): %d.\n", rc); in get_params__post_init()
5703 return (rc); in get_params__post_init()
5705 sc->vres.stag.start = val[0]; in get_params__post_init()
5706 sc->vres.stag.size = val[1] - val[0] + 1; in get_params__post_init()
5707 sc->vres.rq.start = val[2]; in get_params__post_init()
5708 sc->vres.rq.size = val[3] - val[2] + 1; in get_params__post_init()
5709 sc->vres.pbl.start = val[4]; in get_params__post_init()
5710 sc->vres.pbl.size = val[5] - val[4] + 1; in get_params__post_init()
5718 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); in get_params__post_init()
5719 if (rc != 0) { in get_params__post_init()
5720 device_printf(sc->dev, in get_params__post_init()
5721 "failed to query RDMA parameters(2): %d.\n", rc); in get_params__post_init()
5722 return (rc); in get_params__post_init()
5724 sc->vres.qp.start = val[0]; in get_params__post_init()
5725 sc->vres.qp.size = val[1] - val[0] + 1; in get_params__post_init()
5726 sc->vres.cq.start = val[2]; in get_params__post_init()
5727 sc->vres.cq.size = val[3] - val[2] + 1; in get_params__post_init()
5728 sc->vres.ocq.start = val[4]; in get_params__post_init()
5729 sc->vres.ocq.size = val[5] - val[4] + 1; in get_params__post_init()
5735 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val); in get_params__post_init()
5736 if (rc != 0) { in get_params__post_init()
5737 device_printf(sc->dev, in get_params__post_init()
5738 "failed to query RDMA parameters(3): %d.\n", rc); in get_params__post_init()
5739 return (rc); in get_params__post_init()
5741 sc->vres.srq.start = val[0]; in get_params__post_init()
5742 sc->vres.srq.size = val[1] - val[0] + 1; in get_params__post_init()
5743 sc->params.max_ordird_qp = val[2]; in get_params__post_init()
5744 sc->params.max_ird_adapter = val[3]; in get_params__post_init()
5746 if (sc->iscsicaps) { in get_params__post_init()
5749 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); in get_params__post_init()
5750 if (rc != 0) { in get_params__post_init()
5751 device_printf(sc->dev, in get_params__post_init()
5752 "failed to query iSCSI parameters: %d.\n", rc); in get_params__post_init()
5753 return (rc); in get_params__post_init()
5755 sc->vres.iscsi.start = val[0]; in get_params__post_init()
5756 sc->vres.iscsi.size = val[1] - val[0] + 1; in get_params__post_init()
5758 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) { in get_params__post_init()
5761 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); in get_params__post_init()
5762 if (rc != 0) { in get_params__post_init()
5763 device_printf(sc->dev, in get_params__post_init()
5764 "failed to query TLS parameters: %d.\n", rc); in get_params__post_init()
5765 return (rc); in get_params__post_init()
5767 sc->vres.key.start = val[0]; in get_params__post_init()
5768 sc->vres.key.size = val[1] - val[0] + 1; in get_params__post_init()
5777 t4_read_mtu_tbl(sc, sc->params.mtus, NULL); in get_params__post_init()
5778 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd); in get_params__post_init()
5780 rc = t4_verify_chip_settings(sc); in get_params__post_init()
5781 if (rc != 0) in get_params__post_init()
5782 return (rc); in get_params__post_init()
5785 return (rc); in get_params__post_init()
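
/*
 * Editor's illustrative sketch, not driver code: nearly every firmware query
 * above returns an inclusive [start, end] pair, and the driver derives the
 * region size as end - start + 1 (ftids, etids, stids, DDP, STAG, RQ, PBL,
 * QP, CQ, OCQ, SRQ, iSCSI, TLS keys).  A standalone helper capturing that
 * idiom (hypothetical names and values):
 */
#include <stdint.h>
#include <stdio.h>

struct t4_region {
	uint32_t start;
	uint32_t size;
};

static struct t4_region
region_from_bounds(uint32_t start, uint32_t end)
{
	struct t4_region r = { .start = start, .size = end - start + 1 };

	return (r);
}

int
main(void)
{
	/* e.g. firmware reports filter tids 0x100..0x1ff inclusive. */
	struct t4_region ftids = region_from_bounds(0x100, 0x1ff);

	printf("ftid_base 0x%x, nftids %u\n", (unsigned)ftids.start,
	    (unsigned)ftids.size);
	return (0);
}
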
5799 callout_schedule_sbt(&sc->ktls_tick, SBT_1MS, 0, C_HARDCLOCK); in ktls_tick()
5805 int rc; in t6_config_kern_tls() local
5811 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &param); in t6_config_kern_tls()
5812 if (rc != 0) { in t6_config_kern_tls()
5814 enable ? "enable" : "disable", rc); in t6_config_kern_tls()
5815 return (rc); in t6_config_kern_tls()
5819 sc->flags |= KERN_TLS_ON; in t6_config_kern_tls()
5820 callout_reset_sbt(&sc->ktls_tick, SBT_1MS, 0, ktls_tick, sc, in t6_config_kern_tls()
5823 sc->flags &= ~KERN_TLS_ON; in t6_config_kern_tls()
5824 callout_stop(&sc->ktls_tick); in t6_config_kern_tls()
5827 return (rc); in t6_config_kern_tls()
5842 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in set_params__post_init()
5847 if (t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val) == 0) in set_params__post_init()
5848 sc->params.port_caps32 = 1; in set_params__post_init()
5851 val = 1 << (G_MASKSIZE(t4_read_reg(sc, A_TP_RSS_CONFIG_TNL)) - 1); in set_params__post_init()
5853 V_MASKFILTER(val - 1)); in set_params__post_init()
5929 if (t4_toe_rexmt_backoff[i] != -1) { in set_params__post_init()
5948 sc->tlst.inline_keys = t4_tls_inline_keys; in set_params__post_init()
5949 sc->tlst.combo_wrs = t4_tls_combo_wrs; in set_params__post_init()
5963 struct adapter_params *p = &sc->params; in t4_set_desc()
5965 device_set_descf(sc->dev, "Chelsio %s", p->vpd.id); in t4_set_desc()
5994 ifm = &pi->media; in set_current_media()
5995 if (ifm->ifm_cur != NULL && in set_current_media()
5996 IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE) in set_current_media()
5999 lc = &pi->link_cfg; in set_current_media()
6000 if (lc->requested_aneg != AUTONEG_DISABLE && in set_current_media()
6001 lc->pcaps & FW_PORT_CAP32_ANEG) { in set_current_media()
6006 if (lc->requested_fc & PAUSE_TX) in set_current_media()
6008 if (lc->requested_fc & PAUSE_RX) in set_current_media()
6010 if (lc->requested_speed == 0) in set_current_media()
6011 speed = port_top_speed(pi) * 1000; /* Gbps -> Mbps */ in set_current_media()
6013 speed = lc->requested_speed; in set_current_media()
6025 return (pi->port_type == FW_PORT_TYPE_BT_SGMII || in fixed_ifmedia()
6026 pi->port_type == FW_PORT_TYPE_BT_XFI || in fixed_ifmedia()
6027 pi->port_type == FW_PORT_TYPE_BT_XAUI || in fixed_ifmedia()
6028 pi->port_type == FW_PORT_TYPE_KX4 || in fixed_ifmedia()
6029 pi->port_type == FW_PORT_TYPE_KX || in fixed_ifmedia()
6030 pi->port_type == FW_PORT_TYPE_KR || in fixed_ifmedia()
6031 pi->port_type == FW_PORT_TYPE_BP_AP || in fixed_ifmedia()
6032 pi->port_type == FW_PORT_TYPE_BP4_AP || in fixed_ifmedia()
6033 pi->port_type == FW_PORT_TYPE_BP40_BA || in fixed_ifmedia()
6034 pi->port_type == FW_PORT_TYPE_KR4_100G || in fixed_ifmedia()
6035 pi->port_type == FW_PORT_TYPE_KR_SFP28 || in fixed_ifmedia()
6036 pi->port_type == FW_PORT_TYPE_KR_XLAUI); in fixed_ifmedia()
6049 if (pi->flags & FIXED_IFMEDIA) in build_medialist()
6055 ifm = &pi->media; in build_medialist()
6057 lc = &pi->link_cfg; in build_medialist()
6058 ss = G_FW_PORT_CAP32_SPEED(lc->pcaps); /* Supported Speeds */ in build_medialist()
6062 MPASS(LIST_EMPTY(&ifm->ifm_list)); in build_medialist()
6084 if (lc->pcaps & FW_PORT_CAP32_ANEG) in build_medialist()
6096 struct link_config *lc = &pi->link_cfg; in init_link_config()
6100 lc->requested_caps = 0; in init_link_config()
6101 lc->requested_speed = 0; in init_link_config()
6104 lc->requested_aneg = AUTONEG_DISABLE; in init_link_config()
6106 lc->requested_aneg = AUTONEG_ENABLE; in init_link_config()
6108 lc->requested_aneg = AUTONEG_AUTO; in init_link_config()
6110 lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX | in init_link_config()
6114 lc->requested_fec = FEC_AUTO; in init_link_config()
6116 lc->requested_fec = FEC_NONE; in init_link_config()
6118 /* -1 is handled by the FEC_AUTO block above and not here. */ in init_link_config()
6119 lc->requested_fec = t4_fec & in init_link_config()
6121 if (lc->requested_fec == 0) in init_link_config()
6122 lc->requested_fec = FEC_AUTO; in init_link_config()
6125 lc->force_fec = -1; in init_link_config()
6127 lc->force_fec = 1; in init_link_config()
6129 lc->force_fec = 0; in init_link_config()
6140 struct link_config *lc = &pi->link_cfg; in fixup_link_config()
6146 if (lc->requested_speed != 0) { in fixup_link_config()
6147 fwspeed = speed_to_fwcap(lc->requested_speed); in fixup_link_config()
6148 if ((fwspeed & lc->pcaps) == 0) { in fixup_link_config()
6150 lc->requested_speed = 0; in fixup_link_config()
6155 MPASS(lc->requested_aneg == AUTONEG_ENABLE || in fixup_link_config()
6156 lc->requested_aneg == AUTONEG_DISABLE || in fixup_link_config()
6157 lc->requested_aneg == AUTONEG_AUTO); in fixup_link_config()
6158 if (lc->requested_aneg == AUTONEG_ENABLE && in fixup_link_config()
6159 !(lc->pcaps & FW_PORT_CAP32_ANEG)) { in fixup_link_config()
6161 lc->requested_aneg = AUTONEG_AUTO; in fixup_link_config()
6165 MPASS((lc->requested_fc & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) == 0); in fixup_link_config()
6166 if (lc->requested_fc & PAUSE_TX && in fixup_link_config()
6167 !(lc->pcaps & FW_PORT_CAP32_FC_TX)) { in fixup_link_config()
6169 lc->requested_fc &= ~PAUSE_TX; in fixup_link_config()
6171 if (lc->requested_fc & PAUSE_RX && in fixup_link_config()
6172 !(lc->pcaps & FW_PORT_CAP32_FC_RX)) { in fixup_link_config()
6174 lc->requested_fc &= ~PAUSE_RX; in fixup_link_config()
6176 if (!(lc->requested_fc & PAUSE_AUTONEG) && in fixup_link_config()
6177 !(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)) { in fixup_link_config()
6179 lc->requested_fc |= PAUSE_AUTONEG; in fixup_link_config()
6183 if ((lc->requested_fec & FEC_RS && in fixup_link_config()
6184 !(lc->pcaps & FW_PORT_CAP32_FEC_RS)) || in fixup_link_config()
6185 (lc->requested_fec & FEC_BASER_RS && in fixup_link_config()
6186 !(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS))) { in fixup_link_config()
6188 lc->requested_fec = FEC_AUTO; in fixup_link_config()
6201 struct adapter *sc = pi->adapter; in apply_link_config()
6202 struct link_config *lc = &pi->link_cfg; in apply_link_config()
6203 int rc; in apply_link_config() local
6209 if (lc->requested_aneg == AUTONEG_ENABLE) in apply_link_config()
6210 MPASS(lc->pcaps & FW_PORT_CAP32_ANEG); in apply_link_config()
6211 if (!(lc->requested_fc & PAUSE_AUTONEG)) in apply_link_config()
6212 MPASS(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE); in apply_link_config()
6213 if (lc->requested_fc & PAUSE_TX) in apply_link_config()
6214 MPASS(lc->pcaps & FW_PORT_CAP32_FC_TX); in apply_link_config()
6215 if (lc->requested_fc & PAUSE_RX) in apply_link_config()
6216 MPASS(lc->pcaps & FW_PORT_CAP32_FC_RX); in apply_link_config()
6217 if (lc->requested_fec & FEC_RS) in apply_link_config()
6218 MPASS(lc->pcaps & FW_PORT_CAP32_FEC_RS); in apply_link_config()
6219 if (lc->requested_fec & FEC_BASER_RS) in apply_link_config()
6220 MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS); in apply_link_config()
6222 if (!(sc->flags & IS_VF)) { in apply_link_config()
6223 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); in apply_link_config()
6224 if (rc != 0) { in apply_link_config()
6225 device_printf(pi->dev, "l1cfg failed: %d\n", rc); in apply_link_config()
6226 return (rc); in apply_link_config()
6231 * An L1_CFG will almost always result in a link-change event if the in apply_link_config()
6239 if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG)) in apply_link_config()
6240 lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX); in apply_link_config()
6252 int rc; member
6259 struct vi_info *vi = if_getsoftc(ctx->ifp); in add_maddr()
6260 struct port_info *pi = vi->pi; in add_maddr()
6261 struct adapter *sc = pi->adapter; in add_maddr()
6263 if (ctx->rc < 0) in add_maddr()
6266 ctx->mcaddr[ctx->i] = LLADDR(sdl); in add_maddr()
6267 MPASS(ETHER_IS_MULTICAST(ctx->mcaddr[ctx->i])); in add_maddr()
6268 ctx->i++; in add_maddr()
6270 if (ctx->i == FW_MAC_EXACT_CHUNK) { in add_maddr()
6271 ctx->rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, ctx->del, in add_maddr()
6272 ctx->i, ctx->mcaddr, NULL, &ctx->hash, 0); in add_maddr()
6273 if (ctx->rc < 0) { in add_maddr()
6276 for (j = 0; j < ctx->i; j++) { in add_maddr()
6277 if_printf(ctx->ifp, in add_maddr()
6279 " %02x:%02x:%02x:" in add_maddr()
6280 "%02x:%02x:%02x rc=%d\n", in add_maddr()
6281 ctx->mcaddr[j][0], ctx->mcaddr[j][1], in add_maddr()
6282 ctx->mcaddr[j][2], ctx->mcaddr[j][3], in add_maddr()
6283 ctx->mcaddr[j][4], ctx->mcaddr[j][5], in add_maddr()
6284 -ctx->rc); in add_maddr()
6288 ctx->del = 0; in add_maddr()
6289 ctx->i = 0; in add_maddr()
6302 int rc = 0; in update_mac_settings() local
6304 struct port_info *pi = vi->pi; in update_mac_settings()
6305 struct adapter *sc = pi->adapter; in update_mac_settings()
6306 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; in update_mac_settings()
6325 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, in update_mac_settings()
6327 if (rc) { in update_mac_settings()
6328 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, in update_mac_settings()
6329 rc); in update_mac_settings()
6330 return (rc); in update_mac_settings()
6338 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, in update_mac_settings()
6339 ucaddr, true, &vi->smt_idx); in update_mac_settings()
6340 if (rc < 0) { in update_mac_settings()
6341 rc = -rc; in update_mac_settings()
6342 if_printf(ifp, "change_mac failed: %d\n", rc); in update_mac_settings()
6343 return (rc); in update_mac_settings()
6345 vi->xact_addr_filt = rc; in update_mac_settings()
6346 rc = 0; in update_mac_settings()
6359 ctx.rc = 0; in update_mac_settings()
6368 if (ctx.rc < 0) { in update_mac_settings()
6370 rc = -ctx.rc; in update_mac_settings()
6371 return (rc); in update_mac_settings()
6374 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, in update_mac_settings()
6377 if (rc < 0) { in update_mac_settings()
6378 rc = -rc; in update_mac_settings()
6382 " %02x:%02x:%02x:" in update_mac_settings()
6383 "%02x:%02x:%02x rc=%d\n", in update_mac_settings()
6387 rc); in update_mac_settings()
6389 return (rc); in update_mac_settings()
6395 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, ctx.hash, 0); in update_mac_settings()
6396 if (rc != 0) in update_mac_settings()
6398 rc); in update_mac_settings()
6401 pi->vxlan_tcam_entry = false; in update_mac_settings()
6405 if (IS_MAIN_VI(vi) && sc->vxlan_refcount > 0 && in update_mac_settings()
6406 pi->vxlan_tcam_entry == false) { in update_mac_settings()
6407 rc = t4_alloc_raw_mac_filt(sc, vi->viid, match_all_mac, in update_mac_settings()
6408 match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id, in update_mac_settings()
6410 if (rc < 0) { in update_mac_settings()
6411 rc = -rc; in update_mac_settings()
6413 rc); in update_mac_settings()
6415 MPASS(rc == sc->rawf_base + pi->port_id); in update_mac_settings()
6416 rc = 0; in update_mac_settings()
6417 pi->vxlan_tcam_entry = true; in update_mac_settings()
6421 return (rc); in update_mac_settings()
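
/*
 * Editor's illustrative sketch, not driver code: add_maddr() above collects
 * multicast addresses into a batch and flushes a full batch to the firmware
 * with one mailbox command, then flushes any remainder afterwards.  The same
 * chunking pattern in standalone form (hypothetical chunk size and flush
 * callback):
 */
#include <stdio.h>

#define CHUNK	4

static void
flush(const int *batch, int n)
{
	int i;

	printf("flush %d item(s):", n);
	for (i = 0; i < n; i++)
		printf(" %d", batch[i]);
	printf("\n");
}

int
main(void)
{
	int batch[CHUNK], i, n = 0;

	for (i = 0; i < 10; i++) {
		batch[n++] = i;
		if (n == CHUNK) {	/* batch full: push it out */
			flush(batch, n);
			n = 0;
		}
	}
	if (n > 0)			/* leftovers after the loop */
		flush(batch, n);
	return (0);
}
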
6431 int rc; in begin_synchronized_op() local
6442 rc = ENXIO; in begin_synchronized_op()
6447 rc = 0; in begin_synchronized_op()
6452 rc = EBUSY; in begin_synchronized_op()
6456 if (mtx_sleep(&sc->flags, &sc->sc_lock, in begin_synchronized_op()
6458 rc = EINTR; in begin_synchronized_op()
6466 sc->last_op = wmesg; in begin_synchronized_op()
6467 sc->last_op_thr = curthread; in begin_synchronized_op()
6468 sc->last_op_flags = flags; in begin_synchronized_op()
6472 if (!(flags & HOLD_LOCK) || rc) in begin_synchronized_op()
6475 return (rc); in begin_synchronized_op()
6488 wakeup(&sc->flags); in begin_vi_detach()
6490 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); in begin_vi_detach()
6493 sc->last_op = "t4detach"; in begin_vi_detach()
6494 sc->last_op_thr = curthread; in begin_vi_detach()
6495 sc->last_op_flags = 0; in begin_vi_detach()
6507 wakeup(&sc->flags); in end_vi_detach()
6525 wakeup(&sc->flags); in end_synchronized_op()
6532 struct port_info *pi = vi->pi; in cxgbe_init_synchronized()
6533 struct adapter *sc = pi->adapter; in cxgbe_init_synchronized()
6534 if_t ifp = vi->ifp; in cxgbe_init_synchronized()
6535 int rc = 0, i; in cxgbe_init_synchronized() local
6543 if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_init(sc)) != 0)) in cxgbe_init_synchronized()
6544 return (rc); /* error message displayed already */ in cxgbe_init_synchronized()
6546 if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0)) in cxgbe_init_synchronized()
6547 return (rc); /* error message displayed already */ in cxgbe_init_synchronized()
6549 rc = update_mac_settings(ifp, XGMAC_ALL); in cxgbe_init_synchronized()
6550 if (rc) in cxgbe_init_synchronized()
6554 if (pi->up_vis == 0) { in cxgbe_init_synchronized()
6561 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); in cxgbe_init_synchronized()
6562 if (rc != 0) { in cxgbe_init_synchronized()
6563 if_printf(ifp, "enable_vi failed: %d\n", rc); in cxgbe_init_synchronized()
6575 txq->eq.flags |= EQ_ENABLED; in cxgbe_init_synchronized()
6582 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { in cxgbe_init_synchronized()
6583 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; in cxgbe_init_synchronized()
6585 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | in cxgbe_init_synchronized()
6586 V_QUEUENUMBER(sc->traceq)); in cxgbe_init_synchronized()
6587 pi->flags |= HAS_TRACEQ; in cxgbe_init_synchronized()
6591 pi->up_vis++; in cxgbe_init_synchronized()
6593 if (pi->link_cfg.link_ok) in cxgbe_init_synchronized()
6597 mtx_lock(&vi->tick_mtx); in cxgbe_init_synchronized()
6598 if (vi->pi->nvi > 1 || sc->flags & IS_VF) in cxgbe_init_synchronized()
6599 callout_reset(&vi->tick, hz, vi_tick, vi); in cxgbe_init_synchronized()
6601 callout_reset(&vi->tick, hz, cxgbe_tick, vi); in cxgbe_init_synchronized()
6602 mtx_unlock(&vi->tick_mtx); in cxgbe_init_synchronized()
6604 if (rc != 0) in cxgbe_init_synchronized()
6607 return (rc); in cxgbe_init_synchronized()
6616 struct port_info *pi = vi->pi; in cxgbe_uninit_synchronized()
6617 struct adapter *sc = pi->adapter; in cxgbe_uninit_synchronized()
6618 if_t ifp = vi->ifp; in cxgbe_uninit_synchronized()
6619 int rc, i; in cxgbe_uninit_synchronized() local
6624 if (!(vi->flags & VI_INIT_DONE)) { in cxgbe_uninit_synchronized()
6628 "vi->flags 0x%016lx, if_flags 0x%08x, " in cxgbe_uninit_synchronized()
6629 "if_drv_flags 0x%08x\n", vi->flags, if_getflags(ifp), in cxgbe_uninit_synchronized()
6642 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); in cxgbe_uninit_synchronized()
6643 if (rc) { in cxgbe_uninit_synchronized()
6644 if_printf(ifp, "disable_vi failed: %d\n", rc); in cxgbe_uninit_synchronized()
6645 return (rc); in cxgbe_uninit_synchronized()
6650 txq->eq.flags &= ~EQ_ENABLED; in cxgbe_uninit_synchronized()
6654 mtx_lock(&vi->tick_mtx); in cxgbe_uninit_synchronized()
6655 callout_stop(&vi->tick); in cxgbe_uninit_synchronized()
6656 mtx_unlock(&vi->tick_mtx); in cxgbe_uninit_synchronized()
6664 pi->up_vis--; in cxgbe_uninit_synchronized()
6665 if (pi->up_vis > 0) { in cxgbe_uninit_synchronized()
6670 pi->link_cfg.link_ok = false; in cxgbe_uninit_synchronized()
6671 pi->link_cfg.speed = 0; in cxgbe_uninit_synchronized()
6672 pi->link_cfg.link_down_rc = 255; in cxgbe_uninit_synchronized()
6681 * will walk the entire sc->irq list and clean up whatever is valid.
6686 int rc, rid, p, q, v; in t4_setup_intr_handlers() local
6691 struct sge *sge = &sc->sge; in t4_setup_intr_handlers()
6706 irq = &sc->irq[0]; in t4_setup_intr_handlers()
6707 rid = sc->intr_type == INTR_INTX ? 0 : 1; in t4_setup_intr_handlers()
6712 if (sc->flags & IS_VF) in t4_setup_intr_handlers()
6713 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports, in t4_setup_intr_handlers()
6716 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, in t4_setup_intr_handlers()
6720 if (!(sc->flags & IS_VF)) { in t4_setup_intr_handlers()
6721 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); in t4_setup_intr_handlers()
6722 if (rc != 0) in t4_setup_intr_handlers()
6723 return (rc); in t4_setup_intr_handlers()
6729 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); in t4_setup_intr_handlers()
6730 if (rc != 0) in t4_setup_intr_handlers()
6731 return (rc); in t4_setup_intr_handlers()
6736 pi = sc->port[p]; in t4_setup_intr_handlers()
6738 vi->first_intr = rid - 1; in t4_setup_intr_handlers()
6740 if (vi->nnmrxq > 0) { in t4_setup_intr_handlers()
6741 int n = max(vi->nrxq, vi->nnmrxq); in t4_setup_intr_handlers()
6743 rxq = &sge->rxq[vi->first_rxq]; in t4_setup_intr_handlers()
6745 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; in t4_setup_intr_handlers()
6748 snprintf(s, sizeof(s), "%x%c%x", p, in t4_setup_intr_handlers()
6750 if (q < vi->nrxq) in t4_setup_intr_handlers()
6751 irq->rxq = rxq++; in t4_setup_intr_handlers()
6753 if (q < vi->nnmrxq) in t4_setup_intr_handlers()
6754 irq->nm_rxq = nm_rxq++; in t4_setup_intr_handlers()
6756 if (irq->nm_rxq != NULL && in t4_setup_intr_handlers()
6757 irq->rxq == NULL) { in t4_setup_intr_handlers()
6759 rc = t4_alloc_irq(sc, irq, rid, in t4_setup_intr_handlers()
6760 t4_nm_intr, irq->nm_rxq, s); in t4_setup_intr_handlers()
6762 if (irq->nm_rxq != NULL && in t4_setup_intr_handlers()
6763 irq->rxq != NULL) { in t4_setup_intr_handlers()
6765 rc = t4_alloc_irq(sc, irq, rid, in t4_setup_intr_handlers()
6769 if (irq->rxq != NULL && in t4_setup_intr_handlers()
6770 irq->nm_rxq == NULL) { in t4_setup_intr_handlers()
6772 rc = t4_alloc_irq(sc, irq, rid, in t4_setup_intr_handlers()
6773 t4_intr, irq->rxq, s); in t4_setup_intr_handlers()
6775 if (rc != 0) in t4_setup_intr_handlers()
6776 return (rc); in t4_setup_intr_handlers()
6778 if (q < vi->nrxq) { in t4_setup_intr_handlers()
6779 bus_bind_intr(sc->dev, irq->res, in t4_setup_intr_handlers()
6785 vi->nintr++; in t4_setup_intr_handlers()
6789 snprintf(s, sizeof(s), "%x%c%x", p, in t4_setup_intr_handlers()
6791 rc = t4_alloc_irq(sc, irq, rid, in t4_setup_intr_handlers()
6793 if (rc != 0) in t4_setup_intr_handlers()
6794 return (rc); in t4_setup_intr_handlers()
6796 bus_bind_intr(sc->dev, irq->res, in t4_setup_intr_handlers()
6801 vi->nintr++; in t4_setup_intr_handlers()
6806 snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q); in t4_setup_intr_handlers()
6807 rc = t4_alloc_irq(sc, irq, rid, t4_intr, in t4_setup_intr_handlers()
6809 if (rc != 0) in t4_setup_intr_handlers()
6810 return (rc); in t4_setup_intr_handlers()
6813 vi->nintr++; in t4_setup_intr_handlers()
6818 MPASS(irq == &sc->irq[sc->intr_count]); in t4_setup_intr_handlers()
6835 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); in write_global_rss_key()
6837 t4_write_rss_key(sc, &rss_key[0], -1, 1); in write_global_rss_key()
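
/*
 * Editor's illustrative sketch, not driver code: write_global_rss_key() above
 * appears to load the stack's RSS key into the chip with the 32-bit words in
 * reverse order (and byte-swapped), presumably because the hardware consumes
 * the key in the opposite word order from the byte stream rss_getkey() hands
 * out.  A standalone view of that reversal (hypothetical 5-word key instead
 * of the real 10-word one, no byte swapping):
 */
#include <stdint.h>
#include <stdio.h>

#define NWORDS	5

int
main(void)
{
	uint32_t raw[NWORDS] = { 0x11111111, 0x22222222, 0x33333333,
	    0x44444444, 0x55555555 };
	uint32_t key[NWORDS];
	int i;

	for (i = 0; i < NWORDS; i++)
		key[i] = raw[NWORDS - 1 - i];	/* reversed word order */
	for (i = 0; i < NWORDS; i++)
		printf("key[%d] = 0x%08x\n", i, key[i]);
	return (0);
}
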
6847 int rc, i; in adapter_full_init() local
6854 rc = t4_setup_adapter_queues(sc); in adapter_full_init()
6855 if (rc != 0) in adapter_full_init()
6856 return (rc); in adapter_full_init()
6858 MPASS(sc->params.nports <= nitems(sc->tq)); in adapter_full_init()
6859 for (i = 0; i < sc->params.nports; i++) { in adapter_full_init()
6860 if (sc->tq[i] != NULL) in adapter_full_init()
6862 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, in adapter_full_init()
6863 taskqueue_thread_enqueue, &sc->tq[i]); in adapter_full_init()
6864 if (sc->tq[i] == NULL) { in adapter_full_init()
6868 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", in adapter_full_init()
6869 device_get_nameunit(sc->dev), i); in adapter_full_init()
6872 if (!(sc->flags & IS_VF)) { in adapter_full_init()
6882 int rc; in adapter_init() local
6886 KASSERT((sc->flags & FULL_INIT_DONE) == 0, in adapter_init()
6889 rc = adapter_full_init(sc); in adapter_init()
6890 if (rc != 0) in adapter_init()
6893 sc->flags |= FULL_INIT_DONE; in adapter_init()
6895 return (rc); in adapter_init()
6908 for (i = 0; i < nitems(sc->tq); i++) { in adapter_full_uninit()
6909 if (sc->tq[i] == NULL) in adapter_full_uninit()
6911 taskqueue_free(sc->tq[i]); in adapter_full_uninit()
6912 sc->tq[i] = NULL; in adapter_full_uninit()
6915 sc->flags &= ~FULL_INIT_DONE; in adapter_full_uninit()
6960 	 * enabling any 4-tuple hash is a nonsensical configuration. in hashen_to_hashconfig()
6989 struct adapter *sc = vi->adapter; in vi_full_init()
6991 int rc, i, j; in vi_full_init() local
7003 rc = t4_setup_vi_queues(vi); in vi_full_init()
7004 if (rc != 0) in vi_full_init()
7005 return (rc); in vi_full_init()
7010 if (vi->nrxq > vi->rss_size) { in vi_full_init()
7012 "some queues will never receive traffic.\n", vi->nrxq, in vi_full_init()
7013 vi->rss_size); in vi_full_init()
7014 } else if (vi->rss_size % vi->nrxq) { in vi_full_init()
7016 "expect uneven traffic distribution.\n", vi->nrxq, in vi_full_init()
7017 vi->rss_size); in vi_full_init()
7020 if (vi->nrxq != nbuckets) { in vi_full_init()
7022 "performance will be impacted.\n", vi->nrxq, nbuckets); in vi_full_init()
7025 if (vi->rss == NULL) in vi_full_init()
7026 vi->rss = malloc(vi->rss_size * sizeof (*vi->rss), M_CXGBE, in vi_full_init()
7028 for (i = 0; i < vi->rss_size;) { in vi_full_init()
7031 j %= vi->nrxq; in vi_full_init()
7032 rxq = &sc->sge.rxq[vi->first_rxq + j]; in vi_full_init()
7033 vi->rss[i++] = rxq->iq.abs_id; in vi_full_init()
7036 vi->rss[i++] = rxq->iq.abs_id; in vi_full_init()
7037 if (i == vi->rss_size) in vi_full_init()
7043 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, in vi_full_init()
7044 vi->rss, vi->rss_size); in vi_full_init()
7045 if (rc != 0) { in vi_full_init()
7046 CH_ERR(vi, "rss_config failed: %d\n", rc); in vi_full_init()
7047 return (rc); in vi_full_init()
7051 vi->hashen = hashconfig_to_hashen(hashconfig); in vi_full_init()
7058 extra = hashen_to_hashconfig(vi->hashen) ^ hashconfig; in vi_full_init()
7071 "global RSS config (0x%x) cannot be accommodated.\n", in vi_full_init()
7075 CH_ALERT(vi, "IPv4 2-tuple hashing forced on.\n"); in vi_full_init()
7077 CH_ALERT(vi, "TCP/IPv4 4-tuple hashing forced on.\n"); in vi_full_init()
7079 CH_ALERT(vi, "IPv6 2-tuple hashing forced on.\n"); in vi_full_init()
7081 CH_ALERT(vi, "TCP/IPv6 4-tuple hashing forced on.\n"); in vi_full_init()
7083 CH_ALERT(vi, "UDP/IPv4 4-tuple hashing forced on.\n"); in vi_full_init()
7085 CH_ALERT(vi, "UDP/IPv6 4-tuple hashing forced on.\n"); in vi_full_init()
7087 vi->hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | in vi_full_init()
7092 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, vi->rss[0], in vi_full_init()
7094 if (rc != 0) { in vi_full_init()
7095 CH_ERR(vi, "rss hash/defaultq config failed: %d\n", rc); in vi_full_init()
7096 return (rc); in vi_full_init()
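
/*
 * Editor's illustrative sketch, not driver code: vi_full_init() above fills
 * the RSS indirection table by cycling through the VI's rx queues until all
 * rss_size slots are populated, which is why a queue count that does not
 * divide the table size gives uneven traffic spread.  Standalone version
 * (hypothetical sizes; the real table holds absolute queue ids):
 */
#include <stdio.h>

int
main(void)
{
	int rss_size = 16, nrxq = 3;	/* 3 does not divide 16 evenly */
	int rss[16], hits[3] = { 0 };
	int i, j;

	for (i = 0, j = 0; i < rss_size; i++, j++)
		rss[i] = j % nrxq;	/* cycle over the rx queues */

	for (i = 0; i < rss_size; i++)
		hits[rss[i]]++;
	for (i = 0; i < nrxq; i++)
		printf("rxq %d gets %d of %d buckets\n", i, hits[i], rss_size);
	return (0);
}
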
7105 int rc; in vi_init() local
7107 ASSERT_SYNCHRONIZED_OP(vi->adapter); in vi_init()
7108 KASSERT((vi->flags & VI_INIT_DONE) == 0, in vi_init()
7111 rc = vi_full_init(vi); in vi_init()
7112 if (rc != 0) in vi_init()
7115 vi->flags |= VI_INIT_DONE; in vi_init()
7117 return (rc); in vi_init()
7127 if (vi->flags & VI_INIT_DONE) { in vi_full_uninit()
7129 free(vi->rss, M_CXGBE); in vi_full_uninit()
7130 free(vi->nm_rss, M_CXGBE); in vi_full_uninit()
7134 vi->flags &= ~VI_INIT_DONE; in vi_full_uninit()
7140 struct sge_eq *eq = &txq->eq; in quiesce_txq()
7141 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; in quiesce_txq()
7143 MPASS(eq->flags & EQ_SW_ALLOCATED); in quiesce_txq()
7144 MPASS(!(eq->flags & EQ_ENABLED)); in quiesce_txq()
7147 while (!mp_ring_is_idle(txq->r)) { in quiesce_txq()
7148 mp_ring_check_drainage(txq->r, 4096); in quiesce_txq()
7151 MPASS(txq->txp.npkt == 0); in quiesce_txq()
7153 if (eq->flags & EQ_HW_ALLOCATED) { in quiesce_txq()
7159 while (spg->cidx != htobe16(eq->pidx)) in quiesce_txq()
7161 while (eq->cidx != eq->pidx) in quiesce_txq()
7169 while (eq->cidx != eq->pidx) { in quiesce_txq()
7173 txsd = &txq->sdesc[eq->cidx]; in quiesce_txq()
7174 for (m = txsd->m; m != NULL; m = nextpkt) { in quiesce_txq()
7175 nextpkt = m->m_nextpkt; in quiesce_txq()
7176 m->m_nextpkt = NULL; in quiesce_txq()
7179 IDXINCR(eq->cidx, txsd->desc_used, eq->sidx); in quiesce_txq()
7181 spg->pidx = spg->cidx = htobe16(eq->cidx); in quiesce_txq()
7192 while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL) { in quiesce_wrq()
7193 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); in quiesce_wrq()
7195 wrq->nwr_pending--; in quiesce_wrq()
7196 wrq->ndesc_needed -= howmany(wr->wr_len, EQ_ESIZE); in quiesce_wrq()
7200 MPASS(wrq->nwr_pending == 0); in quiesce_wrq()
7201 MPASS(wrq->ndesc_needed == 0); in quiesce_wrq()
7202 wrq->nwr_pending = 0; in quiesce_wrq()
7203 wrq->ndesc_needed = 0; in quiesce_wrq()
7211 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) in quiesce_iq_fl()
7215 MPASS(iq->flags & IQ_HAS_FL); in quiesce_iq_fl()
7217 mtx_lock(&sc->sfl_lock); in quiesce_iq_fl()
7219 fl->flags |= FL_DOOMED; in quiesce_iq_fl()
7221 callout_stop(&sc->sfl_callout); in quiesce_iq_fl()
7222 mtx_unlock(&sc->sfl_lock); in quiesce_iq_fl()
7224 KASSERT((fl->flags & FL_STARVING) == 0, in quiesce_iq_fl()
7228 if (!(iq->flags & IQ_HW_ALLOCATED)) in quiesce_iq_fl()
7242 struct adapter *sc = vi->adapter; in quiesce_vi()
7252 if (!(vi->flags & VI_INIT_DONE)) in quiesce_vi()
7261 quiesce_wrq(&ofld_txq->wrq); in quiesce_vi()
7266 quiesce_iq_fl(sc, &rxq->iq, &rxq->fl); in quiesce_vi()
7271 quiesce_iq_fl(sc, &ofld_rxq->iq, &ofld_rxq->fl); in quiesce_vi()
7280 int rc; in t4_alloc_irq() local
7282 irq->rid = rid; in t4_alloc_irq()
7283 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, in t4_alloc_irq()
7285 if (irq->res == NULL) { in t4_alloc_irq()
7286 device_printf(sc->dev, in t4_alloc_irq()
7291 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, in t4_alloc_irq()
7292 NULL, handler, arg, &irq->tag); in t4_alloc_irq()
7293 if (rc != 0) { in t4_alloc_irq()
7294 device_printf(sc->dev, in t4_alloc_irq()
7296 rid, name, rc); in t4_alloc_irq()
7298 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name); in t4_alloc_irq()
7300 return (rc); in t4_alloc_irq()
7306 if (irq->tag) in t4_free_irq()
7307 bus_teardown_intr(sc->dev, irq->res, irq->tag); in t4_free_irq()
7308 if (irq->res) in t4_free_irq()
7309 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); in t4_free_irq()
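/*
 * The alloc/free pair above is the usual newbus interrupt lifecycle:
 * bus_alloc_resource_any() yields the SYS_RES_IRQ resource, bus_setup_intr()
 * attaches the handler and returns the cookie in irq->tag, and
 * bus_describe_intr() only labels it (e.g. for vmstat -i output).  Teardown
 * runs in reverse: bus_teardown_intr() with the cookie first, then
 * bus_release_resource() on the IRQ resource.
 */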
7320 regs->version = chip_id(sc) | chip_rev(sc) << 10; in get_regs()
7321 t4_get_regs(sc, buf, regs->len); in get_regs()
7328 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) argument
7329 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) argument
7333 #define V_PL_VFID(x) ((x) << S_PL_VFID) argument
7334 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) argument
7338 #define V_PL_ADDR(x) ((x) << S_PL_ADDR) argument
7339 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) argument
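/*
 * A short illustration of the S_/V_/G_/M_ field-macro convention used by
 * these PL_* definitions (and the hardware headers in general): V_x()
 * positions a value within its field, G_x() masks it back out, and the
 * fields can be OR'd together into a single command word, which is
 * presumably how read_vf_stat() below builds its indirect-access request.
 * "vin" and "offset" are illustrative values, not driver fields.
 */
uint32_t cmd, vin = 3, offset = 0x120;

cmd = V_PL_AUTOINC(1) | V_PL_VFID(vin) | V_PL_ADDR(offset);
MPASS(G_PL_VFID(cmd) == vin);	/* G_x() recovers what V_x() packed */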
7348 if (sc->flags & IS_VF) { in read_vf_stat()
7352 mtx_assert(&sc->reg_lock, MA_OWNED); in read_vf_stat()
7368 if (!(sc->flags & IS_VF)) in t4_get_vi_stats()
7369 mtx_lock(&sc->reg_lock); in t4_get_vi_stats()
7370 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); in t4_get_vi_stats()
7371 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); in t4_get_vi_stats()
7372 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); in t4_get_vi_stats()
7373 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); in t4_get_vi_stats()
7374 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); in t4_get_vi_stats()
7375 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); in t4_get_vi_stats()
7376 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); in t4_get_vi_stats()
7377 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); in t4_get_vi_stats()
7378 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); in t4_get_vi_stats()
7379 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); in t4_get_vi_stats()
7380 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); in t4_get_vi_stats()
7381 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); in t4_get_vi_stats()
7382 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); in t4_get_vi_stats()
7383 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); in t4_get_vi_stats()
7384 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); in t4_get_vi_stats()
7385 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); in t4_get_vi_stats()
7386 if (!(sc->flags & IS_VF)) in t4_get_vi_stats()
7387 mtx_unlock(&sc->reg_lock); in t4_get_vi_stats()
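/*
 * GET_STAT() itself is not visible in this extract; by analogy with the
 * T4_REGSTAT/T4_PORTSTAT macros later in the file it presumably expands to a
 * read_vf_stat(sc, vin, A_MPS_VF_STAT_<name>_L) call.  Note the locking
 * asymmetry above: a VF reads its own counters directly, while the PF
 * presumably goes through an indirect window (see read_vf_stat()) and
 * therefore brackets the whole block with sc->reg_lock.
 */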
7410 mtx_assert(&vi->tick_mtx, MA_OWNED); in vi_refresh_stats()
7412 if (vi->flags & VI_SKIP_STATS) in vi_refresh_stats()
7417 if (timevalcmp(&tv, &vi->last_refreshed, <)) in vi_refresh_stats()
7420 t4_get_vi_stats(vi->adapter, vi->vin, &vi->stats); in vi_refresh_stats()
7421 getmicrotime(&vi->last_refreshed); in vi_refresh_stats()
7433 mtx_assert(&vi->tick_mtx, MA_OWNED); in cxgbe_refresh_stats()
7435 if (vi->flags & VI_SKIP_STATS) in cxgbe_refresh_stats()
7440 if (timevalcmp(&tv, &vi->last_refreshed, <)) in cxgbe_refresh_stats()
7443 pi = vi->pi; in cxgbe_refresh_stats()
7444 sc = vi->adapter; in cxgbe_refresh_stats()
7446 t4_get_port_stats(sc, pi->port_id, &pi->stats); in cxgbe_refresh_stats()
7447 chan_map = pi->rx_e_chan_map; in cxgbe_refresh_stats()
7449 i = ffs(chan_map) - 1; in cxgbe_refresh_stats()
7450 mtx_lock(&sc->reg_lock); in cxgbe_refresh_stats()
7453 mtx_unlock(&sc->reg_lock); in cxgbe_refresh_stats()
7457 pi->tnl_cong_drops = tnl_cong_drops; in cxgbe_refresh_stats()
7458 getmicrotime(&vi->last_refreshed); in cxgbe_refresh_stats()
7467 mtx_assert(&vi->tick_mtx, MA_OWNED); in cxgbe_tick()
7470 callout_schedule(&vi->tick, hz); in cxgbe_tick()
7478 mtx_assert(&vi->tick_mtx, MA_OWNED); in vi_tick()
7481 callout_schedule(&vi->tick, hz); in vi_tick()
7509 struct sysctl_ctx_list *ctx = &sc->ctx; in t4_sysctls()
7515 * dev.t4nex.X. in t4_sysctls()
7517 oid = device_get_sysctl_tree(sc->dev); in t4_sysctls()
7520 sc->sc_do_rxcopy = 1; in t4_sysctls()
7522 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); in t4_sysctls()
7525 sc->params.nports, "# of ports"); in t4_sysctls()
7529 (uintptr_t)&sc->doorbells, sysctl_bitfield_8b, "A", in t4_sysctls()
7533 sc->params.vpd.cclk, "core clock frequency (in kHz)"); in t4_sysctls()
7537 sc->params.sge.timer_val, sizeof(sc->params.sge.timer_val), in t4_sysctls()
7542 sc->params.sge.counter_val, sizeof(sc->params.sge.counter_val), in t4_sysctls()
7547 sc->lro_timeout = 100; in t4_sysctls()
7549 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); in t4_sysctls()
7552 &sc->debug_flags, 0, "flags to enable runtime debugging"); in t4_sysctls()
7555 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); in t4_sysctls()
7558 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); in t4_sysctls()
7560 if (sc->flags & IS_VF) in t4_sysctls()
7567 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number"); in t4_sysctls()
7570 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number"); in t4_sysctls()
7573 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change"); in t4_sysctls()
7576 CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version"); in t4_sysctls()
7579 CTLFLAG_RD, sc->params.vpd.na, 0, "network address"); in t4_sysctls()
7582 sc->er_version, 0, "expansion ROM version"); in t4_sysctls()
7585 sc->bs_version, 0, "bootstrap firmware version"); in t4_sysctls()
7588 NULL, sc->params.scfg_vers, "serial config version"); in t4_sysctls()
7591 NULL, sc->params.vpd_vers, "VPD version"); in t4_sysctls()
7594 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); in t4_sysctls()
7597 sc->cfcsum, "config file checksum"); in t4_sysctls()
7602 (uintptr_t)&sc->name, sysctl_bitfield_16b, "A", \ in t4_sysctls()
7617 NULL, sc->tids.nftids, "number of filters"); in t4_sysctls()
7644 &sc->swintr, 0, "software triggered interrupts"); in t4_sysctls()
7651 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. in t4_sysctls()
7722 "CIM OBQ 6 (SGE0-RX)"); in t4_sysctls()
7727 "CIM OBQ 7 (SGE1-RX)"); in t4_sysctls()
7744 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); in t4_sysctls()
7849 CTLFLAG_RW, &sc->tlst.inline_keys, 0, "Always pass TLS " in t4_sysctls()
7855 CTLFLAG_RW, &sc->tlst.combo_wrs, 0, "Attempt to " in t4_sysctls()
7867 * dev.t4nex.X.toe. in t4_sysctls()
7873 sc->tt.cong_algorithm = -1; in t4_sysctls()
7875 CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control " in t4_sysctls()
7876 "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, " in t4_sysctls()
7879 sc->tt.sndbuf = -1; in t4_sysctls()
7881 &sc->tt.sndbuf, 0, "hardware send buffer"); in t4_sysctls()
7883 sc->tt.ddp = 0; in t4_sysctls()
7885 CTLFLAG_RW | CTLFLAG_SKIP, &sc->tt.ddp, 0, ""); in t4_sysctls()
7887 &sc->tt.ddp, 0, "Enable zero-copy aio_read(2)"); in t4_sysctls()
7889 sc->tt.rx_coalesce = -1; in t4_sysctls()
7891 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); in t4_sysctls()
7893 sc->tt.tls = 1; in t4_sysctls()
7898 sc->tt.tx_align = -1; in t4_sysctls()
7900 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); in t4_sysctls()
7902 sc->tt.tx_zcopy = 0; in t4_sysctls()
7904 CTLFLAG_RW, &sc->tt.tx_zcopy, 0, in t4_sysctls()
7905 "Enable zero-copy aio_write(2)"); in t4_sysctls()
7907 sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading; in t4_sysctls()
7910 &sc->tt.cop_managed_offloading, 0, in t4_sysctls()
7913 sc->tt.autorcvbuf_inc = 16 * 1024; in t4_sysctls()
7915 CTLFLAG_RW, &sc->tt.autorcvbuf_inc, 0, in t4_sysctls()
7918 sc->tt.update_hc_on_pmtu_change = 1; in t4_sysctls()
7921 &sc->tt.update_hc_on_pmtu_change, 0, in t4_sysctls()
7924 sc->tt.iso = 1; in t4_sysctls()
7926 &sc->tt.iso, 0, "Enable iSCSI segmentation offload"); in t4_sysctls()
8016 struct sysctl_ctx_list *ctx = &vi->ctx; in vi_sysctls()
8021 * dev.v?(cxgbe|cxl).X. in vi_sysctls()
8023 oid = device_get_sysctl_tree(vi->dev); in vi_sysctls()
8027 vi->viid, "VI identifier"); in vi_sysctls()
8029 &vi->nrxq, 0, "# of rx queues"); in vi_sysctls()
8031 &vi->ntxq, 0, "# of tx queues"); in vi_sysctls()
8033 &vi->first_rxq, 0, "index of first rx queue"); in vi_sysctls()
8035 &vi->first_txq, 0, "index of first tx queue"); in vi_sysctls()
8037 vi->rss_base, "start of RSS indirection table"); in vi_sysctls()
8039 vi->rss_size, "size of RSS indirection table"); in vi_sysctls()
8045 "Reserve queue 0 for non-flowid packets"); in vi_sysctls()
8048 if (vi->adapter->flags & IS_VF) { in vi_sysctls()
8049 MPASS(vi->flags & TX_USES_VM_WR); in vi_sysctls()
8059 if (vi->nofldrxq != 0) { in vi_sysctls()
8061 &vi->nofldrxq, 0, in vi_sysctls()
8064 CTLFLAG_RD, &vi->first_ofld_rxq, 0, in vi_sysctls()
8077 if (vi->nofldtxq != 0) { in vi_sysctls()
8079 &vi->nofldtxq, 0, in vi_sysctls()
8082 CTLFLAG_RD, &vi->first_ofld_txq, 0, in vi_sysctls()
8087 if (vi->nnmrxq != 0) { in vi_sysctls()
8089 &vi->nnmrxq, 0, "# of netmap rx queues"); in vi_sysctls()
8091 &vi->nnmtxq, 0, "# of netmap tx queues"); in vi_sysctls()
8093 CTLFLAG_RD, &vi->first_nm_rxq, 0, in vi_sysctls()
8096 CTLFLAG_RD, &vi->first_nm_txq, 0, in vi_sysctls()
8119 struct sysctl_ctx_list *ctx = &pi->ctx; in cxgbe_sysctls()
8122 struct adapter *sc = pi->adapter; in cxgbe_sysctls()
8128 * dev.cxgbe.X. in cxgbe_sysctls()
8130 oid = device_get_sysctl_tree(pi->dev); in cxgbe_sysctls()
8136 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { in cxgbe_sysctls()
8162 "autonegotiation (-1 = not supported)"); in cxgbe_sysctls()
8168 &pi->link_cfg.requested_caps, 0, "L1 config requested by driver"); in cxgbe_sysctls()
8170 &pi->link_cfg.pcaps, 0, "port capabilities"); in cxgbe_sysctls()
8172 &pi->link_cfg.acaps, 0, "advertised capabilities"); in cxgbe_sysctls()
8174 &pi->link_cfg.lpacaps, 0, "link partner advertised capabilities"); in cxgbe_sysctls()
8179 pi->mps_bg_map, "MPS buffer group map"); in cxgbe_sysctls()
8181 NULL, pi->rx_e_chan_map, "TP rx e-channel map"); in cxgbe_sysctls()
8183 pi->tx_chan, "TP tx c-channel"); in cxgbe_sysctls()
8185 pi->rx_chan, "TP rx c-channel"); in cxgbe_sysctls()
8187 if (sc->flags & IS_VF) in cxgbe_sysctls()
8191 * dev.(cxgbe|cxl).X.tc. in cxgbe_sysctls()
8198 CTLFLAG_RW, &pi->sched_params->pktsize, 0, in cxgbe_sysctls()
8199 "pktsize for per-flow cl-rl (0 means up to the driver)"); in cxgbe_sysctls()
8201 CTLFLAG_RW, &pi->sched_params->burstsize, 0, in cxgbe_sysctls()
8202 "burstsize for per-flow cl-rl (0 means up to the driver)"); in cxgbe_sysctls()
8203 for (i = 0; i < sc->params.nsched_cls; i++) { in cxgbe_sysctls()
8204 struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i]; in cxgbe_sysctls()
8211 CTLFLAG_RD, &tc->state, 0, "current state"); in cxgbe_sysctls()
8214 (uintptr_t)&tc->flags, sysctl_bitfield_8b, "A", "flags"); in cxgbe_sysctls()
8216 CTLFLAG_RD, &tc->refcount, 0, "references to this class"); in cxgbe_sysctls()
8219 (pi->port_id << 16) | i, sysctl_tc_params, "A", in cxgbe_sysctls()
8224 * dev.cxgbe.X.stats. in cxgbe_sysctls()
8230 &pi->tx_parse_error, 0, in cxgbe_sysctls()
8236 t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_##stat##_L), \ in cxgbe_sysctls()
8242 &pi->stats.name, desc) in cxgbe_sysctls()
8302 T4_PORTSTAT(rx_ovflow0, "# drops due to buffer-group 0 overflows"); in cxgbe_sysctls()
8303 T4_PORTSTAT(rx_ovflow1, "# drops due to buffer-group 1 overflows"); in cxgbe_sysctls()
8304 T4_PORTSTAT(rx_ovflow2, "# drops due to buffer-group 2 overflows"); in cxgbe_sysctls()
8305 T4_PORTSTAT(rx_ovflow3, "# drops due to buffer-group 3 overflows"); in cxgbe_sysctls()
8306 T4_PORTSTAT(rx_trunc0, "# of buffer-group 0 truncated packets"); in cxgbe_sysctls()
8307 T4_PORTSTAT(rx_trunc1, "# of buffer-group 1 truncated packets"); in cxgbe_sysctls()
8308 T4_PORTSTAT(rx_trunc2, "# of buffer-group 2 truncated packets"); in cxgbe_sysctls()
8309 T4_PORTSTAT(rx_trunc3, "# of buffer-group 3 truncated packets"); in cxgbe_sysctls()
8318 int rc, *i, space = 0; in sysctl_int_array() local
8322 for (i = arg1; arg2; arg2 -= sizeof(int), i++) { in sysctl_int_array()
8328 rc = sbuf_finish(&sb); in sysctl_int_array()
8330 return (rc); in sysctl_int_array()
8336 int rc; in sysctl_bitfield_8b() local
8344 rc = sbuf_finish(sb); in sysctl_bitfield_8b()
8347 return (rc); in sysctl_bitfield_8b()
8353 int rc; in sysctl_bitfield_16b() local
8361 rc = sbuf_finish(sb); in sysctl_bitfield_16b()
8364 return (rc); in sysctl_bitfield_16b()
8372 struct adapter *sc = pi->adapter; in sysctl_btphy()
8374 int rc; in sysctl_btphy() local
8376 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); in sysctl_btphy()
8377 if (rc) in sysctl_btphy()
8378 return (rc); in sysctl_btphy()
8380 rc = ENXIO; in sysctl_btphy()
8383 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, in sysctl_btphy()
8387 if (rc) in sysctl_btphy()
8388 return (rc); in sysctl_btphy()
8392 rc = sysctl_handle_int(oidp, &v, 0, req); in sysctl_btphy()
8393 return (rc); in sysctl_btphy()
8400 int rc, val; in sysctl_noflowq() local
8402 val = vi->rsrv_noflowq; in sysctl_noflowq()
8403 rc = sysctl_handle_int(oidp, &val, 0, req); in sysctl_noflowq()
8404 if (rc != 0 || req->newptr == NULL) in sysctl_noflowq()
8405 return (rc); in sysctl_noflowq()
8407 if ((val >= 1) && (vi->ntxq > 1)) in sysctl_noflowq()
8408 vi->rsrv_noflowq = 1; in sysctl_noflowq()
8410 vi->rsrv_noflowq = 0; in sysctl_noflowq()
8412 return (rc); in sysctl_noflowq()
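/*
 * sysctl_noflowq() above is the simplest instance of the read-modify-write
 * pattern every handler in this file follows.  A minimal, self-contained
 * sketch of that pattern ("knob" is a made-up variable, not a driver field):
 */
static int knob;

static int
sysctl_knob(SYSCTL_HANDLER_ARGS)
{
	int rc, val;

	val = knob;					/* current value out */
	rc = sysctl_handle_int(oidp, &val, 0, req);	/* copyout/copyin */
	if (rc != 0 || req->newptr == NULL)
		return (rc);				/* error, or a read */
	if (val < 0)
		return (EINVAL);			/* validate new value */
	knob = val;					/* commit */
	return (0);
}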
8419 struct adapter *sc = vi->adapter; in sysctl_tx_vm_wr()
8420 int rc, val, i; in sysctl_tx_vm_wr() local
8422 MPASS(!(sc->flags & IS_VF)); in sysctl_tx_vm_wr()
8424 val = vi->flags & TX_USES_VM_WR ? 1 : 0; in sysctl_tx_vm_wr()
8425 rc = sysctl_handle_int(oidp, &val, 0, req); in sysctl_tx_vm_wr()
8426 if (rc != 0 || req->newptr == NULL) in sysctl_tx_vm_wr()
8427 return (rc); in sysctl_tx_vm_wr()
8432 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, in sysctl_tx_vm_wr()
8434 if (rc) in sysctl_tx_vm_wr()
8435 return (rc); in sysctl_tx_vm_wr()
8437 rc = ENXIO; in sysctl_tx_vm_wr()
8438 else if (if_getdrvflags(vi->ifp) & IFF_DRV_RUNNING) { in sysctl_tx_vm_wr()
8444 rc = EBUSY; in sysctl_tx_vm_wr()
8446 struct port_info *pi = vi->pi; in sysctl_tx_vm_wr()
8449 uint8_t npkt = sc->params.max_pkts_per_eth_tx_pkts_wr; in sysctl_tx_vm_wr()
8452 vi->flags |= TX_USES_VM_WR; in sysctl_tx_vm_wr()
8453 if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_VM_TSO); in sysctl_tx_vm_wr()
8455 V_TXPKT_INTF(pi->tx_chan)); in sysctl_tx_vm_wr()
8456 if (!(sc->flags & IS_VF)) in sysctl_tx_vm_wr()
8457 npkt--; in sysctl_tx_vm_wr()
8459 vi->flags &= ~TX_USES_VM_WR; in sysctl_tx_vm_wr()
8460 if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_TSO); in sysctl_tx_vm_wr()
8462 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | in sysctl_tx_vm_wr()
8463 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); in sysctl_tx_vm_wr()
8466 txq->cpl_ctrl0 = ctrl0; in sysctl_tx_vm_wr()
8467 txq->txp.max_npkt = npkt; in sysctl_tx_vm_wr()
8471 return (rc); in sysctl_tx_vm_wr()
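/*
 * Handlers that modify adapter or VI state (sysctl_tx_vm_wr() above being a
 * full example) also serialize against other configuration paths with the
 * driver's synchronized-op mechanism.  A minimal, hedged sketch of that
 * wrapping -- do_the_change() is a hypothetical placeholder and the
 * hw_off_limits() test is assumed from the elided ENXIO branch above:
 */
rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xxxx");
if (rc)
	return (rc);
if (hw_off_limits(sc))		/* assumed: adapter is resetting/in error */
	rc = ENXIO;
else
	rc = do_the_change(vi);	/* hypothetical */
end_synchronized_op(sc, 0);
return (rc);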
8478 struct adapter *sc = vi->adapter; in sysctl_holdoff_tmr_idx()
8479 int idx, rc, i; in sysctl_holdoff_tmr_idx() local
8483 idx = vi->tmr_idx; in sysctl_holdoff_tmr_idx()
8485 rc = sysctl_handle_int(oidp, &idx, 0, req); in sysctl_holdoff_tmr_idx()
8486 if (rc != 0 || req->newptr == NULL) in sysctl_holdoff_tmr_idx()
8487 return (rc); in sysctl_holdoff_tmr_idx()
8492 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, in sysctl_holdoff_tmr_idx()
8494 if (rc) in sysctl_holdoff_tmr_idx()
8495 return (rc); in sysctl_holdoff_tmr_idx()
8497 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); in sysctl_holdoff_tmr_idx()
8500 atomic_store_rel_8(&rxq->iq.intr_params, v); in sysctl_holdoff_tmr_idx()
8502 rxq->iq.intr_params = v; in sysctl_holdoff_tmr_idx()
8505 vi->tmr_idx = idx; in sysctl_holdoff_tmr_idx()
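/*
 * intr_params is a single byte per ingress queue (hence the 8-bit store
 * above) packing the holdoff timer index together with the "interrupt on
 * packet count" enable bit.  Unlike pktc_idx below, the timer index can be
 * changed on live queues, which is why this handler updates them in place
 * instead of returning EBUSY once VI_INIT_DONE is set.
 */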
8515 struct adapter *sc = vi->adapter; in sysctl_holdoff_pktc_idx()
8516 int idx, rc; in sysctl_holdoff_pktc_idx() local
8518 idx = vi->pktc_idx; in sysctl_holdoff_pktc_idx()
8520 rc = sysctl_handle_int(oidp, &idx, 0, req); in sysctl_holdoff_pktc_idx()
8521 if (rc != 0 || req->newptr == NULL) in sysctl_holdoff_pktc_idx()
8522 return (rc); in sysctl_holdoff_pktc_idx()
8524 if (idx < -1 || idx >= SGE_NCOUNTERS) in sysctl_holdoff_pktc_idx()
8527 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, in sysctl_holdoff_pktc_idx()
8529 if (rc) in sysctl_holdoff_pktc_idx()
8530 return (rc); in sysctl_holdoff_pktc_idx()
8532 if (vi->flags & VI_INIT_DONE) in sysctl_holdoff_pktc_idx()
8533 rc = EBUSY; /* cannot be changed once the queues are created */ in sysctl_holdoff_pktc_idx()
8535 vi->pktc_idx = idx; in sysctl_holdoff_pktc_idx()
8538 return (rc); in sysctl_holdoff_pktc_idx()
8545 struct adapter *sc = vi->adapter; in sysctl_qsize_rxq()
8546 int qsize, rc; in sysctl_qsize_rxq() local
8548 qsize = vi->qsize_rxq; in sysctl_qsize_rxq()
8550 rc = sysctl_handle_int(oidp, &qsize, 0, req); in sysctl_qsize_rxq()
8551 if (rc != 0 || req->newptr == NULL) in sysctl_qsize_rxq()
8552 return (rc); in sysctl_qsize_rxq()
8557 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, in sysctl_qsize_rxq()
8559 if (rc) in sysctl_qsize_rxq()
8560 return (rc); in sysctl_qsize_rxq()
8562 if (vi->flags & VI_INIT_DONE) in sysctl_qsize_rxq()
8563 rc = EBUSY; /* cannot be changed once the queues are created */ in sysctl_qsize_rxq()
8565 vi->qsize_rxq = qsize; in sysctl_qsize_rxq()
8568 return (rc); in sysctl_qsize_rxq()
8575 struct adapter *sc = vi->adapter; in sysctl_qsize_txq()
8576 int qsize, rc; in sysctl_qsize_txq() local
8578 qsize = vi->qsize_txq; in sysctl_qsize_txq()
8580 rc = sysctl_handle_int(oidp, &qsize, 0, req); in sysctl_qsize_txq()
8581 if (rc != 0 || req->newptr == NULL) in sysctl_qsize_txq()
8582 return (rc); in sysctl_qsize_txq()
8587 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, in sysctl_qsize_txq()
8589 if (rc) in sysctl_qsize_txq()
8590 return (rc); in sysctl_qsize_txq()
8592 if (vi->flags & VI_INIT_DONE) in sysctl_qsize_txq()
8593 rc = EBUSY; /* cannot be changed once the queues are created */ in sysctl_qsize_txq()
8595 vi->qsize_txq = qsize; in sysctl_qsize_txq()
8598 return (rc); in sysctl_qsize_txq()
8605 struct adapter *sc = pi->adapter; in sysctl_pause_settings()
8606 struct link_config *lc = &pi->link_cfg; in sysctl_pause_settings()
8607 int rc; in sysctl_pause_settings() local
8609 if (req->newptr == NULL) { in sysctl_pause_settings()
8617 if (lc->link_ok) { in sysctl_pause_settings()
8618 sbuf_printf(sb, "%b", (lc->fc & (PAUSE_TX | PAUSE_RX)) | in sysctl_pause_settings()
8619 (lc->requested_fc & PAUSE_AUTONEG), bits); in sysctl_pause_settings()
8621 sbuf_printf(sb, "%b", lc->requested_fc & (PAUSE_TX | in sysctl_pause_settings()
8624 rc = sbuf_finish(sb); in sysctl_pause_settings()
8630 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX | in sysctl_pause_settings()
8634 rc = sysctl_handle_string(oidp, s, sizeof(s), req); in sysctl_pause_settings()
8635 if (rc != 0) in sysctl_pause_settings()
8636 return (rc); in sysctl_pause_settings()
8642 n = s[0] - '0'; in sysctl_pause_settings()
8646 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, in sysctl_pause_settings()
8648 if (rc) in sysctl_pause_settings()
8649 return (rc); in sysctl_pause_settings()
8652 lc->requested_fc = n; in sysctl_pause_settings()
8654 if (pi->up_vis > 0) in sysctl_pause_settings()
8655 rc = apply_link_config(pi); in sysctl_pause_settings()
8662 return (rc); in sysctl_pause_settings()
8669 struct link_config *lc = &pi->link_cfg; in sysctl_link_fec()
8670 int rc; in sysctl_link_fec() local
8672 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2"; in sysctl_link_fec()
8677 if (lc->link_ok) in sysctl_link_fec()
8678 sbuf_printf(sb, "%b", lc->fec, bits); in sysctl_link_fec()
8681 rc = sbuf_finish(sb); in sysctl_link_fec()
8684 return (rc); in sysctl_link_fec()
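/*
 * The "bits" strings used with sbuf_printf(sb, "%b", ...) here follow the
 * kernel's %b bit-name format: the first byte gives the numeric base
 * ("\20" = 020 octal = 16, i.e. hex) and each following "\<n>NAME" pair
 * labels bit <n> (1-based).  For example (kernel printf(9) semantics):
 */
printf("%b\n", 5, "\20\1RS-FEC\2FC-FEC\3NO-FEC");
/* prints "5<RS-FEC,NO-FEC>" -- bits 1 and 3 are set */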
8691 struct adapter *sc = pi->adapter; in sysctl_requested_fec()
8692 struct link_config *lc = &pi->link_cfg; in sysctl_requested_fec()
8693 int rc; in sysctl_requested_fec() local
8696 if (req->newptr == NULL) { in sysctl_requested_fec()
8698 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2" in sysctl_requested_fec()
8705 sbuf_printf(sb, "%b", lc->requested_fec, bits); in sysctl_requested_fec()
8706 rc = sbuf_finish(sb); in sysctl_requested_fec()
8713 lc->requested_fec == FEC_AUTO ? -1 : in sysctl_requested_fec()
8714 lc->requested_fec & (M_FW_PORT_CAP32_FEC | FEC_MODULE)); in sysctl_requested_fec()
8716 rc = sysctl_handle_string(oidp, s, sizeof(s), req); in sysctl_requested_fec()
8717 if (rc != 0) in sysctl_requested_fec()
8718 return (rc); in sysctl_requested_fec()
8726 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, in sysctl_requested_fec()
8728 if (rc) in sysctl_requested_fec()
8729 return (rc); in sysctl_requested_fec()
8731 old = lc->requested_fec; in sysctl_requested_fec()
8733 lc->requested_fec = FEC_AUTO; in sysctl_requested_fec()
8735 lc->requested_fec = FEC_NONE; in sysctl_requested_fec()
8737 if ((lc->pcaps | in sysctl_requested_fec()
8739 lc->pcaps) { in sysctl_requested_fec()
8740 rc = ENOTSUP; in sysctl_requested_fec()
8743 lc->requested_fec = n & (M_FW_PORT_CAP32_FEC | in sysctl_requested_fec()
8748 if (pi->up_vis > 0) { in sysctl_requested_fec()
8749 rc = apply_link_config(pi); in sysctl_requested_fec()
8750 if (rc != 0) { in sysctl_requested_fec()
8751 lc->requested_fec = old; in sysctl_requested_fec()
8752 if (rc == FW_EPROTO) in sysctl_requested_fec()
8753 rc = ENOTSUP; in sysctl_requested_fec()
8762 return (rc); in sysctl_requested_fec()
8769 struct adapter *sc = pi->adapter; in sysctl_module_fec()
8770 struct link_config *lc = &pi->link_cfg; in sysctl_module_fec()
8771 int rc; in sysctl_module_fec() local
8774 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2\5RSVD3"; in sysctl_module_fec()
8781 rc = EBUSY; in sysctl_module_fec()
8785 rc = ENXIO; in sysctl_module_fec()
8789 if (pi->up_vis == 0) { in sysctl_module_fec()
8799 fec = lc->fec_hint; in sysctl_module_fec()
8800 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE || in sysctl_module_fec()
8801 !fec_supported(lc->pcaps)) { in sysctl_module_fec()
8810 rc = sbuf_finish(sb); in sysctl_module_fec()
8815 return (rc); in sysctl_module_fec()
8822 struct adapter *sc = pi->adapter; in sysctl_autoneg()
8823 struct link_config *lc = &pi->link_cfg; in sysctl_autoneg()
8824 int rc, val; in sysctl_autoneg() local
8826 if (lc->pcaps & FW_PORT_CAP32_ANEG) in sysctl_autoneg()
8827 val = lc->requested_aneg == AUTONEG_DISABLE ? 0 : 1; in sysctl_autoneg()
8829 val = -1; in sysctl_autoneg()
8830 rc = sysctl_handle_int(oidp, &val, 0, req); in sysctl_autoneg()
8831 if (rc != 0 || req->newptr == NULL) in sysctl_autoneg()
8832 return (rc); in sysctl_autoneg()
8840 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, in sysctl_autoneg()
8842 if (rc) in sysctl_autoneg()
8843 return (rc); in sysctl_autoneg()
8845 if (val == AUTONEG_ENABLE && !(lc->pcaps & FW_PORT_CAP32_ANEG)) { in sysctl_autoneg()
8846 rc = ENOTSUP; in sysctl_autoneg()
8849 lc->requested_aneg = val; in sysctl_autoneg()
8852 if (pi->up_vis > 0) in sysctl_autoneg()
8853 rc = apply_link_config(pi); in sysctl_autoneg()
8859 return (rc); in sysctl_autoneg()
8866 struct adapter *sc = pi->adapter; in sysctl_force_fec()
8867 struct link_config *lc = &pi->link_cfg; in sysctl_force_fec()
8868 int rc, val; in sysctl_force_fec() local
8870 val = lc->force_fec; in sysctl_force_fec()
8871 MPASS(val >= -1 && val <= 1); in sysctl_force_fec()
8872 rc = sysctl_handle_int(oidp, &val, 0, req); in sysctl_force_fec()
8873 if (rc != 0 || req->newptr == NULL) in sysctl_force_fec()
8874 return (rc); in sysctl_force_fec()
8875 if (!(lc->pcaps & FW_PORT_CAP32_FORCE_FEC)) in sysctl_force_fec()
8877 if (val < -1 || val > 1) in sysctl_force_fec()
8880 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4ff"); in sysctl_force_fec()
8881 if (rc) in sysctl_force_fec()
8882 return (rc); in sysctl_force_fec()
8884 lc->force_fec = val; in sysctl_force_fec()
8887 if (pi->up_vis > 0) in sysctl_force_fec()
8888 rc = apply_link_config(pi); in sysctl_force_fec()
8892 return (rc); in sysctl_force_fec()
8899 int rc, reg = arg2; in sysctl_handle_t4_reg64() local
8902 mtx_lock(&sc->reg_lock); in sysctl_handle_t4_reg64()
8904 rc = ENXIO; in sysctl_handle_t4_reg64()
8906 rc = 0; in sysctl_handle_t4_reg64()
8909 mtx_unlock(&sc->reg_lock); in sysctl_handle_t4_reg64()
8910 if (rc == 0) in sysctl_handle_t4_reg64()
8911 rc = sysctl_handle_64(oidp, &val, 0, req); in sysctl_handle_t4_reg64()
8912 return (rc); in sysctl_handle_t4_reg64()
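/*
 * sysctl_handle_t4_reg64() is the compact form of the register-read
 * convention the stats handlers below repeat: take sc->reg_lock, and if the
 * hardware is unusable (the elided check; presumably a hw_off_limits()-style
 * test) return ENXIO without touching registers, otherwise read under the
 * lock and only then hand the value to sysctl_handle_64().
 */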
8919 int rc, t; in sysctl_temperature() local
8922 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); in sysctl_temperature()
8923 if (rc) in sysctl_temperature()
8924 return (rc); in sysctl_temperature()
8926 rc = ENXIO; in sysctl_temperature()
8931 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in sysctl_temperature()
8934 if (rc) in sysctl_temperature()
8935 return (rc); in sysctl_temperature()
8937 /* unknown is returned as 0 but we display -1 in that case */ in sysctl_temperature()
8938 t = val == 0 ? -1 : val; in sysctl_temperature()
8940 rc = sysctl_handle_int(oidp, &t, 0, req); in sysctl_temperature()
8941 return (rc); in sysctl_temperature()
8948 int rc; in sysctl_vdd() local
8951 if (sc->params.core_vdd == 0) { in sysctl_vdd()
8952 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, in sysctl_vdd()
8954 if (rc) in sysctl_vdd()
8955 return (rc); in sysctl_vdd()
8957 rc = ENXIO; in sysctl_vdd()
8962 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, in sysctl_vdd()
8966 if (rc) in sysctl_vdd()
8967 return (rc); in sysctl_vdd()
8968 sc->params.core_vdd = val; in sysctl_vdd()
8971 return (sysctl_handle_int(oidp, &sc->params.core_vdd, 0, req)); in sysctl_vdd()
8978 int rc, v; in sysctl_reset_sensor() local
8981 v = sc->sensor_resets; in sysctl_reset_sensor()
8982 rc = sysctl_handle_int(oidp, &v, 0, req); in sysctl_reset_sensor()
8983 if (rc != 0 || req->newptr == NULL || v <= 0) in sysctl_reset_sensor()
8984 return (rc); in sysctl_reset_sensor()
8986 if (sc->params.fw_vers < FW_VERSION32(1, 24, 7, 0) || in sysctl_reset_sensor()
8990 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4srst"); in sysctl_reset_sensor()
8991 if (rc) in sysctl_reset_sensor()
8992 return (rc); in sysctl_reset_sensor()
8994 rc = ENXIO; in sysctl_reset_sensor()
9000 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in sysctl_reset_sensor()
9003 if (rc == 0) in sysctl_reset_sensor()
9004 sc->sensor_resets++; in sysctl_reset_sensor()
9005 return (rc); in sysctl_reset_sensor()
9013 int rc; in sysctl_loadavg() local
9016 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg"); in sysctl_loadavg()
9017 if (rc) in sysctl_loadavg()
9018 return (rc); in sysctl_loadavg()
9020 rc = ENXIO; in sysctl_loadavg()
9024 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); in sysctl_loadavg()
9027 if (rc) in sysctl_loadavg()
9028 return (rc); in sysctl_loadavg()
9041 rc = sbuf_finish(sb); in sysctl_loadavg()
9044 return (rc); in sysctl_loadavg()
9052 int rc, i; in sysctl_cctrl() local
9063 rc = 0; in sysctl_cctrl()
9064 mtx_lock(&sc->reg_lock); in sysctl_cctrl()
9066 rc = ENXIO; in sysctl_cctrl()
9069 mtx_unlock(&sc->reg_lock); in sysctl_cctrl()
9070 if (rc) in sysctl_cctrl()
9080 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); in sysctl_cctrl()
9083 rc = sbuf_finish(sb); in sysctl_cctrl()
9086 return (rc); in sysctl_cctrl()
9090 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
9091 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
9092 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
9100 int rc, i, n, qid = arg2; in sysctl_cim_ibq_obq() local
9103 u_int cim_num_obq = sc->chip_params->cim_num_obq; in sysctl_cim_ibq_obq()
9113 mtx_lock(&sc->reg_lock); in sysctl_cim_ibq_obq()
9115 rc = -ENXIO; in sysctl_cim_ibq_obq()
9117 rc = t4_read_cim_ibq(sc, qid, buf, n); in sysctl_cim_ibq_obq()
9118 mtx_unlock(&sc->reg_lock); in sysctl_cim_ibq_obq()
9122 qid -= CIM_NUM_IBQ; in sysctl_cim_ibq_obq()
9125 mtx_lock(&sc->reg_lock); in sysctl_cim_ibq_obq()
9127 rc = -ENXIO; in sysctl_cim_ibq_obq()
9129 rc = t4_read_cim_obq(sc, qid, buf, n); in sysctl_cim_ibq_obq()
9130 mtx_unlock(&sc->reg_lock); in sysctl_cim_ibq_obq()
9133 if (rc < 0) { in sysctl_cim_ibq_obq()
9134 rc = -rc; in sysctl_cim_ibq_obq()
9137 n = rc * sizeof(uint32_t); /* rc has # of words actually read */ in sysctl_cim_ibq_obq()
9141 rc = ENOMEM; in sysctl_cim_ibq_obq()
9147 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], in sysctl_cim_ibq_obq()
9150 rc = sbuf_finish(sb); in sysctl_cim_ibq_obq()
9154 return (rc); in sysctl_cim_ibq_obq()
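/*
 * Note the sign convention above: t4_read_cim_ibq()/t4_read_cim_obq() return
 * a negative errno on failure and otherwise the number of 32-bit words
 * actually read, so a negative rc is flipped back to a positive errno and a
 * non-negative rc sizes the dump.
 */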
9166 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { in sbuf_cim_la4()
9168 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, in sbuf_cim_la4()
9170 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", in sbuf_cim_la4()
9173 sbuf_printf(sb, "\n %02x %x%07x %x%07x", in sbuf_cim_la4()
9178 "\n %02x %x%07x %x%07x %08x %08x " in sbuf_cim_la4()
9179 "%08x%08x%08x%08x", in sbuf_cim_la4()
9196 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { in sbuf_cim_la6()
9198 sbuf_printf(sb, "\n %02x %08x %08x %08x", in sbuf_cim_la6()
9200 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", in sbuf_cim_la6()
9203 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", in sbuf_cim_la6()
9208 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " in sbuf_cim_la6()
9209 "%08x %08x %08x %08x %08x %08x", in sbuf_cim_la6()
9223 int rc; in sbuf_cim_la() local
9226 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, in sbuf_cim_la()
9231 mtx_lock(&sc->reg_lock); in sbuf_cim_la()
9233 rc = ENXIO; in sbuf_cim_la()
9235 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); in sbuf_cim_la()
9236 if (rc == 0) in sbuf_cim_la()
9237 rc = -t4_cim_read_la(sc, buf, NULL); in sbuf_cim_la()
9239 mtx_unlock(&sc->reg_lock); in sbuf_cim_la()
9240 if (rc == 0) { in sbuf_cim_la()
9247 return (rc); in sbuf_cim_la()
9255 int rc; in sysctl_cim_la() local
9261 rc = sbuf_cim_la(sc, sb, M_WAITOK); in sysctl_cim_la()
9262 if (rc == 0) in sysctl_cim_la()
9263 rc = sbuf_finish(sb); in sysctl_cim_la()
9265 return (rc); in sysctl_cim_la()
9271 log(LOG_DEBUG, "%s: CIM debug regs1 %08x %08x %08x %08x %08x\n", in dump_cim_regs()
9272 device_get_nameunit(sc->dev), in dump_cim_regs()
9278 log(LOG_DEBUG, "%s: CIM debug regs2 %08x %08x %08x %08x %08x\n", in dump_cim_regs()
9279 device_get_nameunit(sc->dev), in dump_cim_regs()
9291 int rc; in dump_cimla() local
9295 device_get_nameunit(sc->dev)); in dump_cimla()
9298 rc = sbuf_cim_la(sc, &sb, M_WAITOK); in dump_cimla()
9299 if (rc == 0) { in dump_cimla()
9300 rc = sbuf_finish(&sb); in dump_cimla()
9301 if (rc == 0) { in dump_cimla()
9303 device_get_nameunit(sc->dev), sbuf_data(&sb)); in dump_cimla()
9312 atomic_set_int(&sc->error_flags, ADAP_CIM_ERR); in t4_os_cim_err()
9322 int rc; in sysctl_cim_ma_la() local
9331 rc = 0; in sysctl_cim_ma_la()
9332 mtx_lock(&sc->reg_lock); in sysctl_cim_ma_la()
9334 rc = ENXIO; in sysctl_cim_ma_la()
9337 mtx_unlock(&sc->reg_lock); in sysctl_cim_ma_la()
9338 if (rc) in sysctl_cim_ma_la()
9343 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], in sysctl_cim_ma_la()
9349 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", in sysctl_cim_ma_la()
9356 rc = sbuf_finish(sb); in sysctl_cim_ma_la()
9360 return (rc); in sysctl_cim_ma_la()
9370 int rc; in sysctl_cim_pif_la() local
9379 rc = 0; in sysctl_cim_pif_la()
9380 mtx_lock(&sc->reg_lock); in sysctl_cim_pif_la()
9382 rc = ENXIO; in sysctl_cim_pif_la()
9385 mtx_unlock(&sc->reg_lock); in sysctl_cim_pif_la()
9386 if (rc) in sysctl_cim_pif_la()
9392 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", in sysctl_cim_pif_la()
9399 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", in sysctl_cim_pif_la()
9403 rc = sbuf_finish(sb); in sysctl_cim_pif_la()
9407 return (rc); in sysctl_cim_pif_la()
9415 int rc, i; in sysctl_cim_qcfg() local
9423 cim_num_obq = sc->chip_params->cim_num_obq; in sysctl_cim_qcfg()
9433 mtx_lock(&sc->reg_lock); in sysctl_cim_qcfg()
9435 rc = ENXIO; in sysctl_cim_qcfg()
9437 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); in sysctl_cim_qcfg()
9438 if (rc == 0) { in sysctl_cim_qcfg()
9439 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, in sysctl_cim_qcfg()
9441 if (rc == 0) in sysctl_cim_qcfg()
9445 mtx_unlock(&sc->reg_lock); in sysctl_cim_qcfg()
9446 if (rc) in sysctl_cim_qcfg()
9447 return (rc); in sysctl_cim_qcfg()
9457 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", in sysctl_cim_qcfg()
9462 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], in sysctl_cim_qcfg()
9464 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), in sysctl_cim_qcfg()
9467 rc = sbuf_finish(sb); in sysctl_cim_qcfg()
9470 return (rc); in sysctl_cim_qcfg()
9478 int rc; in sysctl_cpl_stats() local
9485 rc = 0; in sysctl_cpl_stats()
9486 mtx_lock(&sc->reg_lock); in sysctl_cpl_stats()
9488 rc = ENXIO; in sysctl_cpl_stats()
9491 mtx_unlock(&sc->reg_lock); in sysctl_cpl_stats()
9492 if (rc) in sysctl_cpl_stats()
9495 if (sc->chip_params->nchan > 2) { in sysctl_cpl_stats()
9510 rc = sbuf_finish(sb); in sysctl_cpl_stats()
9513 return (rc); in sysctl_cpl_stats()
9521 int rc; in sysctl_ddp_stats() local
9528 rc = 0; in sysctl_ddp_stats()
9529 mtx_lock(&sc->reg_lock); in sysctl_ddp_stats()
9531 rc = ENXIO; in sysctl_ddp_stats()
9534 mtx_unlock(&sc->reg_lock); in sysctl_ddp_stats()
9535 if (rc == 0) { in sysctl_ddp_stats()
9539 rc = sbuf_finish(sb); in sysctl_ddp_stats()
9543 return (rc); in sysctl_ddp_stats()
9551 int rc; in sysctl_tid_stats() local
9558 rc = 0; in sysctl_tid_stats()
9559 mtx_lock(&sc->reg_lock); in sysctl_tid_stats()
9561 rc = ENXIO; in sysctl_tid_stats()
9564 mtx_unlock(&sc->reg_lock); in sysctl_tid_stats()
9565 if (rc == 0) { in sysctl_tid_stats()
9570 rc = sbuf_finish(sb); in sysctl_tid_stats()
9574 return (rc); in sysctl_tid_stats()
9617 int i, j, rc, nentries, first = 0; in sbuf_devlog() local
9618 struct devlog_params *dparams = &sc->params.devlog; in sbuf_devlog()
9622 if (dparams->addr == 0) in sbuf_devlog()
9626 buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags); in sbuf_devlog()
9630 mtx_lock(&sc->reg_lock); in sbuf_devlog()
9632 rc = ENXIO; in sbuf_devlog()
9634 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, in sbuf_devlog()
9635 dparams->size); in sbuf_devlog()
9636 mtx_unlock(&sc->reg_lock); in sbuf_devlog()
9637 if (rc != 0) in sbuf_devlog()
9640 nentries = dparams->size / sizeof(struct fw_devlog_e); in sbuf_devlog()
9644 if (e->timestamp == 0) in sbuf_devlog()
9647 e->timestamp = be64toh(e->timestamp); in sbuf_devlog()
9648 e->seqno = be32toh(e->seqno); in sbuf_devlog()
9650 e->params[j] = be32toh(e->params[j]); in sbuf_devlog()
9652 if (e->timestamp < ftstamp) { in sbuf_devlog()
9653 ftstamp = e->timestamp; in sbuf_devlog()
9667 if (e->timestamp == 0) in sbuf_devlog()
9671 e->seqno, e->timestamp, in sbuf_devlog()
9672 (e->level < nitems(devlog_level_strings) ? in sbuf_devlog()
9673 devlog_level_strings[e->level] : "UNKNOWN"), in sbuf_devlog()
9674 (e->facility < nitems(devlog_facility_strings) ? in sbuf_devlog()
9675 devlog_facility_strings[e->facility] : "UNKNOWN")); in sbuf_devlog()
9676 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], in sbuf_devlog()
9677 e->params[2], e->params[3], e->params[4], in sbuf_devlog()
9678 e->params[5], e->params[6], e->params[7]); in sbuf_devlog()
9685 return (rc); in sbuf_devlog()
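/*
 * The scan above treats the firmware device log as a ring: a first pass
 * byte-swaps each entry and remembers the index of the oldest timestamp
 * ("first"), then the print loop walks nentries entries starting there,
 * wrapping around and stopping at a never-written (timestamp == 0) slot.
 * Sketch of the implied wrap-around walk (not the literal code):
 */
for (j = 0, i = first; j < nentries; j++, i = (i + 1) % nentries) {
	const struct fw_devlog_e *e = &buf[i];

	if (e->timestamp == 0)
		break;		/* unused slot: the log never filled up */
	/* ... format entry e into the sbuf ... */
}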
9692 int rc; in sysctl_devlog() local
9699 rc = sbuf_devlog(sc, sb, M_WAITOK); in sysctl_devlog()
9700 if (rc == 0) in sysctl_devlog()
9701 rc = sbuf_finish(sb); in sysctl_devlog()
9703 return (rc); in sysctl_devlog()
9709 int rc; in dump_devlog() local
9714 device_get_nameunit(sc->dev)); in dump_devlog()
9717 rc = sbuf_devlog(sc, &sb, M_WAITOK); in dump_devlog()
9718 if (rc == 0) { in dump_devlog()
9719 rc = sbuf_finish(&sb); in dump_devlog()
9720 if (rc == 0) { in dump_devlog()
9722 device_get_nameunit(sc->dev), sbuf_data(&sb)); in dump_devlog()
9733 int rc; in sysctl_fcoe_stats() local
9735 int i, nchan = sc->chip_params->nchan; in sysctl_fcoe_stats()
9737 rc = 0; in sysctl_fcoe_stats()
9738 mtx_lock(&sc->reg_lock); in sysctl_fcoe_stats()
9740 rc = ENXIO; in sysctl_fcoe_stats()
9745 mtx_unlock(&sc->reg_lock); in sysctl_fcoe_stats()
9746 if (rc != 0) in sysctl_fcoe_stats()
9747 return (rc); in sysctl_fcoe_stats()
9775 rc = sbuf_finish(sb); in sysctl_fcoe_stats()
9778 return (rc); in sysctl_fcoe_stats()
9786 int rc, i; in sysctl_hw_sched() local
9794 mtx_lock(&sc->reg_lock); in sysctl_hw_sched()
9796 mtx_unlock(&sc->reg_lock); in sysctl_hw_sched()
9797 rc = ENXIO; in sysctl_hw_sched()
9804 mtx_unlock(&sc->reg_lock); in sysctl_hw_sched()
9811 sbuf_printf(sb, "\n %u %-5s %u ", i, in sysctl_hw_sched()
9828 rc = sbuf_finish(sb); in sysctl_hw_sched()
9831 return (rc); in sysctl_hw_sched()
9839 int rc, i, j; in sysctl_lb_stats() local
9858 rc = 0; in sysctl_lb_stats()
9859 for (i = 0; i < sc->chip_params->nchan; i += 2) { in sysctl_lb_stats()
9860 mtx_lock(&sc->reg_lock); in sysctl_lb_stats()
9862 rc = ENXIO; in sysctl_lb_stats()
9867 mtx_unlock(&sc->reg_lock); in sysctl_lb_stats()
9868 if (rc != 0) in sysctl_lb_stats()
9877 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], in sysctl_lb_stats()
9881 if (rc == 0) in sysctl_lb_stats()
9882 rc = sbuf_finish(sb); in sysctl_lb_stats()
9885 return (rc); in sysctl_lb_stats()
9891 int rc = 0; in sysctl_linkdnrc() local
9893 struct link_config *lc = &pi->link_cfg; in sysctl_linkdnrc()
9900 if (lc->link_ok || lc->link_down_rc == 255) in sysctl_linkdnrc()
9903 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc)); in sysctl_linkdnrc()
9905 rc = sbuf_finish(sb); in sysctl_linkdnrc()
9908 return (rc); in sysctl_linkdnrc()
9920 const u_int v1 = ((const struct mem_desc *)a)->base; in mem_desc_cmp()
9921 const u_int v2 = ((const struct mem_desc *)b)->base; in mem_desc_cmp()
9924 return (-1); in mem_desc_cmp()
9940 size = to - from + 1; in mem_region_show()
9945 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); in mem_region_show()
9953 int rc, i, n; in sysctl_meminfo() local
9965 "ULPTX state:", "On-chip queues:", in sysctl_meminfo()
9971 rc = sysctl_wire_old_buffer(req, 0); in sysctl_meminfo()
9972 if (rc != 0) in sysctl_meminfo()
9973 return (rc); in sysctl_meminfo()
9984 mtx_lock(&sc->reg_lock); in sysctl_meminfo()
9986 rc = ENXIO; in sysctl_meminfo()
10033 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); in sysctl_meminfo()
10034 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); in sysctl_meminfo()
10035 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); in sysctl_meminfo()
10036 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); in sysctl_meminfo()
10037 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); in sysctl_meminfo()
10038 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); in sysctl_meminfo()
10039 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); in sysctl_meminfo()
10040 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); in sysctl_meminfo()
10041 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); in sysctl_meminfo()
10044 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); in sysctl_meminfo()
10045 md->limit = md->base - 1 + in sysctl_meminfo()
10050 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); in sysctl_meminfo()
10051 md->limit = md->base - 1 + in sysctl_meminfo()
10058 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); in sysctl_meminfo()
10060 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); in sysctl_meminfo()
10061 md->limit = 0; in sysctl_meminfo()
10063 md->base = 0; in sysctl_meminfo()
10064 md->idx = nitems(region); /* hide it */ in sysctl_meminfo()
10069 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ in sysctl_meminfo()
10070 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) in sysctl_meminfo()
10080 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) { in sysctl_meminfo()
10085 md->base = 0; in sysctl_meminfo()
10087 md->idx = nitems(region); in sysctl_meminfo()
10100 md->base = t4_read_reg(sc, A_SGE_DBVFIFO_BADDR); in sysctl_meminfo()
10101 md->limit = md->base + size - 1; in sysctl_meminfo()
10103 md->idx = nitems(region); in sysctl_meminfo()
10107 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); in sysctl_meminfo()
10108 md->limit = 0; in sysctl_meminfo()
10110 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); in sysctl_meminfo()
10111 md->limit = 0; in sysctl_meminfo()
10114 md->base = sc->vres.ocq.start; in sysctl_meminfo()
10115 if (sc->vres.ocq.size) in sysctl_meminfo()
10116 md->limit = md->base + sc->vres.ocq.size - 1; in sysctl_meminfo()
10118 md->idx = nitems(region); /* hide it */ in sysctl_meminfo()
10121 /* add any address-space holes, there can be up to 3 */ in sysctl_meminfo()
10122 for (n = 0; n < i - 1; n++) in sysctl_meminfo()
10124 (md++)->base = avail[n].limit; in sysctl_meminfo()
10126 (md++)->base = avail[n].limit; in sysctl_meminfo()
10128 n = md - mem; in sysctl_meminfo()
10134 avail[lo].limit - 1); in sysctl_meminfo()
10141 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; in sysctl_meminfo()
10148 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; in sysctl_meminfo()
10152 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; in sysctl_meminfo()
10171 sbuf_printf(sb, "%u p-structs (%u free)\n", in sysctl_meminfo()
10191 for (i = 0; i < sc->chip_params->nchan; i++) { in sysctl_meminfo()
10209 mtx_unlock(&sc->reg_lock); in sysctl_meminfo()
10210 if (rc == 0) in sysctl_meminfo()
10211 rc = sbuf_finish(sb); in sysctl_meminfo()
10213 return (rc); in sysctl_meminfo()
10217 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) in tcamxy2valmask() argument
10219 *mask = x | y; in tcamxy2valmask()
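/*
 * MPS TCAM entries are stored X/Y encoded: per bit, (x,y) = (0,0) is
 * "don't care", (0,1) matches a 1 and (1,0) matches a 0, so the care-mask
 * is simply x | y as computed above, and the elided remainder of
 * tcamxy2valmask() presumably recovers the matched value and copies the
 * MAC address bytes into *addr.
 */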
10229 int rc, i; in sysctl_mps_tcam() local
10240 rc = 0; in sysctl_mps_tcam()
10241 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { in sysctl_mps_tcam()
10246 mtx_lock(&sc->reg_lock); in sysctl_mps_tcam()
10248 rc = ENXIO; in sysctl_mps_tcam()
10253 mtx_unlock(&sc->reg_lock); in sysctl_mps_tcam()
10254 if (rc != 0) in sysctl_mps_tcam()
10259 mtx_lock(&sc->reg_lock); in sysctl_mps_tcam()
10261 rc = ENXIO; in sysctl_mps_tcam()
10266 mtx_unlock(&sc->reg_lock); in sysctl_mps_tcam()
10267 if (rc != 0) in sysctl_mps_tcam()
10269 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" in sysctl_mps_tcam()
10270 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2], in sysctl_mps_tcam()
10274 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1); in sysctl_mps_tcam()
10289 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, in sysctl_mps_tcam()
10291 if (rc) in sysctl_mps_tcam()
10294 rc = ENXIO; in sysctl_mps_tcam()
10296 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, in sysctl_mps_tcam()
10299 if (rc != 0) in sysctl_mps_tcam()
10302 sbuf_printf(sb, " %08x %08x %08x %08x", in sysctl_mps_tcam()
10311 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), in sysctl_mps_tcam()
10316 if (rc) in sysctl_mps_tcam()
10319 rc = sbuf_finish(sb); in sysctl_mps_tcam()
10322 return (rc); in sysctl_mps_tcam()
10330 int rc, i; in sysctl_mps_tcam_t6() local
10343 rc = 0; in sysctl_mps_tcam_t6()
10344 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { in sysctl_mps_tcam_t6()
10355 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); in sysctl_mps_tcam_t6()
10356 mtx_lock(&sc->reg_lock); in sysctl_mps_tcam_t6()
10358 rc = ENXIO; in sysctl_mps_tcam_t6()
10366 mtx_unlock(&sc->reg_lock); in sysctl_mps_tcam_t6()
10367 if (rc != 0) in sysctl_mps_tcam_t6()
10386 mtx_lock(&sc->reg_lock); in sysctl_mps_tcam_t6()
10388 rc = ENXIO; in sysctl_mps_tcam_t6()
10396 mtx_unlock(&sc->reg_lock); in sysctl_mps_tcam_t6()
10397 if (rc != 0) in sysctl_mps_tcam_t6()
10411 mtx_lock(&sc->reg_lock); in sysctl_mps_tcam_t6()
10413 rc = ENXIO; in sysctl_mps_tcam_t6()
10418 mtx_unlock(&sc->reg_lock); in sysctl_mps_tcam_t6()
10419 if (rc != 0) in sysctl_mps_tcam_t6()
10423 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " in sysctl_mps_tcam_t6()
10424 "%012jx %06x %06x - - %3c" in sysctl_mps_tcam_t6()
10425 " I %4x %3c %#x%4u%4d", i, addr[0], in sysctl_mps_tcam_t6()
10430 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); in sysctl_mps_tcam_t6()
10432 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " in sysctl_mps_tcam_t6()
10433 "%012jx - - ", i, addr[0], addr[1], in sysctl_mps_tcam_t6()
10440 sbuf_printf(sb, " - N "); in sysctl_mps_tcam_t6()
10442 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", in sysctl_mps_tcam_t6()
10446 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); in sysctl_mps_tcam_t6()
10463 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, in sysctl_mps_tcam_t6()
10465 if (rc) in sysctl_mps_tcam_t6()
10468 rc = ENXIO; in sysctl_mps_tcam_t6()
10470 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, in sysctl_mps_tcam_t6()
10473 if (rc != 0) in sysctl_mps_tcam_t6()
10476 sbuf_printf(sb, " %08x %08x %08x %08x" in sysctl_mps_tcam_t6()
10477 " %08x %08x %08x %08x", in sysctl_mps_tcam_t6()
10490 sbuf_printf(sb, "%4u%3u%3u%3u %#x", in sysctl_mps_tcam_t6()
10496 if (rc) in sysctl_mps_tcam_t6()
10499 rc = sbuf_finish(sb); in sysctl_mps_tcam_t6()
10502 return (rc); in sysctl_mps_tcam_t6()
10510 int rc; in sysctl_path_mtus() local
10513 rc = 0; in sysctl_path_mtus()
10514 mtx_lock(&sc->reg_lock); in sysctl_path_mtus()
10516 rc = ENXIO; in sysctl_path_mtus()
10519 mtx_unlock(&sc->reg_lock); in sysctl_path_mtus()
10520 if (rc != 0) in sysctl_path_mtus()
10521 return (rc); in sysctl_path_mtus()
10532 rc = sbuf_finish(sb); in sysctl_path_mtus()
10535 return (rc); in sysctl_path_mtus()
10543 int rc, i; in sysctl_pm_stats() local
10555 rc = 0; in sysctl_pm_stats()
10556 mtx_lock(&sc->reg_lock); in sysctl_pm_stats()
10558 rc = ENXIO; in sysctl_pm_stats()
10563 mtx_unlock(&sc->reg_lock); in sysctl_pm_stats()
10564 if (rc != 0) in sysctl_pm_stats()
10565 return (rc); in sysctl_pm_stats()
10573 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], in sysctl_pm_stats()
10579 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], in sysctl_pm_stats()
10586 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], in sysctl_pm_stats()
10588 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], in sysctl_pm_stats()
10596 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], in sysctl_pm_stats()
10598 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], in sysctl_pm_stats()
10602 rc = sbuf_finish(sb); in sysctl_pm_stats()
10605 return (rc); in sysctl_pm_stats()
10613 int rc; in sysctl_rdma_stats() local
10616 rc = 0; in sysctl_rdma_stats()
10617 mtx_lock(&sc->reg_lock); in sysctl_rdma_stats()
10619 rc = ENXIO; in sysctl_rdma_stats()
10622 mtx_unlock(&sc->reg_lock); in sysctl_rdma_stats()
10623 if (rc != 0) in sysctl_rdma_stats()
10624 return (rc); in sysctl_rdma_stats()
10633 rc = sbuf_finish(sb); in sysctl_rdma_stats()
10636 return (rc); in sysctl_rdma_stats()
10644 int rc; in sysctl_tcp_stats() local
10647 rc = 0; in sysctl_tcp_stats()
10648 mtx_lock(&sc->reg_lock); in sysctl_tcp_stats()
10650 rc = ENXIO; in sysctl_tcp_stats()
10653 mtx_unlock(&sc->reg_lock); in sysctl_tcp_stats()
10654 if (rc != 0) in sysctl_tcp_stats()
10655 return (rc); in sysctl_tcp_stats()
10672 rc = sbuf_finish(sb); in sysctl_tcp_stats()
10675 return (rc); in sysctl_tcp_stats()
10683 int rc; in sysctl_tids() local
10684 uint32_t x, y; in sysctl_tids() local
10685 struct tid_info *t = &sc->tids; in sysctl_tids()
10687 rc = 0; in sysctl_tids()
10692 if (t->natids) { in sysctl_tids()
10693 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, in sysctl_tids()
10694 t->atids_in_use); in sysctl_tids()
10697 if (t->nhpftids) { in sysctl_tids()
10698 sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n", in sysctl_tids()
10699 t->hpftid_base, t->hpftid_end, t->hpftids_in_use); in sysctl_tids()
10702 if (t->ntids) { in sysctl_tids()
10705 mtx_lock(&sc->reg_lock); in sysctl_tids()
10707 rc = ENXIO; in sysctl_tids()
10711 x = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; in sysctl_tids()
10714 x = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX); in sysctl_tids()
10718 mtx_unlock(&sc->reg_lock); in sysctl_tids()
10719 if (rc != 0) in sysctl_tids()
10724 if (x) in sysctl_tids()
10725 sbuf_printf(sb, "%u-%u, ", t->tid_base, x - 1); in sysctl_tids()
10726 sbuf_printf(sb, "%u-%u", y, t->ntids - 1); in sysctl_tids()
10728 sbuf_printf(sb, "%u-%u", t->tid_base, t->tid_base + in sysctl_tids()
10729 t->ntids - 1); in sysctl_tids()
10732 atomic_load_acq_int(&t->tids_in_use)); in sysctl_tids()
10735 if (t->nstids) { in sysctl_tids()
10736 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, in sysctl_tids()
10737 t->stid_base + t->nstids - 1, t->stids_in_use); in sysctl_tids()
10740 if (t->nftids) { in sysctl_tids()
10741 sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base, in sysctl_tids()
10742 t->ftid_end, t->ftids_in_use); in sysctl_tids()
10745 if (t->netids) { in sysctl_tids()
10746 sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base, in sysctl_tids()
10747 t->etid_base + t->netids - 1, t->etids_in_use); in sysctl_tids()
10750 mtx_lock(&sc->reg_lock); in sysctl_tids()
10752 rc = ENXIO; in sysctl_tids()
10754 x = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4); in sysctl_tids()
10757 mtx_unlock(&sc->reg_lock); in sysctl_tids()
10758 if (rc != 0) in sysctl_tids()
10760 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", x, y); in sysctl_tids()
10762 if (rc == 0) in sysctl_tids()
10763 rc = sbuf_finish(sb); in sysctl_tids()
10768 return (rc); in sysctl_tids()
10776 int rc; in sysctl_tp_err_stats() local
10779 rc = 0; in sysctl_tp_err_stats()
10780 mtx_lock(&sc->reg_lock); in sysctl_tp_err_stats()
10782 rc = ENXIO; in sysctl_tp_err_stats()
10785 mtx_unlock(&sc->reg_lock); in sysctl_tp_err_stats()
10786 if (rc != 0) in sysctl_tp_err_stats()
10787 return (rc); in sysctl_tp_err_stats()
10793 if (sc->chip_params->nchan > 2) { in sysctl_tp_err_stats()
10843 rc = sbuf_finish(sb); in sysctl_tp_err_stats()
10846 return (rc); in sysctl_tp_err_stats()
10854 int rc; in sysctl_tnl_stats() local
10857 rc = 0; in sysctl_tnl_stats()
10858 mtx_lock(&sc->reg_lock); in sysctl_tnl_stats()
10860 rc = ENXIO; in sysctl_tnl_stats()
10863 mtx_unlock(&sc->reg_lock); in sysctl_tnl_stats()
10864 if (rc != 0) in sysctl_tnl_stats()
10865 return (rc); in sysctl_tnl_stats()
10871 if (sc->chip_params->nchan > 2) { in sysctl_tnl_stats()
10888 rc = sbuf_finish(sb); in sysctl_tnl_stats()
10891 return (rc); in sysctl_tnl_stats()
10898 struct tp_params *tpp = &sc->params.tp; in sysctl_tp_la_mask()
10900 int rc; in sysctl_tp_la_mask() local
10902 mask = tpp->la_mask >> 16; in sysctl_tp_la_mask()
10903 rc = sysctl_handle_int(oidp, &mask, 0, req); in sysctl_tp_la_mask()
10904 if (rc != 0 || req->newptr == NULL) in sysctl_tp_la_mask()
10905 return (rc); in sysctl_tp_la_mask()
10908 mtx_lock(&sc->reg_lock); in sysctl_tp_la_mask()
10910 rc = ENXIO; in sysctl_tp_la_mask()
10912 tpp->la_mask = mask << 16; in sysctl_tp_la_mask()
10914 tpp->la_mask); in sysctl_tp_la_mask()
10916 mtx_unlock(&sc->reg_lock); in sysctl_tp_la_mask()
10918 return (rc); in sysctl_tp_la_mask()
10933 while (f->name) { in field_desc_show()
10934 uint64_t mask = (1ULL << f->width) - 1; in field_desc_show()
10935 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, in field_desc_show()
10936 ((uintmax_t)v >> f->start) & mask); in field_desc_show()
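/*
 * The extraction above is the usual width/start pair: a field of "width"
 * bits starting at bit "start" of v is ((v >> start) & ((1ULL << width) - 1)).
 * E.g. a 3-bit field at start = 4 of v = 0x7b yields (0x7b >> 4) & 0x7 = 0x7.
 */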
11096 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) in tp_la_show2()
11107 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) in tp_la_show3()
11117 int rc; in sysctl_tp_la() local
11121 rc = 0; in sysctl_tp_la()
11128 mtx_lock(&sc->reg_lock); in sysctl_tp_la()
11130 rc = ENXIO; in sysctl_tp_la()
11147 mtx_unlock(&sc->reg_lock); in sysctl_tp_la()
11148 if (rc != 0) in sysctl_tp_la()
11154 rc = sbuf_finish(sb); in sysctl_tp_la()
11158 return (rc); in sysctl_tp_la()
11166 int rc; in sysctl_tx_rate() local
11169 rc = 0; in sysctl_tx_rate()
11170 mtx_lock(&sc->reg_lock); in sysctl_tx_rate()
11172 rc = ENXIO; in sysctl_tx_rate()
11175 mtx_unlock(&sc->reg_lock); in sysctl_tx_rate()
11176 if (rc != 0) in sysctl_tx_rate()
11177 return (rc); in sysctl_tx_rate()
11183 if (sc->chip_params->nchan > 2) { in sysctl_tx_rate()
11198 rc = sbuf_finish(sb); in sysctl_tx_rate()
11201 return (rc); in sysctl_tx_rate()
11210 int rc, i; in sysctl_ulprx_la() local
11212 rc = 0; in sysctl_ulprx_la()
11220 mtx_lock(&sc->reg_lock); in sysctl_ulprx_la()
11222 rc = ENXIO; in sysctl_ulprx_la()
11225 mtx_unlock(&sc->reg_lock); in sysctl_ulprx_la()
11226 if (rc != 0) in sysctl_ulprx_la()
11233 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", in sysctl_ulprx_la()
11236 rc = sbuf_finish(sb); in sysctl_ulprx_la()
11240 return (rc); in sysctl_ulprx_la()
11248 int rc; in sysctl_wcwr_stats() local
11253 rc = 0; in sysctl_wcwr_stats()
11254 mtx_lock(&sc->reg_lock); in sysctl_wcwr_stats()
11256 rc = ENXIO; in sysctl_wcwr_stats()
11262 mtx_unlock(&sc->reg_lock); in sysctl_wcwr_stats()
11263 if (rc != 0) in sysctl_wcwr_stats()
11264 return (rc); in sysctl_wcwr_stats()
11281 rc = sbuf_finish(sb); in sysctl_wcwr_stats()
11284 return (rc); in sysctl_wcwr_stats()
11294 int i, rc; in sysctl_cpus() local
11299 rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset); in sysctl_cpus()
11300 if (rc != 0) in sysctl_cpus()
11301 return (rc); in sysctl_cpus()
11309 rc = sbuf_finish(sb); in sysctl_cpus()
11312 return (rc); in sysctl_cpus()
11320 int rc; in sysctl_reset() local
11322 val = atomic_load_int(&sc->num_resets); in sysctl_reset()
11323 rc = sysctl_handle_int(oidp, &val, 0, req); in sysctl_reset()
11324 if (rc != 0 || req->newptr == NULL) in sysctl_reset()
11325 return (rc); in sysctl_reset()
11329 atomic_store_int(&sc->num_resets, 0); in sysctl_reset()
11339 taskqueue_enqueue(reset_tq, &sc->reset_task); in sysctl_reset()
11348 int i, j, v, rc; in sysctl_tls() local
11351 v = sc->tt.tls; in sysctl_tls()
11352 rc = sysctl_handle_int(oidp, &v, 0, req); in sysctl_tls()
11353 if (rc != 0 || req->newptr == NULL) in sysctl_tls()
11354 return (rc); in sysctl_tls()
11356 if (v != 0 && !(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS)) in sysctl_tls()
11359 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4stls"); in sysctl_tls()
11360 if (rc) in sysctl_tls()
11361 return (rc); in sysctl_tls()
11363 rc = ENXIO; in sysctl_tls()
11365 sc->tt.tls = !!v; in sysctl_tls()
11367 for_each_vi(sc->port[i], j, vi) { in sysctl_tls()
11368 if (vi->flags & VI_INIT_DONE) in sysctl_tls()
11369 t4_update_fl_bufsize(vi->ifp); in sysctl_tls()
11375 return (rc); in sysctl_tls()
11399 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; in sysctl_tp_tick()
11401 mtx_lock(&sc->reg_lock); in sysctl_tp_tick()
11403 res = (u_int)-1; in sysctl_tp_tick()
11406 mtx_unlock(&sc->reg_lock); in sysctl_tp_tick()
11407 if (res == (u_int)-1) in sysctl_tp_tick()
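/*
 * Worked example of the conversion above: the VPD reports the core clock in
 * kHz, so a 250 MHz part has cclk = 250000 and cclk_ps = 10^9 / 250000 =
 * 4000 picoseconds per core-clock tick; raw TP timer values are scaled by
 * cclk_ps before being reported in time units.
 */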
11436 int rc; in sysctl_tp_dack_timer() local
11438 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; in sysctl_tp_dack_timer()
11440 mtx_lock(&sc->reg_lock); in sysctl_tp_dack_timer()
11442 rc = ENXIO; in sysctl_tp_dack_timer()
11444 rc = 0; in sysctl_tp_dack_timer()
11449 mtx_unlock(&sc->reg_lock); in sysctl_tp_dack_timer()
11450 if (rc != 0) in sysctl_tp_dack_timer()
11451 return (rc); in sysctl_tp_dack_timer()
11462 int rc, reg = arg2; in sysctl_tp_timer() local
11465 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; in sysctl_tp_timer()
11472 mtx_lock(&sc->reg_lock); in sysctl_tp_timer()
11474 rc = ENXIO; in sysctl_tp_timer()
11476 rc = 0; in sysctl_tp_timer()
11484 mtx_unlock(&sc->reg_lock); in sysctl_tp_timer()
11485 if (rc != 0) in sysctl_tp_timer()
11486 return (rc); in sysctl_tp_timer()
11499 int rc, idx = arg2; in sysctl_tp_shift_cnt() local
11504 mtx_lock(&sc->reg_lock); in sysctl_tp_shift_cnt()
11506 rc = ENXIO; in sysctl_tp_shift_cnt()
11508 rc = 0; in sysctl_tp_shift_cnt()
11511 mtx_unlock(&sc->reg_lock); in sysctl_tp_shift_cnt()
11512 if (rc != 0) in sysctl_tp_shift_cnt()
11513 return (rc); in sysctl_tp_shift_cnt()
11522 int rc, idx = arg2; in sysctl_tp_backoff() local
11529 mtx_lock(&sc->reg_lock); in sysctl_tp_backoff()
11531 rc = ENXIO; in sysctl_tp_backoff()
11533 rc = 0; in sysctl_tp_backoff()
11536 mtx_unlock(&sc->reg_lock); in sysctl_tp_backoff()
11537 if (rc != 0) in sysctl_tp_backoff()
11538 return (rc); in sysctl_tp_backoff()
11547 struct adapter *sc = vi->adapter; in sysctl_holdoff_tmr_idx_ofld()
11548 int idx, rc, i; in sysctl_holdoff_tmr_idx_ofld() local
11552 idx = vi->ofld_tmr_idx; in sysctl_holdoff_tmr_idx_ofld()
11554 rc = sysctl_handle_int(oidp, &idx, 0, req); in sysctl_holdoff_tmr_idx_ofld()
11555 if (rc != 0 || req->newptr == NULL) in sysctl_holdoff_tmr_idx_ofld()
11556 return (rc); in sysctl_holdoff_tmr_idx_ofld()
11561 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, in sysctl_holdoff_tmr_idx_ofld()
11563 if (rc) in sysctl_holdoff_tmr_idx_ofld()
11564 return (rc); in sysctl_holdoff_tmr_idx_ofld()
11566 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1); in sysctl_holdoff_tmr_idx_ofld()
11569 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); in sysctl_holdoff_tmr_idx_ofld()
11571 ofld_rxq->iq.intr_params = v; in sysctl_holdoff_tmr_idx_ofld()
11574 vi->ofld_tmr_idx = idx; in sysctl_holdoff_tmr_idx_ofld()
11584 struct adapter *sc = vi->adapter; in sysctl_holdoff_pktc_idx_ofld()
11585 int idx, rc; in sysctl_holdoff_pktc_idx_ofld() local
11587 idx = vi->ofld_pktc_idx; in sysctl_holdoff_pktc_idx_ofld()
11589 rc = sysctl_handle_int(oidp, &idx, 0, req); in sysctl_holdoff_pktc_idx_ofld()
11590 if (rc != 0 || req->newptr == NULL) in sysctl_holdoff_pktc_idx_ofld()
11591 return (rc); in sysctl_holdoff_pktc_idx_ofld()
11593 if (idx < -1 || idx >= SGE_NCOUNTERS) in sysctl_holdoff_pktc_idx_ofld()
11596 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, in sysctl_holdoff_pktc_idx_ofld()
11598 if (rc) in sysctl_holdoff_pktc_idx_ofld()
11599 return (rc); in sysctl_holdoff_pktc_idx_ofld()
11601 if (vi->flags & VI_INIT_DONE) in sysctl_holdoff_pktc_idx_ofld()
11602 rc = EBUSY; /* cannot be changed once the queues are created */ in sysctl_holdoff_pktc_idx_ofld()
11604 vi->ofld_pktc_idx = idx; in sysctl_holdoff_pktc_idx_ofld()
11607 return (rc); in sysctl_holdoff_pktc_idx_ofld()
11614 int rc; in get_sge_context() local
11616 if (cntxt->cid > M_CTXTQID) in get_sge_context()
11619 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && in get_sge_context()
11620 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) in get_sge_context()
11623 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); in get_sge_context()
11624 if (rc) in get_sge_context()
11625 return (rc); in get_sge_context()
11628 rc = ENXIO; in get_sge_context()
11632 if (sc->flags & FW_OK) { in get_sge_context()
11633 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, in get_sge_context()
11634 &cntxt->data[0]); in get_sge_context()
11635 if (rc == 0) in get_sge_context()
11643 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); in get_sge_context()
11646 return (rc); in get_sge_context()
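get_sge_context() prefers the firmware mailbox when the firmware is usable (FW_OK) and only falls back to a direct backdoor read of the SGE context when that fails, negating the shared-code return value to turn a negative errno into the positive value the ioctl path returns. A restructured sketch of that fallback (not the driver's exact control flow; the initial ENXIO is only a "no result yet" placeholder):

	rc = ENXIO;				/* placeholder: no result yet */
	if (sc->flags & FW_OK) {
		/* Preferred: let the firmware read the context via the mailbox. */
		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
		    &cntxt->data[0]);
	}
	if (rc != 0) {
		/* Fallback: read the SGE context registers directly. */
		rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id,
		    &cntxt->data[0]);
	}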
11652 int rc; in load_fw() local
11655 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); in load_fw()
11656 if (rc) in load_fw()
11657 return (rc); in load_fw()
11660 rc = ENXIO; in load_fw()
11670 if (sc->flags & FULL_INIT_DONE && in load_fw()
11671 (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) { in load_fw()
11672 rc = EBUSY; in load_fw()
11676 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); in load_fw()
11678 rc = copyin(fw->data, fw_data, fw->len); in load_fw()
11679 if (rc == 0) in load_fw()
11680 rc = -t4_load_fw(sc, fw_data, fw->len); in load_fw()
11685 return (rc); in load_fw()
11691 int rc; in load_cfg() local
11694 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); in load_cfg()
11695 if (rc) in load_cfg()
11696 return (rc); in load_cfg()
11699 rc = ENXIO; in load_cfg()
11703 if (cfg->len == 0) { in load_cfg()
11705 rc = -t4_load_cfg(sc, NULL, 0); in load_cfg()
11709 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK); in load_cfg()
11711 rc = copyin(cfg->data, cfg_data, cfg->len); in load_cfg()
11712 if (rc == 0) in load_cfg()
11713 rc = -t4_load_cfg(sc, cfg_data, cfg->len); in load_cfg()
11718 return (rc); in load_cfg()
11724 int rc; in load_boot() local
11728 if (br->len > 1024 * 1024) in load_boot()
11731 if (br->pf_offset == 0) { in load_boot()
11733 if (br->pfidx_addr > 7) in load_boot()
11735 offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr, in load_boot()
11737 } else if (br->pf_offset == 1) { in load_boot()
11739 offset = G_OFFSET(br->pfidx_addr); in load_boot()
11744 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr"); in load_boot()
11745 if (rc) in load_boot()
11746 return (rc); in load_boot()
11749 rc = ENXIO; in load_boot()
11753 if (br->len == 0) { in load_boot()
11755 rc = -t4_load_boot(sc, NULL, offset, 0); in load_boot()
11759 br_data = malloc(br->len, M_CXGBE, M_WAITOK); in load_boot()
11761 rc = copyin(br->data, br_data, br->len); in load_boot()
11762 if (rc == 0) in load_boot()
11763 rc = -t4_load_boot(sc, br_data, offset, br->len); in load_boot()
11768 return (rc); in load_boot()
11774 int rc; in load_bootcfg() local
11777 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); in load_bootcfg()
11778 if (rc) in load_bootcfg()
11779 return (rc); in load_bootcfg()
11782 rc = ENXIO; in load_bootcfg()
11786 if (bc->len == 0) { in load_bootcfg()
11788 rc = -t4_load_bootcfg(sc, NULL, 0); in load_bootcfg()
11792 bc_data = malloc(bc->len, M_CXGBE, M_WAITOK); in load_bootcfg()
11794 rc = copyin(bc->data, bc_data, bc->len); in load_bootcfg()
11795 if (rc == 0) in load_bootcfg()
11796 rc = -t4_load_bootcfg(sc, bc_data, bc->len); in load_bootcfg()
11801 return (rc); in load_bootcfg()
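load_fw(), load_cfg(), load_boot() and load_bootcfg() all follow one pattern: serialize with a synchronized op, bounce the user payload into a kernel buffer with malloc(9) and copyin(), call the shared t4_load_*() routine, and negate its result because the shared Chelsio code reports errors as negative errno values. A condensed sketch of the buffer handling, using t4_load_cfg() as the example; req_len and user_data are placeholder names, and the synchronized-op bracket and hw_off_limits() check are elided here:

	void *buf;

	if (req_len == 0)
		return (-t4_load_cfg(sc, NULL, 0));	/* zero length: taken as "clear" */

	buf = malloc(req_len, M_CXGBE, M_WAITOK);	/* kernel bounce buffer */
	rc = copyin(user_data, buf, req_len);		/* pull the payload from userspace */
	if (rc == 0)
		rc = -t4_load_cfg(sc, buf, req_len);	/* write it to flash; negate -errno */
	free(buf, M_CXGBE);
	return (rc);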
11807 int rc; in cudbg_dump() local
11812 buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO); in cudbg_dump()
11818 rc = ENOMEM; in cudbg_dump()
11823 cudbg->adap = sc; in cudbg_dump()
11824 cudbg->print = (cudbg_print_cb)printf; in cudbg_dump()
11827 device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n", in cudbg_dump()
11828 __func__, dump->wr_flash, dump->len, dump->data); in cudbg_dump()
11831 if (dump->wr_flash) in cudbg_dump()
11832 cudbg->use_flash = 1; in cudbg_dump()
11833 MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap)); in cudbg_dump()
11834 memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap)); in cudbg_dump()
11836 rc = cudbg_collect(handle, buf, &dump->len); in cudbg_dump()
11837 if (rc != 0) in cudbg_dump()
11840 rc = copyout(buf, dump->data, dump->len); in cudbg_dump()
11844 return (rc); in cudbg_dump()
11856 r = &op->rule[0]; in free_offload_policy()
11857 for (i = 0; i < op->nrules; i++, r++) { in free_offload_policy()
11858 free(r->bpf_prog.bf_insns, M_CXGBE); in free_offload_policy()
11860 free(op->rule, M_CXGBE); in free_offload_policy()
11867 int i, rc, len; in set_offload_policy() local
11877 if (uop->nrules == 0) { in set_offload_policy()
11881 } else if (uop->nrules > 256) { /* arbitrary */ in set_offload_policy()
11887 op->nrules = uop->nrules; in set_offload_policy()
11888 len = op->nrules * sizeof(struct offload_rule); in set_offload_policy()
11889 op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK); in set_offload_policy()
11890 rc = copyin(uop->rule, op->rule, len); in set_offload_policy()
11891 if (rc) { in set_offload_policy()
11892 free(op->rule, M_CXGBE); in set_offload_policy()
11894 return (rc); in set_offload_policy()
11897 r = &op->rule[0]; in set_offload_policy()
11898 for (i = 0; i < op->nrules; i++, r++) { in set_offload_policy()
11901 if (r->open_type != OPEN_TYPE_LISTEN && in set_offload_policy()
11902 r->open_type != OPEN_TYPE_ACTIVE && in set_offload_policy()
11903 r->open_type != OPEN_TYPE_PASSIVE && in set_offload_policy()
11904 r->open_type != OPEN_TYPE_DONTCARE) { in set_offload_policy()
11911 op->nrules = i; in set_offload_policy()
11913 return (rc); in set_offload_policy()
11917 s = &r->settings; in set_offload_policy()
11918 if ((s->offload != 0 && s->offload != 1) || in set_offload_policy()
11919 s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED || in set_offload_policy()
11920 s->sched_class < -1 || in set_offload_policy()
11921 s->sched_class >= sc->params.nsched_cls) { in set_offload_policy()
11922 rc = EINVAL; in set_offload_policy()
11926 bf = &r->bpf_prog; in set_offload_policy()
11927 u = bf->bf_insns; /* userspace ptr */ in set_offload_policy()
11928 bf->bf_insns = NULL; in set_offload_policy()
11929 if (bf->bf_len == 0) { in set_offload_policy()
11933 len = bf->bf_len * sizeof(*bf->bf_insns); in set_offload_policy()
11934 bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK); in set_offload_policy()
11935 rc = copyin(u, bf->bf_insns, len); in set_offload_policy()
11936 if (rc != 0) in set_offload_policy()
11939 if (!bpf_validate(bf->bf_insns, bf->bf_len)) { in set_offload_policy()
11940 rc = EINVAL; in set_offload_policy()
11945 rw_wlock(&sc->policy_lock); in set_offload_policy()
11946 old = sc->policy; in set_offload_policy()
11947 sc->policy = op; in set_offload_policy()
11948 rw_wunlock(&sc->policy_lock); in set_offload_policy()
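set_offload_policy() builds the replacement rule table completely off to the side: copy in the rule array, then for each rule copy in its BPF program and reject the whole request unless bpf_validate() accepts it, and only at the very end publish the new table under sc->policy_lock so readers never see a half-built policy. The copy, validate, then swap skeleton below is condensed (per-rule validation and all error unwinding are elided), and freeing the old table through free_offload_policy() after the swap is an inference from the teardown helper shown above:

	/* Build the replacement away from any readers. */
	op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK);
	op->nrules = uop->nrules;
	len = op->nrules * sizeof(struct offload_rule);
	op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
	rc = copyin(uop->rule, op->rule, len);
	/* per rule: copyin bf->bf_insns and require bpf_validate() to pass */

	/* Publish atomically; lookups take policy_lock shared. */
	rw_wlock(&sc->policy_lock);
	old = sc->policy;
	sc->policy = op;
	rw_wunlock(&sc->policy_lock);

	free_offload_policy(old);	/* tear down whatever was replaced */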
11960 int rc; in read_card_mem() local
11963 mtx_lock(&sc->reg_lock); in read_card_mem()
11965 rc = ENXIO; in read_card_mem()
11967 rc = validate_mem_range(sc, mr->addr, mr->len); in read_card_mem()
11968 mtx_unlock(&sc->reg_lock); in read_card_mem()
11969 if (rc != 0) in read_card_mem()
11970 return (rc); in read_card_mem()
11972 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); in read_card_mem()
11973 addr = mr->addr; in read_card_mem()
11974 remaining = mr->len; in read_card_mem()
11975 dst = (void *)mr->data; in read_card_mem()
11979 mtx_lock(&sc->reg_lock); in read_card_mem()
11981 rc = ENXIO; in read_card_mem()
11984 mtx_unlock(&sc->reg_lock); in read_card_mem()
11985 if (rc != 0) in read_card_mem()
11988 rc = copyout(buf, dst, n); in read_card_mem()
11989 if (rc != 0) in read_card_mem()
11993 remaining -= n; in read_card_mem()
11998 return (rc); in read_card_mem()
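read_card_mem() streams card memory to userspace through a bounded kernel buffer: validate the range once, then loop, reading at most MAX_READ_BUF_SIZE bytes per pass under reg_lock and copyout()ing each chunk while advancing the source address and destination pointer. The loop skeleton; read_via_memwin() stands in for whatever actually fills the buffer, since that call is not among the matched lines:

	buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;
	rc = 0;

	while (remaining > 0) {
		n = min(remaining, MAX_READ_BUF_SIZE);	/* size of this chunk */
		mtx_lock(&sc->reg_lock);
		if (hw_off_limits(sc))
			rc = ENXIO;
		else
			rc = read_via_memwin(sc, 2, addr, buf, n); /* stand-in */
		mtx_unlock(&sc->reg_lock);
		if (rc != 0)
			break;
		rc = copyout(buf, dst, n);		/* hand the chunk to the caller */
		if (rc != 0)
			break;
		dst = (char *)dst + n;
		remaining -= n;
		addr += n;
	}
	free(buf, M_CXGBE);
	return (rc);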
12005 int rc; in read_i2c() local
12007 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) in read_i2c()
12010 if (i2cd->len > sizeof(i2cd->data)) in read_i2c()
12013 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); in read_i2c()
12014 if (rc) in read_i2c()
12015 return (rc); in read_i2c()
12017 rc = ENXIO; in read_i2c()
12019 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, in read_i2c()
12020 i2cd->offset, i2cd->len, &i2cd->data[0]); in read_i2c()
12023 return (rc); in read_i2c()
12042 if (port_id >= sc->params.nports) in clear_stats()
12044 pi = sc->port[port_id]; in clear_stats()
12048 mtx_lock(&sc->reg_lock); in clear_stats()
12051 t4_clr_port_stats(sc, pi->tx_chan); in clear_stats()
12053 if (pi->fcs_reg != -1) in clear_stats()
12054 pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg); in clear_stats()
12056 pi->stats.rx_fcs_err = 0; in clear_stats()
12059 if (vi->flags & VI_INIT_DONE) in clear_stats()
12060 t4_clr_vi_stats(sc, vi->vin); in clear_stats()
12062 chan_map = pi->rx_e_chan_map; in clear_stats()
12065 i = ffs(chan_map) - 1; in clear_stats()
12071 mtx_unlock(&sc->reg_lock); in clear_stats()
12072 pi->tx_parse_error = 0; in clear_stats()
12073 pi->tnl_cong_drops = 0; in clear_stats()
12080 if (vi->flags & VI_INIT_DONE) { in clear_stats()
12084 rxq->lro.lro_queued = 0; in clear_stats()
12085 rxq->lro.lro_flushed = 0; in clear_stats()
12087 rxq->rxcsum = 0; in clear_stats()
12088 rxq->vlan_extraction = 0; in clear_stats()
12089 rxq->vxlan_rxcsum = 0; in clear_stats()
12091 rxq->fl.cl_allocated = 0; in clear_stats()
12092 rxq->fl.cl_recycled = 0; in clear_stats()
12093 rxq->fl.cl_fast_recycled = 0; in clear_stats()
12097 txq->txcsum = 0; in clear_stats()
12098 txq->tso_wrs = 0; in clear_stats()
12099 txq->vlan_insertion = 0; in clear_stats()
12100 txq->imm_wrs = 0; in clear_stats()
12101 txq->sgl_wrs = 0; in clear_stats()
12102 txq->txpkt_wrs = 0; in clear_stats()
12103 txq->txpkts0_wrs = 0; in clear_stats()
12104 txq->txpkts1_wrs = 0; in clear_stats()
12105 txq->txpkts0_pkts = 0; in clear_stats()
12106 txq->txpkts1_pkts = 0; in clear_stats()
12107 txq->txpkts_flush = 0; in clear_stats()
12108 txq->raw_wrs = 0; in clear_stats()
12109 txq->vxlan_tso_wrs = 0; in clear_stats()
12110 txq->vxlan_txcsum = 0; in clear_stats()
12111 txq->kern_tls_records = 0; in clear_stats()
12112 txq->kern_tls_short = 0; in clear_stats()
12113 txq->kern_tls_partial = 0; in clear_stats()
12114 txq->kern_tls_full = 0; in clear_stats()
12115 txq->kern_tls_octets = 0; in clear_stats()
12116 txq->kern_tls_waste = 0; in clear_stats()
12117 txq->kern_tls_options = 0; in clear_stats()
12118 txq->kern_tls_header = 0; in clear_stats()
12119 txq->kern_tls_fin = 0; in clear_stats()
12120 txq->kern_tls_fin_short = 0; in clear_stats()
12121 txq->kern_tls_cbc = 0; in clear_stats()
12122 txq->kern_tls_gcm = 0; in clear_stats()
12123 mp_ring_reset_stats(txq->r); in clear_stats()
12128 ofld_txq->wrq.tx_wrs_direct = 0; in clear_stats()
12129 ofld_txq->wrq.tx_wrs_copied = 0; in clear_stats()
12130 counter_u64_zero(ofld_txq->tx_iscsi_pdus); in clear_stats()
12131 counter_u64_zero(ofld_txq->tx_iscsi_octets); in clear_stats()
12132 counter_u64_zero(ofld_txq->tx_iscsi_iso_wrs); in clear_stats()
12133 counter_u64_zero(ofld_txq->tx_aio_jobs); in clear_stats()
12134 counter_u64_zero(ofld_txq->tx_aio_octets); in clear_stats()
12135 counter_u64_zero(ofld_txq->tx_toe_tls_records); in clear_stats()
12136 counter_u64_zero(ofld_txq->tx_toe_tls_octets); in clear_stats()
12141 ofld_rxq->fl.cl_allocated = 0; in clear_stats()
12142 ofld_rxq->fl.cl_recycled = 0; in clear_stats()
12143 ofld_rxq->fl.cl_fast_recycled = 0; in clear_stats()
12145 ofld_rxq->rx_iscsi_ddp_setup_ok); in clear_stats()
12147 ofld_rxq->rx_iscsi_ddp_setup_error); in clear_stats()
12148 ofld_rxq->rx_iscsi_ddp_pdus = 0; in clear_stats()
12149 ofld_rxq->rx_iscsi_ddp_octets = 0; in clear_stats()
12150 ofld_rxq->rx_iscsi_fl_pdus = 0; in clear_stats()
12151 ofld_rxq->rx_iscsi_fl_octets = 0; in clear_stats()
12152 ofld_rxq->rx_aio_ddp_jobs = 0; in clear_stats()
12153 ofld_rxq->rx_aio_ddp_octets = 0; in clear_stats()
12154 ofld_rxq->rx_toe_tls_records = 0; in clear_stats()
12155 ofld_rxq->rx_toe_tls_octets = 0; in clear_stats()
12156 ofld_rxq->rx_toe_ddp_octets = 0; in clear_stats()
12157 counter_u64_zero(ofld_rxq->ddp_buffer_alloc); in clear_stats()
12158 counter_u64_zero(ofld_rxq->ddp_buffer_reuse); in clear_stats()
12159 counter_u64_zero(ofld_rxq->ddp_buffer_free); in clear_stats()
12164 wrq = &sc->sge.ctrlq[pi->port_id]; in clear_stats()
12165 wrq->tx_wrs_direct = 0; in clear_stats()
12166 wrq->tx_wrs_copied = 0; in clear_stats()
12180 bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr)); in hold_clip_addr()
12196 bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr)); in release_clip_addr()
12208 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0); in t4_os_find_pci_capability()
12214 struct adapter *sc = pi->adapter; in t4_os_portmod_changed()
12222 KASSERT((pi->flags & FIXED_IFMEDIA) == 0, in t4_os_portmod_changed()
12223 ("%s: port_type %u", __func__, pi->port_type)); in t4_os_portmod_changed()
12225 vi = &pi->vi[0]; in t4_os_portmod_changed()
12229 if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) { in t4_os_portmod_changed()
12237 ifp = vi->ifp; in t4_os_portmod_changed()
12238 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) in t4_os_portmod_changed()
12240 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) in t4_os_portmod_changed()
12242 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) in t4_os_portmod_changed()
12244 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { in t4_os_portmod_changed()
12246 port_top_speed(pi), mod_str[pi->mod_type]); in t4_os_portmod_changed()
12249 pi->mod_type); in t4_os_portmod_changed()
12258 struct link_config *lc = &pi->link_cfg; in t4_os_link_changed()
12259 struct adapter *sc = pi->adapter; in t4_os_link_changed()
12265 if (lc->link_ok) { in t4_os_link_changed()
12266 if (lc->speed > 25000 || in t4_os_link_changed()
12267 (lc->speed == 25000 && lc->fec == FEC_RS)) { in t4_os_link_changed()
12268 pi->fcs_reg = T5_PORT_REG(pi->tx_chan, in t4_os_link_changed()
12271 pi->fcs_reg = T5_PORT_REG(pi->tx_chan, in t4_os_link_changed()
12274 pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg); in t4_os_link_changed()
12275 pi->stats.rx_fcs_err = 0; in t4_os_link_changed()
12277 pi->fcs_reg = -1; in t4_os_link_changed()
12280 MPASS(pi->fcs_reg != -1); in t4_os_link_changed()
12281 MPASS(pi->fcs_base == 0); in t4_os_link_changed()
12285 ifp = vi->ifp; in t4_os_link_changed()
12289 if (lc->link_ok) { in t4_os_link_changed()
12290 if_setbaudrate(ifp, IF_Mbps(lc->speed)); in t4_os_link_changed()
12307 * in - the only guarantee is that sc->sc_lock is a valid lock. in t4_iterate()
12318 int rc; in t4_ioctl() local
12319 struct adapter *sc = dev->si_drv1; in t4_ioctl()
12321 rc = priv_check(td, PRIV_DRIVER); in t4_ioctl()
12322 if (rc != 0) in t4_ioctl()
12323 return (rc); in t4_ioctl()
12329 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) in t4_ioctl()
12332 mtx_lock(&sc->reg_lock); in t4_ioctl()
12334 rc = ENXIO; in t4_ioctl()
12335 else if (edata->size == 4) in t4_ioctl()
12336 edata->val = t4_read_reg(sc, edata->addr); in t4_ioctl()
12337 else if (edata->size == 8) in t4_ioctl()
12338 edata->val = t4_read_reg64(sc, edata->addr); in t4_ioctl()
12340 rc = EINVAL; in t4_ioctl()
12341 mtx_unlock(&sc->reg_lock); in t4_ioctl()
12348 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) in t4_ioctl()
12351 mtx_lock(&sc->reg_lock); in t4_ioctl()
12353 rc = ENXIO; in t4_ioctl()
12354 else if (edata->size == 4) { in t4_ioctl()
12355 if (edata->val & 0xffffffff00000000) in t4_ioctl()
12356 rc = EINVAL; in t4_ioctl()
12357 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); in t4_ioctl()
12358 } else if (edata->size == 8) in t4_ioctl()
12359 t4_write_reg64(sc, edata->addr, edata->val); in t4_ioctl()
12361 rc = EINVAL; in t4_ioctl()
12362 mtx_unlock(&sc->reg_lock); in t4_ioctl()
12371 if (regs->len < reglen) { in t4_ioctl()
12372 regs->len = reglen; /* hint to the caller */ in t4_ioctl()
12376 regs->len = reglen; in t4_ioctl()
12378 mtx_lock(&sc->reg_lock); in t4_ioctl()
12380 rc = ENXIO; in t4_ioctl()
12383 mtx_unlock(&sc->reg_lock); in t4_ioctl()
12384 if (rc == 0) in t4_ioctl()
12385 rc = copyout(buf, regs->data, reglen); in t4_ioctl()
12390 rc = get_filter_mode(sc, (uint32_t *)data); in t4_ioctl()
12393 rc = set_filter_mode(sc, *(uint32_t *)data); in t4_ioctl()
12396 rc = set_filter_mask(sc, *(uint32_t *)data); in t4_ioctl()
12399 rc = get_filter(sc, (struct t4_filter *)data); in t4_ioctl()
12402 rc = set_filter(sc, (struct t4_filter *)data); in t4_ioctl()
12405 rc = del_filter(sc, (struct t4_filter *)data); in t4_ioctl()
12408 rc = get_sge_context(sc, (struct t4_sge_context *)data); in t4_ioctl()
12411 rc = load_fw(sc, (struct t4_data *)data); in t4_ioctl()
12414 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); in t4_ioctl()
12417 rc = read_i2c(sc, (struct t4_i2c_data *)data); in t4_ioctl()
12420 rc = clear_stats(sc, *(uint32_t *)data); in t4_ioctl()
12423 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data); in t4_ioctl()
12426 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data); in t4_ioctl()
12429 rc = t4_get_tracer(sc, (struct t4_tracer *)data); in t4_ioctl()
12432 rc = t4_set_tracer(sc, (struct t4_tracer *)data); in t4_ioctl()
12435 rc = load_cfg(sc, (struct t4_data *)data); in t4_ioctl()
12438 rc = load_boot(sc, (struct t4_bootrom *)data); in t4_ioctl()
12441 rc = load_bootcfg(sc, (struct t4_data *)data); in t4_ioctl()
12444 rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data); in t4_ioctl()
12447 rc = set_offload_policy(sc, (struct t4_offload_policy *)data); in t4_ioctl()
12450 rc = hold_clip_addr(sc, (struct t4_clip_addr *)data); in t4_ioctl()
12453 rc = release_clip_addr(sc, (struct t4_clip_addr *)data); in t4_ioctl()
12456 rc = ENOTTY; in t4_ioctl()
12459 return (rc); in t4_ioctl()
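t4_ioctl() is the control plane behind the cxgbetool-style character device: it gates every command behind priv_check(td, PRIV_DRIVER), handles register reads and writes inline under reg_lock, and hands everything else to a helper that receives the already-copied-in argument structure. A trimmed sketch of the dispatch shape; only two cases are shown, and the CHELSIO_T4_* command names are quoted from memory of the driver's ioctl header, so treat them as assumptions:

static int
t4_ioctl_sketch(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct adapter *sc = dev->si_drv1;
	int rc;

	rc = priv_check(td, PRIV_DRIVER);	/* root-only interface */
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_LOAD_FW:		/* assumed name */
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS:		/* assumed name */
		rc = clear_stats(sc, *(uint32_t *)data);
		break;
	default:
		rc = ENOTTY;			/* not a command we know */
	}
	return (rc);
}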
12466 int rc; in toe_capability() local
12467 struct port_info *pi = vi->pi; in toe_capability()
12468 struct adapter *sc = pi->adapter; in toe_capability()
12479 if (sc->flags & KERN_TLS_ON && is_t6(sc)) { in toe_capability()
12490 p = sc->port[i]; in toe_capability()
12492 if (if_getcapenable(v->ifp) & IFCAP_TXTLS) { in toe_capability()
12495 device_get_nameunit(v->dev)); in toe_capability()
12506 rc = t6_config_kern_tls(sc, false); in toe_capability()
12507 if (rc) in toe_capability()
12508 return (rc); in toe_capability()
12511 if ((if_getcapenable(vi->ifp) & IFCAP_TOE) != 0) { in toe_capability()
12521 if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0)) in toe_capability()
12522 return (rc); in toe_capability()
12523 if (!(pi->vi[0].flags & VI_INIT_DONE) && in toe_capability()
12524 ((rc = vi_init(&pi->vi[0])) != 0)) in toe_capability()
12525 return (rc); in toe_capability()
12527 if (isset(&sc->offload_map, pi->port_id)) { in toe_capability()
12529 MPASS(pi->uld_vis > 0); in toe_capability()
12530 pi->uld_vis++; in toe_capability()
12535 rc = t4_activate_uld(sc, ULD_TOM); in toe_capability()
12536 if (rc == EAGAIN) { in toe_capability()
12541 if (rc != 0) in toe_capability()
12542 return (rc); in toe_capability()
12543 KASSERT(sc->tom_softc != NULL, in toe_capability()
12555 if (pi->uld_vis++ == 0) in toe_capability()
12556 setbit(&sc->offload_map, pi->port_id); in toe_capability()
12558 if ((if_getcapenable(vi->ifp) & IFCAP_TOE) == 0) { in toe_capability()
12562 MPASS(isset(&sc->offload_map, pi->port_id)); in toe_capability()
12563 MPASS(pi->uld_vis > 0); in toe_capability()
12564 if (--pi->uld_vis == 0) in toe_capability()
12565 clrbit(&sc->offload_map, pi->port_id); in toe_capability()
12577 int rc; in t4_register_uld() local
12583 rc = EEXIST; in t4_register_uld()
12586 rc = 0; in t4_register_uld()
12589 return (rc); in t4_register_uld()
12608 int rc; in t4_activate_uld() local
12616 if (!(sc->flags & FULL_INIT_DONE)) { in t4_activate_uld()
12617 rc = adapter_init(sc); in t4_activate_uld()
12618 if (rc != 0) in t4_activate_uld()
12619 return (rc); in t4_activate_uld()
12624 rc = EAGAIN; /* load the KLD with this ULD and try again. */ in t4_activate_uld()
12626 rc = t4_uld_list[id]->uld_activate(sc); in t4_activate_uld()
12627 if (rc == 0) in t4_activate_uld()
12628 setbit(&sc->active_ulds, id); in t4_activate_uld()
12632 return (rc); in t4_activate_uld()
12638 int rc; in t4_deactivate_uld() local
12647 rc = ENXIO; in t4_deactivate_uld()
12649 rc = t4_uld_list[id]->uld_deactivate(sc); in t4_deactivate_uld()
12650 if (rc == 0) in t4_deactivate_uld()
12651 clrbit(&sc->active_ulds, id); in t4_deactivate_uld()
12655 return (rc); in t4_deactivate_uld()
12661 int i, rc; in deactivate_all_uld() local
12663 rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4detuld"); in deactivate_all_uld()
12664 if (rc != 0) in deactivate_all_uld()
12670 rc = t4_uld_list[i]->uld_deactivate(sc); in deactivate_all_uld()
12671 if (rc != 0) in deactivate_all_uld()
12673 clrbit(&sc->active_ulds, i); in deactivate_all_uld()
12678 return (rc); in deactivate_all_uld()
12691 t4_uld_list[i]->uld_stop == NULL) in stop_all_uld()
12693 (void) t4_uld_list[i]->uld_stop(sc); in stop_all_uld()
12709 t4_uld_list[i]->uld_restart == NULL) in restart_all_uld()
12711 (void) t4_uld_list[i]->uld_restart(sc); in restart_all_uld()
12723 return (isset(&sc->active_ulds, id)); in uld_active()
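The ULD plumbing (TOM and friends) is a registry indexed by ULD id plus a per-adapter active_ulds bitmap: t4_activate_uld() makes sure the adapter itself is fully initialized, returns EAGAIN when the ULD module is not registered yet (the caller is expected to load the KLD and retry, which is what toe_capability() does), and sets the bit on success; deactivation clears it, and uld_active() is simply isset() on the bitmap. A compact sketch, with the registry locking elided and the NULL check standing in for however the real code detects an unregistered ULD:

static int
activate_uld_sketch(struct adapter *sc, int id)
{
	int rc;

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = adapter_init(sc);		/* bring the adapter up first */
		if (rc != 0)
			return (rc);
	}
	if (t4_uld_list[id] == NULL)
		return (EAGAIN);		/* load the KLD with this ULD and retry */
	rc = t4_uld_list[id]->uld_activate(sc);
	if (rc == 0)
		setbit(&sc->active_ulds, id);	/* remember that it is live */
	return (rc);
}

/* uld_active() then reduces to: isset(&sc->active_ulds, id) */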
12741 if (sc->flags & KERN_TLS_ON) in ktls_capability()
12743 if (sc->offload_map != 0) { in ktls_capability()
12772 nq = *t < 0 ? -*t : c; in calculate_nqueues()
12815 if (t4_toecaps_allowed == -1) in tweak_tunables()
12818 if (t4_toecaps_allowed == -1) in tweak_tunables()
12823 if (t4_rdmacaps_allowed == -1) { in tweak_tunables()
12828 if (t4_iscsicaps_allowed == -1) { in tweak_tunables()
12837 if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS) in tweak_tunables()
12840 if (t4_rdmacaps_allowed == -1) in tweak_tunables()
12843 if (t4_iscsicaps_allowed == -1) in tweak_tunables()
12857 if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS) in tweak_tunables()
12871 * Number of VIs to create per-port. The first VI is the "main" regular in tweak_tunables()
12903 base = sc->memwin[2].mw_base; in t4_dump_mem()
12909 pf = V_PFNUM(sc->pf); in t4_dump_mem()
12912 off = addr - win_pos; in t4_dump_mem()
12921 db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", in t4_dump_mem()
12927 len -= sizeof(buf); in t4_dump_mem()
12948 struct devlog_params *dparams = &sc->params.devlog; in t4_dump_devlog()
12950 int i, first, j, m, nentries, rc; in t4_dump_devlog() local
12953 if (dparams->start == 0) { in t4_dump_devlog()
12958 nentries = dparams->size / sizeof(struct fw_devlog_e); in t4_dump_devlog()
12959 m = fwmtype_to_hwmtype(dparams->memtype); in t4_dump_devlog()
12962 first = -1; in t4_dump_devlog()
12964 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), in t4_dump_devlog()
12966 if (rc != 0) in t4_dump_devlog()
12979 if (first == -1) in t4_dump_devlog()
12984 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), in t4_dump_devlog()
12986 if (rc != 0) in t4_dump_devlog()
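t4_dump_devlog() (the DDB helper behind "show t4 devlog") treats the firmware device log as a circular buffer: it reads each entry once to find where the log logically starts, gives up quietly if the log is unreadable or empty (first stays -1), and then prints from that starting entry, wrapping around at nentries. The sketch below locates the start by taking the entry with the smallest timestamp as the oldest; the timestamp field and the "oldest = minimum timestamp" rule are assumptions based on struct fw_devlog_e, not lines matched above:

	struct fw_devlog_e e;
	uint64_t ftstamp = UINT64_MAX;
	int i, first = -1;

	for (i = 0; i < nentries; i++) {
		if (-t4_mem_read(sc, m, dparams->start + i * sizeof(e),
		    sizeof(e), (void *)&e) != 0)
			return;			/* log is unreadable */
		if (e.timestamp == 0)
			break;			/* unused tail of the buffer */
		if (be64toh(e.timestamp) < ftstamp) {
			ftstamp = be64toh(e.timestamp);
			first = i;		/* oldest entry seen so far */
		}
	}
	if (first == -1)
		return;				/* nothing logged yet */
	/* print entries starting at 'first' and wrap around at 'nentries' */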
13134 int i, rc; in enable_vxlan_rx() local
13140 t4_write_reg(sc, A_MPS_RX_VXLAN_TYPE, V_VXLAN(sc->vxlan_port) | in enable_vxlan_rx()
13143 pi = sc->port[i]; in enable_vxlan_rx()
13144 if (pi->vxlan_tcam_entry == true) in enable_vxlan_rx()
13146 rc = t4_alloc_raw_mac_filt(sc, pi->vi[0].viid, match_all_mac, in enable_vxlan_rx()
13147 match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id, in enable_vxlan_rx()
13149 if (rc < 0) { in enable_vxlan_rx()
13150 rc = -rc; in enable_vxlan_rx()
13151 CH_ERR(&pi->vi[0], in enable_vxlan_rx()
13152 "failed to add VXLAN TCAM entry: %d.\n", rc); in enable_vxlan_rx()
13154 MPASS(rc == sc->rawf_base + pi->port_id); in enable_vxlan_rx()
13155 pi->vxlan_tcam_entry = true; in enable_vxlan_rx()
13165 if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5) in t4_vxlan_start()
13170 if (sc->vxlan_refcount == 0) { in t4_vxlan_start()
13171 sc->vxlan_port = v->port; in t4_vxlan_start()
13172 sc->vxlan_refcount = 1; in t4_vxlan_start()
13175 } else if (sc->vxlan_port == v->port) { in t4_vxlan_start()
13176 sc->vxlan_refcount++; in t4_vxlan_start()
13180 sc->vxlan_port, v->port); in t4_vxlan_start()
13190 if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5) in t4_vxlan_stop()
13200 if (sc->vxlan_port != v->port) in t4_vxlan_stop()
13202 if (sc->vxlan_refcount == 0) { in t4_vxlan_stop()
13204 "ignoring attempt to stop it again.\n", sc->vxlan_port); in t4_vxlan_stop()
13205 } else if (--sc->vxlan_refcount == 0 && !hw_off_limits(sc)) in t4_vxlan_stop()
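t4_vxlan_start() and t4_vxlan_stop() track a single VXLAN UDP port per adapter with a reference count: the first start adopts the port (and enable_vxlan_rx() installs a match-all MAC TCAM entry per port), further starts for the same port only bump the count, a different port is refused with a warning, and the hardware classification is torn down only when the count drops back to zero and the hardware is still reachable. The refcount logic in isolation, with the register and TCAM programming reduced to comments:

	/* start */
	if (sc->vxlan_refcount == 0) {
		sc->vxlan_port = v->port;	/* adopt this UDP port */
		sc->vxlan_refcount = 1;
		/* program A_MPS_RX_VXLAN_TYPE and the per-port TCAM entries */
	} else if (sc->vxlan_port == v->port) {
		sc->vxlan_refcount++;		/* same port, just another user */
	} else {
		/* a different port while one is active: warn and ignore */
	}

	/* stop */
	if (sc->vxlan_port == v->port && sc->vxlan_refcount > 0 &&
	    --sc->vxlan_refcount == 0 && !hw_off_limits(sc)) {
		/* last user gone: disable VXLAN RX classification */
	}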
13244 int rc = 0; in mod_event() local
13298 if (--loaded == 0) { in mod_event()
13315 rc = EBUSY; in mod_event()
13323 rc = EBUSY; in mod_event()
13352 rc = EBUSY; in mod_event()
13361 return (rc); in mod_event()