Lines matching "tcam-based" — FreeBSD cxgb(4) Chelsio T3 driver, sys/dev/cxgb/cxgb_main.c (selected source lines, shown with their line numbers and enclosing functions)
2 SPDX-License-Identifier: BSD-2-Clause
4 Copyright (c) 2007-2009, Chelsio Inc.
151 {PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
152 {PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
174 nitems(cxgb_identifiers) - 1);
226 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
238 "MSI-X, MSI, INTx selector");
241 * The driver uses an auto-queue algorithm by default.
242 * To disable it and force a single queue-set per port, use multiq = 0
246 "use min(ncpus/ports, 8) queue-sets per port");
257 int cxgb_use_16k_clusters = -1;
261 static int nfilters = -1;
302 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
313 switch (adapter->params.rev) { in t3rev2char()
333 for (id = cxgb_identifiers; id->desc != NULL; id++) { in cxgb_get_ident()
334 if ((id->vendor == pci_get_vendor(dev)) && in cxgb_get_ident()
335 (id->device == pci_get_device(dev))) { in cxgb_get_ident()
352 ai = t3_get_adapter_info(id->index); in cxgb_get_adapter_info()
368 nports = ai->nports0 + ai->nports1; in cxgb_controller_probe()
374 device_set_descf(dev, "%s, %d %s", ai->desc, nports, ports); in cxgb_controller_probe()
390 device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME); in upgrade_fw()
393 device_printf(sc->dev, "installing firmware on card\n"); in upgrade_fw()
394 status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize); in upgrade_fw()
397 device_printf(sc->dev, "failed to install firmware: %d\n", in upgrade_fw()
401 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d", in upgrade_fw()
415 * 1. Determine if the device supports MSI or MSI-X.
420 * 5. Allocate the BAR for doing MSI-X.
421 * 6. Set up the line interrupt iff MSI-X is not supported.
424 * 9. Check if the firmware and SRAM are up-to-date. They will be
425 * auto-updated later (before FULL_INIT_DONE), if required.
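Steps 1, 5, and 6 amount to an interrupt-selection ladder. A self-contained sketch of that decision with hypothetical names (the real driver does this inline in cxgb_controller_attach(), as the fragments below show):

#include <stdbool.h>

enum intr_mode { INTR_INTX, INTR_MSI, INTR_MSIX };

static enum intr_mode
pick_intr_mode(int msi_allowed, bool msix_bar_ok, bool msix_alloc_ok,
    bool msi_alloc_ok)
{
	if (msi_allowed >= 2 && msix_bar_ok && msix_alloc_ok)
		return (INTR_MSIX);	/* one vector per queue set, plus one */
	if (msi_allowed >= 1 && msi_alloc_ok)
		return (INTR_MSI);
	return (INTR_INTX);		/* fall back to the legacy line interrupt */
}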
449 sc->dev = dev; in cxgb_controller_attach()
450 sc->msi_count = 0; in cxgb_controller_attach()
453 snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d", in cxgb_controller_attach()
455 ADAPTER_LOCK_INIT(sc, sc->lockbuf); in cxgb_controller_attach()
457 snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d", in cxgb_controller_attach()
459 snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d", in cxgb_controller_attach()
461 snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d", in cxgb_controller_attach()
464 MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN); in cxgb_controller_attach()
465 MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF); in cxgb_controller_attach()
466 MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF); in cxgb_controller_attach()
477 sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4; in cxgb_controller_attach()
478 if (sc->link_width < 8 && in cxgb_controller_attach()
479 (ai->caps & SUPPORTED_10000baseT_Full)) { in cxgb_controller_attach()
480 device_printf(sc->dev, in cxgb_controller_attach()
482 sc->link_width); in cxgb_controller_attach()
494 sc->regs_rid = PCIR_BAR(0); in cxgb_controller_attach()
495 if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, in cxgb_controller_attach()
496 &sc->regs_rid, RF_ACTIVE)) == NULL) { in cxgb_controller_attach()
502 sc->bt = rman_get_bustag(sc->regs_res); in cxgb_controller_attach()
503 sc->bh = rman_get_bushandle(sc->regs_res); in cxgb_controller_attach()
504 sc->mmio_len = rman_get_size(sc->regs_res); in cxgb_controller_attach()
507 sc->port[i].adapter = sc; in cxgb_controller_attach()
515 sc->udbs_rid = PCIR_BAR(2); in cxgb_controller_attach()
516 sc->udbs_res = NULL; in cxgb_controller_attach()
518 ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, in cxgb_controller_attach()
519 &sc->udbs_rid, RF_ACTIVE)) == NULL)) { in cxgb_controller_attach()
525 /* Allocate the BAR for doing MSI-X. If it succeeds, try to allocate in cxgb_controller_attach()
530 sc->msix_regs_rid = 0x20; in cxgb_controller_attach()
532 (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, in cxgb_controller_attach()
533 &sc->msix_regs_rid, RF_ACTIVE)) != NULL) { in cxgb_controller_attach()
536 port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus); in cxgb_controller_attach()
537 msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1; in cxgb_controller_attach()
540 (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 || in cxgb_controller_attach()
541 sc->msi_count != msi_needed) { in cxgb_controller_attach()
542 device_printf(dev, "alloc msix failed - " in cxgb_controller_attach()
544 "will try MSI\n", sc->msi_count, in cxgb_controller_attach()
546 sc->msi_count = 0; in cxgb_controller_attach()
550 sc->msix_regs_rid, sc->msix_regs_res); in cxgb_controller_attach()
551 sc->msix_regs_res = NULL; in cxgb_controller_attach()
553 sc->flags |= USING_MSIX; in cxgb_controller_attach()
554 sc->cxgb_intr = cxgb_async_intr; in cxgb_controller_attach()
556 "using MSI-X interrupts (%u vectors)\n", in cxgb_controller_attach()
557 sc->msi_count); in cxgb_controller_attach()
561 if ((msi_allowed >= 1) && (sc->msi_count == 0)) { in cxgb_controller_attach()
562 sc->msi_count = 1; in cxgb_controller_attach()
563 if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) { in cxgb_controller_attach()
564 device_printf(dev, "alloc msi failed - " in cxgb_controller_attach()
566 sc->msi_count = 0; in cxgb_controller_attach()
570 sc->flags |= USING_MSI; in cxgb_controller_attach()
571 sc->cxgb_intr = t3_intr_msi; in cxgb_controller_attach()
575 if (sc->msi_count == 0) { in cxgb_controller_attach()
577 sc->cxgb_intr = t3b_intr; in cxgb_controller_attach()
581 sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT, in cxgb_controller_attach()
582 taskqueue_thread_enqueue, &sc->tq); in cxgb_controller_attach()
583 if (sc->tq == NULL) { in cxgb_controller_attach()
588 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq", in cxgb_controller_attach()
590 TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc); in cxgb_controller_attach()
594 callout_init(&sc->cxgb_tick_ch, 1); in cxgb_controller_attach()
602 sc->flags &= ~FW_UPTODATE; in cxgb_controller_attach()
604 sc->flags |= FW_UPTODATE; in cxgb_controller_attach()
611 device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n", in cxgb_controller_attach()
613 sc->flags &= ~TPS_UPTODATE; in cxgb_controller_attach()
615 sc->flags |= TPS_UPTODATE; in cxgb_controller_attach()
622 for (i = 0; i < (sc)->params.nports; i++) { in cxgb_controller_attach()
631 pi = &sc->port[i]; in cxgb_controller_attach()
632 pi->adapter = sc; in cxgb_controller_attach()
633 pi->nqsets = port_qsets; in cxgb_controller_attach()
634 pi->first_qset = i*port_qsets; in cxgb_controller_attach()
635 pi->port_id = i; in cxgb_controller_attach()
636 pi->tx_chan = i >= ai->nports0; in cxgb_controller_attach()
637 pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i; in cxgb_controller_attach()
638 sc->rxpkt_map[pi->txpkt_intf] = i; in cxgb_controller_attach()
639 sc->port[i].tx_chan = i >= ai->nports0; in cxgb_controller_attach()
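A worked example of the txpkt_intf mapping above, assuming a 4-port adapter with nports0 = 2: ports 0 and 1 sit on channel 0 and get interfaces 0 and 2, while ports 2 and 3 sit on channel 1 and get interfaces 1 and 3. rxpkt_map[] stores the inverse so received packets can be attributed back to a port.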
640 sc->portdev[i] = child; in cxgb_controller_attach()
654 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d", in cxgb_controller_attach()
659 ai->desc, is_offload(sc) ? "R" : "", in cxgb_controller_attach()
660 sc->params.vpd.ec, sc->params.vpd.sn); in cxgb_controller_attach()
662 snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x", in cxgb_controller_attach()
663 sc->params.vpd.port_type[0], sc->params.vpd.port_type[1], in cxgb_controller_attach()
664 sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]); in cxgb_controller_attach()
666 device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]); in cxgb_controller_attach()
667 callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc); in cxgb_controller_attach()
672 sc->cpl_handler[i] = cpl_not_handled; in cxgb_controller_attach()
725 sc->flags |= CXGB_SHUTDOWN; in cxgb_free()
731 bus_detach_children(sc->dev); in cxgb_free()
732 for (i = 0; i < (sc)->params.nports; i++) { in cxgb_free()
733 if (sc->portdev[i] && in cxgb_free()
734 device_delete_child(sc->dev, sc->portdev[i]) != 0) in cxgb_free()
735 device_printf(sc->dev, "failed to delete child port\n"); in cxgb_free()
736 nqsets += sc->port[i].nqsets; in cxgb_free()
744 KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)", in cxgb_free()
745 __func__, sc->open_device_map)); in cxgb_free()
746 for (i = 0; i < sc->params.nports; i++) { in cxgb_free()
747 KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!", in cxgb_free()
754 callout_drain(&sc->cxgb_tick_ch); in cxgb_free()
755 callout_drain(&sc->sge_timer_ch); in cxgb_free()
761 if (sc->flags & FULL_INIT_DONE) { in cxgb_free()
763 sc->flags &= ~FULL_INIT_DONE; in cxgb_free()
770 if (sc->flags & (USING_MSI | USING_MSIX)) { in cxgb_free()
771 device_printf(sc->dev, "releasing msi message(s)\n"); in cxgb_free()
772 pci_release_msi(sc->dev); in cxgb_free()
774 device_printf(sc->dev, "no msi message to release\n"); in cxgb_free()
777 if (sc->msix_regs_res != NULL) { in cxgb_free()
778 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid, in cxgb_free()
779 sc->msix_regs_res); in cxgb_free()
785 if (sc->tq != NULL) { in cxgb_free()
786 taskqueue_free(sc->tq); in cxgb_free()
787 sc->tq = NULL; in cxgb_free()
790 free(sc->filters, M_DEVBUF); in cxgb_free()
793 if (sc->udbs_res != NULL) in cxgb_free()
794 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid, in cxgb_free()
795 sc->udbs_res); in cxgb_free()
797 if (sc->regs_res != NULL) in cxgb_free()
798 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid, in cxgb_free()
799 sc->regs_res); in cxgb_free()
801 MTX_DESTROY(&sc->mdio_lock); in cxgb_free()
802 MTX_DESTROY(&sc->sge.reg_lock); in cxgb_free()
803 MTX_DESTROY(&sc->elmer_lock); in cxgb_free()
811 * setup_sge_qsets - configure SGE Tx/Rx/response queues
815 * We support multiple queue sets per port if we have MSI-X, otherwise a single queue set per port is used.
825 device_printf(sc->dev, "t3_sge_alloc returned %d\n", err); in setup_sge_qsets()
829 if (sc->params.rev > 0 && !(sc->flags & USING_MSI)) in setup_sge_qsets()
830 irq_idx = -1; in setup_sge_qsets()
832 for (i = 0; i < (sc)->params.nports; i++) { in setup_sge_qsets()
833 struct port_info *pi = &sc->port[i]; in setup_sge_qsets()
835 for (j = 0; j < pi->nqsets; j++, qset_idx++) { in setup_sge_qsets()
836 err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports, in setup_sge_qsets()
837 (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx, in setup_sge_qsets()
838 &sc->params.sge.qset[qset_idx], ntxq, pi); in setup_sge_qsets()
841 device_printf(sc->dev, in setup_sge_qsets()
848 sc->nqsets = qset_idx; in setup_sge_qsets()
859 if (sc->msix_intr_tag[i] == NULL) { in cxgb_teardown_interrupts()
862 KASSERT(sc->msix_irq_res[i] == NULL && in cxgb_teardown_interrupts()
863 sc->msix_irq_rid[i] == 0, in cxgb_teardown_interrupts()
864 ("%s: half-done interrupt (%d).", __func__, i)); in cxgb_teardown_interrupts()
869 bus_teardown_intr(sc->dev, sc->msix_irq_res[i], in cxgb_teardown_interrupts()
870 sc->msix_intr_tag[i]); in cxgb_teardown_interrupts()
871 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i], in cxgb_teardown_interrupts()
872 sc->msix_irq_res[i]); in cxgb_teardown_interrupts()
874 sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL; in cxgb_teardown_interrupts()
875 sc->msix_irq_rid[i] = 0; in cxgb_teardown_interrupts()
878 if (sc->intr_tag) { in cxgb_teardown_interrupts()
879 KASSERT(sc->irq_res != NULL, in cxgb_teardown_interrupts()
880 ("%s: half-done interrupt.", __func__)); in cxgb_teardown_interrupts()
882 bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag); in cxgb_teardown_interrupts()
883 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid, in cxgb_teardown_interrupts()
884 sc->irq_res); in cxgb_teardown_interrupts()
886 sc->irq_res = sc->intr_tag = NULL; in cxgb_teardown_interrupts()
887 sc->irq_rid = 0; in cxgb_teardown_interrupts()
896 int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX); in cxgb_setup_interrupts()
898 sc->irq_rid = intr_flag ? 1 : 0; in cxgb_setup_interrupts()
899 sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid, in cxgb_setup_interrupts()
901 if (sc->irq_res == NULL) { in cxgb_setup_interrupts()
902 device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n", in cxgb_setup_interrupts()
903 intr_flag, sc->irq_rid); in cxgb_setup_interrupts()
905 sc->irq_rid = 0; in cxgb_setup_interrupts()
907 err = bus_setup_intr(sc->dev, sc->irq_res, in cxgb_setup_interrupts()
909 sc->cxgb_intr, sc, &sc->intr_tag); in cxgb_setup_interrupts()
912 device_printf(sc->dev, in cxgb_setup_interrupts()
914 intr_flag, sc->irq_rid, err); in cxgb_setup_interrupts()
915 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid, in cxgb_setup_interrupts()
916 sc->irq_res); in cxgb_setup_interrupts()
917 sc->irq_res = sc->intr_tag = NULL; in cxgb_setup_interrupts()
918 sc->irq_rid = 0; in cxgb_setup_interrupts()
926 bus_describe_intr(sc->dev, sc->irq_res, sc->intr_tag, "err"); in cxgb_setup_interrupts()
927 for (i = 0; i < sc->msi_count - 1; i++) { in cxgb_setup_interrupts()
929 res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid, in cxgb_setup_interrupts()
932 device_printf(sc->dev, "Cannot allocate interrupt " in cxgb_setup_interrupts()
938 err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET, in cxgb_setup_interrupts()
939 NULL, t3_intr_msix, &sc->sge.qs[i], &tag); in cxgb_setup_interrupts()
941 device_printf(sc->dev, "Cannot set up interrupt " in cxgb_setup_interrupts()
943 bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res); in cxgb_setup_interrupts()
947 sc->msix_irq_rid[i] = rid; in cxgb_setup_interrupts()
948 sc->msix_irq_res[i] = res; in cxgb_setup_interrupts()
949 sc->msix_intr_tag[i] = tag; in cxgb_setup_interrupts()
950 bus_describe_intr(sc->dev, res, tag, "qs%d", i); in cxgb_setup_interrupts()
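Together with setup_sge_qsets() above, the two bus_describe_intr() calls pin down the MSI-X layout: the first vector ("err") services only slow-path and error events, and queue set i is bound to vector i + 1 ("qs%d"). As a one-line sketch (an inference, not code from the driver):

static inline int
qset_to_msix_vector(int qset_idx)
{
	return (qset_idx + 1);	/* vector 0 is the async/error interrupt */
}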
967 desc = p->phy.desc; in cxgb_port_probe()
968 device_set_descf(dev, "Port %d %s", p->port_id, desc); in cxgb_port_probe()
977 pi->port_cdev = make_dev(&cxgb_cdevsw, if_getdunit(pi->ifp), in cxgb_makedev()
978 UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp)); in cxgb_makedev()
980 if (pi->port_cdev == NULL) in cxgb_makedev()
983 pi->port_cdev->si_drv1 = (void *)pi; in cxgb_makedev()
1002 sc = p->adapter; in cxgb_port_attach()
1003 snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d", in cxgb_port_attach()
1004 device_get_unit(device_get_parent(dev)), p->port_id); in cxgb_port_attach()
1005 PORT_LOCK_INIT(p, p->lockbuf); in cxgb_port_attach()
1007 callout_init(&p->link_check_ch, 1); in cxgb_port_attach()
1008 TASK_INIT(&p->link_check_task, 0, check_link_status, p); in cxgb_port_attach()
1011 ifp = p->ifp = if_alloc(IFT_ETHER); in cxgb_port_attach()
1034 * Disable TSO on 4-port - it isn't supported by the firmware. in cxgb_port_attach()
1036 if (sc->params.nports > 2) { in cxgb_port_attach()
1043 ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change, in cxgb_port_attach()
1047 ether_ifattach(ifp, p->hw_addr); in cxgb_port_attach()
1053 if (sc->params.nports <= 2) in cxgb_port_attach()
1081 sc = p->adapter; in cxgb_port_detach()
1086 wakeup(&sc->flags); in cxgb_port_detach()
1088 mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0); in cxgb_port_detach()
1092 if (p->port_cdev != NULL) in cxgb_port_detach()
1093 destroy_dev(p->port_cdev); in cxgb_port_detach()
1096 ether_ifdetach(p->ifp); in cxgb_port_detach()
1098 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { in cxgb_port_detach()
1099 struct sge_qset *qs = &sc->sge.qs[i]; in cxgb_port_detach()
1100 struct sge_txq *txq = &qs->txq[TXQ_ETH]; in cxgb_port_detach()
1102 callout_drain(&txq->txq_watchdog); in cxgb_port_detach()
1103 callout_drain(&txq->txq_timer); in cxgb_port_detach()
1107 if_free(p->ifp); in cxgb_port_detach()
1108 p->ifp = NULL; in cxgb_port_detach()
1112 wakeup_one(&sc->flags); in cxgb_port_detach()
1122 if (sc->flags & FULL_INIT_DONE) { in t3_fatal_err()
1130 device_printf(sc->dev,"encountered fatal error, operation suspended\n"); in t3_fatal_err()
1132 device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n", in t3_fatal_err()
1141 rc = pci_find_cap(sc->dev, cap, ®); in t3_os_find_pci_capability()
1148 pci_save_state(sc->dev); in t3_os_pci_save_state()
1155 pci_restore_state(sc->dev); in t3_os_pci_restore_state()
1160 * t3_os_link_changed - handle link status changes
1166 * @fc: the new flow-control setting
1168 * This is the OS-dependent handler for link status changes. The OS
1170 * then calls this handler for any OS-specific processing.
1176 struct port_info *pi = &adapter->port[port_id]; in t3_os_link_changed()
1177 if_t ifp = pi->ifp; in t3_os_link_changed()
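The call at source line 1797 later in this listing passes seven arguments, which together with the parameter docs above suggests the following signature (a reconstruction; the final parameter name is an assumption):

void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
    int speed, int duplex, int fc, int mac_was_reset);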
1197 * t3_os_phymod_changed - handle PHY module changes
1201 * This is the OS-dependent handler for PHY module changes. It is
1202 * invoked when a PHY module is removed or inserted for any OS-specific
1208 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown" in t3_os_phymod_changed()
1210 struct port_info *pi = &adap->port[port_id]; in t3_os_phymod_changed()
1211 int mod = pi->phy.modtype; in t3_os_phymod_changed()
1213 if (mod != pi->media.ifm_cur->ifm_data) in t3_os_phymod_changed()
1217 if_printf(pi->ifp, "PHY module unplugged\n"); in t3_os_phymod_changed()
1221 if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]); in t3_os_phymod_changed()
1236 bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN); in t3_os_set_hw_addr()
1240 * Programs the XGMAC based on the settings in the ifnet. These settings
1246 if_t ifp = p->ifp; in cxgb_update_mac_settings()
1248 struct cmac *mac = &p->mac; in cxgb_update_mac_settings()
1253 bcopy(if_getlladdr(ifp), p->hw_addr, ETHER_ADDR_LEN); in cxgb_update_mac_settings()
1262 t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging); in cxgb_update_mac_settings()
1263 t3_mac_set_address(mac, 0, p->hw_addr); in cxgb_update_mac_settings()
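The hwtagging value used above is presumably derived from the interface's enabled capabilities, along the lines of (assumption):

	hwtagging = (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0;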
1275 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { in await_mgmt_replies()
1276 if (!--attempts) in await_mgmt_replies()
1289 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts; in init_tp_parity()
1298 m->m_len = m->m_pkthdr.len = sizeof(*req); in init_tp_parity()
1300 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); in init_tp_parity()
1302 req->iff = i; in init_tp_parity()
1311 m->m_len = m->m_pkthdr.len = sizeof(*req); in init_tp_parity()
1313 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); in init_tp_parity()
1315 req->params = htonl(V_L2T_W_IDX(i)); in init_tp_parity()
1324 m->m_len = m->m_pkthdr.len = sizeof(*req); in init_tp_parity()
1326 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); in init_tp_parity()
1328 req->l2t_idx = htonl(V_L2T_W_IDX(i)); in init_tp_parity()
1334 m->m_len = m->m_pkthdr.len = sizeof(*greq); in init_tp_parity()
1336 greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); in init_tp_parity()
1338 greq->mask = htobe64(1); in init_tp_parity()
1347 * setup_rss - configure Receive Side Steering (per-queue connection demux)
1373 nq[pi->tx_chan] += pi->nqsets; in setup_rss()
1382 adap->rrss_map[i] = 0xff; in setup_rss()
1384 if (adap->rrss_map[rspq_map[i]] == 0xff) in setup_rss()
1385 adap->rrss_map[rspq_map[i]] = i; in setup_rss()
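The loop above builds a reverse lookup: rrss_map[q] ends up holding the first RSS table slot that steers to response queue q (or 0xff if none does), which the filter code later uses to validate qset targets. A standalone sketch of the same idea (hypothetical helper):

#include <stdint.h>
#include <string.h>

static void
build_reverse_rss_map(const uint8_t *rspq_map, uint8_t *rrss_map,
    int rss_table_size, int nqueues)
{
	int i;

	memset(rrss_map, 0xff, (size_t)nqueues);
	for (i = 0; i < rss_table_size; i++)
		if (rrss_map[rspq_map[i]] == 0xff)
			rrss_map[rspq_map[i]] = (uint8_t)i;
}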
1403 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT)); in send_pktsched_cmd()
1404 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET; in send_pktsched_cmd()
1405 req->sched = sched; in send_pktsched_cmd()
1406 req->idx = qidx; in send_pktsched_cmd()
1407 req->min = lo; in send_pktsched_cmd()
1408 req->max = hi; in send_pktsched_cmd()
1409 req->binding = port; in send_pktsched_cmd()
1410 m->m_len = m->m_pkthdr.len = sizeof(*req); in send_pktsched_cmd()
1420 for (i = 0; i < (sc)->params.nports; ++i) { in bind_qsets()
1423 for (j = 0; j < pi->nqsets; ++j) { in bind_qsets()
1424 send_pktsched_cmd(sc, 1, pi->first_qset + j, -1, in bind_qsets()
1425 -1, pi->tx_chan); in bind_qsets()
1453 device_printf(adap->dev, in update_tpeeprom()
1459 len = tpeeprom->datasize - 4; in update_tpeeprom()
1461 ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize); in update_tpeeprom()
1466 device_printf(adap->dev, in update_tpeeprom()
1472 ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize, in update_tpeeprom()
1476 device_printf(adap->dev, in update_tpeeprom()
1480 device_printf(adap->dev, in update_tpeeprom()
1503 device_printf(adap->dev, "could not load TP SRAM\n"); in update_tpsram()
1506 device_printf(adap->dev, "updating TP SRAM\n"); in update_tpsram()
1508 ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize); in update_tpsram()
1512 ret = t3_set_proto_sram(adap, tpsram->data); in update_tpsram()
1514 device_printf(adap->dev, "loading protocol SRAM failed\n"); in update_tpsram()
1523 * cxgb_up - enable the adapter
1534 unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS; in cxgb_up()
1536 KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)", in cxgb_up()
1537 __func__, sc->open_device_map)); in cxgb_up()
1539 if ((sc->flags & FULL_INIT_DONE) == 0) { in cxgb_up()
1543 if ((sc->flags & FW_UPTODATE) == 0) in cxgb_up()
1547 if ((sc->flags & TPS_UPTODATE) == 0) in cxgb_up()
1552 sc->params.mc5.nservers = 0; in cxgb_up()
1555 sc->params.mc5.nfilters = mxf; in cxgb_up()
1557 sc->params.mc5.nfilters = min(nfilters, mxf); in cxgb_up()
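In other words: mxf is the number of TCAM entries left for filters after reserving MC5_MIN_TIDS, and the nfilters loader tunable (default -1 at source line 261 above) either takes all of them or is clamped to that maximum.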
1565 t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); in cxgb_up()
1575 sc->flags |= FULL_INIT_DONE; in cxgb_up()
1582 if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) && in cxgb_up()
1584 sc->flags |= TP_PARITY_INIT; in cxgb_up()
1586 if (sc->flags & TP_PARITY_INIT) { in cxgb_up()
1591 if (!(sc->flags & QUEUES_BOUND)) { in cxgb_up()
1594 sc->flags |= QUEUES_BOUND; in cxgb_up()
1621 struct adapter *sc = p->adapter; in cxgb_init()
1631 struct adapter *sc = p->adapter; in cxgb_init_locked()
1632 if_t ifp = p->ifp; in cxgb_init_locked()
1633 struct cmac *mac = &p->mac; in cxgb_init_locked()
1640 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) { in cxgb_init_locked()
1652 * The code that runs during one-time adapter initialization can sleep in cxgb_init_locked()
1655 may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1; in cxgb_init_locked()
1663 if (sc->open_device_map == 0 && ((rc = cxgb_up(sc)) != 0)) in cxgb_init_locked()
1667 if (isset(&sc->open_device_map, p->port_id) && in cxgb_init_locked()
1672 t3_port_intr_enable(sc, p->port_id); in cxgb_init_locked()
1673 if (!mac->multiport) in cxgb_init_locked()
1676 t3_link_start(&p->phy, mac, &p->link_config); in cxgb_init_locked()
1681 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { in cxgb_init_locked()
1682 struct sge_qset *qs = &sc->sge.qs[i]; in cxgb_init_locked()
1683 struct sge_txq *txq = &qs->txq[TXQ_ETH]; in cxgb_init_locked()
1685 callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs, in cxgb_init_locked()
1686 txq->txq_watchdog.c_cpu); in cxgb_init_locked()
1690 setbit(&sc->open_device_map, p->port_id); in cxgb_init_locked()
1691 callout_reset(&p->link_check_ch, in cxgb_init_locked()
1692 p->phy.caps & SUPPORTED_LINK_IRQ ? hz * 3 : hz / 4, in cxgb_init_locked()
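That is, PHYs that can raise a link-change interrupt (SUPPORTED_LINK_IRQ) are re-checked only every 3 seconds as a backstop, while IRQ-less PHYs are polled four times per second.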
1702 wakeup_one(&sc->flags); in cxgb_init_locked()
1710 struct adapter *sc = p->adapter; in cxgb_uninit_locked()
1716 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) { in cxgb_uninit_locked()
1734 wakeup_one(&sc->flags); in cxgb_uninit_locked()
1746 struct adapter *sc = pi->adapter; in cxgb_uninit_synchronized()
1747 if_t ifp = pi->ifp; in cxgb_uninit_synchronized()
1765 clrbit(&sc->open_device_map, pi->port_id); in cxgb_uninit_synchronized()
1766 t3_port_intr_disable(sc, pi->port_id); in cxgb_uninit_synchronized()
1767 taskqueue_drain(sc->tq, &sc->slow_intr_task); in cxgb_uninit_synchronized()
1768 taskqueue_drain(sc->tq, &sc->tick_task); in cxgb_uninit_synchronized()
1770 callout_drain(&pi->link_check_ch); in cxgb_uninit_synchronized()
1771 taskqueue_drain(sc->tq, &pi->link_check_task); in cxgb_uninit_synchronized()
1777 t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0); in cxgb_uninit_synchronized()
1780 t3_set_reg_field(sc, A_XGM_RXFIFO_CFG + pi->mac.offset, in cxgb_uninit_synchronized()
1786 t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset, in cxgb_uninit_synchronized()
1790 t3_mac_disable(&pi->mac, MAC_DIRECTION_RX); in cxgb_uninit_synchronized()
1792 pi->phy.ops->power_down(&pi->phy, 1); in cxgb_uninit_synchronized()
1796 pi->link_config.link_ok = 0; in cxgb_uninit_synchronized()
1797 t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0); in cxgb_uninit_synchronized()
1799 if (sc->open_device_map == 0) in cxgb_uninit_synchronized()
1800 cxgb_down(pi->adapter); in cxgb_uninit_synchronized()
1812 struct adapter *adp = p->adapter; in cxgb_set_lro()
1815 for (i = 0; i < p->nqsets; i++) { in cxgb_set_lro()
1816 q = &adp->sge.qs[p->first_qset + i]; in cxgb_set_lro()
1817 q->lro.enabled = (enabled != 0); in cxgb_set_lro()
1826 struct adapter *sc = p->adapter; in cxgb_ioctl()
1841 mtu = ifr->ifr_mtu; in cxgb_ioctl()
1860 flags = p->if_flags; in cxgb_ioctl()
1874 p->if_flags = if_getflags(ifp); in cxgb_ioctl()
1903 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); in cxgb_ioctl()
1913 "tso4 disabled due to -txcsum.\n"); in cxgb_ioctl()
1925 "tso6 disabled due to -txcsum6.\n"); in cxgb_ioctl()
2000 error = ifmedia_ioctl(ifp, ifr, &p->media, command); in cxgb_ioctl()
2016 * Translates phy->modtype to the correct Ethernet media subtype.
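A sketch of that translation, inferred from the module-type strings at source line 1208 and the standard if_media subtypes (the exact switch body is an assumption; it needs <net/if_media.h> and the driver's phy_modtype enum):

static int
cxgb_ifm_type(int mod)
{
	switch (mod) {
	case phy_modtype_sr:
		return (IFM_10G_SR);
	case phy_modtype_lr:
		return (IFM_10G_LR);
	case phy_modtype_lrm:
		return (IFM_10G_LRM);
	case phy_modtype_twinax:
		return (IFM_10G_TWINAX);
	case phy_modtype_twinax_long:
		return (IFM_10G_TWINAX_LONG);
	case phy_modtype_none:
	default:		/* unknown or unrecognized module */
		return (IFM_NONE);
	}
}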
2048 struct cphy *phy = &p->phy; in cxgb_build_medialist()
2049 struct ifmedia *media = &p->media; in cxgb_build_medialist()
2050 int mod = phy->modtype; in cxgb_build_medialist()
2056 if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) { in cxgb_build_medialist()
2059 if (phy->caps & SUPPORTED_10000baseT_Full) in cxgb_build_medialist()
2062 if (phy->caps & SUPPORTED_1000baseT_Full) in cxgb_build_medialist()
2065 if (phy->caps & SUPPORTED_100baseT_Full) in cxgb_build_medialist()
2068 if (phy->caps & SUPPORTED_10baseT_Full) in cxgb_build_medialist()
2074 } else if (phy->caps & SUPPORTED_TP) { in cxgb_build_medialist()
2077 KASSERT(phy->caps & SUPPORTED_10000baseT_Full, in cxgb_build_medialist()
2078 ("%s: unexpected cap 0x%x", __func__, phy->caps)); in cxgb_build_medialist()
2083 } else if (phy->caps & SUPPORTED_FIBRE && in cxgb_build_medialist()
2084 phy->caps & SUPPORTED_10000baseT_Full) { in cxgb_build_medialist()
2094 } else if (phy->caps & SUPPORTED_FIBRE && in cxgb_build_medialist()
2095 phy->caps & SUPPORTED_1000baseT_Full) { in cxgb_build_medialist()
2098 /* XXX: Lie and claim to be SX, could actually be any 1G-X */ in cxgb_build_medialist()
2104 phy->caps)); in cxgb_build_medialist()
2114 struct ifmedia_entry *cur = p->media.ifm_cur; in cxgb_media_status()
2115 int speed = p->link_config.speed; in cxgb_media_status()
2117 if (cur->ifm_data != p->phy.modtype) { in cxgb_media_status()
2119 cur = p->media.ifm_cur; in cxgb_media_status()
2122 ifmr->ifm_status = IFM_AVALID; in cxgb_media_status()
2123 if (!p->link_config.link_ok) in cxgb_media_status()
2126 ifmr->ifm_status |= IFM_ACTIVE; in cxgb_media_status()
2132 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO) in cxgb_media_status()
2134 KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg, in cxgb_media_status()
2135 ("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps)); in cxgb_media_status()
2137 ifmr->ifm_active = IFM_ETHER | IFM_FDX; in cxgb_media_status()
2139 ifmr->ifm_active |= IFM_10G_T; in cxgb_media_status()
2141 ifmr->ifm_active |= IFM_1000_T; in cxgb_media_status()
2143 ifmr->ifm_active |= IFM_100_TX; in cxgb_media_status()
2145 ifmr->ifm_active |= IFM_10_T; in cxgb_media_status()
2155 struct adapter *sc = pi->adapter; in cxgb_get_counter()
2156 struct cmac *mac = &pi->mac; in cxgb_get_counter()
2157 struct mac_stats *mstats = &mac->stats; in cxgb_get_counter()
2163 return (mstats->rx_frames); in cxgb_get_counter()
2166 return (mstats->rx_jabber + mstats->rx_data_errs + in cxgb_get_counter()
2167 mstats->rx_sequence_errs + mstats->rx_runt + in cxgb_get_counter()
2168 mstats->rx_too_long + mstats->rx_mac_internal_errs + in cxgb_get_counter()
2169 mstats->rx_short + mstats->rx_fcs_errs); in cxgb_get_counter()
2172 return (mstats->tx_frames); in cxgb_get_counter()
2175 return (mstats->tx_excess_collisions + mstats->tx_underrun + in cxgb_get_counter()
2176 mstats->tx_len_errs + mstats->tx_mac_internal_errs + in cxgb_get_counter()
2177 mstats->tx_excess_deferral + mstats->tx_fcs_errs); in cxgb_get_counter()
2180 return (mstats->tx_total_collisions); in cxgb_get_counter()
2183 return (mstats->rx_octets); in cxgb_get_counter()
2186 return (mstats->tx_octets); in cxgb_get_counter()
2189 return (mstats->rx_mcast_frames); in cxgb_get_counter()
2192 return (mstats->tx_mcast_frames); in cxgb_get_counter()
2195 return (mstats->rx_cong_drops); in cxgb_get_counter()
2202 if (sc->flags & FULL_INIT_DONE) { in cxgb_get_counter()
2203 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) in cxgb_get_counter()
2204 drops += sc->sge.qs[i].txq[TXQ_ETH].txq_mr->br_drops; in cxgb_get_counter()
2223 taskqueue_enqueue(sc->tq, &sc->slow_intr_task); in cxgb_async_intr()
2230 struct adapter *sc = pi->adapter; in link_check_callout()
2232 if (!isset(&sc->open_device_map, pi->port_id)) in link_check_callout()
2235 taskqueue_enqueue(sc->tq, &pi->link_check_task); in link_check_callout()
2242 struct adapter *sc = pi->adapter; in check_link_status()
2244 if (!isset(&sc->open_device_map, pi->port_id)) in check_link_status()
2247 t3_link_changed(sc, pi->port_id); in check_link_status()
2249 if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ) || in check_link_status()
2250 pi->link_config.link_ok == 0) in check_link_status()
2251 callout_reset(&pi->link_check_ch, hz, link_check_callout, pi); in check_link_status()
2262 callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi); in t3_os_link_intr()
2270 if (sc->flags & CXGB_SHUTDOWN) in check_t3b2_mac()
2274 struct port_info *p = &sc->port[i]; in check_t3b2_mac()
2277 if_t ifp = p->ifp; in check_t3b2_mac()
2280 if (!isset(&sc->open_device_map, p->port_id) || p->link_fault || in check_t3b2_mac()
2281 !p->link_config.link_ok) in check_t3b2_mac()
2286 __func__, if_getdrvflags(ifp), sc->open_device_map)); in check_t3b2_mac()
2289 status = t3b2_mac_watchdog_task(&p->mac); in check_t3b2_mac()
2291 p->mac.stats.num_toggled++; in check_t3b2_mac()
2293 struct cmac *mac = &p->mac; in check_t3b2_mac()
2296 t3_link_start(&p->phy, mac, &p->link_config); in check_t3b2_mac()
2298 t3_port_intr_enable(sc, p->port_id); in check_t3b2_mac()
2299 p->mac.stats.num_resets++; in check_t3b2_mac()
2310 if (sc->flags & CXGB_SHUTDOWN) in cxgb_tick()
2313 taskqueue_enqueue(sc->tq, &sc->tick_task); in cxgb_tick()
2314 callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc); in cxgb_tick()
2325 if (timevalcmp(&tv, &pi->last_refreshed, <)) in cxgb_refresh_stats()
2329 t3_mac_update_stats(&pi->mac); in cxgb_refresh_stats()
2331 getmicrotime(&pi->last_refreshed); in cxgb_refresh_stats()
2338 const struct adapter_params *p = &sc->params; in cxgb_tick_handler()
2342 if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE)) in cxgb_tick_handler()
2345 if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map) in cxgb_tick_handler()
2350 struct sge_qset *qs = &sc->sge.qs[0]; in cxgb_tick_handler()
2376 for (i = 0; i < sc->params.nports; i++) { in cxgb_tick_handler()
2377 struct port_info *pi = &sc->port[i]; in cxgb_tick_handler()
2378 struct cmac *mac = &pi->mac; in cxgb_tick_handler()
2380 if (!isset(&sc->open_device_map, pi->port_id)) in cxgb_tick_handler()
2385 if (mac->multiport) in cxgb_tick_handler()
2389 cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset); in cxgb_tick_handler()
2392 mac->stats.rx_fifo_ovfl++; in cxgb_tick_handler()
2395 t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset); in cxgb_tick_handler()
2423 struct adapter *adapter = pi->adapter; in set_eeprom()
2434 aligned_offset + aligned_len - 4, in set_eeprom()
2435 (u32 *)&buf[aligned_len - 4]); in set_eeprom()
2446 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { in set_eeprom()
2483 struct port_info *pi = dev->si_drv1; in cxgb_extension_ioctl()
2484 adapter_t *sc = pi->adapter; in cxgb_extension_ioctl()
2503 struct cphy *phy = &pi->phy; in cxgb_extension_ioctl()
2506 if (!phy->mdio_read) in cxgb_extension_ioctl()
2509 mmd = mid->phy_id >> 8; in cxgb_extension_ioctl()
2515 error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd, in cxgb_extension_ioctl()
2516 mid->reg_num, &val); in cxgb_extension_ioctl()
2518 error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0, in cxgb_extension_ioctl()
2519 mid->reg_num & 0x1f, &val); in cxgb_extension_ioctl()
2521 mid->val_out = val; in cxgb_extension_ioctl()
2525 struct cphy *phy = &pi->phy; in cxgb_extension_ioctl()
2528 if (!phy->mdio_write) in cxgb_extension_ioctl()
2531 mmd = mid->phy_id >> 8; in cxgb_extension_ioctl()
2537 error = phy->mdio_write(sc, mid->phy_id & 0x1f, in cxgb_extension_ioctl()
2538 mmd, mid->reg_num, mid->val_in); in cxgb_extension_ioctl()
2540 error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0, in cxgb_extension_ioctl()
2541 mid->reg_num & 0x1f, in cxgb_extension_ioctl()
2542 mid->val_in); in cxgb_extension_ioctl()
2547 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) in cxgb_extension_ioctl()
2549 t3_write_reg(sc, edata->addr, edata->val); in cxgb_extension_ioctl()
2554 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) in cxgb_extension_ioctl()
2556 edata->val = t3_read_reg(sc, edata->addr); in cxgb_extension_ioctl()
2561 mtx_lock_spin(&sc->sge.reg_lock); in cxgb_extension_ioctl()
2562 switch (ecntxt->cntxt_type) { in cxgb_extension_ioctl()
2564 error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id, in cxgb_extension_ioctl()
2565 ecntxt->data); in cxgb_extension_ioctl()
2568 error = -t3_sge_read_fl(sc, ecntxt->cntxt_id, in cxgb_extension_ioctl()
2569 ecntxt->data); in cxgb_extension_ioctl()
2572 error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id, in cxgb_extension_ioctl()
2573 ecntxt->data); in cxgb_extension_ioctl()
2576 error = -t3_sge_read_cq(sc, ecntxt->cntxt_id, in cxgb_extension_ioctl()
2577 ecntxt->data); in cxgb_extension_ioctl()
2583 mtx_unlock_spin(&sc->sge.reg_lock); in cxgb_extension_ioctl()
2589 if (edesc->queue_num >= SGE_QSETS * 6) in cxgb_extension_ioctl()
2591 ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6], in cxgb_extension_ioctl()
2592 edesc->queue_num % 6, edesc->idx, edesc->data); in cxgb_extension_ioctl()
2595 edesc->size = ret; in cxgb_extension_ioctl()
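The divide and modulo by 6 above reflect the six descriptor rings that make up each T3 queue set: one response queue, two free lists, and three Tx queues (Ethernet, offload, and control).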
2601 int q1 = pi->first_qset; in cxgb_extension_ioctl()
2602 int nqsets = pi->nqsets; in cxgb_extension_ioctl()
2605 if (t->qset_idx >= nqsets) in cxgb_extension_ioctl()
2608 i = q1 + t->qset_idx; in cxgb_extension_ioctl()
2609 q = &sc->params.sge.qset[i]; in cxgb_extension_ioctl()
2610 t->rspq_size = q->rspq_size; in cxgb_extension_ioctl()
2611 t->txq_size[0] = q->txq_size[0]; in cxgb_extension_ioctl()
2612 t->txq_size[1] = q->txq_size[1]; in cxgb_extension_ioctl()
2613 t->txq_size[2] = q->txq_size[2]; in cxgb_extension_ioctl()
2614 t->fl_size[0] = q->fl_size; in cxgb_extension_ioctl()
2615 t->fl_size[1] = q->jumbo_size; in cxgb_extension_ioctl()
2616 t->polling = q->polling; in cxgb_extension_ioctl()
2617 t->lro = q->lro; in cxgb_extension_ioctl()
2618 t->intr_lat = q->coalesce_usecs; in cxgb_extension_ioctl()
2619 t->cong_thres = q->cong_thres; in cxgb_extension_ioctl()
2620 t->qnum = i; in cxgb_extension_ioctl()
2622 if ((sc->flags & FULL_INIT_DONE) == 0) in cxgb_extension_ioctl()
2623 t->vector = 0; in cxgb_extension_ioctl()
2624 else if (sc->flags & USING_MSIX) in cxgb_extension_ioctl()
2625 t->vector = rman_get_start(sc->msix_irq_res[i]); in cxgb_extension_ioctl()
2627 t->vector = rman_get_start(sc->irq_res); in cxgb_extension_ioctl()
2633 edata->val = pi->nqsets; in cxgb_extension_ioctl()
2651 if (sc->open_device_map || sc->flags & FULL_INIT_DONE) { in cxgb_extension_ioctl()
2656 fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT); in cxgb_extension_ioctl()
2660 error = copyin(t->buf, fw_data, t->len); in cxgb_extension_ioctl()
2663 error = -t3_load_fw(sc, fw_data, t->len); in cxgb_extension_ioctl()
2666 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), in cxgb_extension_ioctl()
2672 sc->flags |= FW_UPTODATE; in cxgb_extension_ioctl()
2682 boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT); in cxgb_extension_ioctl()
2686 error = copyin(t->buf, boot_data, t->len); in cxgb_extension_ioctl()
2688 error = -t3_load_boot(sc, boot_data, t->len); in cxgb_extension_ioctl()
2695 struct tp_params *p = &sc->params.tp; in cxgb_extension_ioctl()
2700 m->tx_pg_sz = p->tx_pg_size; in cxgb_extension_ioctl()
2701 m->tx_num_pg = p->tx_num_pgs; in cxgb_extension_ioctl()
2702 m->rx_pg_sz = p->rx_pg_size; in cxgb_extension_ioctl()
2703 m->rx_num_pg = p->rx_num_pgs; in cxgb_extension_ioctl()
2704 m->pm_total = p->pmtx_size + p->chan_rx_size * p->nchan; in cxgb_extension_ioctl()
2710 struct tp_params *p = &sc->params.tp; in cxgb_extension_ioctl()
2714 if (sc->flags & FULL_INIT_DONE) in cxgb_extension_ioctl()
2717 if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) || in cxgb_extension_ioctl()
2718 !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1))) in cxgb_extension_ioctl()
2720 if (!(m->rx_pg_sz & 0x14000)) in cxgb_extension_ioctl()
2722 if (!(m->tx_pg_sz & 0x1554000)) in cxgb_extension_ioctl()
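The magic bitmasks whitelist the legal power-of-four page sizes: 0x14000 permits Rx pages of 16 KB or 64 KB, and 0x1554000 permits Tx pages of 16 KB, 64 KB, 256 KB, 1 MB, 4 MB, or 16 MB.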
2724 if (m->tx_num_pg == -1) in cxgb_extension_ioctl()
2725 m->tx_num_pg = p->tx_num_pgs; in cxgb_extension_ioctl()
2726 if (m->rx_num_pg == -1) in cxgb_extension_ioctl()
2727 m->rx_num_pg = p->rx_num_pgs; in cxgb_extension_ioctl()
2728 if (m->tx_num_pg % 24 || m->rx_num_pg % 24) in cxgb_extension_ioctl()
2730 if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size || in cxgb_extension_ioctl()
2731 m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size) in cxgb_extension_ioctl()
2734 p->rx_pg_size = m->rx_pg_sz; in cxgb_extension_ioctl()
2735 p->tx_pg_size = m->tx_pg_sz; in cxgb_extension_ioctl()
2736 p->rx_num_pgs = m->rx_num_pg; in cxgb_extension_ioctl()
2737 p->tx_num_pgs = m->tx_num_pg; in cxgb_extension_ioctl()
2748 if (m->nmtus != NMTUS) in cxgb_extension_ioctl()
2750 if (m->mtus[0] < 81) /* accommodate SACK */ in cxgb_extension_ioctl()
2757 if (m->mtus[i] < m->mtus[i - 1]) in cxgb_extension_ioctl()
2760 memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus)); in cxgb_extension_ioctl()
2769 memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus)); in cxgb_extension_ioctl()
2770 m->nmtus = NMTUS; in cxgb_extension_ioctl()
2783 uint32_t len = t->len, addr = t->addr; in cxgb_extension_ioctl()
2787 if (!(sc->flags & FULL_INIT_DONE)) in cxgb_extension_ioctl()
2791 if (t->mem_id == MEM_CM) in cxgb_extension_ioctl()
2792 mem = &sc->cm; in cxgb_extension_ioctl()
2793 else if (t->mem_id == MEM_PMRX) in cxgb_extension_ioctl()
2794 mem = &sc->pmrx; in cxgb_extension_ioctl()
2795 else if (t->mem_id == MEM_PMTX) in cxgb_extension_ioctl()
2796 mem = &sc->pmtx; in cxgb_extension_ioctl()
2805 t->version = 3 | (sc->params.rev << 10); in cxgb_extension_ioctl()
2811 useraddr = (uint8_t *)t->buf; in cxgb_extension_ioctl()
2817 return (-error); in cxgb_extension_ioctl()
2822 len -= chunk; in cxgb_extension_ioctl()
2831 if (!(sc->flags & FULL_INIT_DONE)) in cxgb_extension_ioctl()
2833 return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf); in cxgb_extension_ioctl()
2840 tp = (const struct trace_params *)&t->sip; in cxgb_extension_ioctl()
2841 if (t->config_tx) in cxgb_extension_ioctl()
2842 t3_config_trace_filter(sc, tp, 0, t->invert_match, in cxgb_extension_ioctl()
2843 t->trace_tx); in cxgb_extension_ioctl()
2844 if (t->config_rx) in cxgb_extension_ioctl()
2845 t3_config_trace_filter(sc, tp, 1, t->invert_match, in cxgb_extension_ioctl()
2846 t->trace_rx); in cxgb_extension_ioctl()
2851 if (sc->open_device_map == 0) in cxgb_extension_ioctl()
2853 send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max, in cxgb_extension_ioctl()
2854 p->binding); in cxgb_extension_ioctl()
2864 if (regs->len > reglen) in cxgb_extension_ioctl()
2865 regs->len = reglen; in cxgb_extension_ioctl()
2866 else if (regs->len < reglen) in cxgb_extension_ioctl()
2871 error = copyout(buf, regs->data, reglen); in cxgb_extension_ioctl()
2881 if ((sc->flags & FULL_INIT_DONE) == 0) in cxgb_extension_ioctl()
2883 if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) || in cxgb_extension_ioctl()
2884 !in_range(t->channel, 0, 1) || in cxgb_extension_ioctl()
2885 !in_range(t->kbps, 0, 10000000) || in cxgb_extension_ioctl()
2886 !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) || in cxgb_extension_ioctl()
2887 !in_range(t->flow_ipg, 0, in cxgb_extension_ioctl()
2891 if (t->kbps >= 0) { in cxgb_extension_ioctl()
2892 error = t3_config_sched(sc, t->kbps, t->sched); in cxgb_extension_ioctl()
2894 return (-error); in cxgb_extension_ioctl()
2896 if (t->class_ipg >= 0) in cxgb_extension_ioctl()
2897 t3_set_sched_ipg(sc, t->sched, t->class_ipg); in cxgb_extension_ioctl()
2898 if (t->flow_ipg >= 0) { in cxgb_extension_ioctl()
2899 t->flow_ipg *= 1000; /* us -> ns */ in cxgb_extension_ioctl()
2900 t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1); in cxgb_extension_ioctl()
2902 if (t->mode >= 0) { in cxgb_extension_ioctl()
2903 int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched); in cxgb_extension_ioctl()
2906 bit, t->mode ? bit : 0); in cxgb_extension_ioctl()
2908 if (t->channel >= 0) in cxgb_extension_ioctl()
2910 1 << t->sched, t->channel << t->sched); in cxgb_extension_ioctl()
2918 if (e->offset & 3 || e->offset >= EEPROMSIZE || in cxgb_extension_ioctl()
2919 e->len > EEPROMSIZE || e->offset + e->len > EEPROMSIZE) { in cxgb_extension_ioctl()
2927 e->magic = EEPROM_MAGIC; in cxgb_extension_ioctl()
2928 for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4) in cxgb_extension_ioctl()
2929 error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]); in cxgb_extension_ioctl()
2932 error = copyout(buf + e->offset, e->data, e->len); in cxgb_extension_ioctl()
2938 if (!(sc->flags & FULL_INIT_DONE)) in cxgb_extension_ioctl()
2942 t3_mac_update_stats(&pi->mac); in cxgb_extension_ioctl()
2943 memset(&pi->mac.stats, 0, sizeof(pi->mac.stats)); in cxgb_extension_ioctl()
2953 if (la->bufsize < LA_BUFSIZE) in cxgb_extension_ioctl()
2957 error = -t3_get_up_la(sc, &la->stopped, &la->idx, in cxgb_extension_ioctl()
2958 &la->bufsize, buf); in cxgb_extension_ioctl()
2960 error = copyout(buf, la->data, la->bufsize); in cxgb_extension_ioctl()
2973 if (ioqs->bufsize < IOQS_BUFSIZE) in cxgb_extension_ioctl()
2977 error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf); in cxgb_extension_ioctl()
2982 ioqs->ioq_rx_enable = *v++; in cxgb_extension_ioctl()
2983 ioqs->ioq_tx_enable = *v++; in cxgb_extension_ioctl()
2984 ioqs->ioq_rx_status = *v++; in cxgb_extension_ioctl()
2985 ioqs->ioq_tx_status = *v++; in cxgb_extension_ioctl()
2987 error = copyout(v, ioqs->data, ioqs->bufsize); in cxgb_extension_ioctl()
2996 unsigned int nfilters = sc->params.mc5.nfilters; in cxgb_extension_ioctl()
2999 return (EOPNOTSUPP); /* No TCAM */ in cxgb_extension_ioctl()
3000 if (!(sc->flags & FULL_INIT_DONE)) in cxgb_extension_ioctl()
3003 return (EBUSY); /* TOE will use TCAM */ in cxgb_extension_ioctl()
3006 if (f->filter_id >= nfilters || in cxgb_extension_ioctl()
3007 (f->val.dip && f->mask.dip != 0xffffffff) || in cxgb_extension_ioctl()
3008 (f->val.sport && f->mask.sport != 0xffff) || in cxgb_extension_ioctl()
3009 (f->val.dport && f->mask.dport != 0xffff) || in cxgb_extension_ioctl()
3010 (f->val.vlan && f->mask.vlan != 0xfff) || in cxgb_extension_ioctl()
3011 (f->val.vlan_prio && in cxgb_extension_ioctl()
3012 f->mask.vlan_prio != FILTER_NO_VLAN_PRI) || in cxgb_extension_ioctl()
3013 (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) || in cxgb_extension_ioctl()
3014 f->qset >= SGE_QSETS || in cxgb_extension_ioctl()
3015 sc->rrss_map[f->qset] >= RSS_TABLE_SIZE) in cxgb_extension_ioctl()
3019 KASSERT(sc->filters, ("filter table NULL\n")); in cxgb_extension_ioctl()
3021 p = &sc->filters[f->filter_id]; in cxgb_extension_ioctl()
3022 if (p->locked) in cxgb_extension_ioctl()
3026 p->sip = f->val.sip; in cxgb_extension_ioctl()
3027 p->sip_mask = f->mask.sip; in cxgb_extension_ioctl()
3028 p->dip = f->val.dip; in cxgb_extension_ioctl()
3029 p->sport = f->val.sport; in cxgb_extension_ioctl()
3030 p->dport = f->val.dport; in cxgb_extension_ioctl()
3031 p->vlan = f->mask.vlan ? f->val.vlan : 0xfff; in cxgb_extension_ioctl()
3032 p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) : in cxgb_extension_ioctl()
3034 p->mac_hit = f->mac_hit; in cxgb_extension_ioctl()
3035 p->mac_vld = f->mac_addr_idx != 0xffff; in cxgb_extension_ioctl()
3036 p->mac_idx = f->mac_addr_idx; in cxgb_extension_ioctl()
3037 p->pkt_type = f->proto; in cxgb_extension_ioctl()
3038 p->report_filter_id = f->want_filter_id; in cxgb_extension_ioctl()
3039 p->pass = f->pass; in cxgb_extension_ioctl()
3040 p->rss = f->rss; in cxgb_extension_ioctl()
3041 p->qset = f->qset; in cxgb_extension_ioctl()
3043 error = set_filter(sc, f->filter_id, p); in cxgb_extension_ioctl()
3045 p->valid = 1; in cxgb_extension_ioctl()
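A hypothetical userland sketch of exercising this filter path through the port character device created in cxgb_makedev(); the structure layout, ioctl name, and device path are inferred from the f-> field accesses above and are untested assumptions:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>
/* struct ch_filter and CHELSIO_SET_FILTER as presumed from cxgb_ioctl.h */

int
main(void)
{
	struct ch_filter f;
	int fd;

	fd = open("/dev/cxgb0", O_RDWR);	/* device name is an assumption */
	if (fd < 0)
		err(1, "open");
	memset(&f, 0, sizeof(f));
	f.filter_id = 0;		/* TCAM slot to program */
	f.val.dport = 80;		/* match destination port 80 ... */
	f.mask.dport = 0xffff;		/* ... exactly, as the checks above require */
	f.mac_addr_idx = 0xffff;	/* no exact-MAC qualification */
	f.qset = 0;			/* steer matches to queue set 0 */
	f.pass = 1;			/* pass (rather than drop) matching packets */
	if (ioctl(fd, CHELSIO_SET_FILTER, &f) < 0)
		err(1, "CHELSIO_SET_FILTER");
	return (0);
}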
3051 unsigned int nfilters = sc->params.mc5.nfilters; in cxgb_extension_ioctl()
3055 if (!(sc->flags & FULL_INIT_DONE)) in cxgb_extension_ioctl()
3057 if (nfilters == 0 || sc->filters == NULL) in cxgb_extension_ioctl()
3059 if (f->filter_id >= nfilters) in cxgb_extension_ioctl()
3062 p = &sc->filters[f->filter_id]; in cxgb_extension_ioctl()
3063 if (p->locked) in cxgb_extension_ioctl()
3065 if (!p->valid) in cxgb_extension_ioctl()
3069 p->sip = p->sip_mask = 0xffffffff; in cxgb_extension_ioctl()
3070 p->vlan = 0xfff; in cxgb_extension_ioctl()
3071 p->vlan_prio = FILTER_NO_VLAN_PRI; in cxgb_extension_ioctl()
3072 p->pkt_type = 1; in cxgb_extension_ioctl()
3073 error = set_filter(sc, f->filter_id, p); in cxgb_extension_ioctl()
3079 unsigned int i, nfilters = sc->params.mc5.nfilters; in cxgb_extension_ioctl()
3083 if (!(sc->flags & FULL_INIT_DONE)) in cxgb_extension_ioctl()
3085 if (nfilters == 0 || sc->filters == NULL) in cxgb_extension_ioctl()
3088 i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1; in cxgb_extension_ioctl()
3090 p = &sc->filters[i]; in cxgb_extension_ioctl()
3091 if (!p->valid) in cxgb_extension_ioctl()
3096 f->filter_id = i; in cxgb_extension_ioctl()
3097 f->val.sip = p->sip; in cxgb_extension_ioctl()
3098 f->mask.sip = p->sip_mask; in cxgb_extension_ioctl()
3099 f->val.dip = p->dip; in cxgb_extension_ioctl()
3100 f->mask.dip = p->dip ? 0xffffffff : 0; in cxgb_extension_ioctl()
3101 f->val.sport = p->sport; in cxgb_extension_ioctl()
3102 f->mask.sport = p->sport ? 0xffff : 0; in cxgb_extension_ioctl()
3103 f->val.dport = p->dport; in cxgb_extension_ioctl()
3104 f->mask.dport = p->dport ? 0xffff : 0; in cxgb_extension_ioctl()
3105 f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan; in cxgb_extension_ioctl()
3106 f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff; in cxgb_extension_ioctl()
3107 f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ? in cxgb_extension_ioctl()
3108 0 : p->vlan_prio; in cxgb_extension_ioctl()
3109 f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ? in cxgb_extension_ioctl()
3111 f->mac_hit = p->mac_hit; in cxgb_extension_ioctl()
3112 f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff; in cxgb_extension_ioctl()
3113 f->proto = p->pkt_type; in cxgb_extension_ioctl()
3114 f->want_filter_id = p->report_filter_id; in cxgb_extension_ioctl()
3115 f->pass = p->pass; in cxgb_extension_ioctl()
3116 f->rss = p->rss; in cxgb_extension_ioctl()
3117 f->qset = p->qset; in cxgb_extension_ioctl()
3123 f->filter_id = 0xffffffff; in cxgb_extension_ioctl()
3161 regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31); in cxgb_get_regs()
3164 * We skip the MAC statistics registers because they are clear-on-read. in cxgb_get_regs()
3165 * Also reading multi-register stats would need to synchronize with the in cxgb_get_regs()
3184 unsigned int nfilters = sc->params.mc5.nfilters; in alloc_filters()
3190 sc->filters = p; in alloc_filters()
3192 p = &sc->filters[nfilters - 1]; in alloc_filters()
3193 p->vlan = 0xfff; in alloc_filters()
3194 p->vlan_prio = FILTER_NO_VLAN_PRI; in alloc_filters()
3195 p->pass = p->rss = p->valid = p->locked = 1; in alloc_filters()
3204 unsigned int nfilters = sc->params.mc5.nfilters; in setup_hw_filters()
3206 if (!sc->filters) in setup_hw_filters()
3212 if (sc->filters[i].locked) in setup_hw_filters()
3213 rc = set_filter(sc, i, &sc->filters[i]); in setup_hw_filters()
3232 id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes - in set_filter()
3233 sc->params.mc5.nfilters; in set_filter()
3236 m->m_len = m->m_pkthdr.len = len; in set_filter()
3240 wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC); in set_filter()
3244 txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); in set_filter()
3245 txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8)); in set_filter()
3247 oreq->local_port = htons(f->dport); in set_filter()
3248 oreq->peer_port = htons(f->sport); in set_filter()
3249 oreq->local_ip = htonl(f->dip); in set_filter()
3250 oreq->peer_ip = htonl(f->sip); in set_filter()
3251 oreq->peer_netmask = htonl(f->sip_mask); in set_filter()
3252 oreq->opt0h = 0; in set_filter()
3253 oreq->opt0l = htonl(F_NO_OFFLOAD); in set_filter()
3254 oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) | in set_filter()
3256 V_VLAN_PRI(f->vlan_prio >> 1) | in set_filter()
3257 V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) | in set_filter()
3258 V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) | in set_filter()
3259 V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4))); in set_filter()
3263 (f->report_filter_id << 15) | (1 << 23) | in set_filter()
3264 ((u64)f->pass << 35) | ((u64)!f->rss << 36)); in set_filter()
3268 if (f->pass && !f->rss) { in set_filter()
3271 m->m_len = m->m_pkthdr.len = len; in set_filter()
3274 sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); in set_filter()
3276 (u64)sc->rrss_map[f->qset] << 19); in set_filter()
3287 req->reply = V_NO_REPLY(1); in mk_set_tcb_field()
3288 req->cpu_idx = 0; in mk_set_tcb_field()
3289 req->word = htons(word); in mk_set_tcb_field()
3290 req->mask = htobe64(mask); in mk_set_tcb_field()
3291 req->val = htobe64(val); in mk_set_tcb_field()
3300 txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); in set_tcb_field_ulp()
3301 txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8)); in set_tcb_field_ulp()
3314 * in - the only guarantee is that sc->sc_lock is a valid lock. in t3_iterate()
3326 struct adapter *sc = pi->adapter; in toe_capability()
3334 if (!(sc->flags & FULL_INIT_DONE)) { in toe_capability()
3340 if (isset(&sc->offload_map, pi->port_id)) in toe_capability()
3343 if (!(sc->flags & TOM_INIT_DONE)) { in toe_capability()
3352 KASSERT(sc->tom_softc != NULL, in toe_capability()
3354 KASSERT(sc->flags & TOM_INIT_DONE, in toe_capability()
3358 setbit(&sc->offload_map, pi->port_id); in toe_capability()
3365 if (!isset(&sc->offload_map, MAX_NPORTS) && in toe_capability()
3367 setbit(&sc->offload_map, MAX_NPORTS); in toe_capability()
3369 if (!isset(&sc->offload_map, pi->port_id)) in toe_capability()
3372 KASSERT(sc->flags & TOM_INIT_DONE, in toe_capability()
3374 clrbit(&sc->offload_map, pi->port_id); in toe_capability()
3391 if (u->uld_id == ui->uld_id) { in t3_register_uld()
3398 ui->refcount = 0; in t3_register_uld()
3414 if (ui->refcount > 0) { in t3_unregister_uld()
3438 if (ui->uld_id == id) { in t3_activate_uld()
3439 rc = ui->activate(sc); in t3_activate_uld()
3441 ui->refcount++; in t3_activate_uld()
3460 if (ui->uld_id == id) { in t3_deactivate_uld()
3461 rc = ui->deactivate(sc); in t3_deactivate_uld()
3463 ui->refcount--; in t3_deactivate_uld()
3490 loc = (uintptr_t *) &sc->cpl_handler[opcode]; in t3_register_cpl_handler()
3545 adap = pi->adapter; in cxgb_debugnet_init()
3547 *nrxr = adap->nqsets; in cxgb_debugnet_init()
3548 *ncl = adap->sge.qs[0].fl[1].size; in cxgb_debugnet_init()
3549 *clsize = adap->sge.qs[0].fl[1].buf_size; in cxgb_debugnet_init()
3562 for (i = 0; i < pi->adapter->nqsets; i++) { in cxgb_debugnet_event()
3563 qs = &pi->adapter->sge.qs[i]; in cxgb_debugnet_event()
3566 qs->fl[0].zone = zone_pack; in cxgb_debugnet_event()
3567 qs->fl[1].zone = zone_clust; in cxgb_debugnet_event()
3568 qs->lro.enabled = 0; in cxgb_debugnet_event()
3583 qs = &pi->adapter->sge.qs[pi->first_qset]; in cxgb_debugnet_transmit()
3598 adap = pi->adapter; in cxgb_debugnet_poll()
3599 for (i = 0; i < adap->nqsets; i++) in cxgb_debugnet_poll()
3600 (void)cxgb_debugnet_poll_rx(adap, &adap->sge.qs[i]); in cxgb_debugnet_poll()
3601 (void)cxgb_debugnet_poll_tx(&adap->sge.qs[pi->first_qset]); in cxgb_debugnet_poll()