/* Excerpt from the FreeBSD cxgb(4) Chelsio T3 10GbE driver, sys/dev/cxgb/cxgb_main.c (non-contiguous lines from a source cross-reference search). */
SPDX-License-Identifier: BSD-2-Clause
Copyright (c) 2007-2009, Chelsio Inc.
contributors may be used to endorse or promote products derived from
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
{PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
{PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
nitems(cxgb_identifiers) - 1);
 * Attachment glue for the ports. Attachment is done directly to the
 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
"MSI-X, MSI, INTx selector");
 * The driver uses an auto-queue algorithm by default.
 * To disable it and force a single queue-set per port, use multiq = 0
"use min(ncpus/ports, 8) queue-sets per port");
257 "update firmware even if up to date");
259 int cxgb_use_16k_clusters = -1;
263 static int nfilters = -1;
304 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
315 switch(adapter->params.rev) { in t3rev2char()
335 for (id = cxgb_identifiers; id->desc != NULL; id++) { in cxgb_get_ident()
336 if ((id->vendor == pci_get_vendor(dev)) && in cxgb_get_ident()
337 (id->device == pci_get_device(dev))) { in cxgb_get_ident()
354 ai = t3_get_adapter_info(id->index); in cxgb_get_adapter_info()
370 nports = ai->nports0 + ai->nports1; in cxgb_controller_probe()
376 device_set_descf(dev, "%s, %d %s", ai->desc, nports, ports); in cxgb_controller_probe()
392 device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME); in upgrade_fw()
395 device_printf(sc->dev, "installing firmware on card\n"); in upgrade_fw()
396 status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize); in upgrade_fw()
399 device_printf(sc->dev, "failed to install firmware: %d\n", in upgrade_fw()
403 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d", in upgrade_fw()
417 * 1. Determine if the device supports MSI or MSI-X.
422 * 5. Allocate the BAR for doing MSI-X.
423 * 6. Setup the line interrupt iff MSI-X is not supported.
426 * 9. Check if the firmware and SRAM are up-to-date. They will be
427 * auto-updated later (before FULL_INIT_DONE), if required.
436 * the above comment. Failure to do so will result in problems on various
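/*
 * A condensed, hedged sketch (not the driver's literal code) of the
 * fallback order the numbered steps above describe: try MSI-X first,
 * then MSI, then the legacy INTx pin, honoring the msi_allowed tunable.
 * Only standard FreeBSD PCI KPIs appear here; error paths are trimmed.
 */
static void
choose_interrupt_type(device_t dev, adapter_t *sc, int msi_allowed,
    int msi_needed)
{
	int count;

	if (msi_allowed >= 2) {
		count = msi_needed;
		if (pci_alloc_msix(dev, &count) == 0) {
			if (count == msi_needed) {
				sc->flags |= USING_MSIX;
				return;
			}
			pci_release_msi(dev);	/* partial grant: fall back */
		}
	}
	if (msi_allowed >= 1) {
		count = 1;
		if (pci_alloc_msi(dev, &count) == 0) {
			sc->flags |= USING_MSI;
			return;
		}
	}
	/* Neither allowed nor available: legacy INTx (rid 0) is used. */
}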
sc->dev = dev;
sc->msi_count = 0;
snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
ADAPTER_LOCK_INIT(sc, sc->lockbuf);
snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
/* find the PCIe link width and set max read request to 4KB */
sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4;
if (sc->link_width < 8 &&
(ai->caps & SUPPORTED_10000baseT_Full)) {
device_printf(sc->dev,
sc->link_width);
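/*
 * The negotiated PCIe width read above lives in bits 9:4 of the Link
 * Status register; PCIEM_LINK_STA_WIDTH is the 0x03f0 mask, so the
 * ">> 4" yields the lane count.  Stand-alone illustration:
 */
#include <stdio.h>
#include <stdint.h>

#define EX_LINK_STA_WIDTH 0x03f0	/* bits 9:4 of PCIe Link Status */

int
main(void)
{
	uint16_t lnk = 0x1081;	/* example register value: x8, 2.5GT/s */

	printf("negotiated width: x%d\n", (lnk & EX_LINK_STA_WIDTH) >> 4);
	return (0);
}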
 * Allocate the registers and make them available to the driver.
sc->regs_rid = PCIR_BAR(0);
if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&sc->regs_rid, RF_ACTIVE)) == NULL) {
sc->bt = rman_get_bustag(sc->regs_res);
sc->bh = rman_get_bushandle(sc->regs_res);
sc->mmio_len = rman_get_size(sc->regs_res);
sc->port[i].adapter = sc;
sc->udbs_rid = PCIR_BAR(2);
sc->udbs_res = NULL;
((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&sc->udbs_rid, RF_ACTIVE)) == NULL)) {
/* Allocate the BAR for doing MSI-X. If it succeeds, try to allocate
 * back to MSI. If that fails, then try falling back to the legacy
sc->msix_regs_rid = 0x20;
(sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;
(error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
sc->msi_count != msi_needed) {
device_printf(dev, "alloc msix failed - "
"will try MSI\n", sc->msi_count,
sc->msi_count = 0;
sc->msix_regs_rid, sc->msix_regs_res);
sc->msix_regs_res = NULL;
sc->flags |= USING_MSIX;
sc->cxgb_intr = cxgb_async_intr;
"using MSI-X interrupts (%u vectors)\n",
sc->msi_count);
if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
sc->msi_count = 1;
if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
device_printf(dev, "alloc msi failed - "
sc->msi_count = 0;
sc->flags |= USING_MSI;
sc->cxgb_intr = t3_intr_msi;
if (sc->msi_count == 0) {
sc->cxgb_intr = t3b_intr;
sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
taskqueue_thread_enqueue, &sc->tq);
if (sc->tq == NULL) {
device_printf(dev, "failed to allocate controller task queue\n");
taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
callout_init(&sc->cxgb_tick_ch, 1);
device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
sc->flags &= ~FW_UPTODATE;
sc->flags |= FW_UPTODATE;
device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
sc->flags &= ~TPS_UPTODATE;
sc->flags |= TPS_UPTODATE;
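/*
 * The FW_UPTODATE/TPS_UPTODATE flags above boil down to a three-part
 * version comparison.  A hedged userspace sketch of that comparison
 * (the driver itself relies on t3_check_fw_version() and friends):
 */
#include <stdio.h>

static int
ex_version_cmp(int maj, int min, int mic, int wmaj, int wmin, int wmic)
{
	if (maj != wmaj)
		return (maj - wmaj);
	if (min != wmin)
		return (min - wmin);
	return (mic - wmic);
}

int
main(void)
{
	/* Flash carries 7.11.0, driver wants 7.12.0: update needed. */
	printf("%s\n", ex_version_cmp(7, 11, 0, 7, 12, 0) < 0 ?
	    "needs update" : "up to date");
	return (0);
}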
for (i = 0; i < (sc)->params.nports; i++) {
if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
device_printf(dev, "failed to add child port\n");
pi = &sc->port[i];
pi->adapter = sc;
pi->nqsets = port_qsets;
pi->first_qset = i*port_qsets;
pi->port_id = i;
pi->tx_chan = i >= ai->nports0;
pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
sc->rxpkt_map[pi->txpkt_intf] = i;
sc->port[i].tx_chan = i >= ai->nports0;
sc->portdev[i] = child;
snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
ai->desc, is_offload(sc) ? "R" : "",
sc->params.vpd.ec, sc->params.vpd.sn);
snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);
device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
sc->cpl_handler[i] = cpl_not_handled;
 * to tear down the structures that were built up in
sc->flags |= CXGB_SHUTDOWN;
bus_detach_children(sc->dev);
for (i = 0; i < (sc)->params.nports; i++) {
if (sc->portdev[i] &&
device_delete_child(sc->dev, sc->portdev[i]) != 0)
device_printf(sc->dev, "failed to delete child port\n");
nqsets += sc->port[i].nqsets;
KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
__func__, sc->open_device_map));
for (i = 0; i < sc->params.nports; i++) {
KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
callout_drain(&sc->cxgb_tick_ch);
callout_drain(&sc->sge_timer_ch);
if (sc->flags & FULL_INIT_DONE) {
sc->flags &= ~FULL_INIT_DONE;
if (sc->flags & (USING_MSI | USING_MSIX)) {
device_printf(sc->dev, "releasing msi message(s)\n");
pci_release_msi(sc->dev);
device_printf(sc->dev, "no msi message to release\n");
if (sc->msix_regs_res != NULL) {
bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
sc->msix_regs_res);
if (sc->tq != NULL) {
taskqueue_free(sc->tq);
sc->tq = NULL;
free(sc->filters, M_DEVBUF);
if (sc->udbs_res != NULL)
bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
sc->udbs_res);
if (sc->regs_res != NULL)
bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
sc->regs_res);
MTX_DESTROY(&sc->mdio_lock);
MTX_DESTROY(&sc->sge.reg_lock);
MTX_DESTROY(&sc->elmer_lock);
 * setup_sge_qsets - configure SGE Tx/Rx/response queues
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
irq_idx = -1;
for (i = 0; i < (sc)->params.nports; i++) {
struct port_info *pi = &sc->port[i];
for (j = 0; j < pi->nqsets; j++, qset_idx++) {
err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
(sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
&sc->params.sge.qset[qset_idx], ntxq, pi);
device_printf(sc->dev,
sc->nqsets = qset_idx;
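/*
 * Sketch of the qset-to-vector wiring implied by "qset_idx + 1" above:
 * with MSI-X, vector 0 (SYS_RES_IRQ rid 1) carries the async/error
 * interrupt and queue set i is serviced by vector i + 1 (rid i + 2),
 * matching the rid arithmetic in cxgb_setup_interrupts() below.
 * Illustrative helper, not driver code.
 */
static int
example_qset_to_msix_rid(int qset_idx)
{
	return (qset_idx + 2);	/* rid 1 is the error/async vector */
}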
if (sc->msix_intr_tag[i] == NULL) {
KASSERT(sc->msix_irq_res[i] == NULL &&
sc->msix_irq_rid[i] == 0,
("%s: half-done interrupt (%d).", __func__, i));
bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
sc->msix_intr_tag[i]);
bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
sc->msix_irq_res[i]);
sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
sc->msix_irq_rid[i] = 0;
if (sc->intr_tag) {
KASSERT(sc->irq_res != NULL,
("%s: half-done interrupt.", __func__));
bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
sc->irq_res);
sc->irq_res = sc->intr_tag = NULL;
sc->irq_rid = 0;
int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);
sc->irq_rid = intr_flag ? 1 : 0;
sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
if (sc->irq_res == NULL) {
device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
intr_flag, sc->irq_rid);
sc->irq_rid = 0;
err = bus_setup_intr(sc->dev, sc->irq_res,
sc->cxgb_intr, sc, &sc->intr_tag);
device_printf(sc->dev,
intr_flag, sc->irq_rid, err);
bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
sc->irq_res);
sc->irq_res = sc->intr_tag = NULL;
sc->irq_rid = 0;
bus_describe_intr(sc->dev, sc->irq_res, sc->intr_tag, "err");
for (i = 0; i < sc->msi_count - 1; i++) {
res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
device_printf(sc->dev, "Cannot allocate interrupt "
err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
NULL, t3_intr_msix, &sc->sge.qs[i], &tag);
device_printf(sc->dev, "Cannot set up interrupt "
bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
sc->msix_irq_rid[i] = rid;
sc->msix_irq_res[i] = res;
sc->msix_intr_tag[i] = tag;
bus_describe_intr(sc->dev, res, tag, "qs%d", i);
desc = p->phy.desc;
device_set_descf(dev, "Port %d %s", p->port_id, desc);
pi->port_cdev = make_dev(&cxgb_cdevsw, if_getdunit(pi->ifp),
UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp));
if (pi->port_cdev == NULL)
pi->port_cdev->si_drv1 = (void *)pi;
sc = p->adapter;
snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
device_get_unit(device_get_parent(dev)), p->port_id);
PORT_LOCK_INIT(p, p->lockbuf);
callout_init(&p->link_check_ch, 1);
TASK_INIT(&p->link_check_task, 0, check_link_status, p);
ifp = p->ifp = if_alloc(IFT_ETHER);
 * Disable TSO on 4-port - it isn't supported by the firmware.
if (sc->params.nports > 2) {
ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
ether_ifattach(ifp, p->hw_addr);
if (sc->params.nports <= 2)
sc = p->adapter;
wakeup(&sc->flags);
mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
if (p->port_cdev != NULL)
destroy_dev(p->port_cdev);
ether_ifdetach(p->ifp);
for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
struct sge_qset *qs = &sc->sge.qs[i];
struct sge_txq *txq = &qs->txq[TXQ_ETH];
callout_drain(&txq->txq_watchdog);
callout_drain(&txq->txq_timer);
if_free(p->ifp);
p->ifp = NULL;
wakeup_one(&sc->flags);
if (sc->flags & FULL_INIT_DONE) {
device_printf(sc->dev, "encountered fatal error, operation suspended\n");
device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
dev = sc->dev;
cfg = &dinfo->cfg;
switch (cfg->hdrtype & PCIM_HDRTYPE) {
dev = sc->dev;
dev = sc->dev;
 * t3_os_link_changed - handle link status changes
 * @fc: the new flow-control setting
 * This is the OS-dependent handler for link status changes. The OS
 * then calls this handler for any OS-specific processing.
struct port_info *pi = &adapter->port[port_id];
if_t ifp = pi->ifp;
/* Reapply mac settings if they were lost due to a reset */
 * t3_os_phymod_changed - handle PHY module changes
 * This is the OS-dependent handler for PHY module changes. It is
 * invoked when a PHY module is removed or inserted for any OS-specific
NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown"
struct port_info *pi = &adap->port[port_id];
int mod = pi->phy.modtype;
if (mod != pi->media.ifm_cur->ifm_data)
if_printf(pi->ifp, "PHY module unplugged\n");
if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
 * as this is called early on in attach by t3_prep_adapter
bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
if_t ifp = p->ifp;
struct cmac *mac = &p->mac;
bcopy(if_getlladdr(ifp), p->hw_addr, ETHER_ADDR_LEN);
t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
t3_mac_set_address(mac, 0, p->hw_addr);
while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
if (!--attempts)
unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
m->m_len = m->m_pkthdr.len = sizeof(*req);
req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req->iff = i;
m->m_len = m->m_pkthdr.len = sizeof(*req);
req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req->params = htonl(V_L2T_W_IDX(i));
m->m_len = m->m_pkthdr.len = sizeof(*req);
req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req->l2t_idx = htonl(V_L2T_W_IDX(i));
m->m_len = m->m_pkthdr.len = sizeof(*greq);
greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
greq->mask = htobe64(1);
 * setup_rss - configure Receive Side Steering (per-queue connection demux)
 * Sets up RSS to distribute packets to multiple receive queues. We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
nq[pi->tx_chan] += pi->nqsets;
adap->rrss_map[i] = 0xff;
if (adap->rrss_map[rspq_map[i]] == 0xff)
adap->rrss_map[rspq_map[i]] = i;
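/*
 * Hedged sketch of the lookup-table fill described in the setup_rss()
 * comment: RSS slots are assigned round-robin over a channel's queue
 * sets, and rrss_map[] (built above) is the reverse map from a response
 * queue back to its first RSS slot.  Stand-alone approximation; the
 * driver splits the real table across the two channels.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_RSS_TABLE_SIZE 64

int
main(void)
{
	uint8_t rspq_map[EX_RSS_TABLE_SIZE];
	int i, nq0 = 4;		/* queue sets behind channel 0 */

	for (i = 0; i < EX_RSS_TABLE_SIZE; i++)
		rspq_map[i] = i % nq0;	/* round-robin demux */
	printf("RSS slot 5 -> queue set %d\n", rspq_map[5]);
	return (0);
}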
req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
req->sched = sched;
req->idx = qidx;
req->min = lo;
req->max = hi;
req->binding = port;
m->m_len = m->m_pkthdr.len = sizeof(*req);
for (i = 0; i < (sc)->params.nports; ++i) {
for (j = 0; j < pi->nqsets; ++j) {
send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
-1, pi->tx_chan);
device_printf(adap->dev,
"could not load TP EEPROM: unable to load %s\n",
len = tpeeprom->datasize - 4;
ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
device_printf(adap->dev,
ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
device_printf(adap->dev,
"Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
device_printf(adap->dev,
device_printf(adap->dev, "could not load TP SRAM\n");
device_printf(adap->dev, "updating TP SRAM\n");
ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
ret = t3_set_proto_sram(adap, tpsram->data);
device_printf(adap->dev, "loading protocol SRAM failed\n");
 * cxgb_up - enable the adapter
 * actions necessary to make an adapter operational, such as completing
unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS;
KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
__func__, sc->open_device_map));
if ((sc->flags & FULL_INIT_DONE) == 0) {
if ((sc->flags & FW_UPTODATE) == 0)
if ((sc->flags & TPS_UPTODATE) == 0)
sc->params.mc5.nservers = 0;
sc->params.mc5.nfilters = mxf;
sc->params.mc5.nfilters = min(nfilters, mxf);
t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
sc->flags |= FULL_INIT_DONE;
if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
sc->flags |= TP_PARITY_INIT;
if (sc->flags & TP_PARITY_INIT) {
if (!(sc->flags & QUEUES_BOUND)) {
sc->flags |= QUEUES_BOUND;
struct adapter *sc = p->adapter;
struct adapter *sc = p->adapter;
if_t ifp = p->ifp;
struct cmac *mac = &p->mac;
if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) {
 * The code that runs during one-time adapter initialization can sleep
 * so it's important not to hold any locks across it.
may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1;
if (sc->open_device_map == 0 && ((rc = cxgb_up(sc)) != 0))
if (isset(&sc->open_device_map, p->port_id) &&
t3_port_intr_enable(sc, p->port_id);
if (!mac->multiport)
t3_link_start(&p->phy, mac, &p->link_config);
for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
struct sge_qset *qs = &sc->sge.qs[i];
struct sge_txq *txq = &qs->txq[TXQ_ETH];
callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs,
txq->txq_watchdog.c_cpu);
setbit(&sc->open_device_map, p->port_id);
callout_reset(&p->link_check_ch,
p->phy.caps & SUPPORTED_LINK_IRQ ? hz * 3 : hz / 4,
wakeup_one(&sc->flags);
struct adapter *sc = p->adapter;
if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) {
wakeup_one(&sc->flags);
struct adapter *sc = pi->adapter;
if_t ifp = pi->ifp;
 * interrupt tasks won't be enqueued. The tick task will continue to
clrbit(&sc->open_device_map, pi->port_id);
t3_port_intr_disable(sc, pi->port_id);
taskqueue_drain(sc->tq, &sc->slow_intr_task);
taskqueue_drain(sc->tq, &sc->tick_task);
callout_drain(&pi->link_check_ch);
taskqueue_drain(sc->tq, &pi->link_check_task);
t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);
t3_set_reg_field(sc, A_XGM_RXFIFO_CFG + pi->mac.offset,
t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);
pi->phy.ops->power_down(&pi->phy, 1);
pi->link_config.link_ok = 0;
t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);
if (sc->open_device_map == 0)
cxgb_down(pi->adapter);
struct adapter *adp = p->adapter;
for (i = 0; i < p->nqsets; i++) {
q = &adp->sge.qs[p->first_qset + i];
q->lro.enabled = (enabled != 0);
struct adapter *sc = p->adapter;
mtu = ifr->ifr_mtu;
flags = p->if_flags;
p->if_flags = if_getflags(ifp);
mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
"tso4 disabled due to -txcsum.\n");
"tso6 disabled due to -txcsum6.\n");
 * sending a TSO request our way, so it's sufficient to toggle
/* Safe to do this even if cxgb_up not called yet */
error = ifmedia_ioctl(ifp, ifr, &p->media, command);
 * Translates phy->modtype to the correct Ethernet media subtype.
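/*
 * A hedged sketch of the translation named in the comment above,
 * assuming the T3 phy_modtype enumeration and the standard if_media
 * subtypes; the driver's actual helper may differ, particularly in the
 * default case chosen here.
 */
static int
example_modtype_to_ifm(int mod)
{
	switch (mod) {
	case phy_modtype_sr:
		return (IFM_10G_SR);
	case phy_modtype_lr:
		return (IFM_10G_LR);
	case phy_modtype_lrm:
		return (IFM_10G_LRM);
	case phy_modtype_twinax:
		return (IFM_10G_TWINAX);
	case phy_modtype_twinax_long:
		return (IFM_10G_TWINAX_LONG);
	default:
		return (IFM_10G_CX4);	/* assumed fallback for no module */
	}
}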
struct cphy *phy = &p->phy;
struct ifmedia *media = &p->media;
int mod = phy->modtype;
if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
if (phy->caps & SUPPORTED_10000baseT_Full)
if (phy->caps & SUPPORTED_1000baseT_Full)
if (phy->caps & SUPPORTED_100baseT_Full)
if (phy->caps & SUPPORTED_10baseT_Full)
} else if (phy->caps & SUPPORTED_TP) {
KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
("%s: unexpected cap 0x%x", __func__, phy->caps));
} else if (phy->caps & SUPPORTED_FIBRE &&
phy->caps & SUPPORTED_10000baseT_Full) {
} else if (phy->caps & SUPPORTED_FIBRE &&
phy->caps & SUPPORTED_1000baseT_Full) {
/* XXX: Lie and claim to be SX, could actually be any 1G-X */
KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
phy->caps));
struct ifmedia_entry *cur = p->media.ifm_cur;
int speed = p->link_config.speed;
if (cur->ifm_data != p->phy.modtype) {
cur = p->media.ifm_cur;
ifmr->ifm_status = IFM_AVALID;
if (!p->link_config.link_ok)
ifmr->ifm_status |= IFM_ACTIVE;
if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));
ifmr->ifm_active = IFM_ETHER | IFM_FDX;
ifmr->ifm_active |= IFM_10G_T;
ifmr->ifm_active |= IFM_1000_T;
ifmr->ifm_active |= IFM_100_TX;
ifmr->ifm_active |= IFM_10_T;
struct adapter *sc = pi->adapter;
struct cmac *mac = &pi->mac;
struct mac_stats *mstats = &mac->stats;
return (mstats->rx_frames);
return (mstats->rx_jabber + mstats->rx_data_errs +
mstats->rx_sequence_errs + mstats->rx_runt +
mstats->rx_too_long + mstats->rx_mac_internal_errs +
mstats->rx_short + mstats->rx_fcs_errs);
return (mstats->tx_frames);
return (mstats->tx_excess_collisions + mstats->tx_underrun +
mstats->tx_len_errs + mstats->tx_mac_internal_errs +
mstats->tx_excess_deferral + mstats->tx_fcs_errs);
return (mstats->tx_total_collisions);
return (mstats->rx_octets);
return (mstats->tx_octets);
return (mstats->rx_mcast_frames);
return (mstats->tx_mcast_frames);
return (mstats->rx_cong_drops);
if (sc->flags & FULL_INIT_DONE) {
for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
drops += sc->sge.qs[i].txq[TXQ_ETH].txq_mr->br_drops;
taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
struct adapter *sc = pi->adapter;
if (!isset(&sc->open_device_map, pi->port_id))
taskqueue_enqueue(sc->tq, &pi->link_check_task);
struct adapter *sc = pi->adapter;
if (!isset(&sc->open_device_map, pi->port_id))
t3_link_changed(sc, pi->port_id);
if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ) ||
pi->link_config.link_ok == 0)
callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi);
if (sc->flags & CXGB_SHUTDOWN)
struct port_info *p = &sc->port[i];
if_t ifp = p->ifp;
if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
!p->link_config.link_ok)
__func__, if_getdrvflags(ifp), sc->open_device_map));
status = t3b2_mac_watchdog_task(&p->mac);
p->mac.stats.num_toggled++;
struct cmac *mac = &p->mac;
t3_link_start(&p->phy, mac, &p->link_config);
t3_port_intr_enable(sc, p->port_id);
p->mac.stats.num_resets++;
if (sc->flags & CXGB_SHUTDOWN)
taskqueue_enqueue(sc->tq, &sc->tick_task);
callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
if (timevalcmp(&tv, &pi->last_refreshed, <))
t3_mac_update_stats(&pi->mac);
getmicrotime(&pi->last_refreshed);
const struct adapter_params *p = &sc->params;
if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
struct sge_qset *qs = &sc->sge.qs[0];
for (i = 0; i < sc->params.nports; i++) {
struct port_info *pi = &sc->port[i];
struct cmac *mac = &pi->mac;
if (!isset(&sc->open_device_map, pi->port_id))
if (mac->multiport)
cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
mac->stats.rx_fifo_ovfl++;
t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
struct adapter *adapter = pi->adapter;
aligned_offset + aligned_len - 4,
(u32 *)&buf[aligned_len - 4]);
for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
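/*
 * The loop above writes back a dword-aligned window: set_eeprom() widens
 * the caller's byte range to 4-byte boundaries and pre-reads the first
 * and last dwords so neighboring bytes survive the read-modify-write.
 * Stand-alone sketch of that alignment arithmetic (illustrative names):
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t offset = 5, len = 7;	/* caller wants bytes 5..11 */
	uint32_t aligned_offset = offset & ~3u;
	uint32_t aligned_len = (len + (offset & 3) + 3) & ~3u;

	/* Prints "window: offset 4, len 8", covering bytes 4..11. */
	printf("window: offset %u, len %u\n", aligned_offset, aligned_len);
	return (0);
}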
struct port_info *pi = dev->si_drv1;
adapter_t *sc = pi->adapter;
printf("user does not have access to privileged ioctls\n");
printf("user does not have access to privileged ioctls\n");
struct cphy *phy = &pi->phy;
struct ch_mii_data *mid = (struct ch_mii_data *)data;
if (!phy->mdio_read)
mmd = mid->phy_id >> 8;
error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
mid->reg_num, &val);
error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
mid->reg_num & 0x1f, &val);
mid->val_out = val;
struct cphy *phy = &pi->phy;
struct ch_mii_data *mid = (struct ch_mii_data *)data;
if (!phy->mdio_write)
mmd = mid->phy_id >> 8;
error = phy->mdio_write(sc, mid->phy_id & 0x1f,
mmd, mid->reg_num, mid->val_in);
error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
mid->reg_num & 0x1f,
mid->val_in);
if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
t3_write_reg(sc, edata->addr, edata->val);
if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
edata->val = t3_read_reg(sc, edata->addr);
mtx_lock_spin(&sc->sge.reg_lock);
switch (ecntxt->cntxt_type) {
error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
ecntxt->data);
error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
ecntxt->data);
error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
ecntxt->data);
error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
ecntxt->data);
mtx_unlock_spin(&sc->sge.reg_lock);
if (edesc->queue_num >= SGE_QSETS * 6)
ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
edesc->queue_num % 6, edesc->idx, edesc->data);
edesc->size = ret;
int q1 = pi->first_qset;
int nqsets = pi->nqsets;
if (t->qset_idx >= nqsets)
i = q1 + t->qset_idx;
q = &sc->params.sge.qset[i];
t->rspq_size = q->rspq_size;
t->txq_size[0] = q->txq_size[0];
t->txq_size[1] = q->txq_size[1];
t->txq_size[2] = q->txq_size[2];
t->fl_size[0] = q->fl_size;
t->fl_size[1] = q->jumbo_size;
t->polling = q->polling;
t->lro = q->lro;
t->intr_lat = q->coalesce_usecs;
t->cong_thres = q->cong_thres;
t->qnum = i;
if ((sc->flags & FULL_INIT_DONE) == 0)
t->vector = 0;
else if (sc->flags & USING_MSIX)
t->vector = rman_get_start(sc->msix_irq_res[i]);
t->vector = rman_get_start(sc->irq_res);
edata->val = pi->nqsets;
 * You're allowed to load a firmware only before FULL_INIT_DONE
 * flexibility to load any firmware (and maybe shoot yourself in
if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
error = copyin(t->buf, fw_data, t->len);
error = -t3_load_fw(sc, fw_data, t->len);
snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
sc->flags |= FW_UPTODATE;
boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
error = copyin(t->buf, boot_data, t->len);
error = -t3_load_boot(sc, boot_data, t->len);
struct tp_params *p = &sc->params.tp;
m->tx_pg_sz = p->tx_pg_size;
m->tx_num_pg = p->tx_num_pgs;
m->rx_pg_sz = p->rx_pg_size;
m->rx_num_pg = p->rx_num_pgs;
m->pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
struct tp_params *p = &sc->params.tp;
if (sc->flags & FULL_INIT_DONE)
if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
!m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
if (!(m->rx_pg_sz & 0x14000))
if (!(m->tx_pg_sz & 0x1554000))
if (m->tx_num_pg == -1)
m->tx_num_pg = p->tx_num_pgs;
if (m->rx_num_pg == -1)
m->rx_num_pg = p->rx_num_pgs;
if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
p->rx_pg_size = m->rx_pg_sz;
p->tx_pg_size = m->tx_pg_sz;
p->rx_num_pgs = m->rx_num_pg;
p->tx_num_pgs = m->tx_num_pg;
if (m->nmtus != NMTUS)
if (m->mtus[0] < 81) /* accommodate SACK */
if (m->mtus[i] < m->mtus[i - 1])
memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
m->nmtus = NMTUS;
 * Use these to avoid modifying len/addr in the return
uint32_t len = t->len, addr = t->addr;
if (!(sc->flags & FULL_INIT_DONE))
if (t->mem_id == MEM_CM)
mem = &sc->cm;
else if (t->mem_id == MEM_PMRX)
mem = &sc->pmrx;
else if (t->mem_id == MEM_PMTX)
mem = &sc->pmtx;
t->version = 3 | (sc->params.rev << 10);
 * want to use huge intermediate buffers.
useraddr = (uint8_t *)t->buf;
return (-error);
len -= chunk;
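/*
 * The len/useraddr bookkeeping above implements a bounded-buffer copy:
 * adapter memory is drained through a small chunk instead of one huge
 * allocation.  Userspace stand-in for the pattern (memcpy plays the
 * role of both the register reads and copyout()):
 */
#include <string.h>
#include <stddef.h>

#define EX_CHUNK 4096

static void
example_copy_chunked(char *dst, const char *src, size_t len)
{
	char buf[EX_CHUNK];
	size_t chunk;

	while (len > 0) {
		chunk = len < EX_CHUNK ? len : EX_CHUNK;
		memcpy(buf, src, chunk);	/* read from adapter memory */
		memcpy(dst, buf, chunk);	/* copyout() in the driver */
		src += chunk;
		dst += chunk;
		len -= chunk;
	}
}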
if (!(sc->flags & FULL_INIT_DONE))
return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
tp = (const struct trace_params *)&t->sip;
if (t->config_tx)
t3_config_trace_filter(sc, tp, 0, t->invert_match,
t->trace_tx);
if (t->config_rx)
t3_config_trace_filter(sc, tp, 1, t->invert_match,
t->trace_rx);
if (sc->open_device_map == 0)
send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
p->binding);
if (regs->len > reglen)
regs->len = reglen;
else if (regs->len < reglen)
error = copyout(buf, regs->data, reglen);
if ((sc->flags & FULL_INIT_DONE) == 0)
return (EAGAIN); /* need TP to be initialized */
if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
!in_range(t->channel, 0, 1) ||
!in_range(t->kbps, 0, 10000000) ||
!in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
!in_range(t->flow_ipg, 0,
if (t->kbps >= 0) {
error = t3_config_sched(sc, t->kbps, t->sched);
return (-error);
if (t->class_ipg >= 0)
t3_set_sched_ipg(sc, t->sched, t->class_ipg);
if (t->flow_ipg >= 0) {
t->flow_ipg *= 1000; /* us -> ns */
t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
if (t->mode >= 0) {
int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
bit, t->mode ? bit : 0);
if (t->channel >= 0)
1 << t->sched, t->channel << t->sched);
if (e->offset & 3 || e->offset >= EEPROMSIZE ||
e->len > EEPROMSIZE || e->offset + e->len > EEPROMSIZE) {
e->magic = EEPROM_MAGIC;
for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);
error = copyout(buf + e->offset, e->data, e->len);
if (!(sc->flags & FULL_INIT_DONE))
t3_mac_update_stats(&pi->mac);
memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
if (la->bufsize < LA_BUFSIZE)
error = -t3_get_up_la(sc, &la->stopped, &la->idx,
&la->bufsize, buf);
error = copyout(buf, la->data, la->bufsize);
if (ioqs->bufsize < IOQS_BUFSIZE)
error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);
ioqs->ioq_rx_enable = *v++;
ioqs->ioq_tx_enable = *v++;
ioqs->ioq_rx_status = *v++;
ioqs->ioq_tx_status = *v++;
error = copyout(v, ioqs->data, ioqs->bufsize);
unsigned int nfilters = sc->params.mc5.nfilters;
if (!(sc->flags & FULL_INIT_DONE))
if (f->filter_id >= nfilters ||
(f->val.dip && f->mask.dip != 0xffffffff) ||
(f->val.sport && f->mask.sport != 0xffff) ||
(f->val.dport && f->mask.dport != 0xffff) ||
(f->val.vlan && f->mask.vlan != 0xfff) ||
(f->val.vlan_prio &&
f->mask.vlan_prio != FILTER_NO_VLAN_PRI) ||
(f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) ||
f->qset >= SGE_QSETS ||
sc->rrss_map[f->qset] >= RSS_TABLE_SIZE)
KASSERT(sc->filters, ("filter table NULL\n"));
p = &sc->filters[f->filter_id];
if (p->locked)
p->sip = f->val.sip;
p->sip_mask = f->mask.sip;
p->dip = f->val.dip;
p->sport = f->val.sport;
p->dport = f->val.dport;
p->vlan = f->mask.vlan ? f->val.vlan : 0xfff;
p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) :
p->mac_hit = f->mac_hit;
p->mac_vld = f->mac_addr_idx != 0xffff;
p->mac_idx = f->mac_addr_idx;
p->pkt_type = f->proto;
p->report_filter_id = f->want_filter_id;
p->pass = f->pass;
p->rss = f->rss;
p->qset = f->qset;
error = set_filter(sc, f->filter_id, p);
p->valid = 1;
unsigned int nfilters = sc->params.mc5.nfilters;
if (!(sc->flags & FULL_INIT_DONE))
if (nfilters == 0 || sc->filters == NULL)
if (f->filter_id >= nfilters)
p = &sc->filters[f->filter_id];
if (p->locked)
if (!p->valid)
p->sip = p->sip_mask = 0xffffffff;
p->vlan = 0xfff;
p->vlan_prio = FILTER_NO_VLAN_PRI;
p->pkt_type = 1;
error = set_filter(sc, f->filter_id, p);
unsigned int i, nfilters = sc->params.mc5.nfilters;
if (!(sc->flags & FULL_INIT_DONE))
if (nfilters == 0 || sc->filters == NULL)
i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1;
p = &sc->filters[i];
if (!p->valid)
f->filter_id = i;
f->val.sip = p->sip;
f->mask.sip = p->sip_mask;
f->val.dip = p->dip;
f->mask.dip = p->dip ? 0xffffffff : 0;
f->val.sport = p->sport;
f->mask.sport = p->sport ? 0xffff : 0;
f->val.dport = p->dport;
f->mask.dport = p->dport ? 0xffff : 0;
f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan;
f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff;
f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
0 : p->vlan_prio;
f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
f->mac_hit = p->mac_hit;
f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff;
f->proto = p->pkt_type;
f->want_filter_id = p->report_filter_id;
f->pass = p->pass;
f->rss = p->rss;
f->qset = p->qset;
f->filter_id = 0xffffffff;
regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
 * We skip the MAC statistics registers because they are clear-on-read.
 * Also reading multi-register stats would need to synchronize with the
 * periodic mac stats accumulation. Hard to justify the complexity.
unsigned int nfilters = sc->params.mc5.nfilters;
sc->filters = p;
p = &sc->filters[nfilters - 1];
p->vlan = 0xfff;
p->vlan_prio = FILTER_NO_VLAN_PRI;
p->pass = p->rss = p->valid = p->locked = 1;
unsigned int nfilters = sc->params.mc5.nfilters;
if (!sc->filters)
if (sc->filters[i].locked)
rc = set_filter(sc, i, &sc->filters[i]);
id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes -
sc->params.mc5.nfilters;
m->m_len = m->m_pkthdr.len = len;
wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC);
txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8));
oreq->local_port = htons(f->dport);
oreq->peer_port = htons(f->sport);
oreq->local_ip = htonl(f->dip);
oreq->peer_ip = htonl(f->sip);
oreq->peer_netmask = htonl(f->sip_mask);
oreq->opt0h = 0;
oreq->opt0l = htonl(F_NO_OFFLOAD);
oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) |
V_VLAN_PRI(f->vlan_prio >> 1) |
V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) |
V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) |
V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4)));
(f->report_filter_id << 15) | (1 << 23) |
((u64)f->pass << 35) | ((u64)!f->rss << 36));
if (f->pass && !f->rss) {
m->m_len = m->m_pkthdr.len = len;
sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
(u64)sc->rrss_map[f->qset] << 19);
req->reply = V_NO_REPLY(1);
req->cpu_idx = 0;
req->word = htons(word);
req->mask = htobe64(mask);
req->val = htobe64(val);
txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8));
 * in - the only guarantee is that sc->sc_lock is a valid lock.
struct adapter *sc = pi->adapter;
if (!(sc->flags & FULL_INIT_DONE)) {
if (isset(&sc->offload_map, pi->port_id))
if (!(sc->flags & TOM_INIT_DONE)) {
"to enable TOE on a cxgb interface.\n");
KASSERT(sc->tom_softc != NULL,
KASSERT(sc->flags & TOM_INIT_DONE,
setbit(&sc->offload_map, pi->port_id);
 * XXX: Temporary code to allow iWARP to be enabled when TOE is
 * enabled on any port. Need to figure out how to enable,
if (!isset(&sc->offload_map, MAX_NPORTS) &&
setbit(&sc->offload_map, MAX_NPORTS);
if (!isset(&sc->offload_map, pi->port_id))
KASSERT(sc->flags & TOM_INIT_DONE,
clrbit(&sc->offload_map, pi->port_id);
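/*
 * open_device_map and offload_map above are per-port bitmaps driven by
 * the setbit()/clrbit()/isset() macros.  Minimal userspace equivalent
 * of that idiom:
 */
#include <stdio.h>

int
main(void)
{
	unsigned int map = 0;
	int port_id = 1;

	map |= 1u << port_id;				/* setbit */
	printf("port open: %d\n", (map >> port_id) & 1);	/* isset */
	map &= ~(1u << port_id);			/* clrbit */
	printf("port open: %d\n", (map >> port_id) & 1);
	return (0);
}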
 * Add an upper layer driver to the global list.
if (u->uld_id == ui->uld_id) {
ui->refcount = 0;
if (ui->refcount > 0) {
if (ui->uld_id == id) {
rc = ui->activate(sc);
ui->refcount++;
if (ui->uld_id == id) {
rc = ui->deactivate(sc);
ui->refcount--;
loc = (uintptr_t *) &sc->cpl_handler[opcode];
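/*
 * t3_register_cpl_handler() publishes a handler into the per-opcode
 * table that attach initialized to cpl_not_handled.  Hedged userspace
 * sketch of the dispatch-table pattern (the driver stores the pointer
 * through a release-ordered atomic; that detail is omitted here):
 */
#include <stdio.h>

#define EX_NUM_CPL_CMDS 256

typedef int (*ex_cpl_handler_t)(unsigned int opcode);

static int
ex_cpl_not_handled(unsigned int opcode)
{
	printf("unexpected CPL opcode %#x\n", opcode);
	return (0);
}

static ex_cpl_handler_t ex_cpl_handler[EX_NUM_CPL_CMDS];

int
main(void)
{
	int i;

	for (i = 0; i < EX_NUM_CPL_CMDS; i++)
		ex_cpl_handler[i] = ex_cpl_not_handled;	/* default, as in attach */
	ex_cpl_handler[0x20](0x20);	/* dispatch by wire opcode */
	return (0);
}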
adap = pi->adapter;
*nrxr = adap->nqsets;
*ncl = adap->sge.qs[0].fl[1].size;
*clsize = adap->sge.qs[0].fl[1].buf_size;
for (i = 0; i < pi->adapter->nqsets; i++) {
qs = &pi->adapter->sge.qs[i];
/* Need to reinit after debugnet_mbuf_start(). */
qs->fl[0].zone = zone_pack;
qs->fl[1].zone = zone_clust;
qs->lro.enabled = 0;
qs = &pi->adapter->sge.qs[pi->first_qset];
adap = pi->adapter;
for (i = 0; i < adap->nqsets; i++)
(void)cxgb_debugnet_poll_rx(adap, &adap->sge.qs[i]);
(void)cxgb_debugnet_poll_tx(&adap->sge.qs[pi->first_qset]);