1 /*- 2 * Copyright (c) 2011 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 33 #include <sys/param.h> 34 #include <sys/conf.h> 35 #include <sys/priv.h> 36 #include <sys/kernel.h> 37 #include <sys/bus.h> 38 #include <sys/module.h> 39 #include <sys/malloc.h> 40 #include <sys/queue.h> 41 #include <sys/taskqueue.h> 42 #include <sys/pciio.h> 43 #include <dev/pci/pcireg.h> 44 #include <dev/pci/pcivar.h> 45 #include <dev/pci/pci_private.h> 46 #include <sys/firmware.h> 47 #include <sys/sbuf.h> 48 #include <sys/smp.h> 49 #include <sys/socket.h> 50 #include <sys/sockio.h> 51 #include <sys/sysctl.h> 52 #include <net/ethernet.h> 53 #include <net/if.h> 54 #include <net/if_types.h> 55 #include <net/if_dl.h> 56 57 #include "common/t4_hw.h" 58 #include "common/common.h" 59 #include "common/t4_regs.h" 60 #include "common/t4_regs_values.h" 61 #include "common/t4fw_interface.h" 62 #include "t4_ioctl.h" 63 64 /* T4 bus driver interface */ 65 static int t4_probe(device_t); 66 static int t4_attach(device_t); 67 static int t4_detach(device_t); 68 static device_method_t t4_methods[] = { 69 DEVMETHOD(device_probe, t4_probe), 70 DEVMETHOD(device_attach, t4_attach), 71 DEVMETHOD(device_detach, t4_detach), 72 73 /* bus interface */ 74 DEVMETHOD(bus_print_child, bus_generic_print_child), 75 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 76 77 { 0, 0 } 78 }; 79 static driver_t t4_driver = { 80 "t4nex", 81 t4_methods, 82 sizeof(struct adapter) 83 }; 84 85 86 /* T4 port (cxgbe) interface */ 87 static int cxgbe_probe(device_t); 88 static int cxgbe_attach(device_t); 89 static int cxgbe_detach(device_t); 90 static device_method_t cxgbe_methods[] = { 91 DEVMETHOD(device_probe, cxgbe_probe), 92 DEVMETHOD(device_attach, cxgbe_attach), 93 DEVMETHOD(device_detach, cxgbe_detach), 94 { 0, 0 } 95 }; 96 static driver_t cxgbe_driver = { 97 "cxgbe", 98 cxgbe_methods, 99 sizeof(struct port_info) 100 }; 101 102 static d_ioctl_t t4_ioctl; 103 static d_open_t t4_open; 104 static d_close_t t4_close; 105 106 static struct cdevsw t4_cdevsw = { 107 .d_version = D_VERSION, 108 .d_flags = 0, 109 .d_open = t4_open, 110 .d_close = t4_close, 111 .d_ioctl = t4_ioctl, 112 .d_name = "t4nex", 113 }; 114 115 /* ifnet + media interface */ 116 static void cxgbe_init(void *); 117 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t); 118 static void cxgbe_start(struct ifnet *); 119 static int cxgbe_transmit(struct ifnet *, struct mbuf *); 120 static void cxgbe_qflush(struct ifnet *); 121 static int cxgbe_media_change(struct ifnet *); 122 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *); 123 124 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services"); 125 126 /* 127 * Tunables. 128 */ 129 SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe driver parameters"); 130 131 static int force_firmware_install = 0; 132 TUNABLE_INT("hw.cxgbe.force_firmware_install", &force_firmware_install); 133 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, force_firmware_install, CTLFLAG_RDTUN, 134 &force_firmware_install, 0, "install firmware on every attach."); 135 136 /* 137 * Holdoff timer and packet counter values. 138 */ 139 static unsigned int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200}; 140 static unsigned int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */ 141 142 /* 143 * Max # of tx and rx queues to use for each 10G and 1G port. 144 */ 145 static unsigned int max_ntxq_10g = 8; 146 TUNABLE_INT("hw.cxgbe.max_ntxq_10G_port", &max_ntxq_10g); 147 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_10G_port, CTLFLAG_RDTUN, 148 &max_ntxq_10g, 0, "maximum number of tx queues per 10G port."); 149 150 static unsigned int max_nrxq_10g = 8; 151 TUNABLE_INT("hw.cxgbe.max_nrxq_10G_port", &max_nrxq_10g); 152 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_10G_port, CTLFLAG_RDTUN, 153 &max_nrxq_10g, 0, "maximum number of rxq's (per 10G port)."); 154 155 static unsigned int max_ntxq_1g = 2; 156 TUNABLE_INT("hw.cxgbe.max_ntxq_1G_port", &max_ntxq_1g); 157 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_1G_port, CTLFLAG_RDTUN, 158 &max_ntxq_1g, 0, "maximum number of tx queues per 1G port."); 159 160 static unsigned int max_nrxq_1g = 2; 161 TUNABLE_INT("hw.cxgbe.max_nrxq_1G_port", &max_nrxq_1g); 162 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_1G_port, CTLFLAG_RDTUN, 163 &max_nrxq_1g, 0, "maximum number of rxq's (per 1G port)."); 164 165 /* 166 * Holdoff parameters for 10G and 1G ports. 167 */ 168 static unsigned int tmr_idx_10g = 1; 169 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &tmr_idx_10g); 170 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_10G, CTLFLAG_RDTUN, 171 &tmr_idx_10g, 0, 172 "default timer index for interrupt holdoff (10G ports)."); 173 174 static int pktc_idx_10g = 2; 175 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &pktc_idx_10g); 176 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_10G, CTLFLAG_RDTUN, 177 &pktc_idx_10g, 0, 178 "default pkt counter index for interrupt holdoff (10G ports)."); 179 180 static unsigned int tmr_idx_1g = 1; 181 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &tmr_idx_1g); 182 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_1G, CTLFLAG_RDTUN, 183 &tmr_idx_1g, 0, 184 "default timer index for interrupt holdoff (1G ports)."); 185 186 static int pktc_idx_1g = 2; 187 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &pktc_idx_1g); 188 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_1G, CTLFLAG_RDTUN, 189 &pktc_idx_1g, 0, 190 "default pkt counter index for interrupt holdoff (1G ports)."); 191 192 /* 193 * Size (# of entries) of each tx and rx queue. 194 */ 195 static unsigned int qsize_txq = TX_EQ_QSIZE; 196 TUNABLE_INT("hw.cxgbe.qsize_txq", &qsize_txq); 197 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN, 198 &qsize_txq, 0, "default queue size of NIC tx queues."); 199 200 static unsigned int qsize_rxq = RX_IQ_QSIZE; 201 TUNABLE_INT("hw.cxgbe.qsize_rxq", &qsize_rxq); 202 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN, 203 &qsize_rxq, 0, "default queue size of NIC rx queues."); 204 205 /* 206 * Interrupt types allowed. 207 */ 208 static int intr_types = INTR_MSIX | INTR_MSI | INTR_INTX; 209 TUNABLE_INT("hw.cxgbe.interrupt_types", &intr_types); 210 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &intr_types, 0, 211 "interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively)"); 212 213 /* 214 * Force the driver to use interrupt forwarding. 215 */ 216 static int intr_fwd = 0; 217 TUNABLE_INT("hw.cxgbe.interrupt_forwarding", &intr_fwd); 218 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_forwarding, CTLFLAG_RDTUN, 219 &intr_fwd, 0, "always use forwarded interrupts"); 220 221 struct intrs_and_queues { 222 int intr_type; /* INTx, MSI, or MSI-X */ 223 int nirq; /* Number of vectors */ 224 int intr_fwd; /* Interrupts forwarded */ 225 int ntxq10g; /* # of NIC txq's for each 10G port */ 226 int nrxq10g; /* # of NIC rxq's for each 10G port */ 227 int ntxq1g; /* # of NIC txq's for each 1G port */ 228 int nrxq1g; /* # of NIC rxq's for each 1G port */ 229 }; 230 231 enum { 232 MEMWIN0_APERTURE = 2048, 233 MEMWIN0_BASE = 0x1b800, 234 MEMWIN1_APERTURE = 32768, 235 MEMWIN1_BASE = 0x28000, 236 MEMWIN2_APERTURE = 65536, 237 MEMWIN2_BASE = 0x30000, 238 }; 239 240 enum { 241 XGMAC_MTU = (1 << 0), 242 XGMAC_PROMISC = (1 << 1), 243 XGMAC_ALLMULTI = (1 << 2), 244 XGMAC_VLANEX = (1 << 3), 245 XGMAC_UCADDR = (1 << 4), 246 XGMAC_MCADDRS = (1 << 5), 247 248 XGMAC_ALL = 0xffff 249 }; 250 251 static int map_bars(struct adapter *); 252 static void setup_memwin(struct adapter *); 253 static int cfg_itype_and_nqueues(struct adapter *, int, int, 254 struct intrs_and_queues *); 255 static int prep_firmware(struct adapter *); 256 static int get_capabilities(struct adapter *, struct fw_caps_config_cmd *); 257 static int get_params(struct adapter *, struct fw_caps_config_cmd *); 258 static void t4_set_desc(struct adapter *); 259 static void build_medialist(struct port_info *); 260 static int update_mac_settings(struct port_info *, int); 261 static int cxgbe_init_locked(struct port_info *); 262 static int cxgbe_init_synchronized(struct port_info *); 263 static int cxgbe_uninit_locked(struct port_info *); 264 static int cxgbe_uninit_synchronized(struct port_info *); 265 static int first_port_up(struct adapter *); 266 static int last_port_down(struct adapter *); 267 static int t4_alloc_irq(struct adapter *, struct irq *, int rid, 268 iq_intr_handler_t *, void *, char *); 269 static int t4_free_irq(struct adapter *, struct irq *); 270 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int, 271 unsigned int); 272 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *); 273 static void cxgbe_tick(void *); 274 static int t4_sysctls(struct adapter *); 275 static int cxgbe_sysctls(struct port_info *); 276 static int sysctl_int_array(SYSCTL_HANDLER_ARGS); 277 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS); 278 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS); 279 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS); 280 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS); 281 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS); 282 static inline void txq_start(struct ifnet *, struct sge_txq *); 283 static int t4_mod_event(module_t, int, void *); 284 285 struct t4_pciids { 286 uint16_t device; 287 uint8_t mpf; 288 char *desc; 289 } t4_pciids[] = { 290 {0xa000, 0, "Chelsio Terminator 4 FPGA"}, 291 {0x4400, 4, "Chelsio T440-dbg"}, 292 {0x4401, 4, "Chelsio T420-CR"}, 293 {0x4402, 4, "Chelsio T422-CR"}, 294 {0x4403, 4, "Chelsio T440-CR"}, 295 {0x4404, 4, "Chelsio T420-BCH"}, 296 {0x4405, 4, "Chelsio T440-BCH"}, 297 {0x4406, 4, "Chelsio T440-CH"}, 298 {0x4407, 4, "Chelsio T420-SO"}, 299 {0x4408, 4, "Chelsio T420-CX"}, 300 {0x4409, 4, "Chelsio T420-BT"}, 301 {0x440a, 4, "Chelsio T404-BT"}, 302 }; 303 304 static int 305 t4_probe(device_t dev) 306 { 307 int i; 308 uint16_t v = pci_get_vendor(dev); 309 uint16_t d = pci_get_device(dev); 310 311 if (v != PCI_VENDOR_ID_CHELSIO) 312 return (ENXIO); 313 314 for (i = 0; i < ARRAY_SIZE(t4_pciids); i++) { 315 if (d == t4_pciids[i].device && 316 pci_get_function(dev) == t4_pciids[i].mpf) { 317 device_set_desc(dev, t4_pciids[i].desc); 318 return (BUS_PROBE_DEFAULT); 319 } 320 } 321 322 return (ENXIO); 323 } 324 325 static int 326 t4_attach(device_t dev) 327 { 328 struct adapter *sc; 329 int rc = 0, i, n10g, n1g, rqidx, tqidx; 330 struct fw_caps_config_cmd caps; 331 uint32_t p, v; 332 struct intrs_and_queues iaq; 333 struct sge *s; 334 335 sc = device_get_softc(dev); 336 sc->dev = dev; 337 sc->pf = pci_get_function(dev); 338 sc->mbox = sc->pf; 339 340 pci_enable_busmaster(dev); 341 pci_set_max_read_req(dev, 4096); 342 snprintf(sc->lockname, sizeof(sc->lockname), "%s", 343 device_get_nameunit(dev)); 344 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF); 345 346 rc = map_bars(sc); 347 if (rc != 0) 348 goto done; /* error message displayed already */ 349 350 memset(sc->chan_map, 0xff, sizeof(sc->chan_map)); 351 352 /* Prepare the adapter for operation */ 353 rc = -t4_prep_adapter(sc); 354 if (rc != 0) { 355 device_printf(dev, "failed to prepare adapter: %d.\n", rc); 356 goto done; 357 } 358 359 /* Do this really early */ 360 sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT, 361 GID_WHEEL, 0600, "%s", device_get_nameunit(dev)); 362 sc->cdev->si_drv1 = sc; 363 364 /* Prepare the firmware for operation */ 365 rc = prep_firmware(sc); 366 if (rc != 0) 367 goto done; /* error message displayed already */ 368 369 /* Get device capabilities and select which ones we'll use */ 370 rc = get_capabilities(sc, &caps); 371 if (rc != 0) { 372 device_printf(dev, 373 "failed to initialize adapter capabilities: %d.\n", rc); 374 goto done; 375 } 376 377 /* Choose the global RSS mode. */ 378 rc = -t4_config_glbl_rss(sc, sc->mbox, 379 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, 380 F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | 381 F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ | 382 F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); 383 if (rc != 0) { 384 device_printf(dev, 385 "failed to select global RSS mode: %d.\n", rc); 386 goto done; 387 } 388 389 /* These are total (sum of all ports) limits for a bus driver */ 390 rc = -t4_cfg_pfvf(sc, sc->mbox, sc->pf, 0, 391 64, /* max # of egress queues */ 392 64, /* max # of egress Ethernet or control queues */ 393 64, /* max # of ingress queues with fl/interrupt */ 394 0, /* max # of ingress queues without interrupt */ 395 0, /* PCIe traffic class */ 396 4, /* max # of virtual interfaces */ 397 M_FW_PFVF_CMD_CMASK, M_FW_PFVF_CMD_PMASK, 16, 398 FW_CMD_CAP_PF, FW_CMD_CAP_PF); 399 if (rc != 0) { 400 device_printf(dev, 401 "failed to configure pf/vf resources: %d.\n", rc); 402 goto done; 403 } 404 405 /* Need this before sge_init */ 406 for (i = 0; i < SGE_NTIMERS; i++) 407 sc->sge.timer_val[i] = min(intr_timer[i], 200U); 408 for (i = 0; i < SGE_NCOUNTERS; i++) 409 sc->sge.counter_val[i] = min(intr_pktcount[i], M_THRESHOLD_0); 410 411 /* Also need the cooked value of cclk before sge_init */ 412 p = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 413 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); 414 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &p, &v); 415 if (rc != 0) { 416 device_printf(sc->dev, 417 "failed to obtain core clock value: %d.\n", rc); 418 goto done; 419 } 420 sc->params.vpd.cclk = v; 421 422 t4_sge_init(sc); 423 424 /* 425 * XXX: This is the place to call t4_set_filter_mode() 426 */ 427 428 /* get basic stuff going */ 429 rc = -t4_early_init(sc, sc->mbox); 430 if (rc != 0) { 431 device_printf(dev, "early init failed: %d.\n", rc); 432 goto done; 433 } 434 435 rc = get_params(sc, &caps); 436 if (rc != 0) 437 goto done; /* error message displayed already */ 438 439 /* These are finalized by FW initialization, load their values now */ 440 v = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 441 sc->params.tp.tre = G_TIMERRESOLUTION(v); 442 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v); 443 t4_read_mtu_tbl(sc, sc->params.mtus, NULL); 444 445 /* tweak some settings */ 446 t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) | 447 V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) | 448 V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9)); 449 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); 450 451 setup_memwin(sc); 452 453 rc = t4_create_dma_tag(sc); 454 if (rc != 0) 455 goto done; /* error message displayed already */ 456 457 /* 458 * First pass over all the ports - allocate VIs and initialize some 459 * basic parameters like mac address, port type, etc. We also figure 460 * out whether a port is 10G or 1G and use that information when 461 * calculating how many interrupts to attempt to allocate. 462 */ 463 n10g = n1g = 0; 464 for_each_port(sc, i) { 465 struct port_info *pi; 466 467 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK); 468 sc->port[i] = pi; 469 470 /* These must be set before t4_port_init */ 471 pi->adapter = sc; 472 pi->port_id = i; 473 474 /* Allocate the vi and initialize parameters like mac addr */ 475 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0); 476 if (rc != 0) { 477 device_printf(dev, "unable to initialize port %d: %d\n", 478 i, rc); 479 free(pi, M_CXGBE); 480 sc->port[i] = NULL; /* indicates init failed */ 481 continue; 482 } 483 484 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d", 485 device_get_nameunit(dev), i); 486 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF); 487 488 if (is_10G_port(pi)) { 489 n10g++; 490 pi->tmr_idx = tmr_idx_10g; 491 pi->pktc_idx = pktc_idx_10g; 492 } else { 493 n1g++; 494 pi->tmr_idx = tmr_idx_1g; 495 pi->pktc_idx = pktc_idx_1g; 496 } 497 498 pi->xact_addr_filt = -1; 499 500 pi->qsize_rxq = max(qsize_rxq, 128); 501 while (pi->qsize_rxq & 7) 502 pi->qsize_rxq++; 503 pi->qsize_txq = max(qsize_txq, 128); 504 505 if (pi->qsize_rxq != qsize_rxq) { 506 device_printf(dev, 507 "using %d instead of %d as the rx queue size.\n", 508 pi->qsize_rxq, qsize_rxq); 509 } 510 if (pi->qsize_txq != qsize_txq) { 511 device_printf(dev, 512 "using %d instead of %d as the tx queue size.\n", 513 pi->qsize_txq, qsize_txq); 514 } 515 516 pi->dev = device_add_child(dev, "cxgbe", -1); 517 if (pi->dev == NULL) { 518 device_printf(dev, 519 "failed to add device for port %d.\n", i); 520 rc = ENXIO; 521 goto done; 522 } 523 device_set_softc(pi->dev, pi); 524 525 setbit(&sc->registered_device_map, i); 526 } 527 528 if (sc->registered_device_map == 0) { 529 device_printf(dev, "no usable ports\n"); 530 rc = ENXIO; 531 goto done; 532 } 533 534 /* 535 * Interrupt type, # of interrupts, # of rx/tx queues, etc. 536 */ 537 rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq); 538 if (rc != 0) 539 goto done; /* error message displayed already */ 540 541 sc->intr_type = iaq.intr_type; 542 sc->intr_count = iaq.nirq; 543 544 s = &sc->sge; 545 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g; 546 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g; 547 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */ 548 s->neq += NCHAN; /* control queues, 1 per hw channel */ 549 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */ 550 if (iaq.intr_fwd) { 551 sc->flags |= INTR_FWD; 552 s->niq += NFIQ(sc); /* forwarded interrupt queues */ 553 s->fiq = malloc(NFIQ(sc) * sizeof(struct sge_iq), M_CXGBE, 554 M_ZERO | M_WAITOK); 555 } 556 s->ctrlq = malloc(NCHAN * sizeof(struct sge_ctrlq), M_CXGBE, 557 M_ZERO | M_WAITOK); 558 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE, 559 M_ZERO | M_WAITOK); 560 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE, 561 M_ZERO | M_WAITOK); 562 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE, 563 M_ZERO | M_WAITOK); 564 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE, 565 M_ZERO | M_WAITOK); 566 567 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE, 568 M_ZERO | M_WAITOK); 569 570 t4_sysctls(sc); 571 572 /* 573 * Second pass over the ports. This time we know the number of rx and 574 * tx queues that each port should get. 575 */ 576 rqidx = tqidx = 0; 577 for_each_port(sc, i) { 578 struct port_info *pi = sc->port[i]; 579 580 if (pi == NULL) 581 continue; 582 583 pi->first_rxq = rqidx; 584 pi->nrxq = is_10G_port(pi) ? iaq.nrxq10g : iaq.nrxq1g; 585 586 pi->first_txq = tqidx; 587 pi->ntxq = is_10G_port(pi) ? iaq.ntxq10g : iaq.ntxq1g; 588 589 rqidx += pi->nrxq; 590 tqidx += pi->ntxq; 591 } 592 593 rc = bus_generic_attach(dev); 594 if (rc != 0) { 595 device_printf(dev, 596 "failed to attach all child ports: %d\n", rc); 597 goto done; 598 } 599 600 #ifdef INVARIANTS 601 device_printf(dev, 602 "%p, %d ports (0x%x), %d intr_type, %d intr_count\n", 603 sc, sc->params.nports, sc->params.portvec, 604 sc->intr_type, sc->intr_count); 605 #endif 606 t4_set_desc(sc); 607 608 done: 609 if (rc != 0) 610 t4_detach(dev); 611 612 return (rc); 613 } 614 615 /* 616 * Idempotent 617 */ 618 static int 619 t4_detach(device_t dev) 620 { 621 struct adapter *sc; 622 struct port_info *pi; 623 int i; 624 625 sc = device_get_softc(dev); 626 627 if (sc->cdev) 628 destroy_dev(sc->cdev); 629 630 bus_generic_detach(dev); 631 for (i = 0; i < MAX_NPORTS; i++) { 632 pi = sc->port[i]; 633 if (pi) { 634 t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid); 635 if (pi->dev) 636 device_delete_child(dev, pi->dev); 637 638 mtx_destroy(&pi->pi_lock); 639 free(pi, M_CXGBE); 640 } 641 } 642 643 if (sc->flags & FW_OK) 644 t4_fw_bye(sc, sc->mbox); 645 646 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX) 647 pci_release_msi(dev); 648 649 if (sc->regs_res) 650 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid, 651 sc->regs_res); 652 653 if (sc->msix_res) 654 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid, 655 sc->msix_res); 656 657 free(sc->irq, M_CXGBE); 658 free(sc->sge.rxq, M_CXGBE); 659 free(sc->sge.txq, M_CXGBE); 660 free(sc->sge.ctrlq, M_CXGBE); 661 free(sc->sge.fiq, M_CXGBE); 662 free(sc->sge.iqmap, M_CXGBE); 663 free(sc->sge.eqmap, M_CXGBE); 664 t4_destroy_dma_tag(sc); 665 mtx_destroy(&sc->sc_lock); 666 667 bzero(sc, sizeof(*sc)); 668 669 return (0); 670 } 671 672 673 static int 674 cxgbe_probe(device_t dev) 675 { 676 char buf[128]; 677 struct port_info *pi = device_get_softc(dev); 678 679 snprintf(buf, sizeof(buf), "Port %d", pi->port_id); 680 device_set_desc_copy(dev, buf); 681 682 return (BUS_PROBE_DEFAULT); 683 } 684 685 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \ 686 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \ 687 IFCAP_VLAN_HWTSO) 688 #define T4_CAP_ENABLE (T4_CAP & ~IFCAP_TSO6) 689 690 static int 691 cxgbe_attach(device_t dev) 692 { 693 struct port_info *pi = device_get_softc(dev); 694 struct ifnet *ifp; 695 696 /* Allocate an ifnet and set it up */ 697 ifp = if_alloc(IFT_ETHER); 698 if (ifp == NULL) { 699 device_printf(dev, "Cannot allocate ifnet\n"); 700 return (ENOMEM); 701 } 702 pi->ifp = ifp; 703 ifp->if_softc = pi; 704 705 callout_init(&pi->tick, CALLOUT_MPSAFE); 706 pi->tq = taskqueue_create("cxgbe_taskq", M_NOWAIT, 707 taskqueue_thread_enqueue, &pi->tq); 708 if (pi->tq == NULL) { 709 device_printf(dev, "failed to allocate port task queue\n"); 710 if_free(pi->ifp); 711 return (ENOMEM); 712 } 713 taskqueue_start_threads(&pi->tq, 1, PI_NET, "%s taskq", 714 device_get_nameunit(dev)); 715 716 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 717 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 718 719 ifp->if_init = cxgbe_init; 720 ifp->if_ioctl = cxgbe_ioctl; 721 ifp->if_start = cxgbe_start; 722 ifp->if_transmit = cxgbe_transmit; 723 ifp->if_qflush = cxgbe_qflush; 724 725 ifp->if_snd.ifq_drv_maxlen = 1024; 726 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 727 IFQ_SET_READY(&ifp->if_snd); 728 729 ifp->if_capabilities = T4_CAP; 730 ifp->if_capenable = T4_CAP_ENABLE; 731 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO; 732 733 /* Initialize ifmedia for this port */ 734 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change, 735 cxgbe_media_status); 736 build_medialist(pi); 737 738 ether_ifattach(ifp, pi->hw_addr); 739 740 #ifdef INVARIANTS 741 device_printf(dev, "%p, %d txq, %d rxq\n", pi, pi->ntxq, pi->nrxq); 742 #endif 743 744 cxgbe_sysctls(pi); 745 746 return (0); 747 } 748 749 static int 750 cxgbe_detach(device_t dev) 751 { 752 struct port_info *pi = device_get_softc(dev); 753 struct adapter *sc = pi->adapter; 754 int rc; 755 756 /* Tell if_ioctl and if_init that the port is going away */ 757 ADAPTER_LOCK(sc); 758 SET_DOOMED(pi); 759 wakeup(&sc->flags); 760 while (IS_BUSY(sc)) 761 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); 762 SET_BUSY(sc); 763 ADAPTER_UNLOCK(sc); 764 765 rc = cxgbe_uninit_synchronized(pi); 766 if (rc != 0) 767 device_printf(dev, "port uninit failed: %d.\n", rc); 768 769 taskqueue_free(pi->tq); 770 771 ifmedia_removeall(&pi->media); 772 ether_ifdetach(pi->ifp); 773 if_free(pi->ifp); 774 775 ADAPTER_LOCK(sc); 776 CLR_BUSY(sc); 777 wakeup_one(&sc->flags); 778 ADAPTER_UNLOCK(sc); 779 780 return (0); 781 } 782 783 static void 784 cxgbe_init(void *arg) 785 { 786 struct port_info *pi = arg; 787 struct adapter *sc = pi->adapter; 788 789 ADAPTER_LOCK(sc); 790 cxgbe_init_locked(pi); /* releases adapter lock */ 791 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 792 } 793 794 static int 795 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data) 796 { 797 int rc = 0, mtu, flags; 798 struct port_info *pi = ifp->if_softc; 799 struct adapter *sc = pi->adapter; 800 struct ifreq *ifr = (struct ifreq *)data; 801 uint32_t mask; 802 803 switch (cmd) { 804 case SIOCSIFMTU: 805 ADAPTER_LOCK(sc); 806 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 807 if (rc) { 808 fail: 809 ADAPTER_UNLOCK(sc); 810 return (rc); 811 } 812 813 mtu = ifr->ifr_mtu; 814 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) { 815 rc = EINVAL; 816 } else { 817 ifp->if_mtu = mtu; 818 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 819 t4_update_fl_bufsize(ifp); 820 PORT_LOCK(pi); 821 rc = update_mac_settings(pi, XGMAC_MTU); 822 PORT_UNLOCK(pi); 823 } 824 } 825 ADAPTER_UNLOCK(sc); 826 break; 827 828 case SIOCSIFFLAGS: 829 ADAPTER_LOCK(sc); 830 if (IS_DOOMED(pi)) { 831 rc = ENXIO; 832 goto fail; 833 } 834 if (ifp->if_flags & IFF_UP) { 835 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 836 flags = pi->if_flags; 837 if ((ifp->if_flags ^ flags) & 838 (IFF_PROMISC | IFF_ALLMULTI)) { 839 if (IS_BUSY(sc)) { 840 rc = EBUSY; 841 goto fail; 842 } 843 PORT_LOCK(pi); 844 rc = update_mac_settings(pi, 845 XGMAC_PROMISC | XGMAC_ALLMULTI); 846 PORT_UNLOCK(pi); 847 } 848 ADAPTER_UNLOCK(sc); 849 } else 850 rc = cxgbe_init_locked(pi); 851 pi->if_flags = ifp->if_flags; 852 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) 853 rc = cxgbe_uninit_locked(pi); 854 else 855 ADAPTER_UNLOCK(sc); 856 857 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 858 break; 859 860 case SIOCADDMULTI: 861 case SIOCDELMULTI: /* these two can be called with a mutex held :-( */ 862 ADAPTER_LOCK(sc); 863 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 864 if (rc) 865 goto fail; 866 867 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 868 PORT_LOCK(pi); 869 rc = update_mac_settings(pi, XGMAC_MCADDRS); 870 PORT_UNLOCK(pi); 871 } 872 ADAPTER_UNLOCK(sc); 873 break; 874 875 case SIOCSIFCAP: 876 ADAPTER_LOCK(sc); 877 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 878 if (rc) 879 goto fail; 880 881 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 882 if (mask & IFCAP_TXCSUM) { 883 ifp->if_capenable ^= IFCAP_TXCSUM; 884 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 885 886 if (IFCAP_TSO & ifp->if_capenable && 887 !(IFCAP_TXCSUM & ifp->if_capenable)) { 888 ifp->if_capenable &= ~IFCAP_TSO; 889 ifp->if_hwassist &= ~CSUM_TSO; 890 if_printf(ifp, 891 "tso disabled due to -txcsum.\n"); 892 } 893 } 894 if (mask & IFCAP_RXCSUM) 895 ifp->if_capenable ^= IFCAP_RXCSUM; 896 if (mask & IFCAP_TSO4) { 897 ifp->if_capenable ^= IFCAP_TSO4; 898 899 if (IFCAP_TSO & ifp->if_capenable) { 900 if (IFCAP_TXCSUM & ifp->if_capenable) 901 ifp->if_hwassist |= CSUM_TSO; 902 else { 903 ifp->if_capenable &= ~IFCAP_TSO; 904 ifp->if_hwassist &= ~CSUM_TSO; 905 if_printf(ifp, 906 "enable txcsum first.\n"); 907 rc = EAGAIN; 908 } 909 } else 910 ifp->if_hwassist &= ~CSUM_TSO; 911 } 912 if (mask & IFCAP_LRO) { 913 #ifdef INET 914 int i; 915 struct sge_rxq *rxq; 916 917 ifp->if_capenable ^= IFCAP_LRO; 918 for_each_rxq(pi, i, rxq) { 919 if (ifp->if_capenable & IFCAP_LRO) 920 rxq->flags |= RXQ_LRO_ENABLED; 921 else 922 rxq->flags &= ~RXQ_LRO_ENABLED; 923 } 924 #endif 925 } 926 #ifndef TCP_OFFLOAD_DISABLE 927 if (mask & IFCAP_TOE4) { 928 rc = EOPNOTSUPP; 929 } 930 #endif 931 if (mask & IFCAP_VLAN_HWTAGGING) { 932 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 933 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 934 PORT_LOCK(pi); 935 rc = update_mac_settings(pi, XGMAC_VLANEX); 936 PORT_UNLOCK(pi); 937 } 938 } 939 if (mask & IFCAP_VLAN_MTU) { 940 ifp->if_capenable ^= IFCAP_VLAN_MTU; 941 942 /* Need to find out how to disable auto-mtu-inflation */ 943 } 944 if (mask & IFCAP_VLAN_HWTSO) 945 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 946 if (mask & IFCAP_VLAN_HWCSUM) 947 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 948 949 #ifdef VLAN_CAPABILITIES 950 VLAN_CAPABILITIES(ifp); 951 #endif 952 ADAPTER_UNLOCK(sc); 953 break; 954 955 case SIOCSIFMEDIA: 956 case SIOCGIFMEDIA: 957 ifmedia_ioctl(ifp, ifr, &pi->media, cmd); 958 break; 959 960 default: 961 rc = ether_ioctl(ifp, cmd, data); 962 } 963 964 return (rc); 965 } 966 967 static void 968 cxgbe_start(struct ifnet *ifp) 969 { 970 struct port_info *pi = ifp->if_softc; 971 struct sge_txq *txq; 972 int i; 973 974 for_each_txq(pi, i, txq) { 975 if (TXQ_TRYLOCK(txq)) { 976 txq_start(ifp, txq); 977 TXQ_UNLOCK(txq); 978 } 979 } 980 } 981 982 static int 983 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m) 984 { 985 struct port_info *pi = ifp->if_softc; 986 struct adapter *sc = pi->adapter; 987 struct sge_txq *txq = &sc->sge.txq[pi->first_txq]; 988 struct buf_ring *br; 989 int rc; 990 991 M_ASSERTPKTHDR(m); 992 993 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 994 m_freem(m); 995 return (0); 996 } 997 998 if (m->m_flags & M_FLOWID) 999 txq += (m->m_pkthdr.flowid % pi->ntxq); 1000 br = txq->br; 1001 1002 if (TXQ_TRYLOCK(txq) == 0) { 1003 /* 1004 * XXX: make sure that this packet really is sent out. There is 1005 * a small race where t4_eth_tx may stop draining the drbr and 1006 * goes away, just before we enqueued this mbuf. 1007 */ 1008 1009 return (drbr_enqueue(ifp, br, m)); 1010 } 1011 1012 /* 1013 * txq->m is the mbuf that is held up due to a temporary shortage of 1014 * resources and it should be put on the wire first. Then what's in 1015 * drbr and finally the mbuf that was just passed in to us. 1016 * 1017 * Return code should indicate the fate of the mbuf that was passed in 1018 * this time. 1019 */ 1020 1021 TXQ_LOCK_ASSERT_OWNED(txq); 1022 if (drbr_needs_enqueue(ifp, br) || txq->m) { 1023 1024 /* Queued for transmission. */ 1025 1026 rc = drbr_enqueue(ifp, br, m); 1027 m = txq->m ? txq->m : drbr_dequeue(ifp, br); 1028 (void) t4_eth_tx(ifp, txq, m); 1029 TXQ_UNLOCK(txq); 1030 return (rc); 1031 } 1032 1033 /* Direct transmission. */ 1034 rc = t4_eth_tx(ifp, txq, m); 1035 if (rc != 0 && txq->m) 1036 rc = 0; /* held, will be transmitted soon (hopefully) */ 1037 1038 TXQ_UNLOCK(txq); 1039 return (rc); 1040 } 1041 1042 static void 1043 cxgbe_qflush(struct ifnet *ifp) 1044 { 1045 struct port_info *pi = ifp->if_softc; 1046 struct sge_txq *txq; 1047 int i; 1048 struct mbuf *m; 1049 1050 /* queues do not exist if !IFF_DRV_RUNNING. */ 1051 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1052 for_each_txq(pi, i, txq) { 1053 TXQ_LOCK(txq); 1054 m_freem(txq->m); 1055 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL) 1056 m_freem(m); 1057 TXQ_UNLOCK(txq); 1058 } 1059 } 1060 if_qflush(ifp); 1061 } 1062 1063 static int 1064 cxgbe_media_change(struct ifnet *ifp) 1065 { 1066 struct port_info *pi = ifp->if_softc; 1067 1068 device_printf(pi->dev, "%s unimplemented.\n", __func__); 1069 1070 return (EOPNOTSUPP); 1071 } 1072 1073 static void 1074 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1075 { 1076 struct port_info *pi = ifp->if_softc; 1077 struct ifmedia_entry *cur = pi->media.ifm_cur; 1078 int speed = pi->link_cfg.speed; 1079 int data = (pi->port_type << 8) | pi->mod_type; 1080 1081 if (cur->ifm_data != data) { 1082 build_medialist(pi); 1083 cur = pi->media.ifm_cur; 1084 } 1085 1086 ifmr->ifm_status = IFM_AVALID; 1087 if (!pi->link_cfg.link_ok) 1088 return; 1089 1090 ifmr->ifm_status |= IFM_ACTIVE; 1091 1092 /* active and current will differ iff current media is autoselect. */ 1093 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO) 1094 return; 1095 1096 ifmr->ifm_active = IFM_ETHER | IFM_FDX; 1097 if (speed == SPEED_10000) 1098 ifmr->ifm_active |= IFM_10G_T; 1099 else if (speed == SPEED_1000) 1100 ifmr->ifm_active |= IFM_1000_T; 1101 else if (speed == SPEED_100) 1102 ifmr->ifm_active |= IFM_100_TX; 1103 else if (speed == SPEED_10) 1104 ifmr->ifm_active |= IFM_10_T; 1105 else 1106 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__, 1107 speed)); 1108 } 1109 1110 void 1111 t4_fatal_err(struct adapter *sc) 1112 { 1113 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 1114 t4_intr_disable(sc); 1115 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n", 1116 device_get_nameunit(sc->dev)); 1117 } 1118 1119 static int 1120 map_bars(struct adapter *sc) 1121 { 1122 sc->regs_rid = PCIR_BAR(0); 1123 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1124 &sc->regs_rid, RF_ACTIVE); 1125 if (sc->regs_res == NULL) { 1126 device_printf(sc->dev, "cannot map registers.\n"); 1127 return (ENXIO); 1128 } 1129 sc->bt = rman_get_bustag(sc->regs_res); 1130 sc->bh = rman_get_bushandle(sc->regs_res); 1131 sc->mmio_len = rman_get_size(sc->regs_res); 1132 1133 sc->msix_rid = PCIR_BAR(4); 1134 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1135 &sc->msix_rid, RF_ACTIVE); 1136 if (sc->msix_res == NULL) { 1137 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 1138 return (ENXIO); 1139 } 1140 1141 return (0); 1142 } 1143 1144 static void 1145 setup_memwin(struct adapter *sc) 1146 { 1147 u_long bar0; 1148 1149 bar0 = rman_get_start(sc->regs_res); 1150 1151 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0), 1152 (bar0 + MEMWIN0_BASE) | V_BIR(0) | 1153 V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); 1154 1155 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1), 1156 (bar0 + MEMWIN1_BASE) | V_BIR(0) | 1157 V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); 1158 1159 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2), 1160 (bar0 + MEMWIN2_BASE) | V_BIR(0) | 1161 V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); 1162 } 1163 1164 static int 1165 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, 1166 struct intrs_and_queues *iaq) 1167 { 1168 int rc, itype, navail, nc, nrxq10g, nrxq1g; 1169 1170 bzero(iaq, sizeof(*iaq)); 1171 nc = mp_ncpus; /* our snapshot of the number of CPUs */ 1172 1173 for (itype = INTR_MSIX; itype; itype >>= 1) { 1174 1175 if ((itype & intr_types) == 0) 1176 continue; /* not allowed */ 1177 1178 if (itype == INTR_MSIX) 1179 navail = pci_msix_count(sc->dev); 1180 else if (itype == INTR_MSI) 1181 navail = pci_msi_count(sc->dev); 1182 else 1183 navail = 1; 1184 1185 if (navail == 0) 1186 continue; 1187 1188 iaq->intr_type = itype; 1189 1190 iaq->ntxq10g = min(nc, max_ntxq_10g); 1191 iaq->ntxq1g = min(nc, max_ntxq_1g); 1192 1193 nrxq10g = min(nc, max_nrxq_10g); 1194 nrxq1g = min(nc, max_nrxq_1g); 1195 1196 /* Extra 2 is for a) error interrupt b) firmware event */ 1197 iaq->nirq = n10g * nrxq10g + n1g * nrxq1g + 2; 1198 if (iaq->nirq <= navail && intr_fwd == 0) { 1199 1200 if (itype == INTR_MSI && !powerof2(iaq->nirq)) 1201 goto fwd; 1202 1203 /* One for err, one for fwq, and one for each rxq */ 1204 1205 iaq->intr_fwd = 0; 1206 iaq->nrxq10g = nrxq10g; 1207 iaq->nrxq1g = nrxq1g; 1208 1209 } else { 1210 fwd: 1211 iaq->intr_fwd = 1; 1212 1213 if (navail > nc) { 1214 if (itype == INTR_MSIX) 1215 navail = nc + 1; 1216 1217 /* navail is and must remain a pow2 for MSI */ 1218 if (itype == INTR_MSI) { 1219 KASSERT(powerof2(navail), 1220 ("%d not power of 2", navail)); 1221 1222 while (navail / 2 > nc) 1223 navail /= 2; 1224 } 1225 } 1226 iaq->nirq = navail; /* total # of interrupts */ 1227 1228 /* 1229 * If we have multiple vectors available reserve one 1230 * exclusively for errors. The rest will be shared by 1231 * the fwq and data. 1232 */ 1233 if (navail > 1) 1234 navail--; 1235 iaq->nrxq10g = min(nrxq10g, navail); 1236 iaq->nrxq1g = min(nrxq1g, navail); 1237 } 1238 1239 navail = iaq->nirq; 1240 rc = 0; 1241 if (itype == INTR_MSIX) 1242 rc = pci_alloc_msix(sc->dev, &navail); 1243 else if (itype == INTR_MSI) 1244 rc = pci_alloc_msi(sc->dev, &navail); 1245 1246 if (rc == 0) { 1247 if (navail == iaq->nirq) 1248 return (0); 1249 1250 /* 1251 * Didn't get the number requested. Use whatever number 1252 * the kernel is willing to allocate (it's in navail). 1253 */ 1254 pci_release_msi(sc->dev); 1255 goto fwd; 1256 } 1257 1258 device_printf(sc->dev, 1259 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", 1260 itype, rc, iaq->nirq, navail); 1261 } 1262 1263 device_printf(sc->dev, 1264 "failed to find a usable interrupt type. " 1265 "allowed=%d, msi-x=%d, msi=%d, intx=1", intr_types, 1266 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 1267 1268 return (ENXIO); 1269 } 1270 1271 /* 1272 * Install a compatible firmware (if required), establish contact with it, 1273 * become the master, and reset the device. 1274 */ 1275 static int 1276 prep_firmware(struct adapter *sc) 1277 { 1278 const struct firmware *fw; 1279 int rc; 1280 enum dev_state state; 1281 1282 /* Check firmware version and install a different one if necessary */ 1283 rc = t4_check_fw_version(sc); 1284 if (rc != 0 || force_firmware_install) { 1285 uint32_t v = 0; 1286 1287 fw = firmware_get(T4_FWNAME); 1288 if (fw != NULL) { 1289 const struct fw_hdr *hdr = (const void *)fw->data; 1290 1291 v = ntohl(hdr->fw_ver); 1292 1293 /* 1294 * The firmware module will not be used if it isn't the 1295 * same major version as what the driver was compiled 1296 * with. This check trumps force_firmware_install. 1297 */ 1298 if (G_FW_HDR_FW_VER_MAJOR(v) != FW_VERSION_MAJOR) { 1299 device_printf(sc->dev, 1300 "Found firmware image but version %d " 1301 "can not be used with this driver (%d)\n", 1302 G_FW_HDR_FW_VER_MAJOR(v), FW_VERSION_MAJOR); 1303 1304 firmware_put(fw, FIRMWARE_UNLOAD); 1305 fw = NULL; 1306 } 1307 } 1308 1309 if (fw == NULL && (rc < 0 || force_firmware_install)) { 1310 device_printf(sc->dev, "No usable firmware. " 1311 "card has %d.%d.%d, driver compiled with %d.%d.%d, " 1312 "force_firmware_install%s set", 1313 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 1314 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 1315 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 1316 FW_VERSION_MAJOR, FW_VERSION_MINOR, 1317 FW_VERSION_MICRO, 1318 force_firmware_install ? "" : " not"); 1319 return (EAGAIN); 1320 } 1321 1322 /* 1323 * Always upgrade, even for minor/micro/build mismatches. 1324 * Downgrade only for a major version mismatch or if 1325 * force_firmware_install was specified. 1326 */ 1327 if (fw != NULL && (rc < 0 || force_firmware_install || 1328 v > sc->params.fw_vers)) { 1329 device_printf(sc->dev, 1330 "installing firmware %d.%d.%d.%d on card.\n", 1331 G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v), 1332 G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v)); 1333 1334 rc = -t4_load_fw(sc, fw->data, fw->datasize); 1335 if (rc != 0) { 1336 device_printf(sc->dev, 1337 "failed to install firmware: %d\n", rc); 1338 firmware_put(fw, FIRMWARE_UNLOAD); 1339 return (rc); 1340 } else { 1341 /* refresh */ 1342 (void) t4_check_fw_version(sc); 1343 } 1344 } 1345 1346 if (fw != NULL) 1347 firmware_put(fw, FIRMWARE_UNLOAD); 1348 } 1349 1350 /* Contact firmware, request master */ 1351 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state); 1352 if (rc < 0) { 1353 rc = -rc; 1354 device_printf(sc->dev, 1355 "failed to connect to the firmware: %d.\n", rc); 1356 return (rc); 1357 } 1358 1359 /* Reset device */ 1360 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST); 1361 if (rc != 0) { 1362 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 1363 if (rc != ETIMEDOUT && rc != EIO) 1364 t4_fw_bye(sc, sc->mbox); 1365 return (rc); 1366 } 1367 1368 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 1369 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 1370 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 1371 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 1372 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 1373 sc->flags |= FW_OK; 1374 1375 return (0); 1376 } 1377 1378 static int 1379 get_capabilities(struct adapter *sc, struct fw_caps_config_cmd *caps) 1380 { 1381 int rc; 1382 1383 bzero(caps, sizeof(*caps)); 1384 caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1385 F_FW_CMD_REQUEST | F_FW_CMD_READ); 1386 caps->retval_len16 = htobe32(FW_LEN16(*caps)); 1387 1388 rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), caps); 1389 if (rc != 0) 1390 return (rc); 1391 1392 if (caps->niccaps & htobe16(FW_CAPS_CONFIG_NIC_VM)) 1393 caps->niccaps ^= htobe16(FW_CAPS_CONFIG_NIC_VM); 1394 1395 caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1396 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 1397 rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), NULL); 1398 1399 return (rc); 1400 } 1401 1402 static int 1403 get_params(struct adapter *sc, struct fw_caps_config_cmd *caps) 1404 { 1405 int rc; 1406 uint32_t params[7], val[7]; 1407 1408 #define FW_PARAM_DEV(param) \ 1409 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 1410 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 1411 #define FW_PARAM_PFVF(param) \ 1412 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 1413 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 1414 1415 params[0] = FW_PARAM_DEV(PORTVEC); 1416 params[1] = FW_PARAM_PFVF(IQFLINT_START); 1417 params[2] = FW_PARAM_PFVF(EQ_START); 1418 params[3] = FW_PARAM_PFVF(FILTER_START); 1419 params[4] = FW_PARAM_PFVF(FILTER_END); 1420 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 5, params, val); 1421 if (rc != 0) { 1422 device_printf(sc->dev, 1423 "failed to query parameters: %d.\n", rc); 1424 goto done; 1425 } 1426 1427 sc->params.portvec = val[0]; 1428 sc->params.nports = 0; 1429 while (val[0]) { 1430 sc->params.nports++; 1431 val[0] &= val[0] - 1; 1432 } 1433 1434 sc->sge.iq_start = val[1]; 1435 sc->sge.eq_start = val[2]; 1436 sc->tids.ftid_base = val[3]; 1437 sc->tids.nftids = val[4] - val[3] + 1; 1438 1439 if (caps->toecaps) { 1440 /* query offload-related parameters */ 1441 params[0] = FW_PARAM_DEV(NTID); 1442 params[1] = FW_PARAM_PFVF(SERVER_START); 1443 params[2] = FW_PARAM_PFVF(SERVER_END); 1444 params[3] = FW_PARAM_PFVF(TDDP_START); 1445 params[4] = FW_PARAM_PFVF(TDDP_END); 1446 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 1447 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val); 1448 if (rc != 0) { 1449 device_printf(sc->dev, 1450 "failed to query TOE parameters: %d.\n", rc); 1451 goto done; 1452 } 1453 sc->tids.ntids = val[0]; 1454 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 1455 sc->tids.stid_base = val[1]; 1456 sc->tids.nstids = val[2] - val[1] + 1; 1457 sc->vres.ddp.start = val[3]; 1458 sc->vres.ddp.size = val[4] - val[3] + 1; 1459 sc->params.ofldq_wr_cred = val[5]; 1460 sc->params.offload = 1; 1461 } 1462 if (caps->rdmacaps) { 1463 params[0] = FW_PARAM_PFVF(STAG_START); 1464 params[1] = FW_PARAM_PFVF(STAG_END); 1465 params[2] = FW_PARAM_PFVF(RQ_START); 1466 params[3] = FW_PARAM_PFVF(RQ_END); 1467 params[4] = FW_PARAM_PFVF(PBL_START); 1468 params[5] = FW_PARAM_PFVF(PBL_END); 1469 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val); 1470 if (rc != 0) { 1471 device_printf(sc->dev, 1472 "failed to query RDMA parameters: %d.\n", rc); 1473 goto done; 1474 } 1475 sc->vres.stag.start = val[0]; 1476 sc->vres.stag.size = val[1] - val[0] + 1; 1477 sc->vres.rq.start = val[2]; 1478 sc->vres.rq.size = val[3] - val[2] + 1; 1479 sc->vres.pbl.start = val[4]; 1480 sc->vres.pbl.size = val[5] - val[4] + 1; 1481 } 1482 if (caps->iscsicaps) { 1483 params[0] = FW_PARAM_PFVF(ISCSI_START); 1484 params[1] = FW_PARAM_PFVF(ISCSI_END); 1485 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, params, val); 1486 if (rc != 0) { 1487 device_printf(sc->dev, 1488 "failed to query iSCSI parameters: %d.\n", rc); 1489 goto done; 1490 } 1491 sc->vres.iscsi.start = val[0]; 1492 sc->vres.iscsi.size = val[1] - val[0] + 1; 1493 } 1494 #undef FW_PARAM_PFVF 1495 #undef FW_PARAM_DEV 1496 1497 done: 1498 return (rc); 1499 } 1500 1501 static void 1502 t4_set_desc(struct adapter *sc) 1503 { 1504 char buf[128]; 1505 struct adapter_params *p = &sc->params; 1506 1507 snprintf(buf, sizeof(buf), 1508 "Chelsio %s (rev %d) %d port %sNIC PCIe-x%d %d %s, S/N:%s, E/C:%s", 1509 p->vpd.id, p->rev, p->nports, is_offload(sc) ? "R" : "", 1510 p->pci.width, sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" : 1511 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), p->vpd.sn, p->vpd.ec); 1512 1513 device_set_desc_copy(sc->dev, buf); 1514 } 1515 1516 static void 1517 build_medialist(struct port_info *pi) 1518 { 1519 struct ifmedia *media = &pi->media; 1520 int data, m; 1521 1522 PORT_LOCK(pi); 1523 1524 ifmedia_removeall(media); 1525 1526 m = IFM_ETHER | IFM_FDX; 1527 data = (pi->port_type << 8) | pi->mod_type; 1528 1529 switch(pi->port_type) { 1530 case FW_PORT_TYPE_BT_XFI: 1531 ifmedia_add(media, m | IFM_10G_T, data, NULL); 1532 break; 1533 1534 case FW_PORT_TYPE_BT_XAUI: 1535 ifmedia_add(media, m | IFM_10G_T, data, NULL); 1536 /* fall through */ 1537 1538 case FW_PORT_TYPE_BT_SGMII: 1539 ifmedia_add(media, m | IFM_1000_T, data, NULL); 1540 ifmedia_add(media, m | IFM_100_TX, data, NULL); 1541 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL); 1542 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 1543 break; 1544 1545 case FW_PORT_TYPE_CX4: 1546 ifmedia_add(media, m | IFM_10G_CX4, data, NULL); 1547 ifmedia_set(media, m | IFM_10G_CX4); 1548 break; 1549 1550 case FW_PORT_TYPE_SFP: 1551 case FW_PORT_TYPE_FIBER_XFI: 1552 case FW_PORT_TYPE_FIBER_XAUI: 1553 switch (pi->mod_type) { 1554 1555 case FW_PORT_MOD_TYPE_LR: 1556 ifmedia_add(media, m | IFM_10G_LR, data, NULL); 1557 ifmedia_set(media, m | IFM_10G_LR); 1558 break; 1559 1560 case FW_PORT_MOD_TYPE_SR: 1561 ifmedia_add(media, m | IFM_10G_SR, data, NULL); 1562 ifmedia_set(media, m | IFM_10G_SR); 1563 break; 1564 1565 case FW_PORT_MOD_TYPE_LRM: 1566 ifmedia_add(media, m | IFM_10G_LRM, data, NULL); 1567 ifmedia_set(media, m | IFM_10G_LRM); 1568 break; 1569 1570 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 1571 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 1572 ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL); 1573 ifmedia_set(media, m | IFM_10G_TWINAX); 1574 break; 1575 1576 case FW_PORT_MOD_TYPE_NONE: 1577 m &= ~IFM_FDX; 1578 ifmedia_add(media, m | IFM_NONE, data, NULL); 1579 ifmedia_set(media, m | IFM_NONE); 1580 break; 1581 1582 case FW_PORT_MOD_TYPE_NA: 1583 case FW_PORT_MOD_TYPE_ER: 1584 default: 1585 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 1586 ifmedia_set(media, m | IFM_UNKNOWN); 1587 break; 1588 } 1589 break; 1590 1591 case FW_PORT_TYPE_KX4: 1592 case FW_PORT_TYPE_KX: 1593 case FW_PORT_TYPE_KR: 1594 default: 1595 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 1596 ifmedia_set(media, m | IFM_UNKNOWN); 1597 break; 1598 } 1599 1600 PORT_UNLOCK(pi); 1601 } 1602 1603 /* 1604 * Program the port's XGMAC based on parameters in ifnet. The caller also 1605 * indicates which parameters should be programmed (the rest are left alone). 1606 */ 1607 static int 1608 update_mac_settings(struct port_info *pi, int flags) 1609 { 1610 int rc; 1611 struct ifnet *ifp = pi->ifp; 1612 struct adapter *sc = pi->adapter; 1613 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 1614 1615 PORT_LOCK_ASSERT_OWNED(pi); 1616 KASSERT(flags, ("%s: not told what to update.", __func__)); 1617 1618 if (flags & XGMAC_MTU) 1619 mtu = ifp->if_mtu; 1620 1621 if (flags & XGMAC_PROMISC) 1622 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 1623 1624 if (flags & XGMAC_ALLMULTI) 1625 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 1626 1627 if (flags & XGMAC_VLANEX) 1628 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; 1629 1630 rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1, 1631 vlanex, false); 1632 if (rc) { 1633 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc); 1634 return (rc); 1635 } 1636 1637 if (flags & XGMAC_UCADDR) { 1638 uint8_t ucaddr[ETHER_ADDR_LEN]; 1639 1640 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 1641 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt, 1642 ucaddr, true, true); 1643 if (rc < 0) { 1644 rc = -rc; 1645 if_printf(ifp, "change_mac failed: %d\n", rc); 1646 return (rc); 1647 } else { 1648 pi->xact_addr_filt = rc; 1649 rc = 0; 1650 } 1651 } 1652 1653 if (flags & XGMAC_MCADDRS) { 1654 const uint8_t *mcaddr; 1655 int del = 1; 1656 uint64_t hash = 0; 1657 struct ifmultiaddr *ifma; 1658 1659 if_maddr_rlock(ifp); 1660 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1661 if (ifma->ifma_addr->sa_family != AF_LINK) 1662 continue; 1663 mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 1664 1665 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid, del, 1, 1666 &mcaddr, NULL, &hash, 0); 1667 if (rc < 0) { 1668 rc = -rc; 1669 if_printf(ifp, "failed to add mc address" 1670 " %02x:%02x:%02x:%02x:%02x:%02x rc=%d\n", 1671 mcaddr[0], mcaddr[1], mcaddr[2], mcaddr[3], 1672 mcaddr[4], mcaddr[5], rc); 1673 goto mcfail; 1674 } 1675 del = 0; 1676 } 1677 1678 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0); 1679 if (rc != 0) 1680 if_printf(ifp, "failed to set mc address hash: %d", rc); 1681 mcfail: 1682 if_maddr_runlock(ifp); 1683 } 1684 1685 return (rc); 1686 } 1687 1688 static int 1689 cxgbe_init_locked(struct port_info *pi) 1690 { 1691 struct adapter *sc = pi->adapter; 1692 int rc = 0; 1693 1694 ADAPTER_LOCK_ASSERT_OWNED(sc); 1695 1696 while (!IS_DOOMED(pi) && IS_BUSY(sc)) { 1697 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4init", 0)) { 1698 rc = EINTR; 1699 goto done; 1700 } 1701 } 1702 if (IS_DOOMED(pi)) { 1703 rc = ENXIO; 1704 goto done; 1705 } 1706 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 1707 1708 /* Give up the adapter lock, port init code can sleep. */ 1709 SET_BUSY(sc); 1710 ADAPTER_UNLOCK(sc); 1711 1712 rc = cxgbe_init_synchronized(pi); 1713 1714 done: 1715 ADAPTER_LOCK(sc); 1716 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 1717 CLR_BUSY(sc); 1718 wakeup_one(&sc->flags); 1719 ADAPTER_UNLOCK(sc); 1720 return (rc); 1721 } 1722 1723 static int 1724 cxgbe_init_synchronized(struct port_info *pi) 1725 { 1726 struct adapter *sc = pi->adapter; 1727 struct ifnet *ifp = pi->ifp; 1728 int rc = 0, i; 1729 uint16_t *rss; 1730 struct sge_rxq *rxq; 1731 1732 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1733 1734 if (isset(&sc->open_device_map, pi->port_id)) { 1735 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, 1736 ("mismatch between open_device_map and if_drv_flags")); 1737 return (0); /* already running */ 1738 } 1739 1740 if (sc->open_device_map == 0 && ((rc = first_port_up(sc)) != 0)) 1741 return (rc); /* error message displayed already */ 1742 1743 /* 1744 * Allocate tx/rx/fl queues for this port. 1745 */ 1746 rc = t4_setup_eth_queues(pi); 1747 if (rc != 0) 1748 goto done; /* error message displayed already */ 1749 1750 /* 1751 * Setup RSS for this port. 1752 */ 1753 rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 1754 for_each_rxq(pi, i, rxq) { 1755 rss[i] = rxq->iq.abs_id; 1756 } 1757 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss, 1758 pi->nrxq); 1759 free(rss, M_CXGBE); 1760 if (rc != 0) { 1761 if_printf(ifp, "rss_config failed: %d\n", rc); 1762 goto done; 1763 } 1764 1765 PORT_LOCK(pi); 1766 rc = update_mac_settings(pi, XGMAC_ALL); 1767 PORT_UNLOCK(pi); 1768 if (rc) 1769 goto done; /* error message displayed already */ 1770 1771 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg); 1772 if (rc != 0) { 1773 if_printf(ifp, "start_link failed: %d\n", rc); 1774 goto done; 1775 } 1776 1777 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true); 1778 if (rc != 0) { 1779 if_printf(ifp, "enable_vi failed: %d\n", rc); 1780 goto done; 1781 } 1782 pi->flags |= VI_ENABLED; 1783 1784 /* all ok */ 1785 setbit(&sc->open_device_map, pi->port_id); 1786 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1787 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1788 1789 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 1790 done: 1791 if (rc != 0) 1792 cxgbe_uninit_synchronized(pi); 1793 1794 return (rc); 1795 } 1796 1797 static int 1798 cxgbe_uninit_locked(struct port_info *pi) 1799 { 1800 struct adapter *sc = pi->adapter; 1801 int rc; 1802 1803 ADAPTER_LOCK_ASSERT_OWNED(sc); 1804 1805 while (!IS_DOOMED(pi) && IS_BUSY(sc)) { 1806 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4uninit", 0)) { 1807 rc = EINTR; 1808 goto done; 1809 } 1810 } 1811 if (IS_DOOMED(pi)) { 1812 rc = ENXIO; 1813 goto done; 1814 } 1815 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 1816 SET_BUSY(sc); 1817 ADAPTER_UNLOCK(sc); 1818 1819 rc = cxgbe_uninit_synchronized(pi); 1820 1821 ADAPTER_LOCK(sc); 1822 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 1823 CLR_BUSY(sc); 1824 wakeup_one(&sc->flags); 1825 done: 1826 ADAPTER_UNLOCK(sc); 1827 return (rc); 1828 } 1829 1830 /* 1831 * Idempotent. 1832 */ 1833 static int 1834 cxgbe_uninit_synchronized(struct port_info *pi) 1835 { 1836 struct adapter *sc = pi->adapter; 1837 struct ifnet *ifp = pi->ifp; 1838 int rc; 1839 1840 /* 1841 * taskqueue_drain may cause a deadlock if the adapter lock is held. 1842 */ 1843 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1844 1845 /* 1846 * Clear this port's bit from the open device map, and then drain 1847 * tasks and callouts. 1848 */ 1849 clrbit(&sc->open_device_map, pi->port_id); 1850 1851 PORT_LOCK(pi); 1852 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1853 callout_stop(&pi->tick); 1854 PORT_UNLOCK(pi); 1855 callout_drain(&pi->tick); 1856 1857 /* 1858 * Stop and then free the queues' resources, including the queues 1859 * themselves. 1860 * 1861 * XXX: we could just stop the queues here (on ifconfig down) and free 1862 * them later (on port detach), but having up/down go through the entire 1863 * allocate/activate/deactivate/free sequence is a good way to find 1864 * leaks and bugs. 1865 */ 1866 rc = t4_teardown_eth_queues(pi); 1867 if (rc != 0) 1868 if_printf(ifp, "teardown failed: %d\n", rc); 1869 1870 if (pi->flags & VI_ENABLED) { 1871 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false); 1872 if (rc) 1873 if_printf(ifp, "disable_vi failed: %d\n", rc); 1874 else 1875 pi->flags &= ~VI_ENABLED; 1876 } 1877 1878 pi->link_cfg.link_ok = 0; 1879 pi->link_cfg.speed = 0; 1880 t4_os_link_changed(sc, pi->port_id, 0); 1881 1882 if (sc->open_device_map == 0) 1883 last_port_down(sc); 1884 1885 return (0); 1886 } 1887 1888 #define T4_ALLOC_IRQ(sc, irqid, rid, handler, arg, name) do { \ 1889 rc = t4_alloc_irq(sc, &sc->irq[irqid], rid, handler, arg, name); \ 1890 if (rc != 0) \ 1891 goto done; \ 1892 } while (0) 1893 static int 1894 first_port_up(struct adapter *sc) 1895 { 1896 int rc, i; 1897 char name[8]; 1898 1899 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1900 1901 /* 1902 * queues that belong to the adapter (not any particular port). 1903 */ 1904 rc = t4_setup_adapter_queues(sc); 1905 if (rc != 0) 1906 goto done; 1907 1908 /* 1909 * Setup interrupts. 1910 */ 1911 if (sc->intr_count == 1) { 1912 KASSERT(sc->flags & INTR_FWD, 1913 ("%s: single interrupt but not forwarded?", __func__)); 1914 T4_ALLOC_IRQ(sc, 0, 0, t4_intr_all, sc, "all"); 1915 } else { 1916 /* Multiple interrupts. The first one is always error intr */ 1917 T4_ALLOC_IRQ(sc, 0, 1, t4_intr_err, sc, "err"); 1918 1919 if (sc->flags & INTR_FWD) { 1920 /* The rest are shared by the fwq and all data intr */ 1921 for (i = 1; i < sc->intr_count; i++) { 1922 snprintf(name, sizeof(name), "mux%d", i - 1); 1923 T4_ALLOC_IRQ(sc, i, i + 1, t4_intr_fwd, 1924 &sc->sge.fiq[i - 1], name); 1925 } 1926 } else { 1927 struct port_info *pi; 1928 int p, q; 1929 1930 T4_ALLOC_IRQ(sc, 1, 2, t4_intr_evt, &sc->sge.fwq, 1931 "evt"); 1932 1933 p = q = 0; 1934 pi = sc->port[p]; 1935 for (i = 2; i < sc->intr_count; i++) { 1936 snprintf(name, sizeof(name), "p%dq%d", p, q); 1937 if (++q >= pi->nrxq) { 1938 p++; 1939 q = 0; 1940 pi = sc->port[p]; 1941 } 1942 T4_ALLOC_IRQ(sc, i, i + 1, t4_intr_data, 1943 &sc->sge.rxq[i - 2], name); 1944 } 1945 } 1946 } 1947 1948 t4_intr_enable(sc); 1949 sc->flags |= FULL_INIT_DONE; 1950 1951 done: 1952 if (rc != 0) 1953 last_port_down(sc); 1954 1955 return (rc); 1956 } 1957 #undef T4_ALLOC_IRQ 1958 1959 /* 1960 * Idempotent. 1961 */ 1962 static int 1963 last_port_down(struct adapter *sc) 1964 { 1965 int i; 1966 1967 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1968 1969 t4_intr_disable(sc); 1970 1971 t4_teardown_adapter_queues(sc); 1972 1973 for (i = 0; i < sc->intr_count; i++) 1974 t4_free_irq(sc, &sc->irq[i]); 1975 1976 sc->flags &= ~FULL_INIT_DONE; 1977 1978 return (0); 1979 } 1980 1981 static int 1982 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 1983 iq_intr_handler_t *handler, void *arg, char *name) 1984 { 1985 int rc; 1986 1987 irq->rid = rid; 1988 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 1989 RF_SHAREABLE | RF_ACTIVE); 1990 if (irq->res == NULL) { 1991 device_printf(sc->dev, 1992 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 1993 return (ENOMEM); 1994 } 1995 1996 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 1997 NULL, handler, arg, &irq->tag); 1998 if (rc != 0) { 1999 device_printf(sc->dev, 2000 "failed to setup interrupt for rid %d, name %s: %d\n", 2001 rid, name, rc); 2002 } else if (name) 2003 bus_describe_intr(sc->dev, irq->res, irq->tag, name); 2004 2005 return (rc); 2006 } 2007 2008 static int 2009 t4_free_irq(struct adapter *sc, struct irq *irq) 2010 { 2011 if (irq->tag) 2012 bus_teardown_intr(sc->dev, irq->res, irq->tag); 2013 if (irq->res) 2014 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 2015 2016 bzero(irq, sizeof(*irq)); 2017 2018 return (0); 2019 } 2020 2021 static void 2022 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start, 2023 unsigned int end) 2024 { 2025 uint32_t *p = (uint32_t *)(buf + start); 2026 2027 for ( ; start <= end; start += sizeof(uint32_t)) 2028 *p++ = t4_read_reg(sc, start); 2029 } 2030 2031 static void 2032 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 2033 { 2034 int i; 2035 static const unsigned int reg_ranges[] = { 2036 0x1008, 0x1108, 2037 0x1180, 0x11b4, 2038 0x11fc, 0x123c, 2039 0x1300, 0x173c, 2040 0x1800, 0x18fc, 2041 0x3000, 0x30d8, 2042 0x30e0, 0x5924, 2043 0x5960, 0x59d4, 2044 0x5a00, 0x5af8, 2045 0x6000, 0x6098, 2046 0x6100, 0x6150, 2047 0x6200, 0x6208, 2048 0x6240, 0x6248, 2049 0x6280, 0x6338, 2050 0x6370, 0x638c, 2051 0x6400, 0x643c, 2052 0x6500, 0x6524, 2053 0x6a00, 0x6a38, 2054 0x6a60, 0x6a78, 2055 0x6b00, 0x6b84, 2056 0x6bf0, 0x6c84, 2057 0x6cf0, 0x6d84, 2058 0x6df0, 0x6e84, 2059 0x6ef0, 0x6f84, 2060 0x6ff0, 0x7084, 2061 0x70f0, 0x7184, 2062 0x71f0, 0x7284, 2063 0x72f0, 0x7384, 2064 0x73f0, 0x7450, 2065 0x7500, 0x7530, 2066 0x7600, 0x761c, 2067 0x7680, 0x76cc, 2068 0x7700, 0x7798, 2069 0x77c0, 0x77fc, 2070 0x7900, 0x79fc, 2071 0x7b00, 0x7c38, 2072 0x7d00, 0x7efc, 2073 0x8dc0, 0x8e1c, 2074 0x8e30, 0x8e78, 2075 0x8ea0, 0x8f6c, 2076 0x8fc0, 0x9074, 2077 0x90fc, 0x90fc, 2078 0x9400, 0x9458, 2079 0x9600, 0x96bc, 2080 0x9800, 0x9808, 2081 0x9820, 0x983c, 2082 0x9850, 0x9864, 2083 0x9c00, 0x9c6c, 2084 0x9c80, 0x9cec, 2085 0x9d00, 0x9d6c, 2086 0x9d80, 0x9dec, 2087 0x9e00, 0x9e6c, 2088 0x9e80, 0x9eec, 2089 0x9f00, 0x9f6c, 2090 0x9f80, 0x9fec, 2091 0xd004, 0xd03c, 2092 0xdfc0, 0xdfe0, 2093 0xe000, 0xea7c, 2094 0xf000, 0x11190, 2095 0x19040, 0x19124, 2096 0x19150, 0x191b0, 2097 0x191d0, 0x191e8, 2098 0x19238, 0x1924c, 2099 0x193f8, 0x19474, 2100 0x19490, 0x194f8, 2101 0x19800, 0x19f30, 2102 0x1a000, 0x1a06c, 2103 0x1a0b0, 0x1a120, 2104 0x1a128, 0x1a138, 2105 0x1a190, 0x1a1c4, 2106 0x1a1fc, 0x1a1fc, 2107 0x1e040, 0x1e04c, 2108 0x1e240, 0x1e28c, 2109 0x1e2c0, 0x1e2c0, 2110 0x1e2e0, 0x1e2e0, 2111 0x1e300, 0x1e384, 2112 0x1e3c0, 0x1e3c8, 2113 0x1e440, 0x1e44c, 2114 0x1e640, 0x1e68c, 2115 0x1e6c0, 0x1e6c0, 2116 0x1e6e0, 0x1e6e0, 2117 0x1e700, 0x1e784, 2118 0x1e7c0, 0x1e7c8, 2119 0x1e840, 0x1e84c, 2120 0x1ea40, 0x1ea8c, 2121 0x1eac0, 0x1eac0, 2122 0x1eae0, 0x1eae0, 2123 0x1eb00, 0x1eb84, 2124 0x1ebc0, 0x1ebc8, 2125 0x1ec40, 0x1ec4c, 2126 0x1ee40, 0x1ee8c, 2127 0x1eec0, 0x1eec0, 2128 0x1eee0, 0x1eee0, 2129 0x1ef00, 0x1ef84, 2130 0x1efc0, 0x1efc8, 2131 0x1f040, 0x1f04c, 2132 0x1f240, 0x1f28c, 2133 0x1f2c0, 0x1f2c0, 2134 0x1f2e0, 0x1f2e0, 2135 0x1f300, 0x1f384, 2136 0x1f3c0, 0x1f3c8, 2137 0x1f440, 0x1f44c, 2138 0x1f640, 0x1f68c, 2139 0x1f6c0, 0x1f6c0, 2140 0x1f6e0, 0x1f6e0, 2141 0x1f700, 0x1f784, 2142 0x1f7c0, 0x1f7c8, 2143 0x1f840, 0x1f84c, 2144 0x1fa40, 0x1fa8c, 2145 0x1fac0, 0x1fac0, 2146 0x1fae0, 0x1fae0, 2147 0x1fb00, 0x1fb84, 2148 0x1fbc0, 0x1fbc8, 2149 0x1fc40, 0x1fc4c, 2150 0x1fe40, 0x1fe8c, 2151 0x1fec0, 0x1fec0, 2152 0x1fee0, 0x1fee0, 2153 0x1ff00, 0x1ff84, 2154 0x1ffc0, 0x1ffc8, 2155 0x20000, 0x2002c, 2156 0x20100, 0x2013c, 2157 0x20190, 0x201c8, 2158 0x20200, 0x20318, 2159 0x20400, 0x20528, 2160 0x20540, 0x20614, 2161 0x21000, 0x21040, 2162 0x2104c, 0x21060, 2163 0x210c0, 0x210ec, 2164 0x21200, 0x21268, 2165 0x21270, 0x21284, 2166 0x212fc, 0x21388, 2167 0x21400, 0x21404, 2168 0x21500, 0x21518, 2169 0x2152c, 0x2153c, 2170 0x21550, 0x21554, 2171 0x21600, 0x21600, 2172 0x21608, 0x21628, 2173 0x21630, 0x2163c, 2174 0x21700, 0x2171c, 2175 0x21780, 0x2178c, 2176 0x21800, 0x21c38, 2177 0x21c80, 0x21d7c, 2178 0x21e00, 0x21e04, 2179 0x22000, 0x2202c, 2180 0x22100, 0x2213c, 2181 0x22190, 0x221c8, 2182 0x22200, 0x22318, 2183 0x22400, 0x22528, 2184 0x22540, 0x22614, 2185 0x23000, 0x23040, 2186 0x2304c, 0x23060, 2187 0x230c0, 0x230ec, 2188 0x23200, 0x23268, 2189 0x23270, 0x23284, 2190 0x232fc, 0x23388, 2191 0x23400, 0x23404, 2192 0x23500, 0x23518, 2193 0x2352c, 0x2353c, 2194 0x23550, 0x23554, 2195 0x23600, 0x23600, 2196 0x23608, 0x23628, 2197 0x23630, 0x2363c, 2198 0x23700, 0x2371c, 2199 0x23780, 0x2378c, 2200 0x23800, 0x23c38, 2201 0x23c80, 0x23d7c, 2202 0x23e00, 0x23e04, 2203 0x24000, 0x2402c, 2204 0x24100, 0x2413c, 2205 0x24190, 0x241c8, 2206 0x24200, 0x24318, 2207 0x24400, 0x24528, 2208 0x24540, 0x24614, 2209 0x25000, 0x25040, 2210 0x2504c, 0x25060, 2211 0x250c0, 0x250ec, 2212 0x25200, 0x25268, 2213 0x25270, 0x25284, 2214 0x252fc, 0x25388, 2215 0x25400, 0x25404, 2216 0x25500, 0x25518, 2217 0x2552c, 0x2553c, 2218 0x25550, 0x25554, 2219 0x25600, 0x25600, 2220 0x25608, 0x25628, 2221 0x25630, 0x2563c, 2222 0x25700, 0x2571c, 2223 0x25780, 0x2578c, 2224 0x25800, 0x25c38, 2225 0x25c80, 0x25d7c, 2226 0x25e00, 0x25e04, 2227 0x26000, 0x2602c, 2228 0x26100, 0x2613c, 2229 0x26190, 0x261c8, 2230 0x26200, 0x26318, 2231 0x26400, 0x26528, 2232 0x26540, 0x26614, 2233 0x27000, 0x27040, 2234 0x2704c, 0x27060, 2235 0x270c0, 0x270ec, 2236 0x27200, 0x27268, 2237 0x27270, 0x27284, 2238 0x272fc, 0x27388, 2239 0x27400, 0x27404, 2240 0x27500, 0x27518, 2241 0x2752c, 0x2753c, 2242 0x27550, 0x27554, 2243 0x27600, 0x27600, 2244 0x27608, 0x27628, 2245 0x27630, 0x2763c, 2246 0x27700, 0x2771c, 2247 0x27780, 0x2778c, 2248 0x27800, 0x27c38, 2249 0x27c80, 0x27d7c, 2250 0x27e00, 0x27e04 2251 }; 2252 2253 regs->version = 4 | (sc->params.rev << 10); 2254 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2) 2255 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]); 2256 } 2257 2258 static void 2259 cxgbe_tick(void *arg) 2260 { 2261 struct port_info *pi = arg; 2262 struct ifnet *ifp = pi->ifp; 2263 struct sge_txq *txq; 2264 int i, drops; 2265 struct port_stats *s = &pi->stats; 2266 2267 PORT_LOCK(pi); 2268 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2269 PORT_UNLOCK(pi); 2270 return; /* without scheduling another callout */ 2271 } 2272 2273 t4_get_port_stats(pi->adapter, pi->tx_chan, s); 2274 2275 ifp->if_opackets = s->tx_frames; 2276 ifp->if_ipackets = s->rx_frames; 2277 ifp->if_obytes = s->tx_octets; 2278 ifp->if_ibytes = s->rx_octets; 2279 ifp->if_omcasts = s->tx_mcast_frames; 2280 ifp->if_imcasts = s->rx_mcast_frames; 2281 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + 2282 s->rx_ovflow3; 2283 2284 drops = s->tx_drop; 2285 for_each_txq(pi, i, txq) 2286 drops += txq->br->br_drops; 2287 ifp->if_snd.ifq_drops = drops; 2288 2289 ifp->if_oerrors = s->tx_error_frames; 2290 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long + 2291 s->rx_fcs_err + s->rx_len_err; 2292 2293 callout_schedule(&pi->tick, hz); 2294 PORT_UNLOCK(pi); 2295 } 2296 2297 static int 2298 t4_sysctls(struct adapter *sc) 2299 { 2300 struct sysctl_ctx_list *ctx; 2301 struct sysctl_oid *oid; 2302 struct sysctl_oid_list *children; 2303 2304 ctx = device_get_sysctl_ctx(sc->dev); 2305 oid = device_get_sysctl_tree(sc->dev); 2306 children = SYSCTL_CHILDREN(oid); 2307 2308 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, 2309 &sc->params.nports, 0, "# of ports"); 2310 2311 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 2312 &sc->params.rev, 0, "chip hardware revision"); 2313 2314 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 2315 CTLFLAG_RD, &sc->fw_version, 0, "firmware version"); 2316 2317 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "TOE", CTLFLAG_RD, 2318 &sc->params.offload, 0, "hardware is capable of TCP offload"); 2319 2320 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, 2321 &sc->params.vpd.cclk, 0, "core clock frequency (in KHz)"); 2322 2323 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 2324 CTLTYPE_STRING | CTLFLAG_RD, &intr_timer, sizeof(intr_timer), 2325 sysctl_int_array, "A", "interrupt holdoff timer values (us)"); 2326 2327 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 2328 CTLTYPE_STRING | CTLFLAG_RD, &intr_pktcount, sizeof(intr_pktcount), 2329 sysctl_int_array, "A", "interrupt holdoff packet counter values"); 2330 2331 return (0); 2332 } 2333 2334 static int 2335 cxgbe_sysctls(struct port_info *pi) 2336 { 2337 struct sysctl_ctx_list *ctx; 2338 struct sysctl_oid *oid; 2339 struct sysctl_oid_list *children; 2340 2341 ctx = device_get_sysctl_ctx(pi->dev); 2342 2343 /* 2344 * dev.cxgbe.X. 2345 */ 2346 oid = device_get_sysctl_tree(pi->dev); 2347 children = SYSCTL_CHILDREN(oid); 2348 2349 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 2350 &pi->nrxq, 0, "# of rx queues"); 2351 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 2352 &pi->ntxq, 0, "# of tx queues"); 2353 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 2354 &pi->first_rxq, 0, "index of first rx queue"); 2355 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 2356 &pi->first_txq, 0, "index of first tx queue"); 2357 2358 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 2359 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I", 2360 "holdoff timer index"); 2361 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 2362 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I", 2363 "holdoff packet counter index"); 2364 2365 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 2366 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I", 2367 "rx queue size"); 2368 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 2369 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I", 2370 "tx queue size"); 2371 2372 /* 2373 * dev.cxgbe.X.stats. 2374 */ 2375 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 2376 NULL, "port statistics"); 2377 children = SYSCTL_CHILDREN(oid); 2378 2379 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 2380 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 2381 CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \ 2382 sysctl_handle_t4_reg64, "QU", desc) 2383 2384 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 2385 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 2386 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 2387 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 2388 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 2389 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 2390 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 2391 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 2392 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 2393 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 2394 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 2395 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 2396 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 2397 "# of tx frames in this range", 2398 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 2399 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 2400 "# of tx frames in this range", 2401 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 2402 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 2403 "# of tx frames in this range", 2404 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 2405 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 2406 "# of tx frames in this range", 2407 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 2408 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 2409 "# of tx frames in this range", 2410 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 2411 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518", 2412 "# of tx frames in this range", 2413 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 2414 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", 2415 "# of tx frames in this range", 2416 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 2417 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 2418 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 2419 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 2420 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 2421 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 2422 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 2423 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 2424 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 2425 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 2426 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 2427 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 2428 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 2429 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 2430 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 2431 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 2432 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 2433 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted", 2434 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 2435 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 2436 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 2437 2438 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 2439 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 2440 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 2441 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 2442 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 2443 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 2444 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 2445 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 2446 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 2447 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 2448 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 2449 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 2450 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 2451 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 2452 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 2453 "# of frames received with bad FCS", 2454 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 2455 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 2456 "# of frames received with length error", 2457 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 2458 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 2459 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 2460 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 2461 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 2462 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 2463 "# of rx frames in this range", 2464 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 2465 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 2466 "# of rx frames in this range", 2467 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 2468 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 2469 "# of rx frames in this range", 2470 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 2471 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 2472 "# of rx frames in this range", 2473 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 2474 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 2475 "# of rx frames in this range", 2476 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 2477 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 2478 "# of rx frames in this range", 2479 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 2480 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 2481 "# of rx frames in this range", 2482 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 2483 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 2484 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 2485 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 2486 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 2487 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 2488 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 2489 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 2490 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 2491 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 2492 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 2493 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 2494 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 2495 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 2496 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); 2497 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", 2498 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); 2499 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", 2500 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); 2501 2502 #undef SYSCTL_ADD_T4_REG64 2503 2504 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ 2505 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 2506 &pi->stats.name, desc) 2507 2508 /* We get these from port_stats and they may be stale by upto 1s */ 2509 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, 2510 "# drops due to buffer-group 0 overflows"); 2511 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, 2512 "# drops due to buffer-group 1 overflows"); 2513 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, 2514 "# drops due to buffer-group 2 overflows"); 2515 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, 2516 "# drops due to buffer-group 3 overflows"); 2517 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, 2518 "# of buffer-group 0 truncated packets"); 2519 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, 2520 "# of buffer-group 1 truncated packets"); 2521 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, 2522 "# of buffer-group 2 truncated packets"); 2523 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, 2524 "# of buffer-group 3 truncated packets"); 2525 2526 #undef SYSCTL_ADD_T4_PORTSTAT 2527 2528 return (0); 2529 } 2530 2531 static int 2532 sysctl_int_array(SYSCTL_HANDLER_ARGS) 2533 { 2534 int rc, *i; 2535 struct sbuf sb; 2536 2537 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); 2538 for (i = arg1; arg2; arg2 -= sizeof(int), i++) 2539 sbuf_printf(&sb, "%d ", *i); 2540 sbuf_trim(&sb); 2541 sbuf_finish(&sb); 2542 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 2543 sbuf_delete(&sb); 2544 return (rc); 2545 } 2546 2547 static int 2548 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 2549 { 2550 struct port_info *pi = arg1; 2551 struct adapter *sc = pi->adapter; 2552 struct sge_rxq *rxq; 2553 int idx, rc, i; 2554 2555 idx = pi->tmr_idx; 2556 2557 rc = sysctl_handle_int(oidp, &idx, 0, req); 2558 if (rc != 0 || req->newptr == NULL) 2559 return (rc); 2560 2561 if (idx < 0 || idx >= SGE_NTIMERS) 2562 return (EINVAL); 2563 2564 ADAPTER_LOCK(sc); 2565 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 2566 if (rc == 0) { 2567 for_each_rxq(pi, i, rxq) { 2568 rxq->iq.intr_params = V_QINTR_TIMER_IDX(idx) | 2569 V_QINTR_CNT_EN(pi->pktc_idx != -1); 2570 } 2571 pi->tmr_idx = idx; 2572 } 2573 2574 ADAPTER_UNLOCK(sc); 2575 return (rc); 2576 } 2577 2578 static int 2579 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 2580 { 2581 struct port_info *pi = arg1; 2582 struct adapter *sc = pi->adapter; 2583 int idx, rc; 2584 2585 idx = pi->pktc_idx; 2586 2587 rc = sysctl_handle_int(oidp, &idx, 0, req); 2588 if (rc != 0 || req->newptr == NULL) 2589 return (rc); 2590 2591 if (idx < -1 || idx >= SGE_NCOUNTERS) 2592 return (EINVAL); 2593 2594 ADAPTER_LOCK(sc); 2595 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 2596 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING) 2597 rc = EBUSY; /* can be changed only when port is down */ 2598 2599 if (rc == 0) 2600 pi->pktc_idx = idx; 2601 2602 ADAPTER_UNLOCK(sc); 2603 return (rc); 2604 } 2605 2606 static int 2607 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 2608 { 2609 struct port_info *pi = arg1; 2610 struct adapter *sc = pi->adapter; 2611 int qsize, rc; 2612 2613 qsize = pi->qsize_rxq; 2614 2615 rc = sysctl_handle_int(oidp, &qsize, 0, req); 2616 if (rc != 0 || req->newptr == NULL) 2617 return (rc); 2618 2619 if (qsize < 128 || (qsize & 7)) 2620 return (EINVAL); 2621 2622 ADAPTER_LOCK(sc); 2623 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 2624 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING) 2625 rc = EBUSY; /* can be changed only when port is down */ 2626 2627 if (rc == 0) 2628 pi->qsize_rxq = qsize; 2629 2630 ADAPTER_UNLOCK(sc); 2631 return (rc); 2632 } 2633 2634 static int 2635 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 2636 { 2637 struct port_info *pi = arg1; 2638 struct adapter *sc = pi->adapter; 2639 int qsize, rc; 2640 2641 qsize = pi->qsize_txq; 2642 2643 rc = sysctl_handle_int(oidp, &qsize, 0, req); 2644 if (rc != 0 || req->newptr == NULL) 2645 return (rc); 2646 2647 if (qsize < 128) 2648 return (EINVAL); 2649 2650 ADAPTER_LOCK(sc); 2651 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 2652 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING) 2653 rc = EBUSY; /* can be changed only when port is down */ 2654 2655 if (rc == 0) 2656 pi->qsize_txq = qsize; 2657 2658 ADAPTER_UNLOCK(sc); 2659 return (rc); 2660 } 2661 2662 static int 2663 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 2664 { 2665 struct adapter *sc = arg1; 2666 int reg = arg2; 2667 uint64_t val; 2668 2669 val = t4_read_reg64(sc, reg); 2670 2671 return (sysctl_handle_64(oidp, &val, 0, req)); 2672 } 2673 2674 static inline void 2675 txq_start(struct ifnet *ifp, struct sge_txq *txq) 2676 { 2677 struct buf_ring *br; 2678 struct mbuf *m; 2679 2680 TXQ_LOCK_ASSERT_OWNED(txq); 2681 2682 br = txq->br; 2683 m = txq->m ? txq->m : drbr_dequeue(ifp, br); 2684 if (m) 2685 t4_eth_tx(ifp, txq, m); 2686 } 2687 2688 void 2689 cxgbe_txq_start(void *arg, int count) 2690 { 2691 struct sge_txq *txq = arg; 2692 2693 TXQ_LOCK(txq); 2694 if (txq->eq.flags & EQ_CRFLUSHED) { 2695 txq->eq.flags &= ~EQ_CRFLUSHED; 2696 txq_start(txq->ifp, txq); 2697 } else 2698 wakeup_one(txq); /* txq is going away, wakeup free_txq */ 2699 TXQ_UNLOCK(txq); 2700 } 2701 2702 int 2703 t4_os_find_pci_capability(struct adapter *sc, int cap) 2704 { 2705 device_t dev; 2706 struct pci_devinfo *dinfo; 2707 pcicfgregs *cfg; 2708 uint32_t status; 2709 uint8_t ptr; 2710 2711 dev = sc->dev; 2712 dinfo = device_get_ivars(dev); 2713 cfg = &dinfo->cfg; 2714 2715 status = pci_read_config(dev, PCIR_STATUS, 2); 2716 if (!(status & PCIM_STATUS_CAPPRESENT)) 2717 return (0); 2718 2719 switch (cfg->hdrtype & PCIM_HDRTYPE) { 2720 case 0: 2721 case 1: 2722 ptr = PCIR_CAP_PTR; 2723 break; 2724 case 2: 2725 ptr = PCIR_CAP_PTR_2; 2726 break; 2727 default: 2728 return (0); 2729 break; 2730 } 2731 ptr = pci_read_config(dev, ptr, 1); 2732 2733 while (ptr != 0) { 2734 if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap) 2735 return (ptr); 2736 ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1); 2737 } 2738 2739 return (0); 2740 } 2741 2742 int 2743 t4_os_pci_save_state(struct adapter *sc) 2744 { 2745 device_t dev; 2746 struct pci_devinfo *dinfo; 2747 2748 dev = sc->dev; 2749 dinfo = device_get_ivars(dev); 2750 2751 pci_cfg_save(dev, dinfo, 0); 2752 return (0); 2753 } 2754 2755 int 2756 t4_os_pci_restore_state(struct adapter *sc) 2757 { 2758 device_t dev; 2759 struct pci_devinfo *dinfo; 2760 2761 dev = sc->dev; 2762 dinfo = device_get_ivars(dev); 2763 2764 pci_cfg_restore(dev, dinfo); 2765 return (0); 2766 } 2767 2768 void 2769 t4_os_portmod_changed(const struct adapter *sc, int idx) 2770 { 2771 struct port_info *pi = sc->port[idx]; 2772 static const char *mod_str[] = { 2773 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 2774 }; 2775 2776 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 2777 if_printf(pi->ifp, "transceiver unplugged.\n"); 2778 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 2779 if_printf(pi->ifp, "unknown transceiver inserted.\n"); 2780 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 2781 if_printf(pi->ifp, "unsupported transceiver inserted.\n"); 2782 else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str)) { 2783 if_printf(pi->ifp, "%s transceiver inserted.\n", 2784 mod_str[pi->mod_type]); 2785 } else { 2786 if_printf(pi->ifp, "transceiver (type %d) inserted.\n", 2787 pi->mod_type); 2788 } 2789 } 2790 2791 void 2792 t4_os_link_changed(struct adapter *sc, int idx, int link_stat) 2793 { 2794 struct port_info *pi = sc->port[idx]; 2795 struct ifnet *ifp = pi->ifp; 2796 2797 if (link_stat) { 2798 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed); 2799 if_link_state_change(ifp, LINK_STATE_UP); 2800 } else 2801 if_link_state_change(ifp, LINK_STATE_DOWN); 2802 } 2803 2804 static int 2805 t4_open(struct cdev *dev, int flags, int type, struct thread *td) 2806 { 2807 return (0); 2808 } 2809 2810 static int 2811 t4_close(struct cdev *dev, int flags, int type, struct thread *td) 2812 { 2813 return (0); 2814 } 2815 2816 static int 2817 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 2818 struct thread *td) 2819 { 2820 int rc; 2821 struct adapter *sc = dev->si_drv1; 2822 2823 rc = priv_check(td, PRIV_DRIVER); 2824 if (rc != 0) 2825 return (rc); 2826 2827 switch (cmd) { 2828 case CHELSIO_T4_GETREG: { 2829 struct t4_reg *edata = (struct t4_reg *)data; 2830 2831 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 2832 return (EFAULT); 2833 2834 if (edata->size == 4) 2835 edata->val = t4_read_reg(sc, edata->addr); 2836 else if (edata->size == 8) 2837 edata->val = t4_read_reg64(sc, edata->addr); 2838 else 2839 return (EINVAL); 2840 2841 break; 2842 } 2843 case CHELSIO_T4_SETREG: { 2844 struct t4_reg *edata = (struct t4_reg *)data; 2845 2846 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 2847 return (EFAULT); 2848 2849 if (edata->size == 4) { 2850 if (edata->val & 0xffffffff00000000) 2851 return (EINVAL); 2852 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); 2853 } else if (edata->size == 8) 2854 t4_write_reg64(sc, edata->addr, edata->val); 2855 else 2856 return (EINVAL); 2857 break; 2858 } 2859 case CHELSIO_T4_REGDUMP: { 2860 struct t4_regdump *regs = (struct t4_regdump *)data; 2861 int reglen = T4_REGDUMP_SIZE; 2862 uint8_t *buf; 2863 2864 if (regs->len < reglen) { 2865 regs->len = reglen; /* hint to the caller */ 2866 return (ENOBUFS); 2867 } 2868 2869 regs->len = reglen; 2870 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); 2871 t4_get_regs(sc, regs, buf); 2872 rc = copyout(buf, regs->data, reglen); 2873 free(buf, M_CXGBE); 2874 break; 2875 } 2876 default: 2877 rc = EINVAL; 2878 } 2879 2880 return (rc); 2881 } 2882 2883 static int 2884 t4_mod_event(module_t mod, int cmd, void *arg) 2885 { 2886 2887 if (cmd == MOD_LOAD) 2888 t4_sge_modload(); 2889 2890 return (0); 2891 } 2892 2893 static devclass_t t4_devclass; 2894 static devclass_t cxgbe_devclass; 2895 2896 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0); 2897 MODULE_VERSION(t4nex, 1); 2898 2899 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0); 2900 MODULE_VERSION(cxgbe, 1); 2901