1 /*- 2 * Copyright (c) 2011 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 33 #include <sys/param.h> 34 #include <sys/conf.h> 35 #include <sys/priv.h> 36 #include <sys/kernel.h> 37 #include <sys/bus.h> 38 #include <sys/module.h> 39 #include <sys/malloc.h> 40 #include <sys/queue.h> 41 #include <sys/taskqueue.h> 42 #include <sys/pciio.h> 43 #include <dev/pci/pcireg.h> 44 #include <dev/pci/pcivar.h> 45 #include <dev/pci/pci_private.h> 46 #include <sys/firmware.h> 47 #include <sys/sbuf.h> 48 #include <sys/smp.h> 49 #include <sys/socket.h> 50 #include <sys/sockio.h> 51 #include <sys/sysctl.h> 52 #include <net/ethernet.h> 53 #include <net/if.h> 54 #include <net/if_types.h> 55 #include <net/if_dl.h> 56 #include <net/if_vlan_var.h> 57 58 #include "common/t4_hw.h" 59 #include "common/common.h" 60 #include "common/t4_msg.h" 61 #include "common/t4_regs.h" 62 #include "common/t4_regs_values.h" 63 #include "common/t4fw_interface.h" 64 #include "t4_ioctl.h" 65 #include "t4_l2t.h" 66 67 /* T4 bus driver interface */ 68 static int t4_probe(device_t); 69 static int t4_attach(device_t); 70 static int t4_detach(device_t); 71 static device_method_t t4_methods[] = { 72 DEVMETHOD(device_probe, t4_probe), 73 DEVMETHOD(device_attach, t4_attach), 74 DEVMETHOD(device_detach, t4_detach), 75 76 /* bus interface */ 77 DEVMETHOD(bus_print_child, bus_generic_print_child), 78 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 79 80 { 0, 0 } 81 }; 82 static driver_t t4_driver = { 83 "t4nex", 84 t4_methods, 85 sizeof(struct adapter) 86 }; 87 88 89 /* T4 port (cxgbe) interface */ 90 static int cxgbe_probe(device_t); 91 static int cxgbe_attach(device_t); 92 static int cxgbe_detach(device_t); 93 static device_method_t cxgbe_methods[] = { 94 DEVMETHOD(device_probe, cxgbe_probe), 95 DEVMETHOD(device_attach, cxgbe_attach), 96 DEVMETHOD(device_detach, cxgbe_detach), 97 { 0, 0 } 98 }; 99 static driver_t cxgbe_driver = { 100 "cxgbe", 101 cxgbe_methods, 102 sizeof(struct port_info) 103 }; 104 105 static d_ioctl_t t4_ioctl; 106 static d_open_t t4_open; 107 static d_close_t t4_close; 108 109 static struct cdevsw t4_cdevsw = { 110 .d_version = D_VERSION, 111 .d_flags = 0, 112 .d_open = t4_open, 113 .d_close = t4_close, 114 .d_ioctl = t4_ioctl, 115 .d_name = "t4nex", 116 }; 117 118 /* ifnet + media interface */ 119 static void cxgbe_init(void *); 120 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t); 121 static void cxgbe_start(struct ifnet *); 122 static int cxgbe_transmit(struct ifnet *, struct mbuf *); 123 static void cxgbe_qflush(struct ifnet *); 124 static int cxgbe_media_change(struct ifnet *); 125 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *); 126 127 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services"); 128 129 /* 130 * Tunables. 131 */ 132 static SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, 133 "cxgbe driver parameters"); 134 135 static int force_firmware_install = 0; 136 TUNABLE_INT("hw.cxgbe.force_firmware_install", &force_firmware_install); 137 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, force_firmware_install, CTLFLAG_RDTUN, 138 &force_firmware_install, 0, "install firmware on every attach."); 139 140 /* 141 * Holdoff timer and packet counter values. 142 */ 143 static unsigned int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200}; 144 static unsigned int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */ 145 146 /* 147 * Max # of tx and rx queues to use for each 10G and 1G port. 148 */ 149 static unsigned int max_ntxq_10g = 8; 150 TUNABLE_INT("hw.cxgbe.max_ntxq_10G_port", &max_ntxq_10g); 151 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_10G_port, CTLFLAG_RDTUN, 152 &max_ntxq_10g, 0, "maximum number of tx queues per 10G port."); 153 154 static unsigned int max_nrxq_10g = 8; 155 TUNABLE_INT("hw.cxgbe.max_nrxq_10G_port", &max_nrxq_10g); 156 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_10G_port, CTLFLAG_RDTUN, 157 &max_nrxq_10g, 0, "maximum number of rxq's (per 10G port)."); 158 159 static unsigned int max_ntxq_1g = 2; 160 TUNABLE_INT("hw.cxgbe.max_ntxq_1G_port", &max_ntxq_1g); 161 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_1G_port, CTLFLAG_RDTUN, 162 &max_ntxq_1g, 0, "maximum number of tx queues per 1G port."); 163 164 static unsigned int max_nrxq_1g = 2; 165 TUNABLE_INT("hw.cxgbe.max_nrxq_1G_port", &max_nrxq_1g); 166 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_1G_port, CTLFLAG_RDTUN, 167 &max_nrxq_1g, 0, "maximum number of rxq's (per 1G port)."); 168 169 /* 170 * Holdoff parameters for 10G and 1G ports. 171 */ 172 static unsigned int tmr_idx_10g = 1; 173 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &tmr_idx_10g); 174 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_10G, CTLFLAG_RDTUN, 175 &tmr_idx_10g, 0, 176 "default timer index for interrupt holdoff (10G ports)."); 177 178 static int pktc_idx_10g = 2; 179 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &pktc_idx_10g); 180 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_10G, CTLFLAG_RDTUN, 181 &pktc_idx_10g, 0, 182 "default pkt counter index for interrupt holdoff (10G ports)."); 183 184 static unsigned int tmr_idx_1g = 1; 185 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &tmr_idx_1g); 186 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_1G, CTLFLAG_RDTUN, 187 &tmr_idx_1g, 0, 188 "default timer index for interrupt holdoff (1G ports)."); 189 190 static int pktc_idx_1g = 2; 191 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &pktc_idx_1g); 192 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_1G, CTLFLAG_RDTUN, 193 &pktc_idx_1g, 0, 194 "default pkt counter index for interrupt holdoff (1G ports)."); 195 196 /* 197 * Size (# of entries) of each tx and rx queue. 198 */ 199 static unsigned int qsize_txq = TX_EQ_QSIZE; 200 TUNABLE_INT("hw.cxgbe.qsize_txq", &qsize_txq); 201 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN, 202 &qsize_txq, 0, "default queue size of NIC tx queues."); 203 204 static unsigned int qsize_rxq = RX_IQ_QSIZE; 205 TUNABLE_INT("hw.cxgbe.qsize_rxq", &qsize_rxq); 206 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN, 207 &qsize_rxq, 0, "default queue size of NIC rx queues."); 208 209 /* 210 * Interrupt types allowed. 211 */ 212 static int intr_types = INTR_MSIX | INTR_MSI | INTR_INTX; 213 TUNABLE_INT("hw.cxgbe.interrupt_types", &intr_types); 214 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &intr_types, 0, 215 "interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively)"); 216 217 /* 218 * Force the driver to use the same set of interrupts for all ports. 219 */ 220 static int intr_shared = 0; 221 TUNABLE_INT("hw.cxgbe.interrupts_shared", &intr_shared); 222 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupts_shared, CTLFLAG_RDTUN, 223 &intr_shared, 0, "interrupts shared between all ports"); 224 225 static unsigned int filter_mode = HW_TPL_FR_MT_PR_IV_P_FC; 226 TUNABLE_INT("hw.cxgbe.filter_mode", &filter_mode); 227 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, filter_mode, CTLFLAG_RDTUN, 228 &filter_mode, 0, "default global filter mode."); 229 230 struct intrs_and_queues { 231 int intr_type; /* INTx, MSI, or MSI-X */ 232 int nirq; /* Number of vectors */ 233 int intr_shared; /* Interrupts shared between all ports */ 234 int ntxq10g; /* # of NIC txq's for each 10G port */ 235 int nrxq10g; /* # of NIC rxq's for each 10G port */ 236 int ntxq1g; /* # of NIC txq's for each 1G port */ 237 int nrxq1g; /* # of NIC rxq's for each 1G port */ 238 }; 239 240 struct filter_entry { 241 uint32_t valid:1; /* filter allocated and valid */ 242 uint32_t locked:1; /* filter is administratively locked */ 243 uint32_t pending:1; /* filter action is pending firmware reply */ 244 uint32_t smtidx:8; /* Source MAC Table index for smac */ 245 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */ 246 247 struct t4_filter_specification fs; 248 }; 249 250 enum { 251 MEMWIN0_APERTURE = 2048, 252 MEMWIN0_BASE = 0x1b800, 253 MEMWIN1_APERTURE = 32768, 254 MEMWIN1_BASE = 0x28000, 255 MEMWIN2_APERTURE = 65536, 256 MEMWIN2_BASE = 0x30000, 257 }; 258 259 enum { 260 XGMAC_MTU = (1 << 0), 261 XGMAC_PROMISC = (1 << 1), 262 XGMAC_ALLMULTI = (1 << 2), 263 XGMAC_VLANEX = (1 << 3), 264 XGMAC_UCADDR = (1 << 4), 265 XGMAC_MCADDRS = (1 << 5), 266 267 XGMAC_ALL = 0xffff 268 }; 269 270 static int map_bars(struct adapter *); 271 static void setup_memwin(struct adapter *); 272 static int cfg_itype_and_nqueues(struct adapter *, int, int, 273 struct intrs_and_queues *); 274 static int prep_firmware(struct adapter *); 275 static int get_devlog_params(struct adapter *, struct devlog_params *); 276 static int get_capabilities(struct adapter *, struct fw_caps_config_cmd *); 277 static int get_params(struct adapter *, struct fw_caps_config_cmd *); 278 static void t4_set_desc(struct adapter *); 279 static void build_medialist(struct port_info *); 280 static int update_mac_settings(struct port_info *, int); 281 static int cxgbe_init_locked(struct port_info *); 282 static int cxgbe_init_synchronized(struct port_info *); 283 static int cxgbe_uninit_locked(struct port_info *); 284 static int cxgbe_uninit_synchronized(struct port_info *); 285 static int first_port_up(struct adapter *); 286 static int last_port_down(struct adapter *); 287 static int t4_alloc_irq(struct adapter *, struct irq *, int rid, 288 iq_intr_handler_t *, void *, char *); 289 static int t4_free_irq(struct adapter *, struct irq *); 290 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int, 291 unsigned int); 292 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *); 293 static void cxgbe_tick(void *); 294 static int t4_sysctls(struct adapter *); 295 static int cxgbe_sysctls(struct port_info *); 296 static int sysctl_int_array(SYSCTL_HANDLER_ARGS); 297 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS); 298 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS); 299 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS); 300 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS); 301 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS); 302 static int sysctl_devlog(SYSCTL_HANDLER_ARGS); 303 static inline void txq_start(struct ifnet *, struct sge_txq *); 304 static uint32_t fconf_to_mode(uint32_t); 305 static uint32_t mode_to_fconf(uint32_t); 306 static uint32_t fspec_to_fconf(struct t4_filter_specification *); 307 static int get_filter_mode(struct adapter *, uint32_t *); 308 static int set_filter_mode(struct adapter *, uint32_t); 309 static inline uint64_t get_filter_hits(struct adapter *, uint32_t); 310 static int get_filter(struct adapter *, struct t4_filter *); 311 static int set_filter(struct adapter *, struct t4_filter *); 312 static int del_filter(struct adapter *, struct t4_filter *); 313 static void clear_filter(struct filter_entry *); 314 static int set_filter_wr(struct adapter *, int); 315 static int del_filter_wr(struct adapter *, int); 316 void filter_rpl(struct adapter *, const struct cpl_set_tcb_rpl *); 317 static int get_sge_context(struct adapter *, struct t4_sge_context *); 318 static int t4_mod_event(module_t, int, void *); 319 320 struct t4_pciids { 321 uint16_t device; 322 uint8_t mpf; 323 char *desc; 324 } t4_pciids[] = { 325 {0xa000, 0, "Chelsio Terminator 4 FPGA"}, 326 {0x4400, 4, "Chelsio T440-dbg"}, 327 {0x4401, 4, "Chelsio T420-CR"}, 328 {0x4402, 4, "Chelsio T422-CR"}, 329 {0x4403, 4, "Chelsio T440-CR"}, 330 {0x4404, 4, "Chelsio T420-BCH"}, 331 {0x4405, 4, "Chelsio T440-BCH"}, 332 {0x4406, 4, "Chelsio T440-CH"}, 333 {0x4407, 4, "Chelsio T420-SO"}, 334 {0x4408, 4, "Chelsio T420-CX"}, 335 {0x4409, 4, "Chelsio T420-BT"}, 336 {0x440a, 4, "Chelsio T404-BT"}, 337 }; 338 339 static int 340 t4_probe(device_t dev) 341 { 342 int i; 343 uint16_t v = pci_get_vendor(dev); 344 uint16_t d = pci_get_device(dev); 345 346 if (v != PCI_VENDOR_ID_CHELSIO) 347 return (ENXIO); 348 349 for (i = 0; i < ARRAY_SIZE(t4_pciids); i++) { 350 if (d == t4_pciids[i].device && 351 pci_get_function(dev) == t4_pciids[i].mpf) { 352 device_set_desc(dev, t4_pciids[i].desc); 353 return (BUS_PROBE_DEFAULT); 354 } 355 } 356 357 return (ENXIO); 358 } 359 360 static int 361 t4_attach(device_t dev) 362 { 363 struct adapter *sc; 364 int rc = 0, i, n10g, n1g, rqidx, tqidx; 365 struct fw_caps_config_cmd caps; 366 uint32_t p, v; 367 struct intrs_and_queues iaq; 368 struct sge *s; 369 370 sc = device_get_softc(dev); 371 sc->dev = dev; 372 sc->pf = pci_get_function(dev); 373 sc->mbox = sc->pf; 374 375 pci_enable_busmaster(dev); 376 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) { 377 pci_set_max_read_req(dev, 4096); 378 v = pci_read_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, 2); 379 v |= PCIM_EXP_CTL_RELAXED_ORD_ENABLE; 380 pci_write_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, v, 2); 381 } 382 383 snprintf(sc->lockname, sizeof(sc->lockname), "%s", 384 device_get_nameunit(dev)); 385 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF); 386 387 rc = map_bars(sc); 388 if (rc != 0) 389 goto done; /* error message displayed already */ 390 391 memset(sc->chan_map, 0xff, sizeof(sc->chan_map)); 392 393 /* Prepare the adapter for operation */ 394 rc = -t4_prep_adapter(sc); 395 if (rc != 0) { 396 device_printf(dev, "failed to prepare adapter: %d.\n", rc); 397 goto done; 398 } 399 400 /* Do this really early */ 401 sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT, 402 GID_WHEEL, 0600, "%s", device_get_nameunit(dev)); 403 sc->cdev->si_drv1 = sc; 404 405 /* Prepare the firmware for operation */ 406 rc = prep_firmware(sc); 407 if (rc != 0) 408 goto done; /* error message displayed already */ 409 410 /* Read firmware devlog parameters */ 411 (void) get_devlog_params(sc, &sc->params.devlog); 412 413 /* Get device capabilities and select which ones we'll use */ 414 rc = get_capabilities(sc, &caps); 415 if (rc != 0) { 416 device_printf(dev, 417 "failed to initialize adapter capabilities: %d.\n", rc); 418 goto done; 419 } 420 421 /* Choose the global RSS mode. */ 422 rc = -t4_config_glbl_rss(sc, sc->mbox, 423 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, 424 F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | 425 F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ | 426 F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); 427 if (rc != 0) { 428 device_printf(dev, 429 "failed to select global RSS mode: %d.\n", rc); 430 goto done; 431 } 432 433 /* These are total (sum of all ports) limits for a bus driver */ 434 rc = -t4_cfg_pfvf(sc, sc->mbox, sc->pf, 0, 435 128, /* max # of egress queues */ 436 64, /* max # of egress Ethernet or control queues */ 437 64, /* max # of ingress queues with fl/interrupt */ 438 0, /* max # of ingress queues without interrupt */ 439 0, /* PCIe traffic class */ 440 4, /* max # of virtual interfaces */ 441 M_FW_PFVF_CMD_CMASK, M_FW_PFVF_CMD_PMASK, 16, 442 FW_CMD_CAP_PF, FW_CMD_CAP_PF); 443 if (rc != 0) { 444 device_printf(dev, 445 "failed to configure pf/vf resources: %d.\n", rc); 446 goto done; 447 } 448 449 /* Need this before sge_init */ 450 for (i = 0; i < SGE_NTIMERS; i++) 451 sc->sge.timer_val[i] = min(intr_timer[i], 200U); 452 for (i = 0; i < SGE_NCOUNTERS; i++) 453 sc->sge.counter_val[i] = min(intr_pktcount[i], M_THRESHOLD_0); 454 455 /* Also need the cooked value of cclk before sge_init */ 456 p = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 457 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); 458 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &p, &v); 459 if (rc != 0) { 460 device_printf(sc->dev, 461 "failed to obtain core clock value: %d.\n", rc); 462 goto done; 463 } 464 sc->params.vpd.cclk = v; 465 466 t4_sge_init(sc); 467 468 t4_set_filter_mode(sc, filter_mode); 469 t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, 470 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP), 471 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP)); 472 t4_tp_wr_bits_indirect(sc, A_TP_INGRESS_CONFIG, F_CSUM_HAS_PSEUDO_HDR, 473 F_LOOKUPEVERYPKT); 474 475 /* get basic stuff going */ 476 rc = -t4_early_init(sc, sc->mbox); 477 if (rc != 0) { 478 device_printf(dev, "early init failed: %d.\n", rc); 479 goto done; 480 } 481 482 rc = get_params(sc, &caps); 483 if (rc != 0) 484 goto done; /* error message displayed already */ 485 486 /* These are finalized by FW initialization, load their values now */ 487 v = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 488 sc->params.tp.tre = G_TIMERRESOLUTION(v); 489 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v); 490 t4_read_mtu_tbl(sc, sc->params.mtus, NULL); 491 492 /* tweak some settings */ 493 t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) | 494 V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) | 495 V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9)); 496 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); 497 t4_set_reg_field(sc, A_TP_PARA_REG3, F_TUNNELCNGDROP0 | 498 F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | F_TUNNELCNGDROP3, 0); 499 500 setup_memwin(sc); 501 502 rc = t4_create_dma_tag(sc); 503 if (rc != 0) 504 goto done; /* error message displayed already */ 505 506 /* 507 * First pass over all the ports - allocate VIs and initialize some 508 * basic parameters like mac address, port type, etc. We also figure 509 * out whether a port is 10G or 1G and use that information when 510 * calculating how many interrupts to attempt to allocate. 511 */ 512 n10g = n1g = 0; 513 for_each_port(sc, i) { 514 struct port_info *pi; 515 516 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK); 517 sc->port[i] = pi; 518 519 /* These must be set before t4_port_init */ 520 pi->adapter = sc; 521 pi->port_id = i; 522 523 /* Allocate the vi and initialize parameters like mac addr */ 524 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0); 525 if (rc != 0) { 526 device_printf(dev, "unable to initialize port %d: %d\n", 527 i, rc); 528 free(pi, M_CXGBE); 529 sc->port[i] = NULL; 530 goto done; 531 } 532 533 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d", 534 device_get_nameunit(dev), i); 535 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF); 536 537 if (is_10G_port(pi)) { 538 n10g++; 539 pi->tmr_idx = tmr_idx_10g; 540 pi->pktc_idx = pktc_idx_10g; 541 } else { 542 n1g++; 543 pi->tmr_idx = tmr_idx_1g; 544 pi->pktc_idx = pktc_idx_1g; 545 } 546 547 pi->xact_addr_filt = -1; 548 549 pi->qsize_rxq = max(qsize_rxq, 128); 550 while (pi->qsize_rxq & 7) 551 pi->qsize_rxq++; 552 pi->qsize_txq = max(qsize_txq, 128); 553 554 if (pi->qsize_rxq != qsize_rxq) { 555 device_printf(dev, 556 "using %d instead of %d as the rx queue size.\n", 557 pi->qsize_rxq, qsize_rxq); 558 } 559 if (pi->qsize_txq != qsize_txq) { 560 device_printf(dev, 561 "using %d instead of %d as the tx queue size.\n", 562 pi->qsize_txq, qsize_txq); 563 } 564 565 pi->dev = device_add_child(dev, "cxgbe", -1); 566 if (pi->dev == NULL) { 567 device_printf(dev, 568 "failed to add device for port %d.\n", i); 569 rc = ENXIO; 570 goto done; 571 } 572 device_set_softc(pi->dev, pi); 573 574 setbit(&sc->registered_device_map, i); 575 } 576 577 if (sc->registered_device_map == 0) { 578 device_printf(dev, "no usable ports\n"); 579 rc = ENXIO; 580 goto done; 581 } 582 583 /* 584 * Interrupt type, # of interrupts, # of rx/tx queues, etc. 585 */ 586 rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq); 587 if (rc != 0) 588 goto done; /* error message displayed already */ 589 590 sc->intr_type = iaq.intr_type; 591 sc->intr_count = iaq.nirq; 592 593 s = &sc->sge; 594 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g; 595 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g; 596 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */ 597 s->neq += sc->params.nports; /* control queues, 1 per port */ 598 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */ 599 if (iaq.intr_shared) 600 sc->flags |= INTR_SHARED; 601 s->niq += NINTRQ(sc); /* interrupt queues */ 602 603 s->intrq = malloc(NINTRQ(sc) * sizeof(struct sge_iq), M_CXGBE, 604 M_ZERO | M_WAITOK); 605 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_ctrlq), M_CXGBE, 606 M_ZERO | M_WAITOK); 607 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE, 608 M_ZERO | M_WAITOK); 609 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE, 610 M_ZERO | M_WAITOK); 611 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE, 612 M_ZERO | M_WAITOK); 613 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE, 614 M_ZERO | M_WAITOK); 615 616 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE, 617 M_ZERO | M_WAITOK); 618 619 sc->l2t = t4_init_l2t(M_WAITOK); 620 621 t4_sysctls(sc); 622 623 /* 624 * Second pass over the ports. This time we know the number of rx and 625 * tx queues that each port should get. 626 */ 627 rqidx = tqidx = 0; 628 for_each_port(sc, i) { 629 struct port_info *pi = sc->port[i]; 630 631 if (pi == NULL) 632 continue; 633 634 pi->first_rxq = rqidx; 635 pi->nrxq = is_10G_port(pi) ? iaq.nrxq10g : iaq.nrxq1g; 636 637 pi->first_txq = tqidx; 638 pi->ntxq = is_10G_port(pi) ? iaq.ntxq10g : iaq.ntxq1g; 639 640 rqidx += pi->nrxq; 641 tqidx += pi->ntxq; 642 } 643 644 rc = bus_generic_attach(dev); 645 if (rc != 0) { 646 device_printf(dev, 647 "failed to attach all child ports: %d\n", rc); 648 goto done; 649 } 650 651 #ifdef INVARIANTS 652 device_printf(dev, 653 "%p, %d ports (0x%x), %d intr_type, %d intr_count\n", 654 sc, sc->params.nports, sc->params.portvec, 655 sc->intr_type, sc->intr_count); 656 #endif 657 t4_set_desc(sc); 658 659 done: 660 if (rc != 0) 661 t4_detach(dev); 662 663 return (rc); 664 } 665 666 /* 667 * Idempotent 668 */ 669 static int 670 t4_detach(device_t dev) 671 { 672 struct adapter *sc; 673 struct port_info *pi; 674 int i; 675 676 sc = device_get_softc(dev); 677 678 if (sc->cdev) 679 destroy_dev(sc->cdev); 680 681 bus_generic_detach(dev); 682 for (i = 0; i < MAX_NPORTS; i++) { 683 pi = sc->port[i]; 684 if (pi) { 685 t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid); 686 if (pi->dev) 687 device_delete_child(dev, pi->dev); 688 689 mtx_destroy(&pi->pi_lock); 690 free(pi, M_CXGBE); 691 } 692 } 693 694 if (sc->flags & FW_OK) 695 t4_fw_bye(sc, sc->mbox); 696 697 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX) 698 pci_release_msi(dev); 699 700 if (sc->regs_res) 701 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid, 702 sc->regs_res); 703 704 if (sc->msix_res) 705 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid, 706 sc->msix_res); 707 708 if (sc->l2t) 709 t4_free_l2t(sc->l2t); 710 711 free(sc->irq, M_CXGBE); 712 free(sc->sge.rxq, M_CXGBE); 713 free(sc->sge.txq, M_CXGBE); 714 free(sc->sge.ctrlq, M_CXGBE); 715 free(sc->sge.intrq, M_CXGBE); 716 free(sc->sge.iqmap, M_CXGBE); 717 free(sc->sge.eqmap, M_CXGBE); 718 free(sc->tids.ftid_tab, M_CXGBE); 719 t4_destroy_dma_tag(sc); 720 mtx_destroy(&sc->sc_lock); 721 722 bzero(sc, sizeof(*sc)); 723 724 return (0); 725 } 726 727 728 static int 729 cxgbe_probe(device_t dev) 730 { 731 char buf[128]; 732 struct port_info *pi = device_get_softc(dev); 733 734 snprintf(buf, sizeof(buf), "Port %d", pi->port_id); 735 device_set_desc_copy(dev, buf); 736 737 return (BUS_PROBE_DEFAULT); 738 } 739 740 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \ 741 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \ 742 IFCAP_VLAN_HWTSO) 743 #define T4_CAP_ENABLE (T4_CAP & ~IFCAP_TSO6) 744 745 static int 746 cxgbe_attach(device_t dev) 747 { 748 struct port_info *pi = device_get_softc(dev); 749 struct ifnet *ifp; 750 751 /* Allocate an ifnet and set it up */ 752 ifp = if_alloc(IFT_ETHER); 753 if (ifp == NULL) { 754 device_printf(dev, "Cannot allocate ifnet\n"); 755 return (ENOMEM); 756 } 757 pi->ifp = ifp; 758 ifp->if_softc = pi; 759 760 callout_init(&pi->tick, CALLOUT_MPSAFE); 761 pi->tq = taskqueue_create("cxgbe_taskq", M_NOWAIT, 762 taskqueue_thread_enqueue, &pi->tq); 763 if (pi->tq == NULL) { 764 device_printf(dev, "failed to allocate port task queue\n"); 765 if_free(pi->ifp); 766 return (ENOMEM); 767 } 768 taskqueue_start_threads(&pi->tq, 1, PI_NET, "%s taskq", 769 device_get_nameunit(dev)); 770 771 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 772 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 773 774 ifp->if_init = cxgbe_init; 775 ifp->if_ioctl = cxgbe_ioctl; 776 ifp->if_start = cxgbe_start; 777 ifp->if_transmit = cxgbe_transmit; 778 ifp->if_qflush = cxgbe_qflush; 779 780 ifp->if_snd.ifq_drv_maxlen = 1024; 781 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 782 IFQ_SET_READY(&ifp->if_snd); 783 784 ifp->if_capabilities = T4_CAP; 785 ifp->if_capenable = T4_CAP_ENABLE; 786 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO; 787 788 /* Initialize ifmedia for this port */ 789 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change, 790 cxgbe_media_status); 791 build_medialist(pi); 792 793 ether_ifattach(ifp, pi->hw_addr); 794 795 #ifdef INVARIANTS 796 device_printf(dev, "%p, %d txq, %d rxq\n", pi, pi->ntxq, pi->nrxq); 797 #endif 798 799 cxgbe_sysctls(pi); 800 801 return (0); 802 } 803 804 static int 805 cxgbe_detach(device_t dev) 806 { 807 struct port_info *pi = device_get_softc(dev); 808 struct adapter *sc = pi->adapter; 809 int rc; 810 811 /* Tell if_ioctl and if_init that the port is going away */ 812 ADAPTER_LOCK(sc); 813 SET_DOOMED(pi); 814 wakeup(&sc->flags); 815 while (IS_BUSY(sc)) 816 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); 817 SET_BUSY(sc); 818 ADAPTER_UNLOCK(sc); 819 820 rc = cxgbe_uninit_synchronized(pi); 821 if (rc != 0) 822 device_printf(dev, "port uninit failed: %d.\n", rc); 823 824 taskqueue_free(pi->tq); 825 826 ifmedia_removeall(&pi->media); 827 ether_ifdetach(pi->ifp); 828 if_free(pi->ifp); 829 830 ADAPTER_LOCK(sc); 831 CLR_BUSY(sc); 832 wakeup_one(&sc->flags); 833 ADAPTER_UNLOCK(sc); 834 835 return (0); 836 } 837 838 static void 839 cxgbe_init(void *arg) 840 { 841 struct port_info *pi = arg; 842 struct adapter *sc = pi->adapter; 843 844 ADAPTER_LOCK(sc); 845 cxgbe_init_locked(pi); /* releases adapter lock */ 846 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 847 } 848 849 static int 850 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data) 851 { 852 int rc = 0, mtu, flags; 853 struct port_info *pi = ifp->if_softc; 854 struct adapter *sc = pi->adapter; 855 struct ifreq *ifr = (struct ifreq *)data; 856 uint32_t mask; 857 858 switch (cmd) { 859 case SIOCSIFMTU: 860 ADAPTER_LOCK(sc); 861 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 862 if (rc) { 863 fail: 864 ADAPTER_UNLOCK(sc); 865 return (rc); 866 } 867 868 mtu = ifr->ifr_mtu; 869 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) { 870 rc = EINVAL; 871 } else { 872 ifp->if_mtu = mtu; 873 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 874 t4_update_fl_bufsize(ifp); 875 PORT_LOCK(pi); 876 rc = update_mac_settings(pi, XGMAC_MTU); 877 PORT_UNLOCK(pi); 878 } 879 } 880 ADAPTER_UNLOCK(sc); 881 break; 882 883 case SIOCSIFFLAGS: 884 ADAPTER_LOCK(sc); 885 if (IS_DOOMED(pi)) { 886 rc = ENXIO; 887 goto fail; 888 } 889 if (ifp->if_flags & IFF_UP) { 890 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 891 flags = pi->if_flags; 892 if ((ifp->if_flags ^ flags) & 893 (IFF_PROMISC | IFF_ALLMULTI)) { 894 if (IS_BUSY(sc)) { 895 rc = EBUSY; 896 goto fail; 897 } 898 PORT_LOCK(pi); 899 rc = update_mac_settings(pi, 900 XGMAC_PROMISC | XGMAC_ALLMULTI); 901 PORT_UNLOCK(pi); 902 } 903 ADAPTER_UNLOCK(sc); 904 } else 905 rc = cxgbe_init_locked(pi); 906 pi->if_flags = ifp->if_flags; 907 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) 908 rc = cxgbe_uninit_locked(pi); 909 else 910 ADAPTER_UNLOCK(sc); 911 912 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 913 break; 914 915 case SIOCADDMULTI: 916 case SIOCDELMULTI: /* these two can be called with a mutex held :-( */ 917 ADAPTER_LOCK(sc); 918 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 919 if (rc) 920 goto fail; 921 922 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 923 PORT_LOCK(pi); 924 rc = update_mac_settings(pi, XGMAC_MCADDRS); 925 PORT_UNLOCK(pi); 926 } 927 ADAPTER_UNLOCK(sc); 928 break; 929 930 case SIOCSIFCAP: 931 ADAPTER_LOCK(sc); 932 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 933 if (rc) 934 goto fail; 935 936 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 937 if (mask & IFCAP_TXCSUM) { 938 ifp->if_capenable ^= IFCAP_TXCSUM; 939 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 940 941 if (IFCAP_TSO & ifp->if_capenable && 942 !(IFCAP_TXCSUM & ifp->if_capenable)) { 943 ifp->if_capenable &= ~IFCAP_TSO; 944 ifp->if_hwassist &= ~CSUM_TSO; 945 if_printf(ifp, 946 "tso disabled due to -txcsum.\n"); 947 } 948 } 949 if (mask & IFCAP_RXCSUM) 950 ifp->if_capenable ^= IFCAP_RXCSUM; 951 if (mask & IFCAP_TSO4) { 952 ifp->if_capenable ^= IFCAP_TSO4; 953 954 if (IFCAP_TSO & ifp->if_capenable) { 955 if (IFCAP_TXCSUM & ifp->if_capenable) 956 ifp->if_hwassist |= CSUM_TSO; 957 else { 958 ifp->if_capenable &= ~IFCAP_TSO; 959 ifp->if_hwassist &= ~CSUM_TSO; 960 if_printf(ifp, 961 "enable txcsum first.\n"); 962 rc = EAGAIN; 963 } 964 } else 965 ifp->if_hwassist &= ~CSUM_TSO; 966 } 967 if (mask & IFCAP_LRO) { 968 #ifdef INET 969 int i; 970 struct sge_rxq *rxq; 971 972 ifp->if_capenable ^= IFCAP_LRO; 973 for_each_rxq(pi, i, rxq) { 974 if (ifp->if_capenable & IFCAP_LRO) 975 rxq->flags |= RXQ_LRO_ENABLED; 976 else 977 rxq->flags &= ~RXQ_LRO_ENABLED; 978 } 979 #endif 980 } 981 #ifndef TCP_OFFLOAD_DISABLE 982 if (mask & IFCAP_TOE4) { 983 rc = EOPNOTSUPP; 984 } 985 #endif 986 if (mask & IFCAP_VLAN_HWTAGGING) { 987 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 988 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 989 PORT_LOCK(pi); 990 rc = update_mac_settings(pi, XGMAC_VLANEX); 991 PORT_UNLOCK(pi); 992 } 993 } 994 if (mask & IFCAP_VLAN_MTU) { 995 ifp->if_capenable ^= IFCAP_VLAN_MTU; 996 997 /* Need to find out how to disable auto-mtu-inflation */ 998 } 999 if (mask & IFCAP_VLAN_HWTSO) 1000 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1001 if (mask & IFCAP_VLAN_HWCSUM) 1002 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 1003 1004 #ifdef VLAN_CAPABILITIES 1005 VLAN_CAPABILITIES(ifp); 1006 #endif 1007 ADAPTER_UNLOCK(sc); 1008 break; 1009 1010 case SIOCSIFMEDIA: 1011 case SIOCGIFMEDIA: 1012 ifmedia_ioctl(ifp, ifr, &pi->media, cmd); 1013 break; 1014 1015 default: 1016 rc = ether_ioctl(ifp, cmd, data); 1017 } 1018 1019 return (rc); 1020 } 1021 1022 static void 1023 cxgbe_start(struct ifnet *ifp) 1024 { 1025 struct port_info *pi = ifp->if_softc; 1026 struct sge_txq *txq; 1027 int i; 1028 1029 for_each_txq(pi, i, txq) { 1030 if (TXQ_TRYLOCK(txq)) { 1031 txq_start(ifp, txq); 1032 TXQ_UNLOCK(txq); 1033 } 1034 } 1035 } 1036 1037 static int 1038 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m) 1039 { 1040 struct port_info *pi = ifp->if_softc; 1041 struct adapter *sc = pi->adapter; 1042 struct sge_txq *txq = &sc->sge.txq[pi->first_txq]; 1043 struct buf_ring *br; 1044 int rc; 1045 1046 M_ASSERTPKTHDR(m); 1047 1048 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1049 m_freem(m); 1050 return (0); 1051 } 1052 1053 if (m->m_flags & M_FLOWID) 1054 txq += (m->m_pkthdr.flowid % pi->ntxq); 1055 br = txq->br; 1056 1057 if (TXQ_TRYLOCK(txq) == 0) { 1058 /* 1059 * XXX: make sure that this packet really is sent out. There is 1060 * a small race where t4_eth_tx may stop draining the drbr and 1061 * goes away, just before we enqueued this mbuf. 1062 */ 1063 1064 return (drbr_enqueue(ifp, br, m)); 1065 } 1066 1067 /* 1068 * txq->m is the mbuf that is held up due to a temporary shortage of 1069 * resources and it should be put on the wire first. Then what's in 1070 * drbr and finally the mbuf that was just passed in to us. 1071 * 1072 * Return code should indicate the fate of the mbuf that was passed in 1073 * this time. 1074 */ 1075 1076 TXQ_LOCK_ASSERT_OWNED(txq); 1077 if (drbr_needs_enqueue(ifp, br) || txq->m) { 1078 1079 /* Queued for transmission. */ 1080 1081 rc = drbr_enqueue(ifp, br, m); 1082 m = txq->m ? txq->m : drbr_dequeue(ifp, br); 1083 (void) t4_eth_tx(ifp, txq, m); 1084 TXQ_UNLOCK(txq); 1085 return (rc); 1086 } 1087 1088 /* Direct transmission. */ 1089 rc = t4_eth_tx(ifp, txq, m); 1090 if (rc != 0 && txq->m) 1091 rc = 0; /* held, will be transmitted soon (hopefully) */ 1092 1093 TXQ_UNLOCK(txq); 1094 return (rc); 1095 } 1096 1097 static void 1098 cxgbe_qflush(struct ifnet *ifp) 1099 { 1100 struct port_info *pi = ifp->if_softc; 1101 struct sge_txq *txq; 1102 int i; 1103 struct mbuf *m; 1104 1105 /* queues do not exist if !IFF_DRV_RUNNING. */ 1106 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1107 for_each_txq(pi, i, txq) { 1108 TXQ_LOCK(txq); 1109 m_freem(txq->m); 1110 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL) 1111 m_freem(m); 1112 TXQ_UNLOCK(txq); 1113 } 1114 } 1115 if_qflush(ifp); 1116 } 1117 1118 static int 1119 cxgbe_media_change(struct ifnet *ifp) 1120 { 1121 struct port_info *pi = ifp->if_softc; 1122 1123 device_printf(pi->dev, "%s unimplemented.\n", __func__); 1124 1125 return (EOPNOTSUPP); 1126 } 1127 1128 static void 1129 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1130 { 1131 struct port_info *pi = ifp->if_softc; 1132 struct ifmedia_entry *cur = pi->media.ifm_cur; 1133 int speed = pi->link_cfg.speed; 1134 int data = (pi->port_type << 8) | pi->mod_type; 1135 1136 if (cur->ifm_data != data) { 1137 build_medialist(pi); 1138 cur = pi->media.ifm_cur; 1139 } 1140 1141 ifmr->ifm_status = IFM_AVALID; 1142 if (!pi->link_cfg.link_ok) 1143 return; 1144 1145 ifmr->ifm_status |= IFM_ACTIVE; 1146 1147 /* active and current will differ iff current media is autoselect. */ 1148 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO) 1149 return; 1150 1151 ifmr->ifm_active = IFM_ETHER | IFM_FDX; 1152 if (speed == SPEED_10000) 1153 ifmr->ifm_active |= IFM_10G_T; 1154 else if (speed == SPEED_1000) 1155 ifmr->ifm_active |= IFM_1000_T; 1156 else if (speed == SPEED_100) 1157 ifmr->ifm_active |= IFM_100_TX; 1158 else if (speed == SPEED_10) 1159 ifmr->ifm_active |= IFM_10_T; 1160 else 1161 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__, 1162 speed)); 1163 } 1164 1165 void 1166 t4_fatal_err(struct adapter *sc) 1167 { 1168 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 1169 t4_intr_disable(sc); 1170 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n", 1171 device_get_nameunit(sc->dev)); 1172 } 1173 1174 static int 1175 map_bars(struct adapter *sc) 1176 { 1177 sc->regs_rid = PCIR_BAR(0); 1178 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1179 &sc->regs_rid, RF_ACTIVE); 1180 if (sc->regs_res == NULL) { 1181 device_printf(sc->dev, "cannot map registers.\n"); 1182 return (ENXIO); 1183 } 1184 sc->bt = rman_get_bustag(sc->regs_res); 1185 sc->bh = rman_get_bushandle(sc->regs_res); 1186 sc->mmio_len = rman_get_size(sc->regs_res); 1187 1188 sc->msix_rid = PCIR_BAR(4); 1189 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1190 &sc->msix_rid, RF_ACTIVE); 1191 if (sc->msix_res == NULL) { 1192 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 1193 return (ENXIO); 1194 } 1195 1196 return (0); 1197 } 1198 1199 static void 1200 setup_memwin(struct adapter *sc) 1201 { 1202 u_long bar0; 1203 1204 bar0 = rman_get_start(sc->regs_res); 1205 1206 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0), 1207 (bar0 + MEMWIN0_BASE) | V_BIR(0) | 1208 V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); 1209 1210 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1), 1211 (bar0 + MEMWIN1_BASE) | V_BIR(0) | 1212 V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); 1213 1214 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2), 1215 (bar0 + MEMWIN2_BASE) | V_BIR(0) | 1216 V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); 1217 } 1218 1219 static int 1220 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, 1221 struct intrs_and_queues *iaq) 1222 { 1223 int rc, itype, navail, nc, nrxq10g, nrxq1g; 1224 1225 bzero(iaq, sizeof(*iaq)); 1226 nc = mp_ncpus; /* our snapshot of the number of CPUs */ 1227 1228 for (itype = INTR_MSIX; itype; itype >>= 1) { 1229 1230 if ((itype & intr_types) == 0) 1231 continue; /* not allowed */ 1232 1233 if (itype == INTR_MSIX) 1234 navail = pci_msix_count(sc->dev); 1235 else if (itype == INTR_MSI) 1236 navail = pci_msi_count(sc->dev); 1237 else 1238 navail = 1; 1239 1240 if (navail == 0) 1241 continue; 1242 1243 iaq->intr_type = itype; 1244 1245 iaq->ntxq10g = min(nc, max_ntxq_10g); 1246 iaq->ntxq1g = min(nc, max_ntxq_1g); 1247 1248 nrxq10g = min(nc, max_nrxq_10g); 1249 nrxq1g = min(nc, max_nrxq_1g); 1250 1251 iaq->nirq = n10g * nrxq10g + n1g * nrxq1g + T4_EXTRA_INTR; 1252 if (iaq->nirq <= navail && intr_shared == 0) { 1253 1254 if (itype == INTR_MSI && !powerof2(iaq->nirq)) 1255 goto share; 1256 1257 /* One for err, one for fwq, and one for each rxq */ 1258 1259 iaq->intr_shared = 0; 1260 iaq->nrxq10g = nrxq10g; 1261 iaq->nrxq1g = nrxq1g; 1262 1263 } else { 1264 share: 1265 iaq->intr_shared = 1; 1266 1267 if (navail >= nc + T4_EXTRA_INTR) { 1268 if (itype == INTR_MSIX) 1269 navail = nc + T4_EXTRA_INTR; 1270 1271 /* navail is and must remain a pow2 for MSI */ 1272 if (itype == INTR_MSI) { 1273 KASSERT(powerof2(navail), 1274 ("%d not power of 2", navail)); 1275 1276 while (navail / 2 >= nc + T4_EXTRA_INTR) 1277 navail /= 2; 1278 } 1279 } 1280 iaq->nirq = navail; /* total # of interrupts */ 1281 1282 /* 1283 * If we have multiple vectors available reserve one 1284 * exclusively for errors. The rest will be shared by 1285 * the fwq and data. 1286 */ 1287 if (navail > 1) 1288 navail--; 1289 iaq->nrxq10g = min(nrxq10g, navail); 1290 iaq->nrxq1g = min(nrxq1g, navail); 1291 } 1292 1293 navail = iaq->nirq; 1294 rc = 0; 1295 if (itype == INTR_MSIX) 1296 rc = pci_alloc_msix(sc->dev, &navail); 1297 else if (itype == INTR_MSI) 1298 rc = pci_alloc_msi(sc->dev, &navail); 1299 1300 if (rc == 0) { 1301 if (navail == iaq->nirq) 1302 return (0); 1303 1304 /* 1305 * Didn't get the number requested. Use whatever number 1306 * the kernel is willing to allocate (it's in navail). 1307 */ 1308 pci_release_msi(sc->dev); 1309 goto share; 1310 } 1311 1312 device_printf(sc->dev, 1313 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", 1314 itype, rc, iaq->nirq, navail); 1315 } 1316 1317 device_printf(sc->dev, 1318 "failed to find a usable interrupt type. " 1319 "allowed=%d, msi-x=%d, msi=%d, intx=1", intr_types, 1320 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 1321 1322 return (ENXIO); 1323 } 1324 1325 /* 1326 * Install a compatible firmware (if required), establish contact with it, 1327 * become the master, and reset the device. 1328 */ 1329 static int 1330 prep_firmware(struct adapter *sc) 1331 { 1332 const struct firmware *fw; 1333 int rc; 1334 enum dev_state state; 1335 1336 /* Check firmware version and install a different one if necessary */ 1337 rc = t4_check_fw_version(sc); 1338 if (rc != 0 || force_firmware_install) { 1339 uint32_t v = 0; 1340 1341 fw = firmware_get(T4_FWNAME); 1342 if (fw != NULL) { 1343 const struct fw_hdr *hdr = (const void *)fw->data; 1344 1345 v = ntohl(hdr->fw_ver); 1346 1347 /* 1348 * The firmware module will not be used if it isn't the 1349 * same major version as what the driver was compiled 1350 * with. This check trumps force_firmware_install. 1351 */ 1352 if (G_FW_HDR_FW_VER_MAJOR(v) != FW_VERSION_MAJOR) { 1353 device_printf(sc->dev, 1354 "Found firmware image but version %d " 1355 "can not be used with this driver (%d)\n", 1356 G_FW_HDR_FW_VER_MAJOR(v), FW_VERSION_MAJOR); 1357 1358 firmware_put(fw, FIRMWARE_UNLOAD); 1359 fw = NULL; 1360 } 1361 } 1362 1363 if (fw == NULL && (rc < 0 || force_firmware_install)) { 1364 device_printf(sc->dev, "No usable firmware. " 1365 "card has %d.%d.%d, driver compiled with %d.%d.%d, " 1366 "force_firmware_install%s set", 1367 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 1368 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 1369 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 1370 FW_VERSION_MAJOR, FW_VERSION_MINOR, 1371 FW_VERSION_MICRO, 1372 force_firmware_install ? "" : " not"); 1373 return (EAGAIN); 1374 } 1375 1376 /* 1377 * Always upgrade, even for minor/micro/build mismatches. 1378 * Downgrade only for a major version mismatch or if 1379 * force_firmware_install was specified. 1380 */ 1381 if (fw != NULL && (rc < 0 || force_firmware_install || 1382 v > sc->params.fw_vers)) { 1383 device_printf(sc->dev, 1384 "installing firmware %d.%d.%d.%d on card.\n", 1385 G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v), 1386 G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v)); 1387 1388 rc = -t4_load_fw(sc, fw->data, fw->datasize); 1389 if (rc != 0) { 1390 device_printf(sc->dev, 1391 "failed to install firmware: %d\n", rc); 1392 firmware_put(fw, FIRMWARE_UNLOAD); 1393 return (rc); 1394 } else { 1395 /* refresh */ 1396 (void) t4_check_fw_version(sc); 1397 } 1398 } 1399 1400 if (fw != NULL) 1401 firmware_put(fw, FIRMWARE_UNLOAD); 1402 } 1403 1404 /* Contact firmware, request master */ 1405 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state); 1406 if (rc < 0) { 1407 rc = -rc; 1408 device_printf(sc->dev, 1409 "failed to connect to the firmware: %d.\n", rc); 1410 return (rc); 1411 } 1412 1413 /* Reset device */ 1414 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST); 1415 if (rc != 0) { 1416 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 1417 if (rc != ETIMEDOUT && rc != EIO) 1418 t4_fw_bye(sc, sc->mbox); 1419 return (rc); 1420 } 1421 1422 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 1423 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 1424 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 1425 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 1426 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 1427 sc->flags |= FW_OK; 1428 1429 return (0); 1430 } 1431 1432 static int 1433 get_devlog_params(struct adapter *sc, struct devlog_params *dlog) 1434 { 1435 struct fw_devlog_cmd devlog_cmd; 1436 uint32_t meminfo; 1437 int rc; 1438 1439 bzero(&devlog_cmd, sizeof(devlog_cmd)); 1440 devlog_cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) | 1441 F_FW_CMD_REQUEST | F_FW_CMD_READ); 1442 devlog_cmd.retval_len16 = htobe32(FW_LEN16(devlog_cmd)); 1443 rc = -t4_wr_mbox(sc, sc->mbox, &devlog_cmd, sizeof(devlog_cmd), 1444 &devlog_cmd); 1445 if (rc != 0) { 1446 device_printf(sc->dev, 1447 "failed to get devlog parameters: %d.\n", rc); 1448 bzero(dlog, sizeof (*dlog)); 1449 return (rc); 1450 } 1451 1452 meminfo = be32toh(devlog_cmd.memtype_devlog_memaddr16_devlog); 1453 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(meminfo); 1454 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(meminfo) << 4; 1455 dlog->size = be32toh(devlog_cmd.memsize_devlog); 1456 1457 return (0); 1458 } 1459 1460 static int 1461 get_capabilities(struct adapter *sc, struct fw_caps_config_cmd *caps) 1462 { 1463 int rc; 1464 1465 bzero(caps, sizeof(*caps)); 1466 caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1467 F_FW_CMD_REQUEST | F_FW_CMD_READ); 1468 caps->retval_len16 = htobe32(FW_LEN16(*caps)); 1469 1470 rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), caps); 1471 if (rc != 0) 1472 return (rc); 1473 1474 if (caps->niccaps & htobe16(FW_CAPS_CONFIG_NIC_VM)) 1475 caps->niccaps ^= htobe16(FW_CAPS_CONFIG_NIC_VM); 1476 1477 caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1478 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 1479 rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), NULL); 1480 1481 return (rc); 1482 } 1483 1484 static int 1485 get_params(struct adapter *sc, struct fw_caps_config_cmd *caps) 1486 { 1487 int rc; 1488 uint32_t params[7], val[7]; 1489 1490 #define FW_PARAM_DEV(param) \ 1491 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 1492 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 1493 #define FW_PARAM_PFVF(param) \ 1494 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 1495 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 1496 1497 params[0] = FW_PARAM_DEV(PORTVEC); 1498 params[1] = FW_PARAM_PFVF(IQFLINT_START); 1499 params[2] = FW_PARAM_PFVF(EQ_START); 1500 params[3] = FW_PARAM_PFVF(FILTER_START); 1501 params[4] = FW_PARAM_PFVF(FILTER_END); 1502 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 5, params, val); 1503 if (rc != 0) { 1504 device_printf(sc->dev, 1505 "failed to query parameters: %d.\n", rc); 1506 goto done; 1507 } 1508 1509 sc->params.portvec = val[0]; 1510 sc->params.nports = 0; 1511 while (val[0]) { 1512 sc->params.nports++; 1513 val[0] &= val[0] - 1; 1514 } 1515 1516 sc->sge.iq_start = val[1]; 1517 sc->sge.eq_start = val[2]; 1518 sc->tids.ftid_base = val[3]; 1519 sc->tids.nftids = val[4] - val[3] + 1; 1520 1521 if (caps->toecaps) { 1522 /* query offload-related parameters */ 1523 params[0] = FW_PARAM_DEV(NTID); 1524 params[1] = FW_PARAM_PFVF(SERVER_START); 1525 params[2] = FW_PARAM_PFVF(SERVER_END); 1526 params[3] = FW_PARAM_PFVF(TDDP_START); 1527 params[4] = FW_PARAM_PFVF(TDDP_END); 1528 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 1529 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val); 1530 if (rc != 0) { 1531 device_printf(sc->dev, 1532 "failed to query TOE parameters: %d.\n", rc); 1533 goto done; 1534 } 1535 sc->tids.ntids = val[0]; 1536 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 1537 sc->tids.stid_base = val[1]; 1538 sc->tids.nstids = val[2] - val[1] + 1; 1539 sc->vres.ddp.start = val[3]; 1540 sc->vres.ddp.size = val[4] - val[3] + 1; 1541 sc->params.ofldq_wr_cred = val[5]; 1542 sc->params.offload = 1; 1543 } 1544 if (caps->rdmacaps) { 1545 params[0] = FW_PARAM_PFVF(STAG_START); 1546 params[1] = FW_PARAM_PFVF(STAG_END); 1547 params[2] = FW_PARAM_PFVF(RQ_START); 1548 params[3] = FW_PARAM_PFVF(RQ_END); 1549 params[4] = FW_PARAM_PFVF(PBL_START); 1550 params[5] = FW_PARAM_PFVF(PBL_END); 1551 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val); 1552 if (rc != 0) { 1553 device_printf(sc->dev, 1554 "failed to query RDMA parameters: %d.\n", rc); 1555 goto done; 1556 } 1557 sc->vres.stag.start = val[0]; 1558 sc->vres.stag.size = val[1] - val[0] + 1; 1559 sc->vres.rq.start = val[2]; 1560 sc->vres.rq.size = val[3] - val[2] + 1; 1561 sc->vres.pbl.start = val[4]; 1562 sc->vres.pbl.size = val[5] - val[4] + 1; 1563 } 1564 if (caps->iscsicaps) { 1565 params[0] = FW_PARAM_PFVF(ISCSI_START); 1566 params[1] = FW_PARAM_PFVF(ISCSI_END); 1567 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, params, val); 1568 if (rc != 0) { 1569 device_printf(sc->dev, 1570 "failed to query iSCSI parameters: %d.\n", rc); 1571 goto done; 1572 } 1573 sc->vres.iscsi.start = val[0]; 1574 sc->vres.iscsi.size = val[1] - val[0] + 1; 1575 } 1576 #undef FW_PARAM_PFVF 1577 #undef FW_PARAM_DEV 1578 1579 done: 1580 return (rc); 1581 } 1582 1583 static void 1584 t4_set_desc(struct adapter *sc) 1585 { 1586 char buf[128]; 1587 struct adapter_params *p = &sc->params; 1588 1589 snprintf(buf, sizeof(buf), 1590 "Chelsio %s (rev %d) %d port %sNIC PCIe-x%d %d %s, S/N:%s, E/C:%s", 1591 p->vpd.id, p->rev, p->nports, is_offload(sc) ? "R" : "", 1592 p->pci.width, sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" : 1593 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), p->vpd.sn, p->vpd.ec); 1594 1595 device_set_desc_copy(sc->dev, buf); 1596 } 1597 1598 static void 1599 build_medialist(struct port_info *pi) 1600 { 1601 struct ifmedia *media = &pi->media; 1602 int data, m; 1603 1604 PORT_LOCK(pi); 1605 1606 ifmedia_removeall(media); 1607 1608 m = IFM_ETHER | IFM_FDX; 1609 data = (pi->port_type << 8) | pi->mod_type; 1610 1611 switch(pi->port_type) { 1612 case FW_PORT_TYPE_BT_XFI: 1613 ifmedia_add(media, m | IFM_10G_T, data, NULL); 1614 break; 1615 1616 case FW_PORT_TYPE_BT_XAUI: 1617 ifmedia_add(media, m | IFM_10G_T, data, NULL); 1618 /* fall through */ 1619 1620 case FW_PORT_TYPE_BT_SGMII: 1621 ifmedia_add(media, m | IFM_1000_T, data, NULL); 1622 ifmedia_add(media, m | IFM_100_TX, data, NULL); 1623 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL); 1624 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 1625 break; 1626 1627 case FW_PORT_TYPE_CX4: 1628 ifmedia_add(media, m | IFM_10G_CX4, data, NULL); 1629 ifmedia_set(media, m | IFM_10G_CX4); 1630 break; 1631 1632 case FW_PORT_TYPE_SFP: 1633 case FW_PORT_TYPE_FIBER_XFI: 1634 case FW_PORT_TYPE_FIBER_XAUI: 1635 switch (pi->mod_type) { 1636 1637 case FW_PORT_MOD_TYPE_LR: 1638 ifmedia_add(media, m | IFM_10G_LR, data, NULL); 1639 ifmedia_set(media, m | IFM_10G_LR); 1640 break; 1641 1642 case FW_PORT_MOD_TYPE_SR: 1643 ifmedia_add(media, m | IFM_10G_SR, data, NULL); 1644 ifmedia_set(media, m | IFM_10G_SR); 1645 break; 1646 1647 case FW_PORT_MOD_TYPE_LRM: 1648 ifmedia_add(media, m | IFM_10G_LRM, data, NULL); 1649 ifmedia_set(media, m | IFM_10G_LRM); 1650 break; 1651 1652 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 1653 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 1654 ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL); 1655 ifmedia_set(media, m | IFM_10G_TWINAX); 1656 break; 1657 1658 case FW_PORT_MOD_TYPE_NONE: 1659 m &= ~IFM_FDX; 1660 ifmedia_add(media, m | IFM_NONE, data, NULL); 1661 ifmedia_set(media, m | IFM_NONE); 1662 break; 1663 1664 case FW_PORT_MOD_TYPE_NA: 1665 case FW_PORT_MOD_TYPE_ER: 1666 default: 1667 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 1668 ifmedia_set(media, m | IFM_UNKNOWN); 1669 break; 1670 } 1671 break; 1672 1673 case FW_PORT_TYPE_KX4: 1674 case FW_PORT_TYPE_KX: 1675 case FW_PORT_TYPE_KR: 1676 default: 1677 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 1678 ifmedia_set(media, m | IFM_UNKNOWN); 1679 break; 1680 } 1681 1682 PORT_UNLOCK(pi); 1683 } 1684 1685 /* 1686 * Program the port's XGMAC based on parameters in ifnet. The caller also 1687 * indicates which parameters should be programmed (the rest are left alone). 1688 */ 1689 static int 1690 update_mac_settings(struct port_info *pi, int flags) 1691 { 1692 int rc; 1693 struct ifnet *ifp = pi->ifp; 1694 struct adapter *sc = pi->adapter; 1695 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 1696 1697 PORT_LOCK_ASSERT_OWNED(pi); 1698 KASSERT(flags, ("%s: not told what to update.", __func__)); 1699 1700 if (flags & XGMAC_MTU) 1701 mtu = ifp->if_mtu; 1702 1703 if (flags & XGMAC_PROMISC) 1704 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 1705 1706 if (flags & XGMAC_ALLMULTI) 1707 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 1708 1709 if (flags & XGMAC_VLANEX) 1710 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; 1711 1712 rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1, 1713 vlanex, false); 1714 if (rc) { 1715 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc); 1716 return (rc); 1717 } 1718 1719 if (flags & XGMAC_UCADDR) { 1720 uint8_t ucaddr[ETHER_ADDR_LEN]; 1721 1722 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 1723 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt, 1724 ucaddr, true, true); 1725 if (rc < 0) { 1726 rc = -rc; 1727 if_printf(ifp, "change_mac failed: %d\n", rc); 1728 return (rc); 1729 } else { 1730 pi->xact_addr_filt = rc; 1731 rc = 0; 1732 } 1733 } 1734 1735 if (flags & XGMAC_MCADDRS) { 1736 const uint8_t *mcaddr; 1737 int del = 1; 1738 uint64_t hash = 0; 1739 struct ifmultiaddr *ifma; 1740 1741 if_maddr_rlock(ifp); 1742 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1743 if (ifma->ifma_addr->sa_family != AF_LINK) 1744 continue; 1745 mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 1746 1747 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid, del, 1, 1748 &mcaddr, NULL, &hash, 0); 1749 if (rc < 0) { 1750 rc = -rc; 1751 if_printf(ifp, "failed to add mc address" 1752 " %02x:%02x:%02x:%02x:%02x:%02x rc=%d\n", 1753 mcaddr[0], mcaddr[1], mcaddr[2], mcaddr[3], 1754 mcaddr[4], mcaddr[5], rc); 1755 goto mcfail; 1756 } 1757 del = 0; 1758 } 1759 1760 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0); 1761 if (rc != 0) 1762 if_printf(ifp, "failed to set mc address hash: %d", rc); 1763 mcfail: 1764 if_maddr_runlock(ifp); 1765 } 1766 1767 return (rc); 1768 } 1769 1770 static int 1771 cxgbe_init_locked(struct port_info *pi) 1772 { 1773 struct adapter *sc = pi->adapter; 1774 int rc = 0; 1775 1776 ADAPTER_LOCK_ASSERT_OWNED(sc); 1777 1778 while (!IS_DOOMED(pi) && IS_BUSY(sc)) { 1779 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4init", 0)) { 1780 rc = EINTR; 1781 goto done; 1782 } 1783 } 1784 if (IS_DOOMED(pi)) { 1785 rc = ENXIO; 1786 goto done; 1787 } 1788 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 1789 1790 /* Give up the adapter lock, port init code can sleep. */ 1791 SET_BUSY(sc); 1792 ADAPTER_UNLOCK(sc); 1793 1794 rc = cxgbe_init_synchronized(pi); 1795 1796 done: 1797 ADAPTER_LOCK(sc); 1798 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 1799 CLR_BUSY(sc); 1800 wakeup_one(&sc->flags); 1801 ADAPTER_UNLOCK(sc); 1802 return (rc); 1803 } 1804 1805 static int 1806 cxgbe_init_synchronized(struct port_info *pi) 1807 { 1808 struct adapter *sc = pi->adapter; 1809 struct ifnet *ifp = pi->ifp; 1810 int rc = 0, i; 1811 uint16_t *rss; 1812 struct sge_rxq *rxq; 1813 1814 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1815 1816 if (isset(&sc->open_device_map, pi->port_id)) { 1817 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, 1818 ("mismatch between open_device_map and if_drv_flags")); 1819 return (0); /* already running */ 1820 } 1821 1822 if (sc->open_device_map == 0 && ((rc = first_port_up(sc)) != 0)) 1823 return (rc); /* error message displayed already */ 1824 1825 /* 1826 * Allocate tx/rx/fl queues for this port. 1827 */ 1828 rc = t4_setup_eth_queues(pi); 1829 if (rc != 0) 1830 goto done; /* error message displayed already */ 1831 1832 /* 1833 * Setup RSS for this port. 1834 */ 1835 rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 1836 for_each_rxq(pi, i, rxq) { 1837 rss[i] = rxq->iq.abs_id; 1838 } 1839 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss, 1840 pi->nrxq); 1841 free(rss, M_CXGBE); 1842 if (rc != 0) { 1843 if_printf(ifp, "rss_config failed: %d\n", rc); 1844 goto done; 1845 } 1846 1847 PORT_LOCK(pi); 1848 rc = update_mac_settings(pi, XGMAC_ALL); 1849 PORT_UNLOCK(pi); 1850 if (rc) 1851 goto done; /* error message displayed already */ 1852 1853 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg); 1854 if (rc != 0) { 1855 if_printf(ifp, "start_link failed: %d\n", rc); 1856 goto done; 1857 } 1858 1859 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true); 1860 if (rc != 0) { 1861 if_printf(ifp, "enable_vi failed: %d\n", rc); 1862 goto done; 1863 } 1864 pi->flags |= VI_ENABLED; 1865 1866 /* all ok */ 1867 setbit(&sc->open_device_map, pi->port_id); 1868 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1869 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1870 1871 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 1872 done: 1873 if (rc != 0) 1874 cxgbe_uninit_synchronized(pi); 1875 1876 return (rc); 1877 } 1878 1879 static int 1880 cxgbe_uninit_locked(struct port_info *pi) 1881 { 1882 struct adapter *sc = pi->adapter; 1883 int rc; 1884 1885 ADAPTER_LOCK_ASSERT_OWNED(sc); 1886 1887 while (!IS_DOOMED(pi) && IS_BUSY(sc)) { 1888 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4uninit", 0)) { 1889 rc = EINTR; 1890 goto done; 1891 } 1892 } 1893 if (IS_DOOMED(pi)) { 1894 rc = ENXIO; 1895 goto done; 1896 } 1897 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 1898 SET_BUSY(sc); 1899 ADAPTER_UNLOCK(sc); 1900 1901 rc = cxgbe_uninit_synchronized(pi); 1902 1903 ADAPTER_LOCK(sc); 1904 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 1905 CLR_BUSY(sc); 1906 wakeup_one(&sc->flags); 1907 done: 1908 ADAPTER_UNLOCK(sc); 1909 return (rc); 1910 } 1911 1912 /* 1913 * Idempotent. 1914 */ 1915 static int 1916 cxgbe_uninit_synchronized(struct port_info *pi) 1917 { 1918 struct adapter *sc = pi->adapter; 1919 struct ifnet *ifp = pi->ifp; 1920 int rc; 1921 1922 /* 1923 * taskqueue_drain may cause a deadlock if the adapter lock is held. 1924 */ 1925 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1926 1927 /* 1928 * Clear this port's bit from the open device map, and then drain 1929 * tasks and callouts. 1930 */ 1931 clrbit(&sc->open_device_map, pi->port_id); 1932 1933 PORT_LOCK(pi); 1934 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1935 callout_stop(&pi->tick); 1936 PORT_UNLOCK(pi); 1937 callout_drain(&pi->tick); 1938 1939 /* 1940 * Stop and then free the queues' resources, including the queues 1941 * themselves. 1942 * 1943 * XXX: we could just stop the queues here (on ifconfig down) and free 1944 * them later (on port detach), but having up/down go through the entire 1945 * allocate/activate/deactivate/free sequence is a good way to find 1946 * leaks and bugs. 1947 */ 1948 rc = t4_teardown_eth_queues(pi); 1949 if (rc != 0) 1950 if_printf(ifp, "teardown failed: %d\n", rc); 1951 1952 if (pi->flags & VI_ENABLED) { 1953 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false); 1954 if (rc) 1955 if_printf(ifp, "disable_vi failed: %d\n", rc); 1956 else 1957 pi->flags &= ~VI_ENABLED; 1958 } 1959 1960 pi->link_cfg.link_ok = 0; 1961 pi->link_cfg.speed = 0; 1962 t4_os_link_changed(sc, pi->port_id, 0); 1963 1964 if (sc->open_device_map == 0) 1965 last_port_down(sc); 1966 1967 return (0); 1968 } 1969 1970 #define T4_ALLOC_IRQ(sc, irq, rid, handler, arg, name) do { \ 1971 rc = t4_alloc_irq(sc, irq, rid, handler, arg, name); \ 1972 if (rc != 0) \ 1973 goto done; \ 1974 } while (0) 1975 static int 1976 first_port_up(struct adapter *sc) 1977 { 1978 int rc, i, rid, p, q; 1979 char s[8]; 1980 struct irq *irq; 1981 struct sge_iq *intrq; 1982 1983 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1984 1985 /* 1986 * queues that belong to the adapter (not any particular port). 1987 */ 1988 rc = t4_setup_adapter_queues(sc); 1989 if (rc != 0) 1990 goto done; 1991 1992 /* 1993 * Setup interrupts. 1994 */ 1995 irq = &sc->irq[0]; 1996 rid = sc->intr_type == INTR_INTX ? 0 : 1; 1997 if (sc->intr_count == 1) { 1998 KASSERT(sc->flags & INTR_SHARED, 1999 ("%s: single interrupt but not shared?", __func__)); 2000 2001 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_all, sc, "all"); 2002 } else { 2003 /* Multiple interrupts. The first one is always error intr */ 2004 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_err, sc, "err"); 2005 irq++; 2006 rid++; 2007 2008 /* Firmware event queue normally has an interrupt of its own */ 2009 if (sc->intr_count > T4_EXTRA_INTR) { 2010 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, 2011 "evt"); 2012 irq++; 2013 rid++; 2014 } 2015 2016 intrq = &sc->sge.intrq[0]; 2017 if (sc->flags & INTR_SHARED) { 2018 2019 /* All ports share these interrupt queues */ 2020 2021 for (i = 0; i < NINTRQ(sc); i++) { 2022 snprintf(s, sizeof(s), "*.%d", i); 2023 T4_ALLOC_IRQ(sc, irq, rid, t4_intr, intrq, s); 2024 irq++; 2025 rid++; 2026 intrq++; 2027 } 2028 } else { 2029 2030 /* Each port has its own set of interrupt queues */ 2031 2032 for (p = 0; p < sc->params.nports; p++) { 2033 for (q = 0; q < sc->port[p]->nrxq; q++) { 2034 snprintf(s, sizeof(s), "%d.%d", p, q); 2035 T4_ALLOC_IRQ(sc, irq, rid, t4_intr, 2036 intrq, s); 2037 irq++; 2038 rid++; 2039 intrq++; 2040 } 2041 } 2042 } 2043 } 2044 2045 t4_intr_enable(sc); 2046 sc->flags |= FULL_INIT_DONE; 2047 2048 done: 2049 if (rc != 0) 2050 last_port_down(sc); 2051 2052 return (rc); 2053 } 2054 #undef T4_ALLOC_IRQ 2055 2056 /* 2057 * Idempotent. 2058 */ 2059 static int 2060 last_port_down(struct adapter *sc) 2061 { 2062 int i; 2063 2064 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2065 2066 t4_intr_disable(sc); 2067 2068 t4_teardown_adapter_queues(sc); 2069 2070 for (i = 0; i < sc->intr_count; i++) 2071 t4_free_irq(sc, &sc->irq[i]); 2072 2073 sc->flags &= ~FULL_INIT_DONE; 2074 2075 return (0); 2076 } 2077 2078 static int 2079 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 2080 iq_intr_handler_t *handler, void *arg, char *name) 2081 { 2082 int rc; 2083 2084 irq->rid = rid; 2085 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 2086 RF_SHAREABLE | RF_ACTIVE); 2087 if (irq->res == NULL) { 2088 device_printf(sc->dev, 2089 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 2090 return (ENOMEM); 2091 } 2092 2093 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 2094 NULL, handler, arg, &irq->tag); 2095 if (rc != 0) { 2096 device_printf(sc->dev, 2097 "failed to setup interrupt for rid %d, name %s: %d\n", 2098 rid, name, rc); 2099 } else if (name) 2100 bus_describe_intr(sc->dev, irq->res, irq->tag, name); 2101 2102 return (rc); 2103 } 2104 2105 static int 2106 t4_free_irq(struct adapter *sc, struct irq *irq) 2107 { 2108 if (irq->tag) 2109 bus_teardown_intr(sc->dev, irq->res, irq->tag); 2110 if (irq->res) 2111 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 2112 2113 bzero(irq, sizeof(*irq)); 2114 2115 return (0); 2116 } 2117 2118 static void 2119 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start, 2120 unsigned int end) 2121 { 2122 uint32_t *p = (uint32_t *)(buf + start); 2123 2124 for ( ; start <= end; start += sizeof(uint32_t)) 2125 *p++ = t4_read_reg(sc, start); 2126 } 2127 2128 static void 2129 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 2130 { 2131 int i; 2132 static const unsigned int reg_ranges[] = { 2133 0x1008, 0x1108, 2134 0x1180, 0x11b4, 2135 0x11fc, 0x123c, 2136 0x1300, 0x173c, 2137 0x1800, 0x18fc, 2138 0x3000, 0x30d8, 2139 0x30e0, 0x5924, 2140 0x5960, 0x59d4, 2141 0x5a00, 0x5af8, 2142 0x6000, 0x6098, 2143 0x6100, 0x6150, 2144 0x6200, 0x6208, 2145 0x6240, 0x6248, 2146 0x6280, 0x6338, 2147 0x6370, 0x638c, 2148 0x6400, 0x643c, 2149 0x6500, 0x6524, 2150 0x6a00, 0x6a38, 2151 0x6a60, 0x6a78, 2152 0x6b00, 0x6b84, 2153 0x6bf0, 0x6c84, 2154 0x6cf0, 0x6d84, 2155 0x6df0, 0x6e84, 2156 0x6ef0, 0x6f84, 2157 0x6ff0, 0x7084, 2158 0x70f0, 0x7184, 2159 0x71f0, 0x7284, 2160 0x72f0, 0x7384, 2161 0x73f0, 0x7450, 2162 0x7500, 0x7530, 2163 0x7600, 0x761c, 2164 0x7680, 0x76cc, 2165 0x7700, 0x7798, 2166 0x77c0, 0x77fc, 2167 0x7900, 0x79fc, 2168 0x7b00, 0x7c38, 2169 0x7d00, 0x7efc, 2170 0x8dc0, 0x8e1c, 2171 0x8e30, 0x8e78, 2172 0x8ea0, 0x8f6c, 2173 0x8fc0, 0x9074, 2174 0x90fc, 0x90fc, 2175 0x9400, 0x9458, 2176 0x9600, 0x96bc, 2177 0x9800, 0x9808, 2178 0x9820, 0x983c, 2179 0x9850, 0x9864, 2180 0x9c00, 0x9c6c, 2181 0x9c80, 0x9cec, 2182 0x9d00, 0x9d6c, 2183 0x9d80, 0x9dec, 2184 0x9e00, 0x9e6c, 2185 0x9e80, 0x9eec, 2186 0x9f00, 0x9f6c, 2187 0x9f80, 0x9fec, 2188 0xd004, 0xd03c, 2189 0xdfc0, 0xdfe0, 2190 0xe000, 0xea7c, 2191 0xf000, 0x11190, 2192 0x19040, 0x19124, 2193 0x19150, 0x191b0, 2194 0x191d0, 0x191e8, 2195 0x19238, 0x1924c, 2196 0x193f8, 0x19474, 2197 0x19490, 0x194f8, 2198 0x19800, 0x19f30, 2199 0x1a000, 0x1a06c, 2200 0x1a0b0, 0x1a120, 2201 0x1a128, 0x1a138, 2202 0x1a190, 0x1a1c4, 2203 0x1a1fc, 0x1a1fc, 2204 0x1e040, 0x1e04c, 2205 0x1e240, 0x1e28c, 2206 0x1e2c0, 0x1e2c0, 2207 0x1e2e0, 0x1e2e0, 2208 0x1e300, 0x1e384, 2209 0x1e3c0, 0x1e3c8, 2210 0x1e440, 0x1e44c, 2211 0x1e640, 0x1e68c, 2212 0x1e6c0, 0x1e6c0, 2213 0x1e6e0, 0x1e6e0, 2214 0x1e700, 0x1e784, 2215 0x1e7c0, 0x1e7c8, 2216 0x1e840, 0x1e84c, 2217 0x1ea40, 0x1ea8c, 2218 0x1eac0, 0x1eac0, 2219 0x1eae0, 0x1eae0, 2220 0x1eb00, 0x1eb84, 2221 0x1ebc0, 0x1ebc8, 2222 0x1ec40, 0x1ec4c, 2223 0x1ee40, 0x1ee8c, 2224 0x1eec0, 0x1eec0, 2225 0x1eee0, 0x1eee0, 2226 0x1ef00, 0x1ef84, 2227 0x1efc0, 0x1efc8, 2228 0x1f040, 0x1f04c, 2229 0x1f240, 0x1f28c, 2230 0x1f2c0, 0x1f2c0, 2231 0x1f2e0, 0x1f2e0, 2232 0x1f300, 0x1f384, 2233 0x1f3c0, 0x1f3c8, 2234 0x1f440, 0x1f44c, 2235 0x1f640, 0x1f68c, 2236 0x1f6c0, 0x1f6c0, 2237 0x1f6e0, 0x1f6e0, 2238 0x1f700, 0x1f784, 2239 0x1f7c0, 0x1f7c8, 2240 0x1f840, 0x1f84c, 2241 0x1fa40, 0x1fa8c, 2242 0x1fac0, 0x1fac0, 2243 0x1fae0, 0x1fae0, 2244 0x1fb00, 0x1fb84, 2245 0x1fbc0, 0x1fbc8, 2246 0x1fc40, 0x1fc4c, 2247 0x1fe40, 0x1fe8c, 2248 0x1fec0, 0x1fec0, 2249 0x1fee0, 0x1fee0, 2250 0x1ff00, 0x1ff84, 2251 0x1ffc0, 0x1ffc8, 2252 0x20000, 0x2002c, 2253 0x20100, 0x2013c, 2254 0x20190, 0x201c8, 2255 0x20200, 0x20318, 2256 0x20400, 0x20528, 2257 0x20540, 0x20614, 2258 0x21000, 0x21040, 2259 0x2104c, 0x21060, 2260 0x210c0, 0x210ec, 2261 0x21200, 0x21268, 2262 0x21270, 0x21284, 2263 0x212fc, 0x21388, 2264 0x21400, 0x21404, 2265 0x21500, 0x21518, 2266 0x2152c, 0x2153c, 2267 0x21550, 0x21554, 2268 0x21600, 0x21600, 2269 0x21608, 0x21628, 2270 0x21630, 0x2163c, 2271 0x21700, 0x2171c, 2272 0x21780, 0x2178c, 2273 0x21800, 0x21c38, 2274 0x21c80, 0x21d7c, 2275 0x21e00, 0x21e04, 2276 0x22000, 0x2202c, 2277 0x22100, 0x2213c, 2278 0x22190, 0x221c8, 2279 0x22200, 0x22318, 2280 0x22400, 0x22528, 2281 0x22540, 0x22614, 2282 0x23000, 0x23040, 2283 0x2304c, 0x23060, 2284 0x230c0, 0x230ec, 2285 0x23200, 0x23268, 2286 0x23270, 0x23284, 2287 0x232fc, 0x23388, 2288 0x23400, 0x23404, 2289 0x23500, 0x23518, 2290 0x2352c, 0x2353c, 2291 0x23550, 0x23554, 2292 0x23600, 0x23600, 2293 0x23608, 0x23628, 2294 0x23630, 0x2363c, 2295 0x23700, 0x2371c, 2296 0x23780, 0x2378c, 2297 0x23800, 0x23c38, 2298 0x23c80, 0x23d7c, 2299 0x23e00, 0x23e04, 2300 0x24000, 0x2402c, 2301 0x24100, 0x2413c, 2302 0x24190, 0x241c8, 2303 0x24200, 0x24318, 2304 0x24400, 0x24528, 2305 0x24540, 0x24614, 2306 0x25000, 0x25040, 2307 0x2504c, 0x25060, 2308 0x250c0, 0x250ec, 2309 0x25200, 0x25268, 2310 0x25270, 0x25284, 2311 0x252fc, 0x25388, 2312 0x25400, 0x25404, 2313 0x25500, 0x25518, 2314 0x2552c, 0x2553c, 2315 0x25550, 0x25554, 2316 0x25600, 0x25600, 2317 0x25608, 0x25628, 2318 0x25630, 0x2563c, 2319 0x25700, 0x2571c, 2320 0x25780, 0x2578c, 2321 0x25800, 0x25c38, 2322 0x25c80, 0x25d7c, 2323 0x25e00, 0x25e04, 2324 0x26000, 0x2602c, 2325 0x26100, 0x2613c, 2326 0x26190, 0x261c8, 2327 0x26200, 0x26318, 2328 0x26400, 0x26528, 2329 0x26540, 0x26614, 2330 0x27000, 0x27040, 2331 0x2704c, 0x27060, 2332 0x270c0, 0x270ec, 2333 0x27200, 0x27268, 2334 0x27270, 0x27284, 2335 0x272fc, 0x27388, 2336 0x27400, 0x27404, 2337 0x27500, 0x27518, 2338 0x2752c, 0x2753c, 2339 0x27550, 0x27554, 2340 0x27600, 0x27600, 2341 0x27608, 0x27628, 2342 0x27630, 0x2763c, 2343 0x27700, 0x2771c, 2344 0x27780, 0x2778c, 2345 0x27800, 0x27c38, 2346 0x27c80, 0x27d7c, 2347 0x27e00, 0x27e04 2348 }; 2349 2350 regs->version = 4 | (sc->params.rev << 10); 2351 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2) 2352 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]); 2353 } 2354 2355 static void 2356 cxgbe_tick(void *arg) 2357 { 2358 struct port_info *pi = arg; 2359 struct ifnet *ifp = pi->ifp; 2360 struct sge_txq *txq; 2361 int i, drops; 2362 struct port_stats *s = &pi->stats; 2363 2364 PORT_LOCK(pi); 2365 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2366 PORT_UNLOCK(pi); 2367 return; /* without scheduling another callout */ 2368 } 2369 2370 t4_get_port_stats(pi->adapter, pi->tx_chan, s); 2371 2372 ifp->if_opackets = s->tx_frames; 2373 ifp->if_ipackets = s->rx_frames; 2374 ifp->if_obytes = s->tx_octets; 2375 ifp->if_ibytes = s->rx_octets; 2376 ifp->if_omcasts = s->tx_mcast_frames; 2377 ifp->if_imcasts = s->rx_mcast_frames; 2378 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + 2379 s->rx_ovflow3; 2380 2381 drops = s->tx_drop; 2382 for_each_txq(pi, i, txq) 2383 drops += txq->br->br_drops; 2384 ifp->if_snd.ifq_drops = drops; 2385 2386 ifp->if_oerrors = s->tx_error_frames; 2387 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long + 2388 s->rx_fcs_err + s->rx_len_err; 2389 2390 callout_schedule(&pi->tick, hz); 2391 PORT_UNLOCK(pi); 2392 } 2393 2394 static int 2395 t4_sysctls(struct adapter *sc) 2396 { 2397 struct sysctl_ctx_list *ctx; 2398 struct sysctl_oid *oid; 2399 struct sysctl_oid_list *children; 2400 2401 ctx = device_get_sysctl_ctx(sc->dev); 2402 oid = device_get_sysctl_tree(sc->dev); 2403 children = SYSCTL_CHILDREN(oid); 2404 2405 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, 2406 &sc->params.nports, 0, "# of ports"); 2407 2408 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 2409 &sc->params.rev, 0, "chip hardware revision"); 2410 2411 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 2412 CTLFLAG_RD, &sc->fw_version, 0, "firmware version"); 2413 2414 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "TOE", CTLFLAG_RD, 2415 &sc->params.offload, 0, "hardware is capable of TCP offload"); 2416 2417 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, 2418 &sc->params.vpd.cclk, 0, "core clock frequency (in KHz)"); 2419 2420 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 2421 CTLTYPE_STRING | CTLFLAG_RD, &intr_timer, sizeof(intr_timer), 2422 sysctl_int_array, "A", "interrupt holdoff timer values (us)"); 2423 2424 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 2425 CTLTYPE_STRING | CTLFLAG_RD, &intr_pktcount, sizeof(intr_pktcount), 2426 sysctl_int_array, "A", "interrupt holdoff packet counter values"); 2427 2428 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 2429 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 2430 sysctl_devlog, "A", "device log"); 2431 2432 return (0); 2433 } 2434 2435 static int 2436 cxgbe_sysctls(struct port_info *pi) 2437 { 2438 struct sysctl_ctx_list *ctx; 2439 struct sysctl_oid *oid; 2440 struct sysctl_oid_list *children; 2441 2442 ctx = device_get_sysctl_ctx(pi->dev); 2443 2444 /* 2445 * dev.cxgbe.X. 2446 */ 2447 oid = device_get_sysctl_tree(pi->dev); 2448 children = SYSCTL_CHILDREN(oid); 2449 2450 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 2451 &pi->nrxq, 0, "# of rx queues"); 2452 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 2453 &pi->ntxq, 0, "# of tx queues"); 2454 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 2455 &pi->first_rxq, 0, "index of first rx queue"); 2456 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 2457 &pi->first_txq, 0, "index of first tx queue"); 2458 2459 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 2460 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I", 2461 "holdoff timer index"); 2462 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 2463 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I", 2464 "holdoff packet counter index"); 2465 2466 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 2467 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I", 2468 "rx queue size"); 2469 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 2470 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I", 2471 "tx queue size"); 2472 2473 /* 2474 * dev.cxgbe.X.stats. 2475 */ 2476 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 2477 NULL, "port statistics"); 2478 children = SYSCTL_CHILDREN(oid); 2479 2480 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 2481 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 2482 CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \ 2483 sysctl_handle_t4_reg64, "QU", desc) 2484 2485 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 2486 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 2487 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 2488 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 2489 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 2490 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 2491 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 2492 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 2493 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 2494 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 2495 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 2496 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 2497 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 2498 "# of tx frames in this range", 2499 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 2500 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 2501 "# of tx frames in this range", 2502 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 2503 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 2504 "# of tx frames in this range", 2505 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 2506 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 2507 "# of tx frames in this range", 2508 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 2509 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 2510 "# of tx frames in this range", 2511 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 2512 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518", 2513 "# of tx frames in this range", 2514 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 2515 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", 2516 "# of tx frames in this range", 2517 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 2518 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 2519 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 2520 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 2521 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 2522 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 2523 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 2524 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 2525 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 2526 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 2527 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 2528 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 2529 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 2530 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 2531 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 2532 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 2533 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 2534 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted", 2535 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 2536 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 2537 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 2538 2539 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 2540 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 2541 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 2542 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 2543 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 2544 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 2545 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 2546 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 2547 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 2548 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 2549 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 2550 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 2551 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 2552 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 2553 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 2554 "# of frames received with bad FCS", 2555 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 2556 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 2557 "# of frames received with length error", 2558 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 2559 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 2560 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 2561 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 2562 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 2563 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 2564 "# of rx frames in this range", 2565 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 2566 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 2567 "# of rx frames in this range", 2568 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 2569 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 2570 "# of rx frames in this range", 2571 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 2572 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 2573 "# of rx frames in this range", 2574 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 2575 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 2576 "# of rx frames in this range", 2577 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 2578 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 2579 "# of rx frames in this range", 2580 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 2581 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 2582 "# of rx frames in this range", 2583 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 2584 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 2585 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 2586 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 2587 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 2588 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 2589 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 2590 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 2591 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 2592 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 2593 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 2594 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 2595 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 2596 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 2597 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); 2598 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", 2599 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); 2600 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", 2601 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); 2602 2603 #undef SYSCTL_ADD_T4_REG64 2604 2605 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ 2606 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 2607 &pi->stats.name, desc) 2608 2609 /* We get these from port_stats and they may be stale by upto 1s */ 2610 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, 2611 "# drops due to buffer-group 0 overflows"); 2612 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, 2613 "# drops due to buffer-group 1 overflows"); 2614 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, 2615 "# drops due to buffer-group 2 overflows"); 2616 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, 2617 "# drops due to buffer-group 3 overflows"); 2618 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, 2619 "# of buffer-group 0 truncated packets"); 2620 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, 2621 "# of buffer-group 1 truncated packets"); 2622 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, 2623 "# of buffer-group 2 truncated packets"); 2624 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, 2625 "# of buffer-group 3 truncated packets"); 2626 2627 #undef SYSCTL_ADD_T4_PORTSTAT 2628 2629 return (0); 2630 } 2631 2632 static int 2633 sysctl_int_array(SYSCTL_HANDLER_ARGS) 2634 { 2635 int rc, *i; 2636 struct sbuf sb; 2637 2638 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); 2639 for (i = arg1; arg2; arg2 -= sizeof(int), i++) 2640 sbuf_printf(&sb, "%d ", *i); 2641 sbuf_trim(&sb); 2642 sbuf_finish(&sb); 2643 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 2644 sbuf_delete(&sb); 2645 return (rc); 2646 } 2647 2648 static int 2649 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 2650 { 2651 struct port_info *pi = arg1; 2652 struct adapter *sc = pi->adapter; 2653 struct sge_rxq *rxq; 2654 int idx, rc, i; 2655 2656 idx = pi->tmr_idx; 2657 2658 rc = sysctl_handle_int(oidp, &idx, 0, req); 2659 if (rc != 0 || req->newptr == NULL) 2660 return (rc); 2661 2662 if (idx < 0 || idx >= SGE_NTIMERS) 2663 return (EINVAL); 2664 2665 ADAPTER_LOCK(sc); 2666 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 2667 if (rc == 0) { 2668 for_each_rxq(pi, i, rxq) { 2669 rxq->iq.intr_params = V_QINTR_TIMER_IDX(idx) | 2670 V_QINTR_CNT_EN(pi->pktc_idx != -1); 2671 } 2672 pi->tmr_idx = idx; 2673 } 2674 2675 ADAPTER_UNLOCK(sc); 2676 return (rc); 2677 } 2678 2679 static int 2680 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 2681 { 2682 struct port_info *pi = arg1; 2683 struct adapter *sc = pi->adapter; 2684 int idx, rc; 2685 2686 idx = pi->pktc_idx; 2687 2688 rc = sysctl_handle_int(oidp, &idx, 0, req); 2689 if (rc != 0 || req->newptr == NULL) 2690 return (rc); 2691 2692 if (idx < -1 || idx >= SGE_NCOUNTERS) 2693 return (EINVAL); 2694 2695 ADAPTER_LOCK(sc); 2696 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 2697 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING) 2698 rc = EBUSY; /* can be changed only when port is down */ 2699 2700 if (rc == 0) 2701 pi->pktc_idx = idx; 2702 2703 ADAPTER_UNLOCK(sc); 2704 return (rc); 2705 } 2706 2707 static int 2708 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 2709 { 2710 struct port_info *pi = arg1; 2711 struct adapter *sc = pi->adapter; 2712 int qsize, rc; 2713 2714 qsize = pi->qsize_rxq; 2715 2716 rc = sysctl_handle_int(oidp, &qsize, 0, req); 2717 if (rc != 0 || req->newptr == NULL) 2718 return (rc); 2719 2720 if (qsize < 128 || (qsize & 7)) 2721 return (EINVAL); 2722 2723 ADAPTER_LOCK(sc); 2724 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 2725 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING) 2726 rc = EBUSY; /* can be changed only when port is down */ 2727 2728 if (rc == 0) 2729 pi->qsize_rxq = qsize; 2730 2731 ADAPTER_UNLOCK(sc); 2732 return (rc); 2733 } 2734 2735 static int 2736 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 2737 { 2738 struct port_info *pi = arg1; 2739 struct adapter *sc = pi->adapter; 2740 int qsize, rc; 2741 2742 qsize = pi->qsize_txq; 2743 2744 rc = sysctl_handle_int(oidp, &qsize, 0, req); 2745 if (rc != 0 || req->newptr == NULL) 2746 return (rc); 2747 2748 if (qsize < 128) 2749 return (EINVAL); 2750 2751 ADAPTER_LOCK(sc); 2752 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 2753 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING) 2754 rc = EBUSY; /* can be changed only when port is down */ 2755 2756 if (rc == 0) 2757 pi->qsize_txq = qsize; 2758 2759 ADAPTER_UNLOCK(sc); 2760 return (rc); 2761 } 2762 2763 static int 2764 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 2765 { 2766 struct adapter *sc = arg1; 2767 int reg = arg2; 2768 uint64_t val; 2769 2770 val = t4_read_reg64(sc, reg); 2771 2772 return (sysctl_handle_64(oidp, &val, 0, req)); 2773 } 2774 2775 const char *devlog_level_strings[] = { 2776 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 2777 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 2778 [FW_DEVLOG_LEVEL_ERR] = "ERR", 2779 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 2780 [FW_DEVLOG_LEVEL_INFO] = "INFO", 2781 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 2782 }; 2783 2784 const char *devlog_facility_strings[] = { 2785 [FW_DEVLOG_FACILITY_CORE] = "CORE", 2786 [FW_DEVLOG_FACILITY_SCHED] = "SCHED", 2787 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 2788 [FW_DEVLOG_FACILITY_RES] = "RES", 2789 [FW_DEVLOG_FACILITY_HW] = "HW", 2790 [FW_DEVLOG_FACILITY_FLR] = "FLR", 2791 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 2792 [FW_DEVLOG_FACILITY_PHY] = "PHY", 2793 [FW_DEVLOG_FACILITY_MAC] = "MAC", 2794 [FW_DEVLOG_FACILITY_PORT] = "PORT", 2795 [FW_DEVLOG_FACILITY_VI] = "VI", 2796 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 2797 [FW_DEVLOG_FACILITY_ACL] = "ACL", 2798 [FW_DEVLOG_FACILITY_TM] = "TM", 2799 [FW_DEVLOG_FACILITY_QFC] = "QFC", 2800 [FW_DEVLOG_FACILITY_DCB] = "DCB", 2801 [FW_DEVLOG_FACILITY_ETH] = "ETH", 2802 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 2803 [FW_DEVLOG_FACILITY_RI] = "RI", 2804 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 2805 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 2806 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 2807 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE" 2808 }; 2809 2810 static int 2811 sysctl_devlog(SYSCTL_HANDLER_ARGS) 2812 { 2813 struct adapter *sc = arg1; 2814 struct devlog_params *dparams = &sc->params.devlog; 2815 struct fw_devlog_e *buf, *e; 2816 int i, j, rc, nentries, first = 0; 2817 struct sbuf *sb; 2818 uint64_t ftstamp = UINT64_MAX; 2819 2820 if (dparams->start == 0) 2821 return (ENXIO); 2822 2823 nentries = dparams->size / sizeof(struct fw_devlog_e); 2824 2825 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 2826 if (buf == NULL) 2827 return (ENOMEM); 2828 2829 rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size, 2830 (void *)buf); 2831 if (rc != 0) 2832 goto done; 2833 2834 for (i = 0; i < nentries; i++) { 2835 e = &buf[i]; 2836 2837 if (e->timestamp == 0) 2838 break; /* end */ 2839 2840 e->timestamp = be64toh(e->timestamp); 2841 e->seqno = be32toh(e->seqno); 2842 for (j = 0; j < 8; j++) 2843 e->params[j] = be32toh(e->params[j]); 2844 2845 if (e->timestamp < ftstamp) { 2846 ftstamp = e->timestamp; 2847 first = i; 2848 } 2849 } 2850 2851 if (buf[first].timestamp == 0) 2852 goto done; /* nothing in the log */ 2853 2854 rc = sysctl_wire_old_buffer(req, 0); 2855 if (rc != 0) 2856 goto done; 2857 2858 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 2859 sbuf_printf(sb, "\n%10s %15s %8s %8s %s\n", 2860 "Seq#", "Tstamp", "Level", "Facility", "Message"); 2861 2862 i = first; 2863 do { 2864 e = &buf[i]; 2865 if (e->timestamp == 0) 2866 break; /* end */ 2867 2868 sbuf_printf(sb, "%10d %15ju %8s %8s ", 2869 e->seqno, e->timestamp, 2870 (e->level < ARRAY_SIZE(devlog_level_strings) ? 2871 devlog_level_strings[e->level] : "UNKNOWN"), 2872 (e->facility < ARRAY_SIZE(devlog_facility_strings) ? 2873 devlog_facility_strings[e->facility] : "UNKNOWN")); 2874 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 2875 e->params[2], e->params[3], e->params[4], 2876 e->params[5], e->params[6], e->params[7]); 2877 2878 if (++i == nentries) 2879 i = 0; 2880 } while (i != first); 2881 2882 rc = sbuf_finish(sb); 2883 sbuf_delete(sb); 2884 done: 2885 free(buf, M_CXGBE); 2886 return (rc); 2887 } 2888 2889 static inline void 2890 txq_start(struct ifnet *ifp, struct sge_txq *txq) 2891 { 2892 struct buf_ring *br; 2893 struct mbuf *m; 2894 2895 TXQ_LOCK_ASSERT_OWNED(txq); 2896 2897 br = txq->br; 2898 m = txq->m ? txq->m : drbr_dequeue(ifp, br); 2899 if (m) 2900 t4_eth_tx(ifp, txq, m); 2901 } 2902 2903 void 2904 cxgbe_txq_start(void *arg, int count) 2905 { 2906 struct sge_txq *txq = arg; 2907 2908 TXQ_LOCK(txq); 2909 if (txq->eq.flags & EQ_CRFLUSHED) { 2910 txq->eq.flags &= ~EQ_CRFLUSHED; 2911 txq_start(txq->ifp, txq); 2912 } else 2913 wakeup_one(txq); /* txq is going away, wakeup free_txq */ 2914 TXQ_UNLOCK(txq); 2915 } 2916 2917 static uint32_t 2918 fconf_to_mode(uint32_t fconf) 2919 { 2920 uint32_t mode; 2921 2922 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | 2923 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; 2924 2925 if (fconf & F_FRAGMENTATION) 2926 mode |= T4_FILTER_IP_FRAGMENT; 2927 2928 if (fconf & F_MPSHITTYPE) 2929 mode |= T4_FILTER_MPS_HIT_TYPE; 2930 2931 if (fconf & F_MACMATCH) 2932 mode |= T4_FILTER_MAC_IDX; 2933 2934 if (fconf & F_ETHERTYPE) 2935 mode |= T4_FILTER_ETH_TYPE; 2936 2937 if (fconf & F_PROTOCOL) 2938 mode |= T4_FILTER_IP_PROTO; 2939 2940 if (fconf & F_TOS) 2941 mode |= T4_FILTER_IP_TOS; 2942 2943 if (fconf & F_VLAN) 2944 mode |= T4_FILTER_IVLAN; 2945 2946 if (fconf & F_VNIC_ID) 2947 mode |= T4_FILTER_OVLAN; 2948 2949 if (fconf & F_PORT) 2950 mode |= T4_FILTER_PORT; 2951 2952 if (fconf & F_FCOE) 2953 mode |= T4_FILTER_FCoE; 2954 2955 return (mode); 2956 } 2957 2958 static uint32_t 2959 mode_to_fconf(uint32_t mode) 2960 { 2961 uint32_t fconf = 0; 2962 2963 if (mode & T4_FILTER_IP_FRAGMENT) 2964 fconf |= F_FRAGMENTATION; 2965 2966 if (mode & T4_FILTER_MPS_HIT_TYPE) 2967 fconf |= F_MPSHITTYPE; 2968 2969 if (mode & T4_FILTER_MAC_IDX) 2970 fconf |= F_MACMATCH; 2971 2972 if (mode & T4_FILTER_ETH_TYPE) 2973 fconf |= F_ETHERTYPE; 2974 2975 if (mode & T4_FILTER_IP_PROTO) 2976 fconf |= F_PROTOCOL; 2977 2978 if (mode & T4_FILTER_IP_TOS) 2979 fconf |= F_TOS; 2980 2981 if (mode & T4_FILTER_IVLAN) 2982 fconf |= F_VLAN; 2983 2984 if (mode & T4_FILTER_OVLAN) 2985 fconf |= F_VNIC_ID; 2986 2987 if (mode & T4_FILTER_PORT) 2988 fconf |= F_PORT; 2989 2990 if (mode & T4_FILTER_FCoE) 2991 fconf |= F_FCOE; 2992 2993 return (fconf); 2994 } 2995 2996 static uint32_t 2997 fspec_to_fconf(struct t4_filter_specification *fs) 2998 { 2999 uint32_t fconf = 0; 3000 3001 if (fs->val.frag || fs->mask.frag) 3002 fconf |= F_FRAGMENTATION; 3003 3004 if (fs->val.matchtype || fs->mask.matchtype) 3005 fconf |= F_MPSHITTYPE; 3006 3007 if (fs->val.macidx || fs->mask.macidx) 3008 fconf |= F_MACMATCH; 3009 3010 if (fs->val.ethtype || fs->mask.ethtype) 3011 fconf |= F_ETHERTYPE; 3012 3013 if (fs->val.proto || fs->mask.proto) 3014 fconf |= F_PROTOCOL; 3015 3016 if (fs->val.tos || fs->mask.tos) 3017 fconf |= F_TOS; 3018 3019 if (fs->val.ivlan_vld || fs->mask.ivlan_vld) 3020 fconf |= F_VLAN; 3021 3022 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) 3023 fconf |= F_VNIC_ID; 3024 3025 if (fs->val.iport || fs->mask.iport) 3026 fconf |= F_PORT; 3027 3028 if (fs->val.fcoe || fs->mask.fcoe) 3029 fconf |= F_FCOE; 3030 3031 return (fconf); 3032 } 3033 3034 static int 3035 get_filter_mode(struct adapter *sc, uint32_t *mode) 3036 { 3037 uint32_t fconf; 3038 3039 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1, 3040 A_TP_VLAN_PRI_MAP); 3041 3042 *mode = fconf_to_mode(fconf); 3043 3044 return (0); 3045 } 3046 3047 static int 3048 set_filter_mode(struct adapter *sc, uint32_t mode) 3049 { 3050 uint32_t fconf; 3051 int rc; 3052 3053 fconf = mode_to_fconf(mode); 3054 3055 ADAPTER_LOCK(sc); 3056 if (IS_BUSY(sc)) { 3057 rc = EAGAIN; 3058 goto done; 3059 } 3060 3061 if (sc->tids.ftids_in_use > 0) { 3062 rc = EBUSY; 3063 goto done; 3064 } 3065 3066 rc = -t4_set_filter_mode(sc, fconf); 3067 done: 3068 ADAPTER_UNLOCK(sc); 3069 return (rc); 3070 } 3071 3072 static inline uint64_t 3073 get_filter_hits(struct adapter *sc, uint32_t fid) 3074 { 3075 uint32_t tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 3076 uint64_t hits; 3077 3078 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0), 3079 tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE); 3080 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0)); 3081 hits = t4_read_reg64(sc, MEMWIN0_BASE + 16); 3082 3083 return (be64toh(hits)); 3084 } 3085 3086 static int 3087 get_filter(struct adapter *sc, struct t4_filter *t) 3088 { 3089 int i, nfilters = sc->tids.nftids; 3090 struct filter_entry *f; 3091 3092 ADAPTER_LOCK_ASSERT_OWNED(sc); 3093 3094 if (IS_BUSY(sc)) 3095 return (EAGAIN); 3096 3097 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 3098 t->idx >= nfilters) { 3099 t->idx = 0xffffffff; 3100 return (0); 3101 } 3102 3103 f = &sc->tids.ftid_tab[t->idx]; 3104 for (i = t->idx; i < nfilters; i++, f++) { 3105 if (f->valid) { 3106 t->idx = i; 3107 t->l2tidx = f->l2t ? f->l2t->idx : 0; 3108 t->smtidx = f->smtidx; 3109 if (f->fs.hitcnts) 3110 t->hits = get_filter_hits(sc, t->idx); 3111 else 3112 t->hits = UINT64_MAX; 3113 t->fs = f->fs; 3114 3115 return (0); 3116 } 3117 } 3118 3119 t->idx = 0xffffffff; 3120 return (0); 3121 } 3122 3123 static int 3124 set_filter(struct adapter *sc, struct t4_filter *t) 3125 { 3126 uint32_t fconf; 3127 unsigned int nfilters, nports; 3128 struct filter_entry *f; 3129 int i; 3130 3131 ADAPTER_LOCK_ASSERT_OWNED(sc); 3132 3133 nfilters = sc->tids.nftids; 3134 nports = sc->params.nports; 3135 3136 if (nfilters == 0) 3137 return (ENOTSUP); 3138 3139 if (!(sc->flags & FULL_INIT_DONE)) 3140 return (EAGAIN); 3141 3142 if (t->idx >= nfilters) 3143 return (EINVAL); 3144 3145 /* Validate against the global filter mode */ 3146 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1, 3147 A_TP_VLAN_PRI_MAP); 3148 if ((fconf | fspec_to_fconf(&t->fs)) != fconf) 3149 return (E2BIG); 3150 3151 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) 3152 return (EINVAL); 3153 3154 if (t->fs.val.iport >= nports) 3155 return (EINVAL); 3156 3157 /* Can't specify an iq if not steering to it */ 3158 if (!t->fs.dirsteer && t->fs.iq) 3159 return (EINVAL); 3160 3161 /* IPv6 filter idx must be 4 aligned */ 3162 if (t->fs.type == 1 && 3163 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) 3164 return (EINVAL); 3165 3166 if (sc->tids.ftid_tab == NULL) { 3167 KASSERT(sc->tids.ftids_in_use == 0, 3168 ("%s: no memory allocated but filters_in_use > 0", 3169 __func__)); 3170 3171 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 3172 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 3173 if (sc->tids.ftid_tab == NULL) 3174 return (ENOMEM); 3175 } 3176 3177 for (i = 0; i < 4; i++) { 3178 f = &sc->tids.ftid_tab[t->idx + i]; 3179 3180 if (f->pending || f->valid) 3181 return (EBUSY); 3182 if (f->locked) 3183 return (EPERM); 3184 3185 if (t->fs.type == 0) 3186 break; 3187 } 3188 3189 f = &sc->tids.ftid_tab[t->idx]; 3190 f->fs = t->fs; 3191 3192 return set_filter_wr(sc, t->idx); 3193 } 3194 3195 static int 3196 del_filter(struct adapter *sc, struct t4_filter *t) 3197 { 3198 unsigned int nfilters; 3199 struct filter_entry *f; 3200 3201 ADAPTER_LOCK_ASSERT_OWNED(sc); 3202 3203 if (IS_BUSY(sc)) 3204 return (EAGAIN); 3205 3206 nfilters = sc->tids.nftids; 3207 3208 if (nfilters == 0) 3209 return (ENOTSUP); 3210 3211 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 || 3212 t->idx >= nfilters) 3213 return (EINVAL); 3214 3215 if (!(sc->flags & FULL_INIT_DONE)) 3216 return (EAGAIN); 3217 3218 f = &sc->tids.ftid_tab[t->idx]; 3219 3220 if (f->pending) 3221 return (EBUSY); 3222 if (f->locked) 3223 return (EPERM); 3224 3225 if (f->valid) { 3226 t->fs = f->fs; /* extra info for the caller */ 3227 return del_filter_wr(sc, t->idx); 3228 } 3229 3230 return (0); 3231 } 3232 3233 static void 3234 clear_filter(struct filter_entry *f) 3235 { 3236 if (f->l2t) 3237 t4_l2t_release(f->l2t); 3238 3239 bzero(f, sizeof (*f)); 3240 } 3241 3242 static int 3243 set_filter_wr(struct adapter *sc, int fidx) 3244 { 3245 int rc; 3246 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 3247 struct mbuf *m; 3248 struct fw_filter_wr *fwr; 3249 unsigned int ftid; 3250 3251 ADAPTER_LOCK_ASSERT_OWNED(sc); 3252 3253 if (f->fs.newdmac || f->fs.newvlan) { 3254 /* This filter needs an L2T entry; allocate one. */ 3255 f->l2t = t4_l2t_alloc_switching(sc->l2t); 3256 if (f->l2t == NULL) 3257 return (EAGAIN); 3258 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport, 3259 f->fs.dmac)) { 3260 t4_l2t_release(f->l2t); 3261 f->l2t = NULL; 3262 return (ENOMEM); 3263 } 3264 } 3265 3266 ftid = sc->tids.ftid_base + fidx; 3267 3268 m = m_gethdr(M_NOWAIT, MT_DATA); 3269 if (m == NULL) 3270 return (ENOMEM); 3271 3272 fwr = mtod(m, struct fw_filter_wr *); 3273 m->m_len = m->m_pkthdr.len = sizeof(*fwr); 3274 bzero(fwr, sizeof (*fwr)); 3275 3276 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); 3277 fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); 3278 fwr->tid_to_iq = 3279 htobe32(V_FW_FILTER_WR_TID(ftid) | 3280 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 3281 V_FW_FILTER_WR_NOREPLY(0) | 3282 V_FW_FILTER_WR_IQ(f->fs.iq)); 3283 fwr->del_filter_to_l2tix = 3284 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 3285 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 3286 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 3287 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 3288 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 3289 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 3290 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 3291 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 3292 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 3293 f->fs.newvlan == VLAN_REWRITE) | 3294 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 3295 f->fs.newvlan == VLAN_REWRITE) | 3296 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 3297 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 3298 V_FW_FILTER_WR_PRIO(f->fs.prio) | 3299 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0)); 3300 fwr->ethtype = htobe16(f->fs.val.ethtype); 3301 fwr->ethtypem = htobe16(f->fs.mask.ethtype); 3302 fwr->frag_to_ovlan_vldm = 3303 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 3304 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 3305 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) | 3306 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) | 3307 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) | 3308 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld)); 3309 fwr->smac_sel = 0; 3310 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | 3311 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.intrq[0].abs_id)); 3312 fwr->maci_to_matchtypem = 3313 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 3314 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 3315 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 3316 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 3317 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 3318 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 3319 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 3320 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 3321 fwr->ptcl = f->fs.val.proto; 3322 fwr->ptclm = f->fs.mask.proto; 3323 fwr->ttyp = f->fs.val.tos; 3324 fwr->ttypm = f->fs.mask.tos; 3325 fwr->ivlan = htobe16(f->fs.val.ivlan); 3326 fwr->ivlanm = htobe16(f->fs.mask.ivlan); 3327 fwr->ovlan = htobe16(f->fs.val.ovlan); 3328 fwr->ovlanm = htobe16(f->fs.mask.ovlan); 3329 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); 3330 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); 3331 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); 3332 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); 3333 fwr->lp = htobe16(f->fs.val.dport); 3334 fwr->lpm = htobe16(f->fs.mask.dport); 3335 fwr->fp = htobe16(f->fs.val.sport); 3336 fwr->fpm = htobe16(f->fs.mask.sport); 3337 if (f->fs.newsmac) 3338 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); 3339 3340 f->pending = 1; 3341 sc->tids.ftids_in_use++; 3342 rc = t4_mgmt_tx(sc, m); 3343 if (rc != 0) { 3344 sc->tids.ftids_in_use--; 3345 m_freem(m); 3346 clear_filter(f); 3347 } 3348 return (rc); 3349 } 3350 3351 static int 3352 del_filter_wr(struct adapter *sc, int fidx) 3353 { 3354 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 3355 struct mbuf *m; 3356 struct fw_filter_wr *fwr; 3357 unsigned int rc, ftid; 3358 3359 ADAPTER_LOCK_ASSERT_OWNED(sc); 3360 3361 ftid = sc->tids.ftid_base + fidx; 3362 3363 m = m_gethdr(M_NOWAIT, MT_DATA); 3364 if (m == NULL) 3365 return (ENOMEM); 3366 3367 fwr = mtod(m, struct fw_filter_wr *); 3368 m->m_len = m->m_pkthdr.len = sizeof(*fwr); 3369 bzero(fwr, sizeof (*fwr)); 3370 3371 t4_mk_filtdelwr(ftid, fwr, sc->sge.intrq[0].abs_id); 3372 3373 f->pending = 1; 3374 rc = t4_mgmt_tx(sc, m); 3375 if (rc != 0) { 3376 f->pending = 0; 3377 m_freem(m); 3378 } 3379 return (rc); 3380 } 3381 3382 /* XXX move intr handlers to main.c and make this static */ 3383 void 3384 filter_rpl(struct adapter *sc, const struct cpl_set_tcb_rpl *rpl) 3385 { 3386 unsigned int idx = GET_TID(rpl); 3387 3388 if (idx >= sc->tids.ftid_base && 3389 (idx -= sc->tids.ftid_base) < sc->tids.nftids) { 3390 unsigned int rc = G_COOKIE(rpl->cookie); 3391 struct filter_entry *f = &sc->tids.ftid_tab[idx]; 3392 3393 if (rc == FW_FILTER_WR_FLT_DELETED) { 3394 /* 3395 * Clear the filter when we get confirmation from the 3396 * hardware that the filter has been deleted. 3397 */ 3398 clear_filter(f); 3399 sc->tids.ftids_in_use--; 3400 } else if (rc == FW_FILTER_WR_SMT_TBL_FULL) { 3401 device_printf(sc->dev, 3402 "filter %u setup failed due to full SMT\n", idx); 3403 clear_filter(f); 3404 sc->tids.ftids_in_use--; 3405 } else if (rc == FW_FILTER_WR_FLT_ADDED) { 3406 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; 3407 f->pending = 0; /* asynchronous setup completed */ 3408 f->valid = 1; 3409 } else { 3410 /* 3411 * Something went wrong. Issue a warning about the 3412 * problem and clear everything out. 3413 */ 3414 device_printf(sc->dev, 3415 "filter %u setup failed with error %u\n", idx, rc); 3416 clear_filter(f); 3417 sc->tids.ftids_in_use--; 3418 } 3419 } 3420 } 3421 3422 static int 3423 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) 3424 { 3425 int rc = EINVAL; 3426 3427 if (cntxt->cid > M_CTXTQID) 3428 return (rc); 3429 3430 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && 3431 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) 3432 return (rc); 3433 3434 if (sc->flags & FW_OK) { 3435 ADAPTER_LOCK(sc); /* Avoid parallel t4_wr_mbox */ 3436 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, 3437 &cntxt->data[0]); 3438 ADAPTER_UNLOCK(sc); 3439 } 3440 3441 if (rc != 0) { 3442 /* Read via firmware failed or wasn't even attempted */ 3443 3444 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, 3445 &cntxt->data[0]); 3446 } 3447 3448 return (rc); 3449 } 3450 3451 int 3452 t4_os_find_pci_capability(struct adapter *sc, int cap) 3453 { 3454 int i; 3455 3456 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0); 3457 } 3458 3459 int 3460 t4_os_pci_save_state(struct adapter *sc) 3461 { 3462 device_t dev; 3463 struct pci_devinfo *dinfo; 3464 3465 dev = sc->dev; 3466 dinfo = device_get_ivars(dev); 3467 3468 pci_cfg_save(dev, dinfo, 0); 3469 return (0); 3470 } 3471 3472 int 3473 t4_os_pci_restore_state(struct adapter *sc) 3474 { 3475 device_t dev; 3476 struct pci_devinfo *dinfo; 3477 3478 dev = sc->dev; 3479 dinfo = device_get_ivars(dev); 3480 3481 pci_cfg_restore(dev, dinfo); 3482 return (0); 3483 } 3484 3485 void 3486 t4_os_portmod_changed(const struct adapter *sc, int idx) 3487 { 3488 struct port_info *pi = sc->port[idx]; 3489 static const char *mod_str[] = { 3490 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 3491 }; 3492 3493 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 3494 if_printf(pi->ifp, "transceiver unplugged.\n"); 3495 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 3496 if_printf(pi->ifp, "unknown transceiver inserted.\n"); 3497 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 3498 if_printf(pi->ifp, "unsupported transceiver inserted.\n"); 3499 else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str)) { 3500 if_printf(pi->ifp, "%s transceiver inserted.\n", 3501 mod_str[pi->mod_type]); 3502 } else { 3503 if_printf(pi->ifp, "transceiver (type %d) inserted.\n", 3504 pi->mod_type); 3505 } 3506 } 3507 3508 void 3509 t4_os_link_changed(struct adapter *sc, int idx, int link_stat) 3510 { 3511 struct port_info *pi = sc->port[idx]; 3512 struct ifnet *ifp = pi->ifp; 3513 3514 if (link_stat) { 3515 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed); 3516 if_link_state_change(ifp, LINK_STATE_UP); 3517 } else 3518 if_link_state_change(ifp, LINK_STATE_DOWN); 3519 } 3520 3521 static int 3522 t4_open(struct cdev *dev, int flags, int type, struct thread *td) 3523 { 3524 return (0); 3525 } 3526 3527 static int 3528 t4_close(struct cdev *dev, int flags, int type, struct thread *td) 3529 { 3530 return (0); 3531 } 3532 3533 static int 3534 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 3535 struct thread *td) 3536 { 3537 int rc; 3538 struct adapter *sc = dev->si_drv1; 3539 3540 rc = priv_check(td, PRIV_DRIVER); 3541 if (rc != 0) 3542 return (rc); 3543 3544 switch (cmd) { 3545 case CHELSIO_T4_GETREG: { 3546 struct t4_reg *edata = (struct t4_reg *)data; 3547 3548 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 3549 return (EFAULT); 3550 3551 if (edata->size == 4) 3552 edata->val = t4_read_reg(sc, edata->addr); 3553 else if (edata->size == 8) 3554 edata->val = t4_read_reg64(sc, edata->addr); 3555 else 3556 return (EINVAL); 3557 3558 break; 3559 } 3560 case CHELSIO_T4_SETREG: { 3561 struct t4_reg *edata = (struct t4_reg *)data; 3562 3563 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 3564 return (EFAULT); 3565 3566 if (edata->size == 4) { 3567 if (edata->val & 0xffffffff00000000) 3568 return (EINVAL); 3569 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); 3570 } else if (edata->size == 8) 3571 t4_write_reg64(sc, edata->addr, edata->val); 3572 else 3573 return (EINVAL); 3574 break; 3575 } 3576 case CHELSIO_T4_REGDUMP: { 3577 struct t4_regdump *regs = (struct t4_regdump *)data; 3578 int reglen = T4_REGDUMP_SIZE; 3579 uint8_t *buf; 3580 3581 if (regs->len < reglen) { 3582 regs->len = reglen; /* hint to the caller */ 3583 return (ENOBUFS); 3584 } 3585 3586 regs->len = reglen; 3587 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); 3588 t4_get_regs(sc, regs, buf); 3589 rc = copyout(buf, regs->data, reglen); 3590 free(buf, M_CXGBE); 3591 break; 3592 } 3593 case CHELSIO_T4_GET_FILTER_MODE: 3594 rc = get_filter_mode(sc, (uint32_t *)data); 3595 break; 3596 case CHELSIO_T4_SET_FILTER_MODE: 3597 rc = set_filter_mode(sc, *(uint32_t *)data); 3598 break; 3599 case CHELSIO_T4_GET_FILTER: 3600 ADAPTER_LOCK(sc); 3601 rc = get_filter(sc, (struct t4_filter *)data); 3602 ADAPTER_UNLOCK(sc); 3603 break; 3604 case CHELSIO_T4_SET_FILTER: 3605 ADAPTER_LOCK(sc); 3606 rc = set_filter(sc, (struct t4_filter *)data); 3607 ADAPTER_UNLOCK(sc); 3608 break; 3609 case CHELSIO_T4_DEL_FILTER: 3610 ADAPTER_LOCK(sc); 3611 rc = del_filter(sc, (struct t4_filter *)data); 3612 ADAPTER_UNLOCK(sc); 3613 break; 3614 case CHELSIO_T4_GET_SGE_CONTEXT: 3615 rc = get_sge_context(sc, (struct t4_sge_context *)data); 3616 break; 3617 default: 3618 rc = EINVAL; 3619 } 3620 3621 return (rc); 3622 } 3623 3624 static int 3625 t4_mod_event(module_t mod, int cmd, void *arg) 3626 { 3627 3628 if (cmd == MOD_LOAD) 3629 t4_sge_modload(); 3630 3631 return (0); 3632 } 3633 3634 static devclass_t t4_devclass; 3635 static devclass_t cxgbe_devclass; 3636 3637 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0); 3638 MODULE_VERSION(t4nex, 1); 3639 3640 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0); 3641 MODULE_VERSION(cxgbe, 1); 3642