1 /*- 2 * Copyright (c) 2011 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 33 #include <sys/param.h> 34 #include <sys/conf.h> 35 #include <sys/priv.h> 36 #include <sys/kernel.h> 37 #include <sys/bus.h> 38 #include <sys/module.h> 39 #include <sys/malloc.h> 40 #include <sys/queue.h> 41 #include <sys/taskqueue.h> 42 #include <sys/pciio.h> 43 #include <dev/pci/pcireg.h> 44 #include <dev/pci/pcivar.h> 45 #include <dev/pci/pci_private.h> 46 #include <sys/firmware.h> 47 #include <sys/sbuf.h> 48 #include <sys/smp.h> 49 #include <sys/socket.h> 50 #include <sys/sockio.h> 51 #include <sys/sysctl.h> 52 #include <net/ethernet.h> 53 #include <net/if.h> 54 #include <net/if_types.h> 55 #include <net/if_dl.h> 56 #include <net/if_vlan_var.h> 57 58 #include "common/t4_hw.h" 59 #include "common/common.h" 60 #include "common/t4_msg.h" 61 #include "common/t4_regs.h" 62 #include "common/t4_regs_values.h" 63 #include "common/t4fw_interface.h" 64 #include "t4_ioctl.h" 65 #include "t4_l2t.h" 66 67 /* T4 bus driver interface */ 68 static int t4_probe(device_t); 69 static int t4_attach(device_t); 70 static int t4_detach(device_t); 71 static device_method_t t4_methods[] = { 72 DEVMETHOD(device_probe, t4_probe), 73 DEVMETHOD(device_attach, t4_attach), 74 DEVMETHOD(device_detach, t4_detach), 75 76 /* bus interface */ 77 DEVMETHOD(bus_print_child, bus_generic_print_child), 78 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 79 80 { 0, 0 } 81 }; 82 static driver_t t4_driver = { 83 "t4nex", 84 t4_methods, 85 sizeof(struct adapter) 86 }; 87 88 89 /* T4 port (cxgbe) interface */ 90 static int cxgbe_probe(device_t); 91 static int cxgbe_attach(device_t); 92 static int cxgbe_detach(device_t); 93 static device_method_t cxgbe_methods[] = { 94 DEVMETHOD(device_probe, cxgbe_probe), 95 DEVMETHOD(device_attach, cxgbe_attach), 96 DEVMETHOD(device_detach, cxgbe_detach), 97 { 0, 0 } 98 }; 99 static driver_t cxgbe_driver = { 100 "cxgbe", 101 cxgbe_methods, 102 sizeof(struct port_info) 103 }; 104 105 static d_ioctl_t t4_ioctl; 106 static d_open_t t4_open; 107 static d_close_t t4_close; 108 109 static struct cdevsw t4_cdevsw = { 110 .d_version = D_VERSION, 111 .d_flags = 0, 112 .d_open = t4_open, 113 .d_close = t4_close, 114 .d_ioctl = t4_ioctl, 115 .d_name = "t4nex", 116 }; 117 118 /* ifnet + media interface */ 119 static void cxgbe_init(void *); 120 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t); 121 static void cxgbe_start(struct ifnet *); 122 static int cxgbe_transmit(struct ifnet *, struct mbuf *); 123 static void cxgbe_qflush(struct ifnet *); 124 static int cxgbe_media_change(struct ifnet *); 125 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *); 126 127 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services"); 128 129 /* 130 * Tunables. 131 */ 132 SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe driver parameters"); 133 134 static int force_firmware_install = 0; 135 TUNABLE_INT("hw.cxgbe.force_firmware_install", &force_firmware_install); 136 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, force_firmware_install, CTLFLAG_RDTUN, 137 &force_firmware_install, 0, "install firmware on every attach."); 138 139 /* 140 * Holdoff timer and packet counter values. 141 */ 142 static unsigned int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200}; 143 static unsigned int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */ 144 145 /* 146 * Max # of tx and rx queues to use for each 10G and 1G port. 147 */ 148 static unsigned int max_ntxq_10g = 8; 149 TUNABLE_INT("hw.cxgbe.max_ntxq_10G_port", &max_ntxq_10g); 150 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_10G_port, CTLFLAG_RDTUN, 151 &max_ntxq_10g, 0, "maximum number of tx queues per 10G port."); 152 153 static unsigned int max_nrxq_10g = 8; 154 TUNABLE_INT("hw.cxgbe.max_nrxq_10G_port", &max_nrxq_10g); 155 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_10G_port, CTLFLAG_RDTUN, 156 &max_nrxq_10g, 0, "maximum number of rxq's (per 10G port)."); 157 158 static unsigned int max_ntxq_1g = 2; 159 TUNABLE_INT("hw.cxgbe.max_ntxq_1G_port", &max_ntxq_1g); 160 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_1G_port, CTLFLAG_RDTUN, 161 &max_ntxq_1g, 0, "maximum number of tx queues per 1G port."); 162 163 static unsigned int max_nrxq_1g = 2; 164 TUNABLE_INT("hw.cxgbe.max_nrxq_1G_port", &max_nrxq_1g); 165 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_1G_port, CTLFLAG_RDTUN, 166 &max_nrxq_1g, 0, "maximum number of rxq's (per 1G port)."); 167 168 /* 169 * Holdoff parameters for 10G and 1G ports. 170 */ 171 static unsigned int tmr_idx_10g = 1; 172 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &tmr_idx_10g); 173 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_10G, CTLFLAG_RDTUN, 174 &tmr_idx_10g, 0, 175 "default timer index for interrupt holdoff (10G ports)."); 176 177 static int pktc_idx_10g = 2; 178 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &pktc_idx_10g); 179 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_10G, CTLFLAG_RDTUN, 180 &pktc_idx_10g, 0, 181 "default pkt counter index for interrupt holdoff (10G ports)."); 182 183 static unsigned int tmr_idx_1g = 1; 184 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &tmr_idx_1g); 185 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_1G, CTLFLAG_RDTUN, 186 &tmr_idx_1g, 0, 187 "default timer index for interrupt holdoff (1G ports)."); 188 189 static int pktc_idx_1g = 2; 190 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &pktc_idx_1g); 191 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_1G, CTLFLAG_RDTUN, 192 &pktc_idx_1g, 0, 193 "default pkt counter index for interrupt holdoff (1G ports)."); 194 195 /* 196 * Size (# of entries) of each tx and rx queue. 197 */ 198 static unsigned int qsize_txq = TX_EQ_QSIZE; 199 TUNABLE_INT("hw.cxgbe.qsize_txq", &qsize_txq); 200 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN, 201 &qsize_txq, 0, "default queue size of NIC tx queues."); 202 203 static unsigned int qsize_rxq = RX_IQ_QSIZE; 204 TUNABLE_INT("hw.cxgbe.qsize_rxq", &qsize_rxq); 205 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN, 206 &qsize_rxq, 0, "default queue size of NIC rx queues."); 207 208 /* 209 * Interrupt types allowed. 210 */ 211 static int intr_types = INTR_MSIX | INTR_MSI | INTR_INTX; 212 TUNABLE_INT("hw.cxgbe.interrupt_types", &intr_types); 213 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &intr_types, 0, 214 "interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively)"); 215 216 /* 217 * Force the driver to use the same set of interrupts for all ports. 218 */ 219 static int intr_shared = 0; 220 TUNABLE_INT("hw.cxgbe.interrupts_shared", &intr_shared); 221 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupts_shared, CTLFLAG_RDTUN, 222 &intr_shared, 0, "interrupts shared between all ports"); 223 224 static unsigned int filter_mode = HW_TPL_FR_MT_PR_IV_P_FC; 225 TUNABLE_INT("hw.cxgbe.filter_mode", &filter_mode); 226 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, filter_mode, CTLFLAG_RDTUN, 227 &filter_mode, 0, "default global filter mode."); 228 229 struct intrs_and_queues { 230 int intr_type; /* INTx, MSI, or MSI-X */ 231 int nirq; /* Number of vectors */ 232 int intr_shared; /* Interrupts shared between all ports */ 233 int ntxq10g; /* # of NIC txq's for each 10G port */ 234 int nrxq10g; /* # of NIC rxq's for each 10G port */ 235 int ntxq1g; /* # of NIC txq's for each 1G port */ 236 int nrxq1g; /* # of NIC rxq's for each 1G port */ 237 }; 238 239 struct filter_entry { 240 uint32_t valid:1; /* filter allocated and valid */ 241 uint32_t locked:1; /* filter is administratively locked */ 242 uint32_t pending:1; /* filter action is pending firmware reply */ 243 uint32_t smtidx:8; /* Source MAC Table index for smac */ 244 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */ 245 246 struct t4_filter_specification fs; 247 }; 248 249 enum { 250 MEMWIN0_APERTURE = 2048, 251 MEMWIN0_BASE = 0x1b800, 252 MEMWIN1_APERTURE = 32768, 253 MEMWIN1_BASE = 0x28000, 254 MEMWIN2_APERTURE = 65536, 255 MEMWIN2_BASE = 0x30000, 256 }; 257 258 enum { 259 XGMAC_MTU = (1 << 0), 260 XGMAC_PROMISC = (1 << 1), 261 XGMAC_ALLMULTI = (1 << 2), 262 XGMAC_VLANEX = (1 << 3), 263 XGMAC_UCADDR = (1 << 4), 264 XGMAC_MCADDRS = (1 << 5), 265 266 XGMAC_ALL = 0xffff 267 }; 268 269 static int map_bars(struct adapter *); 270 static void setup_memwin(struct adapter *); 271 static int cfg_itype_and_nqueues(struct adapter *, int, int, 272 struct intrs_and_queues *); 273 static int prep_firmware(struct adapter *); 274 static int get_devlog_params(struct adapter *, struct devlog_params *); 275 static int get_capabilities(struct adapter *, struct fw_caps_config_cmd *); 276 static int get_params(struct adapter *, struct fw_caps_config_cmd *); 277 static void t4_set_desc(struct adapter *); 278 static void build_medialist(struct port_info *); 279 static int update_mac_settings(struct port_info *, int); 280 static int cxgbe_init_locked(struct port_info *); 281 static int cxgbe_init_synchronized(struct port_info *); 282 static int cxgbe_uninit_locked(struct port_info *); 283 static int cxgbe_uninit_synchronized(struct port_info *); 284 static int first_port_up(struct adapter *); 285 static int last_port_down(struct adapter *); 286 static int t4_alloc_irq(struct adapter *, struct irq *, int rid, 287 iq_intr_handler_t *, void *, char *); 288 static int t4_free_irq(struct adapter *, struct irq *); 289 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int, 290 unsigned int); 291 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *); 292 static void cxgbe_tick(void *); 293 static int t4_sysctls(struct adapter *); 294 static int cxgbe_sysctls(struct port_info *); 295 static int sysctl_int_array(SYSCTL_HANDLER_ARGS); 296 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS); 297 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS); 298 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS); 299 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS); 300 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS); 301 static int sysctl_devlog(SYSCTL_HANDLER_ARGS); 302 static inline void txq_start(struct ifnet *, struct sge_txq *); 303 static uint32_t fconf_to_mode(uint32_t); 304 static uint32_t mode_to_fconf(uint32_t); 305 static uint32_t fspec_to_fconf(struct t4_filter_specification *); 306 static int get_filter_mode(struct adapter *, uint32_t *); 307 static int set_filter_mode(struct adapter *, uint32_t); 308 static inline uint64_t get_filter_hits(struct adapter *, uint32_t); 309 static int get_filter(struct adapter *, struct t4_filter *); 310 static int set_filter(struct adapter *, struct t4_filter *); 311 static int del_filter(struct adapter *, struct t4_filter *); 312 static void clear_filter(struct filter_entry *); 313 static int set_filter_wr(struct adapter *, int); 314 static int del_filter_wr(struct adapter *, int); 315 void filter_rpl(struct adapter *, const struct cpl_set_tcb_rpl *); 316 static int get_sge_context(struct adapter *, struct t4_sge_context *); 317 static int t4_mod_event(module_t, int, void *); 318 319 struct t4_pciids { 320 uint16_t device; 321 uint8_t mpf; 322 char *desc; 323 } t4_pciids[] = { 324 {0xa000, 0, "Chelsio Terminator 4 FPGA"}, 325 {0x4400, 4, "Chelsio T440-dbg"}, 326 {0x4401, 4, "Chelsio T420-CR"}, 327 {0x4402, 4, "Chelsio T422-CR"}, 328 {0x4403, 4, "Chelsio T440-CR"}, 329 {0x4404, 4, "Chelsio T420-BCH"}, 330 {0x4405, 4, "Chelsio T440-BCH"}, 331 {0x4406, 4, "Chelsio T440-CH"}, 332 {0x4407, 4, "Chelsio T420-SO"}, 333 {0x4408, 4, "Chelsio T420-CX"}, 334 {0x4409, 4, "Chelsio T420-BT"}, 335 {0x440a, 4, "Chelsio T404-BT"}, 336 }; 337 338 static int 339 t4_probe(device_t dev) 340 { 341 int i; 342 uint16_t v = pci_get_vendor(dev); 343 uint16_t d = pci_get_device(dev); 344 345 if (v != PCI_VENDOR_ID_CHELSIO) 346 return (ENXIO); 347 348 for (i = 0; i < ARRAY_SIZE(t4_pciids); i++) { 349 if (d == t4_pciids[i].device && 350 pci_get_function(dev) == t4_pciids[i].mpf) { 351 device_set_desc(dev, t4_pciids[i].desc); 352 return (BUS_PROBE_DEFAULT); 353 } 354 } 355 356 return (ENXIO); 357 } 358 359 static int 360 t4_attach(device_t dev) 361 { 362 struct adapter *sc; 363 int rc = 0, i, n10g, n1g, rqidx, tqidx; 364 struct fw_caps_config_cmd caps; 365 uint32_t p, v; 366 struct intrs_and_queues iaq; 367 struct sge *s; 368 369 sc = device_get_softc(dev); 370 sc->dev = dev; 371 sc->pf = pci_get_function(dev); 372 sc->mbox = sc->pf; 373 374 pci_enable_busmaster(dev); 375 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) { 376 pci_set_max_read_req(dev, 4096); 377 v = pci_read_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, 2); 378 v |= PCIM_EXP_CTL_RELAXED_ORD_ENABLE; 379 pci_write_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, v, 2); 380 } 381 382 snprintf(sc->lockname, sizeof(sc->lockname), "%s", 383 device_get_nameunit(dev)); 384 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF); 385 386 rc = map_bars(sc); 387 if (rc != 0) 388 goto done; /* error message displayed already */ 389 390 memset(sc->chan_map, 0xff, sizeof(sc->chan_map)); 391 392 /* Prepare the adapter for operation */ 393 rc = -t4_prep_adapter(sc); 394 if (rc != 0) { 395 device_printf(dev, "failed to prepare adapter: %d.\n", rc); 396 goto done; 397 } 398 399 /* Do this really early */ 400 sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT, 401 GID_WHEEL, 0600, "%s", device_get_nameunit(dev)); 402 sc->cdev->si_drv1 = sc; 403 404 /* Prepare the firmware for operation */ 405 rc = prep_firmware(sc); 406 if (rc != 0) 407 goto done; /* error message displayed already */ 408 409 /* Read firmware devlog parameters */ 410 (void) get_devlog_params(sc, &sc->params.devlog); 411 412 /* Get device capabilities and select which ones we'll use */ 413 rc = get_capabilities(sc, &caps); 414 if (rc != 0) { 415 device_printf(dev, 416 "failed to initialize adapter capabilities: %d.\n", rc); 417 goto done; 418 } 419 420 /* Choose the global RSS mode. */ 421 rc = -t4_config_glbl_rss(sc, sc->mbox, 422 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, 423 F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | 424 F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ | 425 F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); 426 if (rc != 0) { 427 device_printf(dev, 428 "failed to select global RSS mode: %d.\n", rc); 429 goto done; 430 } 431 432 /* These are total (sum of all ports) limits for a bus driver */ 433 rc = -t4_cfg_pfvf(sc, sc->mbox, sc->pf, 0, 434 128, /* max # of egress queues */ 435 64, /* max # of egress Ethernet or control queues */ 436 64, /* max # of ingress queues with fl/interrupt */ 437 0, /* max # of ingress queues without interrupt */ 438 0, /* PCIe traffic class */ 439 4, /* max # of virtual interfaces */ 440 M_FW_PFVF_CMD_CMASK, M_FW_PFVF_CMD_PMASK, 16, 441 FW_CMD_CAP_PF, FW_CMD_CAP_PF); 442 if (rc != 0) { 443 device_printf(dev, 444 "failed to configure pf/vf resources: %d.\n", rc); 445 goto done; 446 } 447 448 /* Need this before sge_init */ 449 for (i = 0; i < SGE_NTIMERS; i++) 450 sc->sge.timer_val[i] = min(intr_timer[i], 200U); 451 for (i = 0; i < SGE_NCOUNTERS; i++) 452 sc->sge.counter_val[i] = min(intr_pktcount[i], M_THRESHOLD_0); 453 454 /* Also need the cooked value of cclk before sge_init */ 455 p = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 456 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); 457 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &p, &v); 458 if (rc != 0) { 459 device_printf(sc->dev, 460 "failed to obtain core clock value: %d.\n", rc); 461 goto done; 462 } 463 sc->params.vpd.cclk = v; 464 465 t4_sge_init(sc); 466 467 t4_set_filter_mode(sc, filter_mode); 468 t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, 469 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP), 470 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP)); 471 t4_tp_wr_bits_indirect(sc, A_TP_INGRESS_CONFIG, F_CSUM_HAS_PSEUDO_HDR, 472 F_LOOKUPEVERYPKT); 473 474 /* get basic stuff going */ 475 rc = -t4_early_init(sc, sc->mbox); 476 if (rc != 0) { 477 device_printf(dev, "early init failed: %d.\n", rc); 478 goto done; 479 } 480 481 rc = get_params(sc, &caps); 482 if (rc != 0) 483 goto done; /* error message displayed already */ 484 485 /* These are finalized by FW initialization, load their values now */ 486 v = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 487 sc->params.tp.tre = G_TIMERRESOLUTION(v); 488 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v); 489 t4_read_mtu_tbl(sc, sc->params.mtus, NULL); 490 491 /* tweak some settings */ 492 t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) | 493 V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) | 494 V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9)); 495 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); 496 t4_set_reg_field(sc, A_TP_PARA_REG3, F_TUNNELCNGDROP0 | 497 F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | F_TUNNELCNGDROP3, 0); 498 499 setup_memwin(sc); 500 501 rc = t4_create_dma_tag(sc); 502 if (rc != 0) 503 goto done; /* error message displayed already */ 504 505 /* 506 * First pass over all the ports - allocate VIs and initialize some 507 * basic parameters like mac address, port type, etc. We also figure 508 * out whether a port is 10G or 1G and use that information when 509 * calculating how many interrupts to attempt to allocate. 510 */ 511 n10g = n1g = 0; 512 for_each_port(sc, i) { 513 struct port_info *pi; 514 515 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK); 516 sc->port[i] = pi; 517 518 /* These must be set before t4_port_init */ 519 pi->adapter = sc; 520 pi->port_id = i; 521 522 /* Allocate the vi and initialize parameters like mac addr */ 523 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0); 524 if (rc != 0) { 525 device_printf(dev, "unable to initialize port %d: %d\n", 526 i, rc); 527 free(pi, M_CXGBE); 528 sc->port[i] = NULL; 529 goto done; 530 } 531 532 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d", 533 device_get_nameunit(dev), i); 534 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF); 535 536 if (is_10G_port(pi)) { 537 n10g++; 538 pi->tmr_idx = tmr_idx_10g; 539 pi->pktc_idx = pktc_idx_10g; 540 } else { 541 n1g++; 542 pi->tmr_idx = tmr_idx_1g; 543 pi->pktc_idx = pktc_idx_1g; 544 } 545 546 pi->xact_addr_filt = -1; 547 548 pi->qsize_rxq = max(qsize_rxq, 128); 549 while (pi->qsize_rxq & 7) 550 pi->qsize_rxq++; 551 pi->qsize_txq = max(qsize_txq, 128); 552 553 if (pi->qsize_rxq != qsize_rxq) { 554 device_printf(dev, 555 "using %d instead of %d as the rx queue size.\n", 556 pi->qsize_rxq, qsize_rxq); 557 } 558 if (pi->qsize_txq != qsize_txq) { 559 device_printf(dev, 560 "using %d instead of %d as the tx queue size.\n", 561 pi->qsize_txq, qsize_txq); 562 } 563 564 pi->dev = device_add_child(dev, "cxgbe", -1); 565 if (pi->dev == NULL) { 566 device_printf(dev, 567 "failed to add device for port %d.\n", i); 568 rc = ENXIO; 569 goto done; 570 } 571 device_set_softc(pi->dev, pi); 572 573 setbit(&sc->registered_device_map, i); 574 } 575 576 if (sc->registered_device_map == 0) { 577 device_printf(dev, "no usable ports\n"); 578 rc = ENXIO; 579 goto done; 580 } 581 582 /* 583 * Interrupt type, # of interrupts, # of rx/tx queues, etc. 584 */ 585 rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq); 586 if (rc != 0) 587 goto done; /* error message displayed already */ 588 589 sc->intr_type = iaq.intr_type; 590 sc->intr_count = iaq.nirq; 591 592 s = &sc->sge; 593 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g; 594 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g; 595 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */ 596 s->neq += sc->params.nports; /* control queues, 1 per port */ 597 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */ 598 if (iaq.intr_shared) 599 sc->flags |= INTR_SHARED; 600 s->niq += NINTRQ(sc); /* interrupt queues */ 601 602 s->intrq = malloc(NINTRQ(sc) * sizeof(struct sge_iq), M_CXGBE, 603 M_ZERO | M_WAITOK); 604 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_ctrlq), M_CXGBE, 605 M_ZERO | M_WAITOK); 606 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE, 607 M_ZERO | M_WAITOK); 608 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE, 609 M_ZERO | M_WAITOK); 610 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE, 611 M_ZERO | M_WAITOK); 612 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE, 613 M_ZERO | M_WAITOK); 614 615 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE, 616 M_ZERO | M_WAITOK); 617 618 sc->l2t = t4_init_l2t(M_WAITOK); 619 620 t4_sysctls(sc); 621 622 /* 623 * Second pass over the ports. This time we know the number of rx and 624 * tx queues that each port should get. 625 */ 626 rqidx = tqidx = 0; 627 for_each_port(sc, i) { 628 struct port_info *pi = sc->port[i]; 629 630 if (pi == NULL) 631 continue; 632 633 pi->first_rxq = rqidx; 634 pi->nrxq = is_10G_port(pi) ? iaq.nrxq10g : iaq.nrxq1g; 635 636 pi->first_txq = tqidx; 637 pi->ntxq = is_10G_port(pi) ? iaq.ntxq10g : iaq.ntxq1g; 638 639 rqidx += pi->nrxq; 640 tqidx += pi->ntxq; 641 } 642 643 rc = bus_generic_attach(dev); 644 if (rc != 0) { 645 device_printf(dev, 646 "failed to attach all child ports: %d\n", rc); 647 goto done; 648 } 649 650 #ifdef INVARIANTS 651 device_printf(dev, 652 "%p, %d ports (0x%x), %d intr_type, %d intr_count\n", 653 sc, sc->params.nports, sc->params.portvec, 654 sc->intr_type, sc->intr_count); 655 #endif 656 t4_set_desc(sc); 657 658 done: 659 if (rc != 0) 660 t4_detach(dev); 661 662 return (rc); 663 } 664 665 /* 666 * Idempotent 667 */ 668 static int 669 t4_detach(device_t dev) 670 { 671 struct adapter *sc; 672 struct port_info *pi; 673 int i; 674 675 sc = device_get_softc(dev); 676 677 if (sc->cdev) 678 destroy_dev(sc->cdev); 679 680 bus_generic_detach(dev); 681 for (i = 0; i < MAX_NPORTS; i++) { 682 pi = sc->port[i]; 683 if (pi) { 684 t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid); 685 if (pi->dev) 686 device_delete_child(dev, pi->dev); 687 688 mtx_destroy(&pi->pi_lock); 689 free(pi, M_CXGBE); 690 } 691 } 692 693 if (sc->flags & FW_OK) 694 t4_fw_bye(sc, sc->mbox); 695 696 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX) 697 pci_release_msi(dev); 698 699 if (sc->regs_res) 700 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid, 701 sc->regs_res); 702 703 if (sc->msix_res) 704 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid, 705 sc->msix_res); 706 707 if (sc->l2t) 708 t4_free_l2t(sc->l2t); 709 710 free(sc->irq, M_CXGBE); 711 free(sc->sge.rxq, M_CXGBE); 712 free(sc->sge.txq, M_CXGBE); 713 free(sc->sge.ctrlq, M_CXGBE); 714 free(sc->sge.intrq, M_CXGBE); 715 free(sc->sge.iqmap, M_CXGBE); 716 free(sc->sge.eqmap, M_CXGBE); 717 free(sc->tids.ftid_tab, M_CXGBE); 718 t4_destroy_dma_tag(sc); 719 mtx_destroy(&sc->sc_lock); 720 721 bzero(sc, sizeof(*sc)); 722 723 return (0); 724 } 725 726 727 static int 728 cxgbe_probe(device_t dev) 729 { 730 char buf[128]; 731 struct port_info *pi = device_get_softc(dev); 732 733 snprintf(buf, sizeof(buf), "Port %d", pi->port_id); 734 device_set_desc_copy(dev, buf); 735 736 return (BUS_PROBE_DEFAULT); 737 } 738 739 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \ 740 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \ 741 IFCAP_VLAN_HWTSO) 742 #define T4_CAP_ENABLE (T4_CAP & ~IFCAP_TSO6) 743 744 static int 745 cxgbe_attach(device_t dev) 746 { 747 struct port_info *pi = device_get_softc(dev); 748 struct ifnet *ifp; 749 750 /* Allocate an ifnet and set it up */ 751 ifp = if_alloc(IFT_ETHER); 752 if (ifp == NULL) { 753 device_printf(dev, "Cannot allocate ifnet\n"); 754 return (ENOMEM); 755 } 756 pi->ifp = ifp; 757 ifp->if_softc = pi; 758 759 callout_init(&pi->tick, CALLOUT_MPSAFE); 760 pi->tq = taskqueue_create("cxgbe_taskq", M_NOWAIT, 761 taskqueue_thread_enqueue, &pi->tq); 762 if (pi->tq == NULL) { 763 device_printf(dev, "failed to allocate port task queue\n"); 764 if_free(pi->ifp); 765 return (ENOMEM); 766 } 767 taskqueue_start_threads(&pi->tq, 1, PI_NET, "%s taskq", 768 device_get_nameunit(dev)); 769 770 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 771 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 772 773 ifp->if_init = cxgbe_init; 774 ifp->if_ioctl = cxgbe_ioctl; 775 ifp->if_start = cxgbe_start; 776 ifp->if_transmit = cxgbe_transmit; 777 ifp->if_qflush = cxgbe_qflush; 778 779 ifp->if_snd.ifq_drv_maxlen = 1024; 780 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 781 IFQ_SET_READY(&ifp->if_snd); 782 783 ifp->if_capabilities = T4_CAP; 784 ifp->if_capenable = T4_CAP_ENABLE; 785 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO; 786 787 /* Initialize ifmedia for this port */ 788 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change, 789 cxgbe_media_status); 790 build_medialist(pi); 791 792 ether_ifattach(ifp, pi->hw_addr); 793 794 #ifdef INVARIANTS 795 device_printf(dev, "%p, %d txq, %d rxq\n", pi, pi->ntxq, pi->nrxq); 796 #endif 797 798 cxgbe_sysctls(pi); 799 800 return (0); 801 } 802 803 static int 804 cxgbe_detach(device_t dev) 805 { 806 struct port_info *pi = device_get_softc(dev); 807 struct adapter *sc = pi->adapter; 808 int rc; 809 810 /* Tell if_ioctl and if_init that the port is going away */ 811 ADAPTER_LOCK(sc); 812 SET_DOOMED(pi); 813 wakeup(&sc->flags); 814 while (IS_BUSY(sc)) 815 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); 816 SET_BUSY(sc); 817 ADAPTER_UNLOCK(sc); 818 819 rc = cxgbe_uninit_synchronized(pi); 820 if (rc != 0) 821 device_printf(dev, "port uninit failed: %d.\n", rc); 822 823 taskqueue_free(pi->tq); 824 825 ifmedia_removeall(&pi->media); 826 ether_ifdetach(pi->ifp); 827 if_free(pi->ifp); 828 829 ADAPTER_LOCK(sc); 830 CLR_BUSY(sc); 831 wakeup_one(&sc->flags); 832 ADAPTER_UNLOCK(sc); 833 834 return (0); 835 } 836 837 static void 838 cxgbe_init(void *arg) 839 { 840 struct port_info *pi = arg; 841 struct adapter *sc = pi->adapter; 842 843 ADAPTER_LOCK(sc); 844 cxgbe_init_locked(pi); /* releases adapter lock */ 845 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 846 } 847 848 static int 849 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data) 850 { 851 int rc = 0, mtu, flags; 852 struct port_info *pi = ifp->if_softc; 853 struct adapter *sc = pi->adapter; 854 struct ifreq *ifr = (struct ifreq *)data; 855 uint32_t mask; 856 857 switch (cmd) { 858 case SIOCSIFMTU: 859 ADAPTER_LOCK(sc); 860 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 861 if (rc) { 862 fail: 863 ADAPTER_UNLOCK(sc); 864 return (rc); 865 } 866 867 mtu = ifr->ifr_mtu; 868 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) { 869 rc = EINVAL; 870 } else { 871 ifp->if_mtu = mtu; 872 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 873 t4_update_fl_bufsize(ifp); 874 PORT_LOCK(pi); 875 rc = update_mac_settings(pi, XGMAC_MTU); 876 PORT_UNLOCK(pi); 877 } 878 } 879 ADAPTER_UNLOCK(sc); 880 break; 881 882 case SIOCSIFFLAGS: 883 ADAPTER_LOCK(sc); 884 if (IS_DOOMED(pi)) { 885 rc = ENXIO; 886 goto fail; 887 } 888 if (ifp->if_flags & IFF_UP) { 889 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 890 flags = pi->if_flags; 891 if ((ifp->if_flags ^ flags) & 892 (IFF_PROMISC | IFF_ALLMULTI)) { 893 if (IS_BUSY(sc)) { 894 rc = EBUSY; 895 goto fail; 896 } 897 PORT_LOCK(pi); 898 rc = update_mac_settings(pi, 899 XGMAC_PROMISC | XGMAC_ALLMULTI); 900 PORT_UNLOCK(pi); 901 } 902 ADAPTER_UNLOCK(sc); 903 } else 904 rc = cxgbe_init_locked(pi); 905 pi->if_flags = ifp->if_flags; 906 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) 907 rc = cxgbe_uninit_locked(pi); 908 else 909 ADAPTER_UNLOCK(sc); 910 911 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 912 break; 913 914 case SIOCADDMULTI: 915 case SIOCDELMULTI: /* these two can be called with a mutex held :-( */ 916 ADAPTER_LOCK(sc); 917 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 918 if (rc) 919 goto fail; 920 921 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 922 PORT_LOCK(pi); 923 rc = update_mac_settings(pi, XGMAC_MCADDRS); 924 PORT_UNLOCK(pi); 925 } 926 ADAPTER_UNLOCK(sc); 927 break; 928 929 case SIOCSIFCAP: 930 ADAPTER_LOCK(sc); 931 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 932 if (rc) 933 goto fail; 934 935 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 936 if (mask & IFCAP_TXCSUM) { 937 ifp->if_capenable ^= IFCAP_TXCSUM; 938 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 939 940 if (IFCAP_TSO & ifp->if_capenable && 941 !(IFCAP_TXCSUM & ifp->if_capenable)) { 942 ifp->if_capenable &= ~IFCAP_TSO; 943 ifp->if_hwassist &= ~CSUM_TSO; 944 if_printf(ifp, 945 "tso disabled due to -txcsum.\n"); 946 } 947 } 948 if (mask & IFCAP_RXCSUM) 949 ifp->if_capenable ^= IFCAP_RXCSUM; 950 if (mask & IFCAP_TSO4) { 951 ifp->if_capenable ^= IFCAP_TSO4; 952 953 if (IFCAP_TSO & ifp->if_capenable) { 954 if (IFCAP_TXCSUM & ifp->if_capenable) 955 ifp->if_hwassist |= CSUM_TSO; 956 else { 957 ifp->if_capenable &= ~IFCAP_TSO; 958 ifp->if_hwassist &= ~CSUM_TSO; 959 if_printf(ifp, 960 "enable txcsum first.\n"); 961 rc = EAGAIN; 962 } 963 } else 964 ifp->if_hwassist &= ~CSUM_TSO; 965 } 966 if (mask & IFCAP_LRO) { 967 #ifdef INET 968 int i; 969 struct sge_rxq *rxq; 970 971 ifp->if_capenable ^= IFCAP_LRO; 972 for_each_rxq(pi, i, rxq) { 973 if (ifp->if_capenable & IFCAP_LRO) 974 rxq->flags |= RXQ_LRO_ENABLED; 975 else 976 rxq->flags &= ~RXQ_LRO_ENABLED; 977 } 978 #endif 979 } 980 #ifndef TCP_OFFLOAD_DISABLE 981 if (mask & IFCAP_TOE4) { 982 rc = EOPNOTSUPP; 983 } 984 #endif 985 if (mask & IFCAP_VLAN_HWTAGGING) { 986 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 987 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 988 PORT_LOCK(pi); 989 rc = update_mac_settings(pi, XGMAC_VLANEX); 990 PORT_UNLOCK(pi); 991 } 992 } 993 if (mask & IFCAP_VLAN_MTU) { 994 ifp->if_capenable ^= IFCAP_VLAN_MTU; 995 996 /* Need to find out how to disable auto-mtu-inflation */ 997 } 998 if (mask & IFCAP_VLAN_HWTSO) 999 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1000 if (mask & IFCAP_VLAN_HWCSUM) 1001 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 1002 1003 #ifdef VLAN_CAPABILITIES 1004 VLAN_CAPABILITIES(ifp); 1005 #endif 1006 ADAPTER_UNLOCK(sc); 1007 break; 1008 1009 case SIOCSIFMEDIA: 1010 case SIOCGIFMEDIA: 1011 ifmedia_ioctl(ifp, ifr, &pi->media, cmd); 1012 break; 1013 1014 default: 1015 rc = ether_ioctl(ifp, cmd, data); 1016 } 1017 1018 return (rc); 1019 } 1020 1021 static void 1022 cxgbe_start(struct ifnet *ifp) 1023 { 1024 struct port_info *pi = ifp->if_softc; 1025 struct sge_txq *txq; 1026 int i; 1027 1028 for_each_txq(pi, i, txq) { 1029 if (TXQ_TRYLOCK(txq)) { 1030 txq_start(ifp, txq); 1031 TXQ_UNLOCK(txq); 1032 } 1033 } 1034 } 1035 1036 static int 1037 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m) 1038 { 1039 struct port_info *pi = ifp->if_softc; 1040 struct adapter *sc = pi->adapter; 1041 struct sge_txq *txq = &sc->sge.txq[pi->first_txq]; 1042 struct buf_ring *br; 1043 int rc; 1044 1045 M_ASSERTPKTHDR(m); 1046 1047 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1048 m_freem(m); 1049 return (0); 1050 } 1051 1052 if (m->m_flags & M_FLOWID) 1053 txq += (m->m_pkthdr.flowid % pi->ntxq); 1054 br = txq->br; 1055 1056 if (TXQ_TRYLOCK(txq) == 0) { 1057 /* 1058 * XXX: make sure that this packet really is sent out. There is 1059 * a small race where t4_eth_tx may stop draining the drbr and 1060 * goes away, just before we enqueued this mbuf. 1061 */ 1062 1063 return (drbr_enqueue(ifp, br, m)); 1064 } 1065 1066 /* 1067 * txq->m is the mbuf that is held up due to a temporary shortage of 1068 * resources and it should be put on the wire first. Then what's in 1069 * drbr and finally the mbuf that was just passed in to us. 1070 * 1071 * Return code should indicate the fate of the mbuf that was passed in 1072 * this time. 1073 */ 1074 1075 TXQ_LOCK_ASSERT_OWNED(txq); 1076 if (drbr_needs_enqueue(ifp, br) || txq->m) { 1077 1078 /* Queued for transmission. */ 1079 1080 rc = drbr_enqueue(ifp, br, m); 1081 m = txq->m ? txq->m : drbr_dequeue(ifp, br); 1082 (void) t4_eth_tx(ifp, txq, m); 1083 TXQ_UNLOCK(txq); 1084 return (rc); 1085 } 1086 1087 /* Direct transmission. */ 1088 rc = t4_eth_tx(ifp, txq, m); 1089 if (rc != 0 && txq->m) 1090 rc = 0; /* held, will be transmitted soon (hopefully) */ 1091 1092 TXQ_UNLOCK(txq); 1093 return (rc); 1094 } 1095 1096 static void 1097 cxgbe_qflush(struct ifnet *ifp) 1098 { 1099 struct port_info *pi = ifp->if_softc; 1100 struct sge_txq *txq; 1101 int i; 1102 struct mbuf *m; 1103 1104 /* queues do not exist if !IFF_DRV_RUNNING. */ 1105 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1106 for_each_txq(pi, i, txq) { 1107 TXQ_LOCK(txq); 1108 m_freem(txq->m); 1109 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL) 1110 m_freem(m); 1111 TXQ_UNLOCK(txq); 1112 } 1113 } 1114 if_qflush(ifp); 1115 } 1116 1117 static int 1118 cxgbe_media_change(struct ifnet *ifp) 1119 { 1120 struct port_info *pi = ifp->if_softc; 1121 1122 device_printf(pi->dev, "%s unimplemented.\n", __func__); 1123 1124 return (EOPNOTSUPP); 1125 } 1126 1127 static void 1128 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1129 { 1130 struct port_info *pi = ifp->if_softc; 1131 struct ifmedia_entry *cur = pi->media.ifm_cur; 1132 int speed = pi->link_cfg.speed; 1133 int data = (pi->port_type << 8) | pi->mod_type; 1134 1135 if (cur->ifm_data != data) { 1136 build_medialist(pi); 1137 cur = pi->media.ifm_cur; 1138 } 1139 1140 ifmr->ifm_status = IFM_AVALID; 1141 if (!pi->link_cfg.link_ok) 1142 return; 1143 1144 ifmr->ifm_status |= IFM_ACTIVE; 1145 1146 /* active and current will differ iff current media is autoselect. */ 1147 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO) 1148 return; 1149 1150 ifmr->ifm_active = IFM_ETHER | IFM_FDX; 1151 if (speed == SPEED_10000) 1152 ifmr->ifm_active |= IFM_10G_T; 1153 else if (speed == SPEED_1000) 1154 ifmr->ifm_active |= IFM_1000_T; 1155 else if (speed == SPEED_100) 1156 ifmr->ifm_active |= IFM_100_TX; 1157 else if (speed == SPEED_10) 1158 ifmr->ifm_active |= IFM_10_T; 1159 else 1160 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__, 1161 speed)); 1162 } 1163 1164 void 1165 t4_fatal_err(struct adapter *sc) 1166 { 1167 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 1168 t4_intr_disable(sc); 1169 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n", 1170 device_get_nameunit(sc->dev)); 1171 } 1172 1173 static int 1174 map_bars(struct adapter *sc) 1175 { 1176 sc->regs_rid = PCIR_BAR(0); 1177 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1178 &sc->regs_rid, RF_ACTIVE); 1179 if (sc->regs_res == NULL) { 1180 device_printf(sc->dev, "cannot map registers.\n"); 1181 return (ENXIO); 1182 } 1183 sc->bt = rman_get_bustag(sc->regs_res); 1184 sc->bh = rman_get_bushandle(sc->regs_res); 1185 sc->mmio_len = rman_get_size(sc->regs_res); 1186 1187 sc->msix_rid = PCIR_BAR(4); 1188 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1189 &sc->msix_rid, RF_ACTIVE); 1190 if (sc->msix_res == NULL) { 1191 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 1192 return (ENXIO); 1193 } 1194 1195 return (0); 1196 } 1197 1198 static void 1199 setup_memwin(struct adapter *sc) 1200 { 1201 u_long bar0; 1202 1203 bar0 = rman_get_start(sc->regs_res); 1204 1205 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0), 1206 (bar0 + MEMWIN0_BASE) | V_BIR(0) | 1207 V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); 1208 1209 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1), 1210 (bar0 + MEMWIN1_BASE) | V_BIR(0) | 1211 V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); 1212 1213 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2), 1214 (bar0 + MEMWIN2_BASE) | V_BIR(0) | 1215 V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); 1216 } 1217 1218 static int 1219 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, 1220 struct intrs_and_queues *iaq) 1221 { 1222 int rc, itype, navail, nc, nrxq10g, nrxq1g; 1223 1224 bzero(iaq, sizeof(*iaq)); 1225 nc = mp_ncpus; /* our snapshot of the number of CPUs */ 1226 1227 for (itype = INTR_MSIX; itype; itype >>= 1) { 1228 1229 if ((itype & intr_types) == 0) 1230 continue; /* not allowed */ 1231 1232 if (itype == INTR_MSIX) 1233 navail = pci_msix_count(sc->dev); 1234 else if (itype == INTR_MSI) 1235 navail = pci_msi_count(sc->dev); 1236 else 1237 navail = 1; 1238 1239 if (navail == 0) 1240 continue; 1241 1242 iaq->intr_type = itype; 1243 1244 iaq->ntxq10g = min(nc, max_ntxq_10g); 1245 iaq->ntxq1g = min(nc, max_ntxq_1g); 1246 1247 nrxq10g = min(nc, max_nrxq_10g); 1248 nrxq1g = min(nc, max_nrxq_1g); 1249 1250 iaq->nirq = n10g * nrxq10g + n1g * nrxq1g + T4_EXTRA_INTR; 1251 if (iaq->nirq <= navail && intr_shared == 0) { 1252 1253 if (itype == INTR_MSI && !powerof2(iaq->nirq)) 1254 goto share; 1255 1256 /* One for err, one for fwq, and one for each rxq */ 1257 1258 iaq->intr_shared = 0; 1259 iaq->nrxq10g = nrxq10g; 1260 iaq->nrxq1g = nrxq1g; 1261 1262 } else { 1263 share: 1264 iaq->intr_shared = 1; 1265 1266 if (navail >= nc + T4_EXTRA_INTR) { 1267 if (itype == INTR_MSIX) 1268 navail = nc + T4_EXTRA_INTR; 1269 1270 /* navail is and must remain a pow2 for MSI */ 1271 if (itype == INTR_MSI) { 1272 KASSERT(powerof2(navail), 1273 ("%d not power of 2", navail)); 1274 1275 while (navail / 2 >= nc + T4_EXTRA_INTR) 1276 navail /= 2; 1277 } 1278 } 1279 iaq->nirq = navail; /* total # of interrupts */ 1280 1281 /* 1282 * If we have multiple vectors available reserve one 1283 * exclusively for errors. The rest will be shared by 1284 * the fwq and data. 1285 */ 1286 if (navail > 1) 1287 navail--; 1288 iaq->nrxq10g = min(nrxq10g, navail); 1289 iaq->nrxq1g = min(nrxq1g, navail); 1290 } 1291 1292 navail = iaq->nirq; 1293 rc = 0; 1294 if (itype == INTR_MSIX) 1295 rc = pci_alloc_msix(sc->dev, &navail); 1296 else if (itype == INTR_MSI) 1297 rc = pci_alloc_msi(sc->dev, &navail); 1298 1299 if (rc == 0) { 1300 if (navail == iaq->nirq) 1301 return (0); 1302 1303 /* 1304 * Didn't get the number requested. Use whatever number 1305 * the kernel is willing to allocate (it's in navail). 1306 */ 1307 pci_release_msi(sc->dev); 1308 goto share; 1309 } 1310 1311 device_printf(sc->dev, 1312 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", 1313 itype, rc, iaq->nirq, navail); 1314 } 1315 1316 device_printf(sc->dev, 1317 "failed to find a usable interrupt type. " 1318 "allowed=%d, msi-x=%d, msi=%d, intx=1", intr_types, 1319 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 1320 1321 return (ENXIO); 1322 } 1323 1324 /* 1325 * Install a compatible firmware (if required), establish contact with it, 1326 * become the master, and reset the device. 1327 */ 1328 static int 1329 prep_firmware(struct adapter *sc) 1330 { 1331 const struct firmware *fw; 1332 int rc; 1333 enum dev_state state; 1334 1335 /* Check firmware version and install a different one if necessary */ 1336 rc = t4_check_fw_version(sc); 1337 if (rc != 0 || force_firmware_install) { 1338 uint32_t v = 0; 1339 1340 fw = firmware_get(T4_FWNAME); 1341 if (fw != NULL) { 1342 const struct fw_hdr *hdr = (const void *)fw->data; 1343 1344 v = ntohl(hdr->fw_ver); 1345 1346 /* 1347 * The firmware module will not be used if it isn't the 1348 * same major version as what the driver was compiled 1349 * with. This check trumps force_firmware_install. 1350 */ 1351 if (G_FW_HDR_FW_VER_MAJOR(v) != FW_VERSION_MAJOR) { 1352 device_printf(sc->dev, 1353 "Found firmware image but version %d " 1354 "can not be used with this driver (%d)\n", 1355 G_FW_HDR_FW_VER_MAJOR(v), FW_VERSION_MAJOR); 1356 1357 firmware_put(fw, FIRMWARE_UNLOAD); 1358 fw = NULL; 1359 } 1360 } 1361 1362 if (fw == NULL && (rc < 0 || force_firmware_install)) { 1363 device_printf(sc->dev, "No usable firmware. " 1364 "card has %d.%d.%d, driver compiled with %d.%d.%d, " 1365 "force_firmware_install%s set", 1366 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 1367 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 1368 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 1369 FW_VERSION_MAJOR, FW_VERSION_MINOR, 1370 FW_VERSION_MICRO, 1371 force_firmware_install ? "" : " not"); 1372 return (EAGAIN); 1373 } 1374 1375 /* 1376 * Always upgrade, even for minor/micro/build mismatches. 1377 * Downgrade only for a major version mismatch or if 1378 * force_firmware_install was specified. 1379 */ 1380 if (fw != NULL && (rc < 0 || force_firmware_install || 1381 v > sc->params.fw_vers)) { 1382 device_printf(sc->dev, 1383 "installing firmware %d.%d.%d.%d on card.\n", 1384 G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v), 1385 G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v)); 1386 1387 rc = -t4_load_fw(sc, fw->data, fw->datasize); 1388 if (rc != 0) { 1389 device_printf(sc->dev, 1390 "failed to install firmware: %d\n", rc); 1391 firmware_put(fw, FIRMWARE_UNLOAD); 1392 return (rc); 1393 } else { 1394 /* refresh */ 1395 (void) t4_check_fw_version(sc); 1396 } 1397 } 1398 1399 if (fw != NULL) 1400 firmware_put(fw, FIRMWARE_UNLOAD); 1401 } 1402 1403 /* Contact firmware, request master */ 1404 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state); 1405 if (rc < 0) { 1406 rc = -rc; 1407 device_printf(sc->dev, 1408 "failed to connect to the firmware: %d.\n", rc); 1409 return (rc); 1410 } 1411 1412 /* Reset device */ 1413 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST); 1414 if (rc != 0) { 1415 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 1416 if (rc != ETIMEDOUT && rc != EIO) 1417 t4_fw_bye(sc, sc->mbox); 1418 return (rc); 1419 } 1420 1421 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 1422 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 1423 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 1424 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 1425 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 1426 sc->flags |= FW_OK; 1427 1428 return (0); 1429 } 1430 1431 static int 1432 get_devlog_params(struct adapter *sc, struct devlog_params *dlog) 1433 { 1434 struct fw_devlog_cmd devlog_cmd; 1435 uint32_t meminfo; 1436 int rc; 1437 1438 bzero(&devlog_cmd, sizeof(devlog_cmd)); 1439 devlog_cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) | 1440 F_FW_CMD_REQUEST | F_FW_CMD_READ); 1441 devlog_cmd.retval_len16 = htobe32(FW_LEN16(devlog_cmd)); 1442 rc = -t4_wr_mbox(sc, sc->mbox, &devlog_cmd, sizeof(devlog_cmd), 1443 &devlog_cmd); 1444 if (rc != 0) { 1445 device_printf(sc->dev, 1446 "failed to get devlog parameters: %d.\n", rc); 1447 bzero(dlog, sizeof (*dlog)); 1448 return (rc); 1449 } 1450 1451 meminfo = be32toh(devlog_cmd.memtype_devlog_memaddr16_devlog); 1452 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(meminfo); 1453 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(meminfo) << 4; 1454 dlog->size = be32toh(devlog_cmd.memsize_devlog); 1455 1456 return (0); 1457 } 1458 1459 static int 1460 get_capabilities(struct adapter *sc, struct fw_caps_config_cmd *caps) 1461 { 1462 int rc; 1463 1464 bzero(caps, sizeof(*caps)); 1465 caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1466 F_FW_CMD_REQUEST | F_FW_CMD_READ); 1467 caps->retval_len16 = htobe32(FW_LEN16(*caps)); 1468 1469 rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), caps); 1470 if (rc != 0) 1471 return (rc); 1472 1473 if (caps->niccaps & htobe16(FW_CAPS_CONFIG_NIC_VM)) 1474 caps->niccaps ^= htobe16(FW_CAPS_CONFIG_NIC_VM); 1475 1476 caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1477 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 1478 rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), NULL); 1479 1480 return (rc); 1481 } 1482 1483 static int 1484 get_params(struct adapter *sc, struct fw_caps_config_cmd *caps) 1485 { 1486 int rc; 1487 uint32_t params[7], val[7]; 1488 1489 #define FW_PARAM_DEV(param) \ 1490 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 1491 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 1492 #define FW_PARAM_PFVF(param) \ 1493 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 1494 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 1495 1496 params[0] = FW_PARAM_DEV(PORTVEC); 1497 params[1] = FW_PARAM_PFVF(IQFLINT_START); 1498 params[2] = FW_PARAM_PFVF(EQ_START); 1499 params[3] = FW_PARAM_PFVF(FILTER_START); 1500 params[4] = FW_PARAM_PFVF(FILTER_END); 1501 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 5, params, val); 1502 if (rc != 0) { 1503 device_printf(sc->dev, 1504 "failed to query parameters: %d.\n", rc); 1505 goto done; 1506 } 1507 1508 sc->params.portvec = val[0]; 1509 sc->params.nports = 0; 1510 while (val[0]) { 1511 sc->params.nports++; 1512 val[0] &= val[0] - 1; 1513 } 1514 1515 sc->sge.iq_start = val[1]; 1516 sc->sge.eq_start = val[2]; 1517 sc->tids.ftid_base = val[3]; 1518 sc->tids.nftids = val[4] - val[3] + 1; 1519 1520 if (caps->toecaps) { 1521 /* query offload-related parameters */ 1522 params[0] = FW_PARAM_DEV(NTID); 1523 params[1] = FW_PARAM_PFVF(SERVER_START); 1524 params[2] = FW_PARAM_PFVF(SERVER_END); 1525 params[3] = FW_PARAM_PFVF(TDDP_START); 1526 params[4] = FW_PARAM_PFVF(TDDP_END); 1527 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 1528 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val); 1529 if (rc != 0) { 1530 device_printf(sc->dev, 1531 "failed to query TOE parameters: %d.\n", rc); 1532 goto done; 1533 } 1534 sc->tids.ntids = val[0]; 1535 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 1536 sc->tids.stid_base = val[1]; 1537 sc->tids.nstids = val[2] - val[1] + 1; 1538 sc->vres.ddp.start = val[3]; 1539 sc->vres.ddp.size = val[4] - val[3] + 1; 1540 sc->params.ofldq_wr_cred = val[5]; 1541 sc->params.offload = 1; 1542 } 1543 if (caps->rdmacaps) { 1544 params[0] = FW_PARAM_PFVF(STAG_START); 1545 params[1] = FW_PARAM_PFVF(STAG_END); 1546 params[2] = FW_PARAM_PFVF(RQ_START); 1547 params[3] = FW_PARAM_PFVF(RQ_END); 1548 params[4] = FW_PARAM_PFVF(PBL_START); 1549 params[5] = FW_PARAM_PFVF(PBL_END); 1550 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val); 1551 if (rc != 0) { 1552 device_printf(sc->dev, 1553 "failed to query RDMA parameters: %d.\n", rc); 1554 goto done; 1555 } 1556 sc->vres.stag.start = val[0]; 1557 sc->vres.stag.size = val[1] - val[0] + 1; 1558 sc->vres.rq.start = val[2]; 1559 sc->vres.rq.size = val[3] - val[2] + 1; 1560 sc->vres.pbl.start = val[4]; 1561 sc->vres.pbl.size = val[5] - val[4] + 1; 1562 } 1563 if (caps->iscsicaps) { 1564 params[0] = FW_PARAM_PFVF(ISCSI_START); 1565 params[1] = FW_PARAM_PFVF(ISCSI_END); 1566 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, params, val); 1567 if (rc != 0) { 1568 device_printf(sc->dev, 1569 "failed to query iSCSI parameters: %d.\n", rc); 1570 goto done; 1571 } 1572 sc->vres.iscsi.start = val[0]; 1573 sc->vres.iscsi.size = val[1] - val[0] + 1; 1574 } 1575 #undef FW_PARAM_PFVF 1576 #undef FW_PARAM_DEV 1577 1578 done: 1579 return (rc); 1580 } 1581 1582 static void 1583 t4_set_desc(struct adapter *sc) 1584 { 1585 char buf[128]; 1586 struct adapter_params *p = &sc->params; 1587 1588 snprintf(buf, sizeof(buf), 1589 "Chelsio %s (rev %d) %d port %sNIC PCIe-x%d %d %s, S/N:%s, E/C:%s", 1590 p->vpd.id, p->rev, p->nports, is_offload(sc) ? "R" : "", 1591 p->pci.width, sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" : 1592 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), p->vpd.sn, p->vpd.ec); 1593 1594 device_set_desc_copy(sc->dev, buf); 1595 } 1596 1597 static void 1598 build_medialist(struct port_info *pi) 1599 { 1600 struct ifmedia *media = &pi->media; 1601 int data, m; 1602 1603 PORT_LOCK(pi); 1604 1605 ifmedia_removeall(media); 1606 1607 m = IFM_ETHER | IFM_FDX; 1608 data = (pi->port_type << 8) | pi->mod_type; 1609 1610 switch(pi->port_type) { 1611 case FW_PORT_TYPE_BT_XFI: 1612 ifmedia_add(media, m | IFM_10G_T, data, NULL); 1613 break; 1614 1615 case FW_PORT_TYPE_BT_XAUI: 1616 ifmedia_add(media, m | IFM_10G_T, data, NULL); 1617 /* fall through */ 1618 1619 case FW_PORT_TYPE_BT_SGMII: 1620 ifmedia_add(media, m | IFM_1000_T, data, NULL); 1621 ifmedia_add(media, m | IFM_100_TX, data, NULL); 1622 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL); 1623 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 1624 break; 1625 1626 case FW_PORT_TYPE_CX4: 1627 ifmedia_add(media, m | IFM_10G_CX4, data, NULL); 1628 ifmedia_set(media, m | IFM_10G_CX4); 1629 break; 1630 1631 case FW_PORT_TYPE_SFP: 1632 case FW_PORT_TYPE_FIBER_XFI: 1633 case FW_PORT_TYPE_FIBER_XAUI: 1634 switch (pi->mod_type) { 1635 1636 case FW_PORT_MOD_TYPE_LR: 1637 ifmedia_add(media, m | IFM_10G_LR, data, NULL); 1638 ifmedia_set(media, m | IFM_10G_LR); 1639 break; 1640 1641 case FW_PORT_MOD_TYPE_SR: 1642 ifmedia_add(media, m | IFM_10G_SR, data, NULL); 1643 ifmedia_set(media, m | IFM_10G_SR); 1644 break; 1645 1646 case FW_PORT_MOD_TYPE_LRM: 1647 ifmedia_add(media, m | IFM_10G_LRM, data, NULL); 1648 ifmedia_set(media, m | IFM_10G_LRM); 1649 break; 1650 1651 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 1652 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 1653 ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL); 1654 ifmedia_set(media, m | IFM_10G_TWINAX); 1655 break; 1656 1657 case FW_PORT_MOD_TYPE_NONE: 1658 m &= ~IFM_FDX; 1659 ifmedia_add(media, m | IFM_NONE, data, NULL); 1660 ifmedia_set(media, m | IFM_NONE); 1661 break; 1662 1663 case FW_PORT_MOD_TYPE_NA: 1664 case FW_PORT_MOD_TYPE_ER: 1665 default: 1666 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 1667 ifmedia_set(media, m | IFM_UNKNOWN); 1668 break; 1669 } 1670 break; 1671 1672 case FW_PORT_TYPE_KX4: 1673 case FW_PORT_TYPE_KX: 1674 case FW_PORT_TYPE_KR: 1675 default: 1676 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 1677 ifmedia_set(media, m | IFM_UNKNOWN); 1678 break; 1679 } 1680 1681 PORT_UNLOCK(pi); 1682 } 1683 1684 /* 1685 * Program the port's XGMAC based on parameters in ifnet. The caller also 1686 * indicates which parameters should be programmed (the rest are left alone). 1687 */ 1688 static int 1689 update_mac_settings(struct port_info *pi, int flags) 1690 { 1691 int rc; 1692 struct ifnet *ifp = pi->ifp; 1693 struct adapter *sc = pi->adapter; 1694 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 1695 1696 PORT_LOCK_ASSERT_OWNED(pi); 1697 KASSERT(flags, ("%s: not told what to update.", __func__)); 1698 1699 if (flags & XGMAC_MTU) 1700 mtu = ifp->if_mtu; 1701 1702 if (flags & XGMAC_PROMISC) 1703 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 1704 1705 if (flags & XGMAC_ALLMULTI) 1706 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 1707 1708 if (flags & XGMAC_VLANEX) 1709 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; 1710 1711 rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1, 1712 vlanex, false); 1713 if (rc) { 1714 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc); 1715 return (rc); 1716 } 1717 1718 if (flags & XGMAC_UCADDR) { 1719 uint8_t ucaddr[ETHER_ADDR_LEN]; 1720 1721 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 1722 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt, 1723 ucaddr, true, true); 1724 if (rc < 0) { 1725 rc = -rc; 1726 if_printf(ifp, "change_mac failed: %d\n", rc); 1727 return (rc); 1728 } else { 1729 pi->xact_addr_filt = rc; 1730 rc = 0; 1731 } 1732 } 1733 1734 if (flags & XGMAC_MCADDRS) { 1735 const uint8_t *mcaddr; 1736 int del = 1; 1737 uint64_t hash = 0; 1738 struct ifmultiaddr *ifma; 1739 1740 if_maddr_rlock(ifp); 1741 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1742 if (ifma->ifma_addr->sa_family != AF_LINK) 1743 continue; 1744 mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 1745 1746 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid, del, 1, 1747 &mcaddr, NULL, &hash, 0); 1748 if (rc < 0) { 1749 rc = -rc; 1750 if_printf(ifp, "failed to add mc address" 1751 " %02x:%02x:%02x:%02x:%02x:%02x rc=%d\n", 1752 mcaddr[0], mcaddr[1], mcaddr[2], mcaddr[3], 1753 mcaddr[4], mcaddr[5], rc); 1754 goto mcfail; 1755 } 1756 del = 0; 1757 } 1758 1759 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0); 1760 if (rc != 0) 1761 if_printf(ifp, "failed to set mc address hash: %d", rc); 1762 mcfail: 1763 if_maddr_runlock(ifp); 1764 } 1765 1766 return (rc); 1767 } 1768 1769 static int 1770 cxgbe_init_locked(struct port_info *pi) 1771 { 1772 struct adapter *sc = pi->adapter; 1773 int rc = 0; 1774 1775 ADAPTER_LOCK_ASSERT_OWNED(sc); 1776 1777 while (!IS_DOOMED(pi) && IS_BUSY(sc)) { 1778 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4init", 0)) { 1779 rc = EINTR; 1780 goto done; 1781 } 1782 } 1783 if (IS_DOOMED(pi)) { 1784 rc = ENXIO; 1785 goto done; 1786 } 1787 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 1788 1789 /* Give up the adapter lock, port init code can sleep. */ 1790 SET_BUSY(sc); 1791 ADAPTER_UNLOCK(sc); 1792 1793 rc = cxgbe_init_synchronized(pi); 1794 1795 done: 1796 ADAPTER_LOCK(sc); 1797 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 1798 CLR_BUSY(sc); 1799 wakeup_one(&sc->flags); 1800 ADAPTER_UNLOCK(sc); 1801 return (rc); 1802 } 1803 1804 static int 1805 cxgbe_init_synchronized(struct port_info *pi) 1806 { 1807 struct adapter *sc = pi->adapter; 1808 struct ifnet *ifp = pi->ifp; 1809 int rc = 0, i; 1810 uint16_t *rss; 1811 struct sge_rxq *rxq; 1812 1813 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1814 1815 if (isset(&sc->open_device_map, pi->port_id)) { 1816 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, 1817 ("mismatch between open_device_map and if_drv_flags")); 1818 return (0); /* already running */ 1819 } 1820 1821 if (sc->open_device_map == 0 && ((rc = first_port_up(sc)) != 0)) 1822 return (rc); /* error message displayed already */ 1823 1824 /* 1825 * Allocate tx/rx/fl queues for this port. 1826 */ 1827 rc = t4_setup_eth_queues(pi); 1828 if (rc != 0) 1829 goto done; /* error message displayed already */ 1830 1831 /* 1832 * Setup RSS for this port. 1833 */ 1834 rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 1835 for_each_rxq(pi, i, rxq) { 1836 rss[i] = rxq->iq.abs_id; 1837 } 1838 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss, 1839 pi->nrxq); 1840 free(rss, M_CXGBE); 1841 if (rc != 0) { 1842 if_printf(ifp, "rss_config failed: %d\n", rc); 1843 goto done; 1844 } 1845 1846 PORT_LOCK(pi); 1847 rc = update_mac_settings(pi, XGMAC_ALL); 1848 PORT_UNLOCK(pi); 1849 if (rc) 1850 goto done; /* error message displayed already */ 1851 1852 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg); 1853 if (rc != 0) { 1854 if_printf(ifp, "start_link failed: %d\n", rc); 1855 goto done; 1856 } 1857 1858 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true); 1859 if (rc != 0) { 1860 if_printf(ifp, "enable_vi failed: %d\n", rc); 1861 goto done; 1862 } 1863 pi->flags |= VI_ENABLED; 1864 1865 /* all ok */ 1866 setbit(&sc->open_device_map, pi->port_id); 1867 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1868 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1869 1870 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 1871 done: 1872 if (rc != 0) 1873 cxgbe_uninit_synchronized(pi); 1874 1875 return (rc); 1876 } 1877 1878 static int 1879 cxgbe_uninit_locked(struct port_info *pi) 1880 { 1881 struct adapter *sc = pi->adapter; 1882 int rc; 1883 1884 ADAPTER_LOCK_ASSERT_OWNED(sc); 1885 1886 while (!IS_DOOMED(pi) && IS_BUSY(sc)) { 1887 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4uninit", 0)) { 1888 rc = EINTR; 1889 goto done; 1890 } 1891 } 1892 if (IS_DOOMED(pi)) { 1893 rc = ENXIO; 1894 goto done; 1895 } 1896 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 1897 SET_BUSY(sc); 1898 ADAPTER_UNLOCK(sc); 1899 1900 rc = cxgbe_uninit_synchronized(pi); 1901 1902 ADAPTER_LOCK(sc); 1903 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 1904 CLR_BUSY(sc); 1905 wakeup_one(&sc->flags); 1906 done: 1907 ADAPTER_UNLOCK(sc); 1908 return (rc); 1909 } 1910 1911 /* 1912 * Idempotent. 1913 */ 1914 static int 1915 cxgbe_uninit_synchronized(struct port_info *pi) 1916 { 1917 struct adapter *sc = pi->adapter; 1918 struct ifnet *ifp = pi->ifp; 1919 int rc; 1920 1921 /* 1922 * taskqueue_drain may cause a deadlock if the adapter lock is held. 1923 */ 1924 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1925 1926 /* 1927 * Clear this port's bit from the open device map, and then drain 1928 * tasks and callouts. 1929 */ 1930 clrbit(&sc->open_device_map, pi->port_id); 1931 1932 PORT_LOCK(pi); 1933 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1934 callout_stop(&pi->tick); 1935 PORT_UNLOCK(pi); 1936 callout_drain(&pi->tick); 1937 1938 /* 1939 * Stop and then free the queues' resources, including the queues 1940 * themselves. 1941 * 1942 * XXX: we could just stop the queues here (on ifconfig down) and free 1943 * them later (on port detach), but having up/down go through the entire 1944 * allocate/activate/deactivate/free sequence is a good way to find 1945 * leaks and bugs. 1946 */ 1947 rc = t4_teardown_eth_queues(pi); 1948 if (rc != 0) 1949 if_printf(ifp, "teardown failed: %d\n", rc); 1950 1951 if (pi->flags & VI_ENABLED) { 1952 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false); 1953 if (rc) 1954 if_printf(ifp, "disable_vi failed: %d\n", rc); 1955 else 1956 pi->flags &= ~VI_ENABLED; 1957 } 1958 1959 pi->link_cfg.link_ok = 0; 1960 pi->link_cfg.speed = 0; 1961 t4_os_link_changed(sc, pi->port_id, 0); 1962 1963 if (sc->open_device_map == 0) 1964 last_port_down(sc); 1965 1966 return (0); 1967 } 1968 1969 #define T4_ALLOC_IRQ(sc, irq, rid, handler, arg, name) do { \ 1970 rc = t4_alloc_irq(sc, irq, rid, handler, arg, name); \ 1971 if (rc != 0) \ 1972 goto done; \ 1973 } while (0) 1974 static int 1975 first_port_up(struct adapter *sc) 1976 { 1977 int rc, i, rid, p, q; 1978 char s[8]; 1979 struct irq *irq; 1980 struct sge_iq *intrq; 1981 1982 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1983 1984 /* 1985 * queues that belong to the adapter (not any particular port). 1986 */ 1987 rc = t4_setup_adapter_queues(sc); 1988 if (rc != 0) 1989 goto done; 1990 1991 /* 1992 * Setup interrupts. 1993 */ 1994 irq = &sc->irq[0]; 1995 rid = sc->intr_type == INTR_INTX ? 0 : 1; 1996 if (sc->intr_count == 1) { 1997 KASSERT(sc->flags & INTR_SHARED, 1998 ("%s: single interrupt but not shared?", __func__)); 1999 2000 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_all, sc, "all"); 2001 } else { 2002 /* Multiple interrupts. The first one is always error intr */ 2003 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_err, sc, "err"); 2004 irq++; 2005 rid++; 2006 2007 /* Firmware event queue normally has an interrupt of its own */ 2008 if (sc->intr_count > T4_EXTRA_INTR) { 2009 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, 2010 "evt"); 2011 irq++; 2012 rid++; 2013 } 2014 2015 intrq = &sc->sge.intrq[0]; 2016 if (sc->flags & INTR_SHARED) { 2017 2018 /* All ports share these interrupt queues */ 2019 2020 for (i = 0; i < NINTRQ(sc); i++) { 2021 snprintf(s, sizeof(s), "*.%d", i); 2022 T4_ALLOC_IRQ(sc, irq, rid, t4_intr, intrq, s); 2023 irq++; 2024 rid++; 2025 intrq++; 2026 } 2027 } else { 2028 2029 /* Each port has its own set of interrupt queues */ 2030 2031 for (p = 0; p < sc->params.nports; p++) { 2032 for (q = 0; q < sc->port[p]->nrxq; q++) { 2033 snprintf(s, sizeof(s), "%d.%d", p, q); 2034 T4_ALLOC_IRQ(sc, irq, rid, t4_intr, 2035 intrq, s); 2036 irq++; 2037 rid++; 2038 intrq++; 2039 } 2040 } 2041 } 2042 } 2043 2044 t4_intr_enable(sc); 2045 sc->flags |= FULL_INIT_DONE; 2046 2047 done: 2048 if (rc != 0) 2049 last_port_down(sc); 2050 2051 return (rc); 2052 } 2053 #undef T4_ALLOC_IRQ 2054 2055 /* 2056 * Idempotent. 2057 */ 2058 static int 2059 last_port_down(struct adapter *sc) 2060 { 2061 int i; 2062 2063 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2064 2065 t4_intr_disable(sc); 2066 2067 t4_teardown_adapter_queues(sc); 2068 2069 for (i = 0; i < sc->intr_count; i++) 2070 t4_free_irq(sc, &sc->irq[i]); 2071 2072 sc->flags &= ~FULL_INIT_DONE; 2073 2074 return (0); 2075 } 2076 2077 static int 2078 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 2079 iq_intr_handler_t *handler, void *arg, char *name) 2080 { 2081 int rc; 2082 2083 irq->rid = rid; 2084 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 2085 RF_SHAREABLE | RF_ACTIVE); 2086 if (irq->res == NULL) { 2087 device_printf(sc->dev, 2088 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 2089 return (ENOMEM); 2090 } 2091 2092 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 2093 NULL, handler, arg, &irq->tag); 2094 if (rc != 0) { 2095 device_printf(sc->dev, 2096 "failed to setup interrupt for rid %d, name %s: %d\n", 2097 rid, name, rc); 2098 } else if (name) 2099 bus_describe_intr(sc->dev, irq->res, irq->tag, name); 2100 2101 return (rc); 2102 } 2103 2104 static int 2105 t4_free_irq(struct adapter *sc, struct irq *irq) 2106 { 2107 if (irq->tag) 2108 bus_teardown_intr(sc->dev, irq->res, irq->tag); 2109 if (irq->res) 2110 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 2111 2112 bzero(irq, sizeof(*irq)); 2113 2114 return (0); 2115 } 2116 2117 static void 2118 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start, 2119 unsigned int end) 2120 { 2121 uint32_t *p = (uint32_t *)(buf + start); 2122 2123 for ( ; start <= end; start += sizeof(uint32_t)) 2124 *p++ = t4_read_reg(sc, start); 2125 } 2126 2127 static void 2128 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 2129 { 2130 int i; 2131 static const unsigned int reg_ranges[] = { 2132 0x1008, 0x1108, 2133 0x1180, 0x11b4, 2134 0x11fc, 0x123c, 2135 0x1300, 0x173c, 2136 0x1800, 0x18fc, 2137 0x3000, 0x30d8, 2138 0x30e0, 0x5924, 2139 0x5960, 0x59d4, 2140 0x5a00, 0x5af8, 2141 0x6000, 0x6098, 2142 0x6100, 0x6150, 2143 0x6200, 0x6208, 2144 0x6240, 0x6248, 2145 0x6280, 0x6338, 2146 0x6370, 0x638c, 2147 0x6400, 0x643c, 2148 0x6500, 0x6524, 2149 0x6a00, 0x6a38, 2150 0x6a60, 0x6a78, 2151 0x6b00, 0x6b84, 2152 0x6bf0, 0x6c84, 2153 0x6cf0, 0x6d84, 2154 0x6df0, 0x6e84, 2155 0x6ef0, 0x6f84, 2156 0x6ff0, 0x7084, 2157 0x70f0, 0x7184, 2158 0x71f0, 0x7284, 2159 0x72f0, 0x7384, 2160 0x73f0, 0x7450, 2161 0x7500, 0x7530, 2162 0x7600, 0x761c, 2163 0x7680, 0x76cc, 2164 0x7700, 0x7798, 2165 0x77c0, 0x77fc, 2166 0x7900, 0x79fc, 2167 0x7b00, 0x7c38, 2168 0x7d00, 0x7efc, 2169 0x8dc0, 0x8e1c, 2170 0x8e30, 0x8e78, 2171 0x8ea0, 0x8f6c, 2172 0x8fc0, 0x9074, 2173 0x90fc, 0x90fc, 2174 0x9400, 0x9458, 2175 0x9600, 0x96bc, 2176 0x9800, 0x9808, 2177 0x9820, 0x983c, 2178 0x9850, 0x9864, 2179 0x9c00, 0x9c6c, 2180 0x9c80, 0x9cec, 2181 0x9d00, 0x9d6c, 2182 0x9d80, 0x9dec, 2183 0x9e00, 0x9e6c, 2184 0x9e80, 0x9eec, 2185 0x9f00, 0x9f6c, 2186 0x9f80, 0x9fec, 2187 0xd004, 0xd03c, 2188 0xdfc0, 0xdfe0, 2189 0xe000, 0xea7c, 2190 0xf000, 0x11190, 2191 0x19040, 0x19124, 2192 0x19150, 0x191b0, 2193 0x191d0, 0x191e8, 2194 0x19238, 0x1924c, 2195 0x193f8, 0x19474, 2196 0x19490, 0x194f8, 2197 0x19800, 0x19f30, 2198 0x1a000, 0x1a06c, 2199 0x1a0b0, 0x1a120, 2200 0x1a128, 0x1a138, 2201 0x1a190, 0x1a1c4, 2202 0x1a1fc, 0x1a1fc, 2203 0x1e040, 0x1e04c, 2204 0x1e240, 0x1e28c, 2205 0x1e2c0, 0x1e2c0, 2206 0x1e2e0, 0x1e2e0, 2207 0x1e300, 0x1e384, 2208 0x1e3c0, 0x1e3c8, 2209 0x1e440, 0x1e44c, 2210 0x1e640, 0x1e68c, 2211 0x1e6c0, 0x1e6c0, 2212 0x1e6e0, 0x1e6e0, 2213 0x1e700, 0x1e784, 2214 0x1e7c0, 0x1e7c8, 2215 0x1e840, 0x1e84c, 2216 0x1ea40, 0x1ea8c, 2217 0x1eac0, 0x1eac0, 2218 0x1eae0, 0x1eae0, 2219 0x1eb00, 0x1eb84, 2220 0x1ebc0, 0x1ebc8, 2221 0x1ec40, 0x1ec4c, 2222 0x1ee40, 0x1ee8c, 2223 0x1eec0, 0x1eec0, 2224 0x1eee0, 0x1eee0, 2225 0x1ef00, 0x1ef84, 2226 0x1efc0, 0x1efc8, 2227 0x1f040, 0x1f04c, 2228 0x1f240, 0x1f28c, 2229 0x1f2c0, 0x1f2c0, 2230 0x1f2e0, 0x1f2e0, 2231 0x1f300, 0x1f384, 2232 0x1f3c0, 0x1f3c8, 2233 0x1f440, 0x1f44c, 2234 0x1f640, 0x1f68c, 2235 0x1f6c0, 0x1f6c0, 2236 0x1f6e0, 0x1f6e0, 2237 0x1f700, 0x1f784, 2238 0x1f7c0, 0x1f7c8, 2239 0x1f840, 0x1f84c, 2240 0x1fa40, 0x1fa8c, 2241 0x1fac0, 0x1fac0, 2242 0x1fae0, 0x1fae0, 2243 0x1fb00, 0x1fb84, 2244 0x1fbc0, 0x1fbc8, 2245 0x1fc40, 0x1fc4c, 2246 0x1fe40, 0x1fe8c, 2247 0x1fec0, 0x1fec0, 2248 0x1fee0, 0x1fee0, 2249 0x1ff00, 0x1ff84, 2250 0x1ffc0, 0x1ffc8, 2251 0x20000, 0x2002c, 2252 0x20100, 0x2013c, 2253 0x20190, 0x201c8, 2254 0x20200, 0x20318, 2255 0x20400, 0x20528, 2256 0x20540, 0x20614, 2257 0x21000, 0x21040, 2258 0x2104c, 0x21060, 2259 0x210c0, 0x210ec, 2260 0x21200, 0x21268, 2261 0x21270, 0x21284, 2262 0x212fc, 0x21388, 2263 0x21400, 0x21404, 2264 0x21500, 0x21518, 2265 0x2152c, 0x2153c, 2266 0x21550, 0x21554, 2267 0x21600, 0x21600, 2268 0x21608, 0x21628, 2269 0x21630, 0x2163c, 2270 0x21700, 0x2171c, 2271 0x21780, 0x2178c, 2272 0x21800, 0x21c38, 2273 0x21c80, 0x21d7c, 2274 0x21e00, 0x21e04, 2275 0x22000, 0x2202c, 2276 0x22100, 0x2213c, 2277 0x22190, 0x221c8, 2278 0x22200, 0x22318, 2279 0x22400, 0x22528, 2280 0x22540, 0x22614, 2281 0x23000, 0x23040, 2282 0x2304c, 0x23060, 2283 0x230c0, 0x230ec, 2284 0x23200, 0x23268, 2285 0x23270, 0x23284, 2286 0x232fc, 0x23388, 2287 0x23400, 0x23404, 2288 0x23500, 0x23518, 2289 0x2352c, 0x2353c, 2290 0x23550, 0x23554, 2291 0x23600, 0x23600, 2292 0x23608, 0x23628, 2293 0x23630, 0x2363c, 2294 0x23700, 0x2371c, 2295 0x23780, 0x2378c, 2296 0x23800, 0x23c38, 2297 0x23c80, 0x23d7c, 2298 0x23e00, 0x23e04, 2299 0x24000, 0x2402c, 2300 0x24100, 0x2413c, 2301 0x24190, 0x241c8, 2302 0x24200, 0x24318, 2303 0x24400, 0x24528, 2304 0x24540, 0x24614, 2305 0x25000, 0x25040, 2306 0x2504c, 0x25060, 2307 0x250c0, 0x250ec, 2308 0x25200, 0x25268, 2309 0x25270, 0x25284, 2310 0x252fc, 0x25388, 2311 0x25400, 0x25404, 2312 0x25500, 0x25518, 2313 0x2552c, 0x2553c, 2314 0x25550, 0x25554, 2315 0x25600, 0x25600, 2316 0x25608, 0x25628, 2317 0x25630, 0x2563c, 2318 0x25700, 0x2571c, 2319 0x25780, 0x2578c, 2320 0x25800, 0x25c38, 2321 0x25c80, 0x25d7c, 2322 0x25e00, 0x25e04, 2323 0x26000, 0x2602c, 2324 0x26100, 0x2613c, 2325 0x26190, 0x261c8, 2326 0x26200, 0x26318, 2327 0x26400, 0x26528, 2328 0x26540, 0x26614, 2329 0x27000, 0x27040, 2330 0x2704c, 0x27060, 2331 0x270c0, 0x270ec, 2332 0x27200, 0x27268, 2333 0x27270, 0x27284, 2334 0x272fc, 0x27388, 2335 0x27400, 0x27404, 2336 0x27500, 0x27518, 2337 0x2752c, 0x2753c, 2338 0x27550, 0x27554, 2339 0x27600, 0x27600, 2340 0x27608, 0x27628, 2341 0x27630, 0x2763c, 2342 0x27700, 0x2771c, 2343 0x27780, 0x2778c, 2344 0x27800, 0x27c38, 2345 0x27c80, 0x27d7c, 2346 0x27e00, 0x27e04 2347 }; 2348 2349 regs->version = 4 | (sc->params.rev << 10); 2350 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2) 2351 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]); 2352 } 2353 2354 static void 2355 cxgbe_tick(void *arg) 2356 { 2357 struct port_info *pi = arg; 2358 struct ifnet *ifp = pi->ifp; 2359 struct sge_txq *txq; 2360 int i, drops; 2361 struct port_stats *s = &pi->stats; 2362 2363 PORT_LOCK(pi); 2364 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2365 PORT_UNLOCK(pi); 2366 return; /* without scheduling another callout */ 2367 } 2368 2369 t4_get_port_stats(pi->adapter, pi->tx_chan, s); 2370 2371 ifp->if_opackets = s->tx_frames; 2372 ifp->if_ipackets = s->rx_frames; 2373 ifp->if_obytes = s->tx_octets; 2374 ifp->if_ibytes = s->rx_octets; 2375 ifp->if_omcasts = s->tx_mcast_frames; 2376 ifp->if_imcasts = s->rx_mcast_frames; 2377 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + 2378 s->rx_ovflow3; 2379 2380 drops = s->tx_drop; 2381 for_each_txq(pi, i, txq) 2382 drops += txq->br->br_drops; 2383 ifp->if_snd.ifq_drops = drops; 2384 2385 ifp->if_oerrors = s->tx_error_frames; 2386 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long + 2387 s->rx_fcs_err + s->rx_len_err; 2388 2389 callout_schedule(&pi->tick, hz); 2390 PORT_UNLOCK(pi); 2391 } 2392 2393 static int 2394 t4_sysctls(struct adapter *sc) 2395 { 2396 struct sysctl_ctx_list *ctx; 2397 struct sysctl_oid *oid; 2398 struct sysctl_oid_list *children; 2399 2400 ctx = device_get_sysctl_ctx(sc->dev); 2401 oid = device_get_sysctl_tree(sc->dev); 2402 children = SYSCTL_CHILDREN(oid); 2403 2404 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, 2405 &sc->params.nports, 0, "# of ports"); 2406 2407 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 2408 &sc->params.rev, 0, "chip hardware revision"); 2409 2410 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 2411 CTLFLAG_RD, &sc->fw_version, 0, "firmware version"); 2412 2413 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "TOE", CTLFLAG_RD, 2414 &sc->params.offload, 0, "hardware is capable of TCP offload"); 2415 2416 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, 2417 &sc->params.vpd.cclk, 0, "core clock frequency (in KHz)"); 2418 2419 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 2420 CTLTYPE_STRING | CTLFLAG_RD, &intr_timer, sizeof(intr_timer), 2421 sysctl_int_array, "A", "interrupt holdoff timer values (us)"); 2422 2423 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 2424 CTLTYPE_STRING | CTLFLAG_RD, &intr_pktcount, sizeof(intr_pktcount), 2425 sysctl_int_array, "A", "interrupt holdoff packet counter values"); 2426 2427 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 2428 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 2429 sysctl_devlog, "A", "device log"); 2430 2431 return (0); 2432 } 2433 2434 static int 2435 cxgbe_sysctls(struct port_info *pi) 2436 { 2437 struct sysctl_ctx_list *ctx; 2438 struct sysctl_oid *oid; 2439 struct sysctl_oid_list *children; 2440 2441 ctx = device_get_sysctl_ctx(pi->dev); 2442 2443 /* 2444 * dev.cxgbe.X. 2445 */ 2446 oid = device_get_sysctl_tree(pi->dev); 2447 children = SYSCTL_CHILDREN(oid); 2448 2449 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 2450 &pi->nrxq, 0, "# of rx queues"); 2451 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 2452 &pi->ntxq, 0, "# of tx queues"); 2453 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 2454 &pi->first_rxq, 0, "index of first rx queue"); 2455 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 2456 &pi->first_txq, 0, "index of first tx queue"); 2457 2458 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 2459 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I", 2460 "holdoff timer index"); 2461 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 2462 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I", 2463 "holdoff packet counter index"); 2464 2465 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 2466 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I", 2467 "rx queue size"); 2468 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 2469 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I", 2470 "tx queue size"); 2471 2472 /* 2473 * dev.cxgbe.X.stats. 2474 */ 2475 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 2476 NULL, "port statistics"); 2477 children = SYSCTL_CHILDREN(oid); 2478 2479 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 2480 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 2481 CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \ 2482 sysctl_handle_t4_reg64, "QU", desc) 2483 2484 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 2485 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 2486 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 2487 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 2488 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 2489 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 2490 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 2491 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 2492 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 2493 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 2494 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 2495 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 2496 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 2497 "# of tx frames in this range", 2498 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 2499 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 2500 "# of tx frames in this range", 2501 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 2502 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 2503 "# of tx frames in this range", 2504 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 2505 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 2506 "# of tx frames in this range", 2507 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 2508 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 2509 "# of tx frames in this range", 2510 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 2511 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518", 2512 "# of tx frames in this range", 2513 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 2514 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", 2515 "# of tx frames in this range", 2516 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 2517 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 2518 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 2519 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 2520 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 2521 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 2522 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 2523 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 2524 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 2525 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 2526 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 2527 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 2528 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 2529 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 2530 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 2531 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 2532 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 2533 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted", 2534 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 2535 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 2536 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 2537 2538 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 2539 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 2540 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 2541 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 2542 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 2543 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 2544 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 2545 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 2546 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 2547 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 2548 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 2549 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 2550 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 2551 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 2552 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 2553 "# of frames received with bad FCS", 2554 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 2555 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 2556 "# of frames received with length error", 2557 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 2558 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 2559 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 2560 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 2561 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 2562 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 2563 "# of rx frames in this range", 2564 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 2565 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 2566 "# of rx frames in this range", 2567 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 2568 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 2569 "# of rx frames in this range", 2570 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 2571 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 2572 "# of rx frames in this range", 2573 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 2574 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 2575 "# of rx frames in this range", 2576 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 2577 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 2578 "# of rx frames in this range", 2579 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 2580 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 2581 "# of rx frames in this range", 2582 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 2583 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 2584 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 2585 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 2586 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 2587 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 2588 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 2589 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 2590 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 2591 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 2592 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 2593 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 2594 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 2595 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 2596 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); 2597 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", 2598 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); 2599 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", 2600 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); 2601 2602 #undef SYSCTL_ADD_T4_REG64 2603 2604 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ 2605 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 2606 &pi->stats.name, desc) 2607 2608 /* We get these from port_stats and they may be stale by upto 1s */ 2609 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, 2610 "# drops due to buffer-group 0 overflows"); 2611 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, 2612 "# drops due to buffer-group 1 overflows"); 2613 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, 2614 "# drops due to buffer-group 2 overflows"); 2615 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, 2616 "# drops due to buffer-group 3 overflows"); 2617 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, 2618 "# of buffer-group 0 truncated packets"); 2619 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, 2620 "# of buffer-group 1 truncated packets"); 2621 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, 2622 "# of buffer-group 2 truncated packets"); 2623 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, 2624 "# of buffer-group 3 truncated packets"); 2625 2626 #undef SYSCTL_ADD_T4_PORTSTAT 2627 2628 return (0); 2629 } 2630 2631 static int 2632 sysctl_int_array(SYSCTL_HANDLER_ARGS) 2633 { 2634 int rc, *i; 2635 struct sbuf sb; 2636 2637 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); 2638 for (i = arg1; arg2; arg2 -= sizeof(int), i++) 2639 sbuf_printf(&sb, "%d ", *i); 2640 sbuf_trim(&sb); 2641 sbuf_finish(&sb); 2642 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 2643 sbuf_delete(&sb); 2644 return (rc); 2645 } 2646 2647 static int 2648 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 2649 { 2650 struct port_info *pi = arg1; 2651 struct adapter *sc = pi->adapter; 2652 struct sge_rxq *rxq; 2653 int idx, rc, i; 2654 2655 idx = pi->tmr_idx; 2656 2657 rc = sysctl_handle_int(oidp, &idx, 0, req); 2658 if (rc != 0 || req->newptr == NULL) 2659 return (rc); 2660 2661 if (idx < 0 || idx >= SGE_NTIMERS) 2662 return (EINVAL); 2663 2664 ADAPTER_LOCK(sc); 2665 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 2666 if (rc == 0) { 2667 for_each_rxq(pi, i, rxq) { 2668 rxq->iq.intr_params = V_QINTR_TIMER_IDX(idx) | 2669 V_QINTR_CNT_EN(pi->pktc_idx != -1); 2670 } 2671 pi->tmr_idx = idx; 2672 } 2673 2674 ADAPTER_UNLOCK(sc); 2675 return (rc); 2676 } 2677 2678 static int 2679 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 2680 { 2681 struct port_info *pi = arg1; 2682 struct adapter *sc = pi->adapter; 2683 int idx, rc; 2684 2685 idx = pi->pktc_idx; 2686 2687 rc = sysctl_handle_int(oidp, &idx, 0, req); 2688 if (rc != 0 || req->newptr == NULL) 2689 return (rc); 2690 2691 if (idx < -1 || idx >= SGE_NCOUNTERS) 2692 return (EINVAL); 2693 2694 ADAPTER_LOCK(sc); 2695 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 2696 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING) 2697 rc = EBUSY; /* can be changed only when port is down */ 2698 2699 if (rc == 0) 2700 pi->pktc_idx = idx; 2701 2702 ADAPTER_UNLOCK(sc); 2703 return (rc); 2704 } 2705 2706 static int 2707 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 2708 { 2709 struct port_info *pi = arg1; 2710 struct adapter *sc = pi->adapter; 2711 int qsize, rc; 2712 2713 qsize = pi->qsize_rxq; 2714 2715 rc = sysctl_handle_int(oidp, &qsize, 0, req); 2716 if (rc != 0 || req->newptr == NULL) 2717 return (rc); 2718 2719 if (qsize < 128 || (qsize & 7)) 2720 return (EINVAL); 2721 2722 ADAPTER_LOCK(sc); 2723 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 2724 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING) 2725 rc = EBUSY; /* can be changed only when port is down */ 2726 2727 if (rc == 0) 2728 pi->qsize_rxq = qsize; 2729 2730 ADAPTER_UNLOCK(sc); 2731 return (rc); 2732 } 2733 2734 static int 2735 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 2736 { 2737 struct port_info *pi = arg1; 2738 struct adapter *sc = pi->adapter; 2739 int qsize, rc; 2740 2741 qsize = pi->qsize_txq; 2742 2743 rc = sysctl_handle_int(oidp, &qsize, 0, req); 2744 if (rc != 0 || req->newptr == NULL) 2745 return (rc); 2746 2747 if (qsize < 128) 2748 return (EINVAL); 2749 2750 ADAPTER_LOCK(sc); 2751 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 2752 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING) 2753 rc = EBUSY; /* can be changed only when port is down */ 2754 2755 if (rc == 0) 2756 pi->qsize_txq = qsize; 2757 2758 ADAPTER_UNLOCK(sc); 2759 return (rc); 2760 } 2761 2762 static int 2763 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 2764 { 2765 struct adapter *sc = arg1; 2766 int reg = arg2; 2767 uint64_t val; 2768 2769 val = t4_read_reg64(sc, reg); 2770 2771 return (sysctl_handle_64(oidp, &val, 0, req)); 2772 } 2773 2774 const char *devlog_level_strings[] = { 2775 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 2776 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 2777 [FW_DEVLOG_LEVEL_ERR] = "ERR", 2778 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 2779 [FW_DEVLOG_LEVEL_INFO] = "INFO", 2780 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 2781 }; 2782 2783 const char *devlog_facility_strings[] = { 2784 [FW_DEVLOG_FACILITY_CORE] = "CORE", 2785 [FW_DEVLOG_FACILITY_SCHED] = "SCHED", 2786 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 2787 [FW_DEVLOG_FACILITY_RES] = "RES", 2788 [FW_DEVLOG_FACILITY_HW] = "HW", 2789 [FW_DEVLOG_FACILITY_FLR] = "FLR", 2790 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 2791 [FW_DEVLOG_FACILITY_PHY] = "PHY", 2792 [FW_DEVLOG_FACILITY_MAC] = "MAC", 2793 [FW_DEVLOG_FACILITY_PORT] = "PORT", 2794 [FW_DEVLOG_FACILITY_VI] = "VI", 2795 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 2796 [FW_DEVLOG_FACILITY_ACL] = "ACL", 2797 [FW_DEVLOG_FACILITY_TM] = "TM", 2798 [FW_DEVLOG_FACILITY_QFC] = "QFC", 2799 [FW_DEVLOG_FACILITY_DCB] = "DCB", 2800 [FW_DEVLOG_FACILITY_ETH] = "ETH", 2801 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 2802 [FW_DEVLOG_FACILITY_RI] = "RI", 2803 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 2804 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 2805 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 2806 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE" 2807 }; 2808 2809 static int 2810 sysctl_devlog(SYSCTL_HANDLER_ARGS) 2811 { 2812 struct adapter *sc = arg1; 2813 struct devlog_params *dparams = &sc->params.devlog; 2814 struct fw_devlog_e *buf, *e; 2815 int i, j, rc, nentries, first = 0; 2816 struct sbuf *sb; 2817 uint64_t ftstamp = UINT64_MAX; 2818 2819 if (dparams->start == 0) 2820 return (ENXIO); 2821 2822 nentries = dparams->size / sizeof(struct fw_devlog_e); 2823 2824 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 2825 if (buf == NULL) 2826 return (ENOMEM); 2827 2828 rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size, 2829 (void *)buf); 2830 if (rc != 0) 2831 goto done; 2832 2833 for (i = 0; i < nentries; i++) { 2834 e = &buf[i]; 2835 2836 if (e->timestamp == 0) 2837 break; /* end */ 2838 2839 e->timestamp = be64toh(e->timestamp); 2840 e->seqno = be32toh(e->seqno); 2841 for (j = 0; j < 8; j++) 2842 e->params[j] = be32toh(e->params[j]); 2843 2844 if (e->timestamp < ftstamp) { 2845 ftstamp = e->timestamp; 2846 first = i; 2847 } 2848 } 2849 2850 if (buf[first].timestamp == 0) 2851 goto done; /* nothing in the log */ 2852 2853 rc = sysctl_wire_old_buffer(req, 0); 2854 if (rc != 0) 2855 goto done; 2856 2857 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 2858 sbuf_printf(sb, "\n%10s %15s %8s %8s %s\n", 2859 "Seq#", "Tstamp", "Level", "Facility", "Message"); 2860 2861 i = first; 2862 do { 2863 e = &buf[i]; 2864 if (e->timestamp == 0) 2865 break; /* end */ 2866 2867 sbuf_printf(sb, "%10d %15ju %8s %8s ", 2868 e->seqno, e->timestamp, 2869 (e->level < ARRAY_SIZE(devlog_level_strings) ? 2870 devlog_level_strings[e->level] : "UNKNOWN"), 2871 (e->facility < ARRAY_SIZE(devlog_facility_strings) ? 2872 devlog_facility_strings[e->facility] : "UNKNOWN")); 2873 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 2874 e->params[2], e->params[3], e->params[4], 2875 e->params[5], e->params[6], e->params[7]); 2876 2877 if (++i == nentries) 2878 i = 0; 2879 } while (i != first); 2880 2881 rc = sbuf_finish(sb); 2882 sbuf_delete(sb); 2883 done: 2884 free(buf, M_CXGBE); 2885 return (rc); 2886 } 2887 2888 static inline void 2889 txq_start(struct ifnet *ifp, struct sge_txq *txq) 2890 { 2891 struct buf_ring *br; 2892 struct mbuf *m; 2893 2894 TXQ_LOCK_ASSERT_OWNED(txq); 2895 2896 br = txq->br; 2897 m = txq->m ? txq->m : drbr_dequeue(ifp, br); 2898 if (m) 2899 t4_eth_tx(ifp, txq, m); 2900 } 2901 2902 void 2903 cxgbe_txq_start(void *arg, int count) 2904 { 2905 struct sge_txq *txq = arg; 2906 2907 TXQ_LOCK(txq); 2908 if (txq->eq.flags & EQ_CRFLUSHED) { 2909 txq->eq.flags &= ~EQ_CRFLUSHED; 2910 txq_start(txq->ifp, txq); 2911 } else 2912 wakeup_one(txq); /* txq is going away, wakeup free_txq */ 2913 TXQ_UNLOCK(txq); 2914 } 2915 2916 static uint32_t 2917 fconf_to_mode(uint32_t fconf) 2918 { 2919 uint32_t mode; 2920 2921 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | 2922 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; 2923 2924 if (fconf & F_FRAGMENTATION) 2925 mode |= T4_FILTER_IP_FRAGMENT; 2926 2927 if (fconf & F_MPSHITTYPE) 2928 mode |= T4_FILTER_MPS_HIT_TYPE; 2929 2930 if (fconf & F_MACMATCH) 2931 mode |= T4_FILTER_MAC_IDX; 2932 2933 if (fconf & F_ETHERTYPE) 2934 mode |= T4_FILTER_ETH_TYPE; 2935 2936 if (fconf & F_PROTOCOL) 2937 mode |= T4_FILTER_IP_PROTO; 2938 2939 if (fconf & F_TOS) 2940 mode |= T4_FILTER_IP_TOS; 2941 2942 if (fconf & F_VLAN) 2943 mode |= T4_FILTER_IVLAN; 2944 2945 if (fconf & F_VNIC_ID) 2946 mode |= T4_FILTER_OVLAN; 2947 2948 if (fconf & F_PORT) 2949 mode |= T4_FILTER_PORT; 2950 2951 if (fconf & F_FCOE) 2952 mode |= T4_FILTER_FCoE; 2953 2954 return (mode); 2955 } 2956 2957 static uint32_t 2958 mode_to_fconf(uint32_t mode) 2959 { 2960 uint32_t fconf = 0; 2961 2962 if (mode & T4_FILTER_IP_FRAGMENT) 2963 fconf |= F_FRAGMENTATION; 2964 2965 if (mode & T4_FILTER_MPS_HIT_TYPE) 2966 fconf |= F_MPSHITTYPE; 2967 2968 if (mode & T4_FILTER_MAC_IDX) 2969 fconf |= F_MACMATCH; 2970 2971 if (mode & T4_FILTER_ETH_TYPE) 2972 fconf |= F_ETHERTYPE; 2973 2974 if (mode & T4_FILTER_IP_PROTO) 2975 fconf |= F_PROTOCOL; 2976 2977 if (mode & T4_FILTER_IP_TOS) 2978 fconf |= F_TOS; 2979 2980 if (mode & T4_FILTER_IVLAN) 2981 fconf |= F_VLAN; 2982 2983 if (mode & T4_FILTER_OVLAN) 2984 fconf |= F_VNIC_ID; 2985 2986 if (mode & T4_FILTER_PORT) 2987 fconf |= F_PORT; 2988 2989 if (mode & T4_FILTER_FCoE) 2990 fconf |= F_FCOE; 2991 2992 return (fconf); 2993 } 2994 2995 static uint32_t 2996 fspec_to_fconf(struct t4_filter_specification *fs) 2997 { 2998 uint32_t fconf = 0; 2999 3000 if (fs->val.frag || fs->mask.frag) 3001 fconf |= F_FRAGMENTATION; 3002 3003 if (fs->val.matchtype || fs->mask.matchtype) 3004 fconf |= F_MPSHITTYPE; 3005 3006 if (fs->val.macidx || fs->mask.macidx) 3007 fconf |= F_MACMATCH; 3008 3009 if (fs->val.ethtype || fs->mask.ethtype) 3010 fconf |= F_ETHERTYPE; 3011 3012 if (fs->val.proto || fs->mask.proto) 3013 fconf |= F_PROTOCOL; 3014 3015 if (fs->val.tos || fs->mask.tos) 3016 fconf |= F_TOS; 3017 3018 if (fs->val.ivlan_vld || fs->mask.ivlan_vld) 3019 fconf |= F_VLAN; 3020 3021 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) 3022 fconf |= F_VNIC_ID; 3023 3024 if (fs->val.iport || fs->mask.iport) 3025 fconf |= F_PORT; 3026 3027 if (fs->val.fcoe || fs->mask.fcoe) 3028 fconf |= F_FCOE; 3029 3030 return (fconf); 3031 } 3032 3033 static int 3034 get_filter_mode(struct adapter *sc, uint32_t *mode) 3035 { 3036 uint32_t fconf; 3037 3038 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1, 3039 A_TP_VLAN_PRI_MAP); 3040 3041 *mode = fconf_to_mode(fconf); 3042 3043 return (0); 3044 } 3045 3046 static int 3047 set_filter_mode(struct adapter *sc, uint32_t mode) 3048 { 3049 uint32_t fconf; 3050 int rc; 3051 3052 fconf = mode_to_fconf(mode); 3053 3054 ADAPTER_LOCK(sc); 3055 if (IS_BUSY(sc)) { 3056 rc = EAGAIN; 3057 goto done; 3058 } 3059 3060 if (sc->tids.ftids_in_use > 0) { 3061 rc = EBUSY; 3062 goto done; 3063 } 3064 3065 rc = -t4_set_filter_mode(sc, fconf); 3066 done: 3067 ADAPTER_UNLOCK(sc); 3068 return (rc); 3069 } 3070 3071 static inline uint64_t 3072 get_filter_hits(struct adapter *sc, uint32_t fid) 3073 { 3074 uint32_t tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 3075 uint64_t hits; 3076 3077 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0), 3078 tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE); 3079 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0)); 3080 hits = t4_read_reg64(sc, MEMWIN0_BASE + 16); 3081 3082 return (be64toh(hits)); 3083 } 3084 3085 static int 3086 get_filter(struct adapter *sc, struct t4_filter *t) 3087 { 3088 int i, nfilters = sc->tids.nftids; 3089 struct filter_entry *f; 3090 3091 ADAPTER_LOCK_ASSERT_OWNED(sc); 3092 3093 if (IS_BUSY(sc)) 3094 return (EAGAIN); 3095 3096 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 3097 t->idx >= nfilters) { 3098 t->idx = 0xffffffff; 3099 return (0); 3100 } 3101 3102 f = &sc->tids.ftid_tab[t->idx]; 3103 for (i = t->idx; i < nfilters; i++, f++) { 3104 if (f->valid) { 3105 t->idx = i; 3106 t->l2tidx = f->l2t ? f->l2t->idx : 0; 3107 t->smtidx = f->smtidx; 3108 if (f->fs.hitcnts) 3109 t->hits = get_filter_hits(sc, t->idx); 3110 else 3111 t->hits = UINT64_MAX; 3112 t->fs = f->fs; 3113 3114 return (0); 3115 } 3116 } 3117 3118 t->idx = 0xffffffff; 3119 return (0); 3120 } 3121 3122 static int 3123 set_filter(struct adapter *sc, struct t4_filter *t) 3124 { 3125 uint32_t fconf; 3126 unsigned int nfilters, nports; 3127 struct filter_entry *f; 3128 int i; 3129 3130 ADAPTER_LOCK_ASSERT_OWNED(sc); 3131 3132 nfilters = sc->tids.nftids; 3133 nports = sc->params.nports; 3134 3135 if (nfilters == 0) 3136 return (ENOTSUP); 3137 3138 if (!(sc->flags & FULL_INIT_DONE)) 3139 return (EAGAIN); 3140 3141 if (t->idx >= nfilters) 3142 return (EINVAL); 3143 3144 /* Validate against the global filter mode */ 3145 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1, 3146 A_TP_VLAN_PRI_MAP); 3147 if ((fconf | fspec_to_fconf(&t->fs)) != fconf) 3148 return (E2BIG); 3149 3150 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) 3151 return (EINVAL); 3152 3153 if (t->fs.val.iport >= nports) 3154 return (EINVAL); 3155 3156 /* Can't specify an iq if not steering to it */ 3157 if (!t->fs.dirsteer && t->fs.iq) 3158 return (EINVAL); 3159 3160 /* IPv6 filter idx must be 4 aligned */ 3161 if (t->fs.type == 1 && 3162 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) 3163 return (EINVAL); 3164 3165 if (sc->tids.ftid_tab == NULL) { 3166 KASSERT(sc->tids.ftids_in_use == 0, 3167 ("%s: no memory allocated but filters_in_use > 0", 3168 __func__)); 3169 3170 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 3171 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 3172 if (sc->tids.ftid_tab == NULL) 3173 return (ENOMEM); 3174 } 3175 3176 for (i = 0; i < 4; i++) { 3177 f = &sc->tids.ftid_tab[t->idx + i]; 3178 3179 if (f->pending || f->valid) 3180 return (EBUSY); 3181 if (f->locked) 3182 return (EPERM); 3183 3184 if (t->fs.type == 0) 3185 break; 3186 } 3187 3188 f = &sc->tids.ftid_tab[t->idx]; 3189 f->fs = t->fs; 3190 3191 return set_filter_wr(sc, t->idx); 3192 } 3193 3194 static int 3195 del_filter(struct adapter *sc, struct t4_filter *t) 3196 { 3197 unsigned int nfilters; 3198 struct filter_entry *f; 3199 3200 ADAPTER_LOCK_ASSERT_OWNED(sc); 3201 3202 if (IS_BUSY(sc)) 3203 return (EAGAIN); 3204 3205 nfilters = sc->tids.nftids; 3206 3207 if (nfilters == 0) 3208 return (ENOTSUP); 3209 3210 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 || 3211 t->idx >= nfilters) 3212 return (EINVAL); 3213 3214 if (!(sc->flags & FULL_INIT_DONE)) 3215 return (EAGAIN); 3216 3217 f = &sc->tids.ftid_tab[t->idx]; 3218 3219 if (f->pending) 3220 return (EBUSY); 3221 if (f->locked) 3222 return (EPERM); 3223 3224 if (f->valid) { 3225 t->fs = f->fs; /* extra info for the caller */ 3226 return del_filter_wr(sc, t->idx); 3227 } 3228 3229 return (0); 3230 } 3231 3232 static void 3233 clear_filter(struct filter_entry *f) 3234 { 3235 if (f->l2t) 3236 t4_l2t_release(f->l2t); 3237 3238 bzero(f, sizeof (*f)); 3239 } 3240 3241 static int 3242 set_filter_wr(struct adapter *sc, int fidx) 3243 { 3244 int rc; 3245 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 3246 struct mbuf *m; 3247 struct fw_filter_wr *fwr; 3248 unsigned int ftid; 3249 3250 ADAPTER_LOCK_ASSERT_OWNED(sc); 3251 3252 if (f->fs.newdmac || f->fs.newvlan) { 3253 /* This filter needs an L2T entry; allocate one. */ 3254 f->l2t = t4_l2t_alloc_switching(sc->l2t); 3255 if (f->l2t == NULL) 3256 return (EAGAIN); 3257 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport, 3258 f->fs.dmac)) { 3259 t4_l2t_release(f->l2t); 3260 f->l2t = NULL; 3261 return (ENOMEM); 3262 } 3263 } 3264 3265 ftid = sc->tids.ftid_base + fidx; 3266 3267 m = m_gethdr(M_NOWAIT, MT_DATA); 3268 if (m == NULL) 3269 return (ENOMEM); 3270 3271 fwr = mtod(m, struct fw_filter_wr *); 3272 m->m_len = m->m_pkthdr.len = sizeof(*fwr); 3273 bzero(fwr, sizeof (*fwr)); 3274 3275 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); 3276 fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); 3277 fwr->tid_to_iq = 3278 htobe32(V_FW_FILTER_WR_TID(ftid) | 3279 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 3280 V_FW_FILTER_WR_NOREPLY(0) | 3281 V_FW_FILTER_WR_IQ(f->fs.iq)); 3282 fwr->del_filter_to_l2tix = 3283 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 3284 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 3285 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 3286 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 3287 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 3288 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 3289 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 3290 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 3291 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 3292 f->fs.newvlan == VLAN_REWRITE) | 3293 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 3294 f->fs.newvlan == VLAN_REWRITE) | 3295 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 3296 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 3297 V_FW_FILTER_WR_PRIO(f->fs.prio) | 3298 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0)); 3299 fwr->ethtype = htobe16(f->fs.val.ethtype); 3300 fwr->ethtypem = htobe16(f->fs.mask.ethtype); 3301 fwr->frag_to_ovlan_vldm = 3302 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 3303 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 3304 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) | 3305 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) | 3306 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) | 3307 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld)); 3308 fwr->smac_sel = 0; 3309 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | 3310 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.intrq[0].abs_id)); 3311 fwr->maci_to_matchtypem = 3312 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 3313 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 3314 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 3315 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 3316 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 3317 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 3318 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 3319 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 3320 fwr->ptcl = f->fs.val.proto; 3321 fwr->ptclm = f->fs.mask.proto; 3322 fwr->ttyp = f->fs.val.tos; 3323 fwr->ttypm = f->fs.mask.tos; 3324 fwr->ivlan = htobe16(f->fs.val.ivlan); 3325 fwr->ivlanm = htobe16(f->fs.mask.ivlan); 3326 fwr->ovlan = htobe16(f->fs.val.ovlan); 3327 fwr->ovlanm = htobe16(f->fs.mask.ovlan); 3328 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); 3329 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); 3330 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); 3331 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); 3332 fwr->lp = htobe16(f->fs.val.dport); 3333 fwr->lpm = htobe16(f->fs.mask.dport); 3334 fwr->fp = htobe16(f->fs.val.sport); 3335 fwr->fpm = htobe16(f->fs.mask.sport); 3336 if (f->fs.newsmac) 3337 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); 3338 3339 f->pending = 1; 3340 sc->tids.ftids_in_use++; 3341 rc = t4_mgmt_tx(sc, m); 3342 if (rc != 0) { 3343 sc->tids.ftids_in_use--; 3344 m_freem(m); 3345 clear_filter(f); 3346 } 3347 return (rc); 3348 } 3349 3350 static int 3351 del_filter_wr(struct adapter *sc, int fidx) 3352 { 3353 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 3354 struct mbuf *m; 3355 struct fw_filter_wr *fwr; 3356 unsigned int rc, ftid; 3357 3358 ADAPTER_LOCK_ASSERT_OWNED(sc); 3359 3360 ftid = sc->tids.ftid_base + fidx; 3361 3362 m = m_gethdr(M_NOWAIT, MT_DATA); 3363 if (m == NULL) 3364 return (ENOMEM); 3365 3366 fwr = mtod(m, struct fw_filter_wr *); 3367 m->m_len = m->m_pkthdr.len = sizeof(*fwr); 3368 bzero(fwr, sizeof (*fwr)); 3369 3370 t4_mk_filtdelwr(ftid, fwr, sc->sge.intrq[0].abs_id); 3371 3372 f->pending = 1; 3373 rc = t4_mgmt_tx(sc, m); 3374 if (rc != 0) { 3375 f->pending = 0; 3376 m_freem(m); 3377 } 3378 return (rc); 3379 } 3380 3381 /* XXX move intr handlers to main.c and make this static */ 3382 void 3383 filter_rpl(struct adapter *sc, const struct cpl_set_tcb_rpl *rpl) 3384 { 3385 unsigned int idx = GET_TID(rpl); 3386 3387 if (idx >= sc->tids.ftid_base && 3388 (idx -= sc->tids.ftid_base) < sc->tids.nftids) { 3389 unsigned int rc = G_COOKIE(rpl->cookie); 3390 struct filter_entry *f = &sc->tids.ftid_tab[idx]; 3391 3392 if (rc == FW_FILTER_WR_FLT_DELETED) { 3393 /* 3394 * Clear the filter when we get confirmation from the 3395 * hardware that the filter has been deleted. 3396 */ 3397 clear_filter(f); 3398 sc->tids.ftids_in_use--; 3399 } else if (rc == FW_FILTER_WR_SMT_TBL_FULL) { 3400 device_printf(sc->dev, 3401 "filter %u setup failed due to full SMT\n", idx); 3402 clear_filter(f); 3403 sc->tids.ftids_in_use--; 3404 } else if (rc == FW_FILTER_WR_FLT_ADDED) { 3405 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; 3406 f->pending = 0; /* asynchronous setup completed */ 3407 f->valid = 1; 3408 } else { 3409 /* 3410 * Something went wrong. Issue a warning about the 3411 * problem and clear everything out. 3412 */ 3413 device_printf(sc->dev, 3414 "filter %u setup failed with error %u\n", idx, rc); 3415 clear_filter(f); 3416 sc->tids.ftids_in_use--; 3417 } 3418 } 3419 } 3420 3421 static int 3422 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) 3423 { 3424 int rc = EINVAL; 3425 3426 if (cntxt->cid > M_CTXTQID) 3427 return (rc); 3428 3429 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && 3430 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) 3431 return (rc); 3432 3433 if (sc->flags & FW_OK) { 3434 ADAPTER_LOCK(sc); /* Avoid parallel t4_wr_mbox */ 3435 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, 3436 &cntxt->data[0]); 3437 ADAPTER_UNLOCK(sc); 3438 } 3439 3440 if (rc != 0) { 3441 /* Read via firmware failed or wasn't even attempted */ 3442 3443 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, 3444 &cntxt->data[0]); 3445 } 3446 3447 return (rc); 3448 } 3449 3450 int 3451 t4_os_find_pci_capability(struct adapter *sc, int cap) 3452 { 3453 int i; 3454 3455 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0); 3456 } 3457 3458 int 3459 t4_os_pci_save_state(struct adapter *sc) 3460 { 3461 device_t dev; 3462 struct pci_devinfo *dinfo; 3463 3464 dev = sc->dev; 3465 dinfo = device_get_ivars(dev); 3466 3467 pci_cfg_save(dev, dinfo, 0); 3468 return (0); 3469 } 3470 3471 int 3472 t4_os_pci_restore_state(struct adapter *sc) 3473 { 3474 device_t dev; 3475 struct pci_devinfo *dinfo; 3476 3477 dev = sc->dev; 3478 dinfo = device_get_ivars(dev); 3479 3480 pci_cfg_restore(dev, dinfo); 3481 return (0); 3482 } 3483 3484 void 3485 t4_os_portmod_changed(const struct adapter *sc, int idx) 3486 { 3487 struct port_info *pi = sc->port[idx]; 3488 static const char *mod_str[] = { 3489 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 3490 }; 3491 3492 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 3493 if_printf(pi->ifp, "transceiver unplugged.\n"); 3494 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 3495 if_printf(pi->ifp, "unknown transceiver inserted.\n"); 3496 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 3497 if_printf(pi->ifp, "unsupported transceiver inserted.\n"); 3498 else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str)) { 3499 if_printf(pi->ifp, "%s transceiver inserted.\n", 3500 mod_str[pi->mod_type]); 3501 } else { 3502 if_printf(pi->ifp, "transceiver (type %d) inserted.\n", 3503 pi->mod_type); 3504 } 3505 } 3506 3507 void 3508 t4_os_link_changed(struct adapter *sc, int idx, int link_stat) 3509 { 3510 struct port_info *pi = sc->port[idx]; 3511 struct ifnet *ifp = pi->ifp; 3512 3513 if (link_stat) { 3514 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed); 3515 if_link_state_change(ifp, LINK_STATE_UP); 3516 } else 3517 if_link_state_change(ifp, LINK_STATE_DOWN); 3518 } 3519 3520 static int 3521 t4_open(struct cdev *dev, int flags, int type, struct thread *td) 3522 { 3523 return (0); 3524 } 3525 3526 static int 3527 t4_close(struct cdev *dev, int flags, int type, struct thread *td) 3528 { 3529 return (0); 3530 } 3531 3532 static int 3533 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 3534 struct thread *td) 3535 { 3536 int rc; 3537 struct adapter *sc = dev->si_drv1; 3538 3539 rc = priv_check(td, PRIV_DRIVER); 3540 if (rc != 0) 3541 return (rc); 3542 3543 switch (cmd) { 3544 case CHELSIO_T4_GETREG: { 3545 struct t4_reg *edata = (struct t4_reg *)data; 3546 3547 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 3548 return (EFAULT); 3549 3550 if (edata->size == 4) 3551 edata->val = t4_read_reg(sc, edata->addr); 3552 else if (edata->size == 8) 3553 edata->val = t4_read_reg64(sc, edata->addr); 3554 else 3555 return (EINVAL); 3556 3557 break; 3558 } 3559 case CHELSIO_T4_SETREG: { 3560 struct t4_reg *edata = (struct t4_reg *)data; 3561 3562 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 3563 return (EFAULT); 3564 3565 if (edata->size == 4) { 3566 if (edata->val & 0xffffffff00000000) 3567 return (EINVAL); 3568 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); 3569 } else if (edata->size == 8) 3570 t4_write_reg64(sc, edata->addr, edata->val); 3571 else 3572 return (EINVAL); 3573 break; 3574 } 3575 case CHELSIO_T4_REGDUMP: { 3576 struct t4_regdump *regs = (struct t4_regdump *)data; 3577 int reglen = T4_REGDUMP_SIZE; 3578 uint8_t *buf; 3579 3580 if (regs->len < reglen) { 3581 regs->len = reglen; /* hint to the caller */ 3582 return (ENOBUFS); 3583 } 3584 3585 regs->len = reglen; 3586 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); 3587 t4_get_regs(sc, regs, buf); 3588 rc = copyout(buf, regs->data, reglen); 3589 free(buf, M_CXGBE); 3590 break; 3591 } 3592 case CHELSIO_T4_GET_FILTER_MODE: 3593 rc = get_filter_mode(sc, (uint32_t *)data); 3594 break; 3595 case CHELSIO_T4_SET_FILTER_MODE: 3596 rc = set_filter_mode(sc, *(uint32_t *)data); 3597 break; 3598 case CHELSIO_T4_GET_FILTER: 3599 ADAPTER_LOCK(sc); 3600 rc = get_filter(sc, (struct t4_filter *)data); 3601 ADAPTER_UNLOCK(sc); 3602 break; 3603 case CHELSIO_T4_SET_FILTER: 3604 ADAPTER_LOCK(sc); 3605 rc = set_filter(sc, (struct t4_filter *)data); 3606 ADAPTER_UNLOCK(sc); 3607 break; 3608 case CHELSIO_T4_DEL_FILTER: 3609 ADAPTER_LOCK(sc); 3610 rc = del_filter(sc, (struct t4_filter *)data); 3611 ADAPTER_UNLOCK(sc); 3612 break; 3613 case CHELSIO_T4_GET_SGE_CONTEXT: 3614 rc = get_sge_context(sc, (struct t4_sge_context *)data); 3615 break; 3616 default: 3617 rc = EINVAL; 3618 } 3619 3620 return (rc); 3621 } 3622 3623 static int 3624 t4_mod_event(module_t mod, int cmd, void *arg) 3625 { 3626 3627 if (cmd == MOD_LOAD) 3628 t4_sge_modload(); 3629 3630 return (0); 3631 } 3632 3633 static devclass_t t4_devclass; 3634 static devclass_t cxgbe_devclass; 3635 3636 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0); 3637 MODULE_VERSION(t4nex, 1); 3638 3639 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0); 3640 MODULE_VERSION(cxgbe, 1); 3641