/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};
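
/*
 * Entry points for the t4nex character device created in t4_attach().  This
 * is what the userland tool (cxgbetool) talks to for register i/o and memory
 * reads, and it remains available even in "recovery mode".
 */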

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct mtx t4_list_lock;
static SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct mtx t4_uld_list_lock;
static SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 */

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
static char t4_cfg_file[32] = "default";
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * ASIC features that will be used.  Disable the ones you don't want so that
 * the chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

struct intrs_and_queues {
	int intr_type;		/* INTx, MSI, or MSI-X */
	int nirq;		/* Number of vectors */
	int intr_flags;
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
#ifdef TCP_OFFLOAD
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),
	XGMAC_UCADDR	= (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff
};

static int map_bars(struct adapter *);
static void setup_memwin(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int upload_config_file(struct adapter *, const struct firmware *,
    uint32_t *, uint32_t *);
static int partition_resources(struct adapter *, const struct firmware *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
static int cxgbe_init_locked(struct port_info *);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_locked(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int adapter_full_init(struct adapter *);
static int adapter_full_uninit(struct adapter *);
static int port_full_init(struct port_info *);
static int port_full_uninit(struct port_info *);
static void quiesce_eq(struct adapter *, struct sge_eq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
#endif
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int read_card_mem(struct adapter *, struct t4_mem_range *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int t4_mod_event(module_t, int, void *);

struct t4_pciids {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
};
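
/*
 * t4_probe() attaches based on this table; the 0xa000 FPGA entry is
 * special-cased there so that only PF0 of the FPGA is claimed.
 */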

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(ARRAY_SIZE(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(ARRAY_SIZE(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < ARRAY_SIZE(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif

	sc = device_get_softc(dev);
	sc->dev = dev;

	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, 2);
		v |= PCIM_EXP_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, v, 2);
	}

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	mtx_lock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	mtx_unlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

	rc = map_bars(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within
	 * PCI passthrough environments too, where pci_get_function() could
	 * return a different PF# depending on the passthrough configuration.
	 * We need to use the real PF# in all our communication with the
	 * firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
	sc->mbox = sc->pf;

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
	sc->an_handler = an_not_handled;
	for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++)
		sc->cpl_handler[i] = cpl_not_handled;
	for (i = 0; i < ARRAY_SIZE(sc->fw_msg_handler); i++)
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT,
	    GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
	sc->cdev->si_drv1 = sc;

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	rc = t4_sge_init(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	if (sc->flags & MASTER_PF) {
		/* get basic stuff going */
		rc = -t4_fw_initialize(sc, sc->mbox);
		if (rc != 0) {
			device_printf(dev, "early init failed: %d.\n", rc);
			goto done;
		}
	}

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	if (sc->flags & MASTER_PF) {
		uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);

		/* final tweaks to some settings */

		t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd,
		    sc->params.b_wnd);
		/* 4K, 16K, 64K, 256K DDP "page sizes" */
		t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(0) | V_HPZ1(2) |
		    V_HPZ2(4) | V_HPZ3(6));
		t4_set_reg_field(sc, A_ULP_RX_CTL, F_TDDPTAGTCB, F_TDDPTAGTCB);
		t4_set_reg_field(sc, A_TP_PARA_REG3, F_TUNNELCNGDROP0 |
		    F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | F_TUNNELCNGDROP3,
		    F_TUNNELCNGDROP0 | F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 |
		    F_TUNNELCNGDROP3);
		t4_set_reg_field(sc, A_TP_PARA_REG5,
		    V_INDICATESIZE(M_INDICATESIZE) |
		    F_REARMDDPOFFSET | F_RESETDDPOFFSET,
		    V_INDICATESIZE(indsz) |
		    F_REARMDDPOFFSET | F_RESETDDPOFFSET);
	} else {
		/*
		 * XXX: Verify that we can live with whatever the master driver
		 * has done so far, and hope that it doesn't change any global
		 * setting from underneath us in the future.
		 */
	}

	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
	    A_TP_VLAN_PRI_MAP);

	for (i = 0; i < NCHAN; i++)
		sc->params.tp.tx_modq[i] = i;

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);

		if (is_10G_port(pi)) {
			n10g++;
			pi->tmr_idx = t4_tmr_idx_10g;
			pi->pktc_idx = t4_pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = t4_tmr_idx_1g;
			pi->pktc_idx = t4_pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;

		pi->qsize_rxq = t4_qsize_rxq;
		pi->qsize_txq = t4_qsize_txq;

		pi->dev = device_add_child(dev, "cxgbe", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done;	/* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;
	sc->flags |= iaq.intr_flags;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;	/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {

		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);
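
	/*
	 * The L2 table allocated above is used, among other things, by
	 * filters that rewrite the destination MAC (see struct filter_entry).
	 */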

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->first_txq = tqidx;
		if (is_10G_port(pi)) {
			pi->nrxq = iaq.nrxq10g;
			pi->ntxq = iaq.ntxq10g;
		} else {
			pi->nrxq = iaq.nrxq1g;
			pi->ntxq = iaq.ntxq1g;
		}

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD
		if (is_offload(sc)) {
			pi->first_ofld_rxq = ofld_rqidx;
			pi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi)) {
				pi->nofldrxq = iaq.nofldrxq10g;
				pi->nofldtxq = iaq.nofldtxq10g;
			} else {
				pi->nofldrxq = iaq.nofldrxq1g;
				pi->nofldtxq = iaq.nofldtxq1g;
			}
			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.width, sc->params.nports, sc->intr_count,
	    sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		mtx_lock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		mtx_unlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);

	bzero(sc, sizeof(*sc));

	return (0);
}


static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE4;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi);

	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		device_printf(dev,
		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
	} else
#endif
		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

	cxgbe_sysctls(pi);

	return (0);
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
	ADAPTER_UNLOCK(sc);

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);
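
	/*
	 * The tick callout was stopped with the port lock held and then
	 * drained without it, so a cxgbe_tick() already running on another
	 * CPU has finished by this point.
	 */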

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup_one(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	ADAPTER_LOCK(sc);
	cxgbe_init_locked(pi); /* releases adapter lock */
	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		ADAPTER_LOCK(sc);
		rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (rc) {
fail:
			ADAPTER_UNLOCK(sc);
			return (rc);
		}

		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
			rc = EINVAL;
		} else {
			ifp->if_mtu = mtu;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				t4_update_fl_bufsize(ifp);
				PORT_LOCK(pi);
				rc = update_mac_settings(pi, XGMAC_MTU);
				PORT_UNLOCK(pi);
			}
		}
		ADAPTER_UNLOCK(sc);
		break;

	case SIOCSIFFLAGS:
		ADAPTER_LOCK(sc);
		if (IS_DOOMED(pi)) {
			rc = ENXIO;
			goto fail;
		}
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (IS_BUSY(sc)) {
						rc = EBUSY;
						goto fail;
					}
					PORT_LOCK(pi);
					rc = update_mac_settings(pi,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
					PORT_UNLOCK(pi);
				}
				ADAPTER_UNLOCK(sc);
			} else
				rc = cxgbe_init_locked(pi);
			pi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_uninit_locked(pi);
		else
			ADAPTER_UNLOCK(sc);

		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two can be called with a mutex held :-( */
		ADAPTER_LOCK(sc);
		rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (rc)
			goto fail;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			PORT_LOCK(pi);
			rc = update_mac_settings(pi, XGMAC_MCADDRS);
			PORT_UNLOCK(pi);
		}
		ADAPTER_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		ADAPTER_LOCK(sc);
		rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (rc)
			goto fail;

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account
		 * before sending a TSO request our way, so it's sufficient to
		 * toggle IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(pi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(pi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				PORT_LOCK(pi);
				rc = update_mac_settings(pi, XGMAC_VLANEX);
				PORT_UNLOCK(pi);
			}
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
		ADAPTER_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

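	/*
	 * Select a tx queue using the mbuf's flowid when it has one; otherwise
	 * the port's first tx queue (set up above) is used.
	 */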
	if (m->m_flags & M_FLOWID)
		txq += (m->m_pkthdr.flowid % pi->ntxq);
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		struct sge_eq *eq = &txq->eq;

		/*
		 * It is possible that t4_eth_tx finishes up and releases the
		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
		 * need to make sure that this mbuf doesn't just sit there in
		 * the drbr.
		 */

		rc = drbr_enqueue(ifp, br, m);
		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
		    !(eq->flags & EQ_DOOMED))
			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
		return (rc);
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}

static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;
	struct sge_txq *txq;
	int i;
	struct mbuf *m;

	/* queues do not exist if !PORT_INIT_DONE. */
	if (pi->flags & PORT_INIT_DONE) {
		for_each_txq(pi, i, txq) {
			TXQ_LOCK(txq);
			m_freem(txq->m);
			txq->m = NULL;
			while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
				m_freem(m);
			TXQ_UNLOCK(txq);
		}
	}
	if_qflush(ifp);
}

static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;

	device_printf(pi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}

static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *pi = ifp->if_softc;
	struct ifmedia_entry *cur = pi->media.ifm_cur;
	int speed = pi->link_cfg.speed;
	int data = (pi->port_type << 8) | pi->mod_type;

	if (cur->ifm_data != data) {
		build_medialist(pi);
		cur = pi->media.ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
		    speed));
}

void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}

static int
map_bars(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}
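
/*
 * Note: the windows programmed in setup_memwin() below are relied on later in
 * this file, e.g. upload_config_file() positions memwin2 over its upload
 * area, and the comment in t4_attach() notes that userland memory reads work
 * once the windows are set up.
 */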

static void
setup_memwin(struct adapter *sc)
{
	uint32_t bar0;

	/*
	 * Read low 32b of bar0 indirectly via the hardware backdoor mechanism.
	 * Works from within PCI passthrough environments too, where
	 * rman_get_start() can return a different value.  We need to program
	 * the memory window decoders with the actual addresses that will be
	 * coming across the PCIe link.
	 */
	bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
	bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
	    (bar0 + MEMWIN0_BASE) | V_BIR(0) |
	    V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));

	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
	    (bar0 + MEMWIN1_BASE) | V_BIR(0) |
	    V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));

	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
	    (bar0 + MEMWIN2_BASE) | V_BIR(0) |
	    V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}

static int
cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq)
{
	int rc, itype, navail, nrxq10g, nrxq1g, n;
	int nofldrxq10g = 0, nofldrxq1g = 0;

	bzero(iaq, sizeof(*iaq));

	iaq->ntxq10g = t4_ntxq10g;
	iaq->ntxq1g = t4_ntxq1g;
	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		iaq->nofldtxq10g = t4_nofldtxq10g;
		iaq->nofldtxq1g = t4_nofldtxq1g;
		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
	}
#endif

	for (itype = INTR_MSIX; itype; itype >>= 1) {

		if ((itype & t4_intr_types) == 0)
			continue;	/* not allowed */

		if (itype == INTR_MSIX)
			navail = pci_msix_count(sc->dev);
		else if (itype == INTR_MSI)
			navail = pci_msi_count(sc->dev);
		else
			navail = 1;
restart:
		if (navail == 0)
			continue;

		iaq->intr_type = itype;
		iaq->intr_flags = 0;

		/*
		 * Best option: an interrupt vector for errors, one for the
		 * firmware event queue, and one for each rxq (NIC as well as
		 * offload).
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
			iaq->intr_flags |= INTR_DIRECT;
			goto allocate;
		}

		/*
		 * Second best option: an interrupt vector for errors, one for
		 * the firmware event queue, and one each for either the NIC or
		 * the offload rxq's.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
		iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq)))
			goto allocate;

		/*
		 * Next best option: an interrupt vector for errors, one for
		 * the firmware event queue, and at least one per port.  At
		 * this point we know we'll have to downsize nrxq or nofldrxq
		 * to fit what's available to us.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g + n1g;
		if (iaq->nirq <= navail) {
			int leftover = navail - iaq->nirq;

			if (n10g > 0) {
				int target = max(nrxq10g, nofldrxq10g);

				n = 1;
				while (n < target && leftover >= n10g) {
					leftover -= n10g;
					iaq->nirq += n10g;
					n++;
				}
				iaq->nrxq10g = min(n, nrxq10g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq10g = min(n, nofldrxq10g);
#endif
			}

			if (n1g > 0) {
				int target = max(nrxq1g, nofldrxq1g);

				n = 1;
				while (n < target && leftover >= n1g) {
					leftover -= n1g;
					iaq->nirq += n1g;
					n++;
				}
				iaq->nrxq1g = min(n, nrxq1g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq1g = min(n, nofldrxq1g);
#endif
			}

			if (itype != INTR_MSI || powerof2(iaq->nirq))
				goto allocate;
		}

		/*
		 * Least desirable option: one interrupt vector for everything.
		 */
		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
#ifdef TCP_OFFLOAD
		if (is_offload(sc))
			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
#endif

allocate:
		navail = iaq->nirq;
		rc = 0;
		if (itype == INTR_MSIX)
			rc = pci_alloc_msix(sc->dev, &navail);
		else if (itype == INTR_MSI)
			rc = pci_alloc_msi(sc->dev, &navail);

		if (rc == 0) {
			if (navail == iaq->nirq)
				return (0);

			/*
			 * Didn't get the number requested.  Use whatever number
			 * the kernel is willing to allocate (it's in navail).
			 */
			device_printf(sc->dev, "fewer vectors than requested, "
			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
			    itype, iaq->nirq, navail);
			pci_release_msi(sc->dev);
			goto restart;
		}

		device_printf(sc->dev,
		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
		    itype, rc, iaq->nirq, navail);
	}

	device_printf(sc->dev,
	    "failed to find a usable interrupt type.  "
	    "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));

	return (ENXIO);
}

/*
 * Install a compatible firmware (if required), establish contact with it (by
 * saying hello), and reset the device.  If we end up as the master driver,
 * partition adapter resources by providing a configuration file to the
 * firmware.
 */
static int
prep_firmware(struct adapter *sc)
{
	const struct firmware *fw = NULL, *cfg = NULL, *default_cfg;
	int rc;
	enum dev_state state;

	default_cfg = firmware_get(T4_CFGNAME);

	/* Check firmware version and install a different one if necessary */
	rc = t4_check_fw_version(sc);
	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
	if (rc != 0) {
		uint32_t v = 0;

		fw = firmware_get(T4_FWNAME);
		if (fw != NULL) {
			const struct fw_hdr *hdr = (const void *)fw->data;

			v = ntohl(hdr->fw_ver);

			/*
			 * The firmware module will not be used if it isn't the
			 * same major version as what the driver was compiled
			 * with.
			 */
			if (G_FW_HDR_FW_VER_MAJOR(v) != FW_VERSION_MAJOR) {
				device_printf(sc->dev,
				    "Found firmware image but version %d "
				    "can not be used with this driver (%d)\n",
				    G_FW_HDR_FW_VER_MAJOR(v), FW_VERSION_MAJOR);

				firmware_put(fw, FIRMWARE_UNLOAD);
				fw = NULL;
			}
		}

		if (fw == NULL && rc < 0) {
			device_printf(sc->dev, "No usable firmware. "
			    "card has %d.%d.%d, driver compiled with %d.%d.%d",
			    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
			    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
			    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
			    FW_VERSION_MAJOR, FW_VERSION_MINOR,
			    FW_VERSION_MICRO);
			rc = EAGAIN;
			goto done;
		}

		/*
		 * Always upgrade, even for minor/micro/build mismatches.
		 * Downgrade only for a major version mismatch or if
		 * force_firmware_install was specified.
		 */
		if (fw != NULL && (rc < 0 || v > sc->params.fw_vers)) {
			device_printf(sc->dev,
			    "installing firmware %d.%d.%d.%d on card.\n",
			    G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
			    G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v));

			rc = -t4_load_fw(sc, fw->data, fw->datasize);
			if (rc != 0) {
				device_printf(sc->dev,
				    "failed to install firmware: %d\n", rc);
				goto done;
			} else {
				/* refresh */
				(void) t4_check_fw_version(sc);
				snprintf(sc->fw_version,
				    sizeof(sc->fw_version), "%u.%u.%u.%u",
				    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
				    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
				    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
				    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
			}
		}
	}

	/* Contact firmware. */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
	if (rc < 0) {
		rc = -rc;
		device_printf(sc->dev,
		    "failed to connect to the firmware: %d.\n", rc);
		goto done;
	}
	if (rc == sc->mbox)
		sc->flags |= MASTER_PF;

	/* Reset device */
	rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
	if (rc != 0) {
		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
		if (rc != ETIMEDOUT && rc != EIO)
			t4_fw_bye(sc, sc->mbox);
		goto done;
	}

	/* Partition adapter resources as specified in the config file. */
	if (sc->flags & MASTER_PF) {
		if (strncmp(t4_cfg_file, "default", sizeof(t4_cfg_file))) {
			char s[32];

			snprintf(s, sizeof(s), "t4fw_cfg_%s", t4_cfg_file);
			cfg = firmware_get(s);
			if (cfg == NULL) {
				device_printf(sc->dev,
				    "unable to locate %s module, "
				    "will use default config file.\n", s);
			}
		}

		rc = partition_resources(sc, cfg ? cfg : default_cfg);
		if (rc != 0)
			goto done;	/* error message displayed already */
	}

	sc->flags |= FW_OK;

done:
	if (fw != NULL)
		firmware_put(fw, FIRMWARE_UNLOAD);
	if (cfg != NULL)
		firmware_put(cfg, FIRMWARE_UNLOAD);
	if (default_cfg != NULL)
		firmware_put(default_cfg, FIRMWARE_UNLOAD);

	return (rc);
}

#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
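
/*
 * These macros build the identifiers passed to t4_query_params(); e.g.
 * FW_PARAM_DEV(PORTVEC) names the device-wide port vector parameter that
 * get_params__pre_init() queries below.
 */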

/*
 * Upload configuration file to card's memory.
 */
static int
upload_config_file(struct adapter *sc, const struct firmware *fw, uint32_t *mt,
    uint32_t *ma)
{
	int rc, i;
	uint32_t param, val, mtype, maddr, bar, off, win, remaining;
	const uint32_t *b;

	/* Figure out where the firmware wants us to upload it. */
	param = FW_PARAM_DEV(CF);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc != 0) {
		/* Firmwares without config file support will fail this way */
		device_printf(sc->dev,
		    "failed to query config file location: %d.\n", rc);
		return (rc);
	}
	*mt = mtype = G_FW_PARAMS_PARAM_Y(val);
	*ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;

	if (maddr & 3) {
		device_printf(sc->dev,
		    "cannot upload config file (type %u, addr %x).\n",
		    mtype, maddr);
		return (EFAULT);
	}

	/* Translate mtype/maddr to an address suitable for the PCIe window */
	val = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	val &= F_EDRAM0_ENABLE | F_EDRAM1_ENABLE | F_EXT_MEM_ENABLE;
	switch (mtype) {
	case FW_MEMTYPE_CF_EDC0:
		if (!(val & F_EDRAM0_ENABLE))
			goto err;
		bar = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr += G_EDRAM0_BASE(bar) << 20;
		break;

	case FW_MEMTYPE_CF_EDC1:
		if (!(val & F_EDRAM1_ENABLE))
			goto err;
		bar = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr += G_EDRAM1_BASE(bar) << 20;
		break;

	case FW_MEMTYPE_CF_EXTMEM:
		if (!(val & F_EXT_MEM_ENABLE))
			goto err;
		bar = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr += G_EXT_MEM_BASE(bar) << 20;
		break;

	default:
err:
		device_printf(sc->dev,
		    "cannot upload config file (type %u, enabled %u).\n",
		    mtype, val);
		return (EFAULT);
	}

	/*
	 * Position the PCIe window (we use memwin2) to the 16B aligned area
	 * just at/before the upload location.
	 */
	win = maddr & ~0xf;
	off = maddr - win;	/* offset from the start of the window. */
	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));

	remaining = fw->datasize;
	if (remaining > FLASH_CFG_MAX_SIZE ||
	    remaining > MEMWIN2_APERTURE - off) {
		device_printf(sc->dev, "cannot upload config file all at once "
		    "(size %u, max %u, room %u).\n",
		    remaining, FLASH_CFG_MAX_SIZE, MEMWIN2_APERTURE - off);
		return (EFBIG);
	}

	/*
	 * XXX: sheer laziness.  We deliberately added 4 bytes of useless
	 * stuffing/comments at the end of the config file so it's ok to simply
	 * throw away the last remaining bytes when the config file is not an
	 * exact multiple of 4.
	 */
	b = fw->data;
	for (i = 0; remaining >= 4; i += 4, remaining -= 4)
		t4_write_reg(sc, MEMWIN2_BASE + off + i, *b++);

	return (rc);
}

/*
 * Partition chip resources for use between various PFs, VFs, etc.  This is
 * done by uploading the firmware configuration file to the adapter and
 * instructing the firmware to process it.
 */
static int
partition_resources(struct adapter *sc, const struct firmware *cfg)
{
	int rc;
	struct fw_caps_config_cmd caps;
	uint32_t mtype, maddr, finicsum, cfcsum;

	rc = cfg ? upload_config_file(sc, cfg, &mtype, &maddr) : ENOENT;
	if (rc != 0) {
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(sc);
	}

	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to pre-process config file: %d.\n", rc);
		return (rc);
	}

	finicsum = be32toh(caps.finicsum);
	cfcsum = be32toh(caps.cfcsum);
	if (finicsum != cfcsum) {
		device_printf(sc->dev,
		    "WARNING: config file checksum mismatch: %08x %08x\n",
		    finicsum, cfcsum);
	}
	sc->cfcsum = cfcsum;

#define LIMIT_CAPS(x) do { \
	caps.x &= htobe16(t4_##x##_allowed); \
	sc->x = htobe16(caps.x); \
} while (0)

	/*
	 * Let the firmware know what features will (not) be used so it can
	 * tune things accordingly.
	 */
	LIMIT_CAPS(linkcaps);
	LIMIT_CAPS(niccaps);
	LIMIT_CAPS(toecaps);
	LIMIT_CAPS(rdmacaps);
	LIMIT_CAPS(iscsicaps);
	LIMIT_CAPS(fcoecaps);
#undef LIMIT_CAPS

	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to process config file: %d.\n", rc);
		return (rc);
	}

	return (0);
}

/*
 * Retrieve parameters that are needed (or nice to have) prior to calling
 * t4_sge_init and t4_fw_initialize.
 */
static int
get_params__pre_init(struct adapter *sc)
{
	int rc;
	uint32_t param[2], val[2];
	struct fw_devlog_cmd cmd;
	struct devlog_params *dlog = &sc->params.devlog;

	param[0] = FW_PARAM_DEV(PORTVEC);
	param[1] = FW_PARAM_DEV(CCLK);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (pre_init): %d.\n", rc);
		return (rc);
	}

	sc->params.portvec = val[0];
	sc->params.nports = 0;
	/* Count the ports: one per bit set in the port vector. */
	while (val[0]) {
		sc->params.nports++;
		val[0] &= val[0] - 1;
	}

	sc->params.vpd.cclk = val[1];

	/* Read device log parameters. */
	bzero(&cmd, sizeof(cmd));
	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get devlog parameters: %d.\n", rc);
		bzero(dlog, sizeof (*dlog));
		rc = 0;	/* devlog isn't critical for device operation */
	} else {
		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
		dlog->size = be32toh(cmd.memsize_devlog);
	}

	return (rc);
}

/*
 * Retrieve various parameters that are of interest to the driver.  The device
 * has been initialized by the firmware at this point.
 */
static int
get_params__post_init(struct adapter *sc)
{
	int rc;
	uint32_t param[7], val[7];
	struct fw_caps_config_cmd caps;

	param[0] = FW_PARAM_PFVF(IQFLINT_START);
	param[1] = FW_PARAM_PFVF(EQ_START);
	param[2] = FW_PARAM_PFVF(FILTER_START);
	param[3] = FW_PARAM_PFVF(FILTER_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (post_init): %d.\n", rc);
		return (rc);
	}

	sc->sge.iq_start = val[0];
	sc->sge.eq_start = val[1];
	sc->tids.ftid_base = val[2];
	sc->tids.nftids = val[3] - val[2] + 1;

	/* get capabilities */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get card capabilities: %d.\n", rc);
		return (rc);
	}

	if (caps.toecaps) {
		/* query offload-related parameters */
		param[0] = FW_PARAM_DEV(NTID);
		param[1] = FW_PARAM_PFVF(SERVER_START);
		param[2] = FW_PARAM_PFVF(SERVER_END);
		param[3] = FW_PARAM_PFVF(TDDP_START);
		param[4] = FW_PARAM_PFVF(TDDP_END);
		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query TOE parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.ntids = val[0];
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		sc->tids.stid_base = val[1];
		sc->tids.nstids = val[2] - val[1] + 1;
		sc->vres.ddp.start = val[3];
		sc->vres.ddp.size = val[4] - val[3] + 1;
		sc->params.ofldq_wr_cred = val[5];
		sc->params.offload = 1;
	}
	if (caps.rdmacaps) {
		param[0] = FW_PARAM_PFVF(STAG_START);
		param[1] = FW_PARAM_PFVF(STAG_END);
		param[2] = FW_PARAM_PFVF(RQ_START);
		param[3] = FW_PARAM_PFVF(RQ_END);
		param[4] = FW_PARAM_PFVF(PBL_START);
		param[5] = FW_PARAM_PFVF(PBL_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters(1): %d.\n", rc);
			return (rc);
		}
		sc->vres.stag.start = val[0];
		sc->vres.stag.size = val[1] - val[0] + 1;
		sc->vres.rq.start = val[2];
		sc->vres.rq.size = val[3] - val[2] + 1;
		sc->vres.pbl.start = val[4];
		sc->vres.pbl.size = val[5] - val[4] + 1;

		param[0] = FW_PARAM_PFVF(SQRQ_START);
		param[1] = FW_PARAM_PFVF(SQRQ_END);
		param[2] = FW_PARAM_PFVF(CQ_START);
		param[3] = FW_PARAM_PFVF(CQ_END);
		param[4] = FW_PARAM_PFVF(OCQ_START);
		param[5] = FW_PARAM_PFVF(OCQ_END);
		rc = -t4_query_params(sc, 0, 0, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters(2): %d.\n", rc);
			return (rc);
		}
		sc->vres.qp.start = val[0];
		sc->vres.qp.size = val[1] - val[0] + 1;
		sc->vres.cq.start = val[2];
		sc->vres.cq.size = val[3] - val[2] + 1;
		sc->vres.ocq.start = val[4];
		sc->vres.ocq.size = val[5] - val[4] + 1;
	}
	if (caps.iscsicaps) {
		param[0] = FW_PARAM_PFVF(ISCSI_START);
		param[1] = FW_PARAM_PFVF(ISCSI_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
		if (rc != 0) {
device_printf(sc->dev, 1978 "failed to query iSCSI parameters: %d.\n", rc); 1979 return (rc); 1980 } 1981 sc->vres.iscsi.start = val[0]; 1982 sc->vres.iscsi.size = val[1] - val[0] + 1; 1983 } 1984 1985 /* These are finalized by FW initialization, load their values now */ 1986 val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 1987 sc->params.tp.tre = G_TIMERRESOLUTION(val[0]); 1988 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]); 1989 t4_read_mtu_tbl(sc, sc->params.mtus, NULL); 1990 1991 return (rc); 1992 } 1993 1994 #undef FW_PARAM_PFVF 1995 #undef FW_PARAM_DEV 1996 1997 static void 1998 t4_set_desc(struct adapter *sc) 1999 { 2000 char buf[128]; 2001 struct adapter_params *p = &sc->params; 2002 2003 snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, E/C:%s", 2004 p->vpd.id, is_offload(sc) ? "R" : "", p->rev, p->vpd.sn, p->vpd.ec); 2005 2006 device_set_desc_copy(sc->dev, buf); 2007 } 2008 2009 static void 2010 build_medialist(struct port_info *pi) 2011 { 2012 struct ifmedia *media = &pi->media; 2013 int data, m; 2014 2015 PORT_LOCK(pi); 2016 2017 ifmedia_removeall(media); 2018 2019 m = IFM_ETHER | IFM_FDX; 2020 data = (pi->port_type << 8) | pi->mod_type; 2021 2022 switch(pi->port_type) { 2023 case FW_PORT_TYPE_BT_XFI: 2024 ifmedia_add(media, m | IFM_10G_T, data, NULL); 2025 break; 2026 2027 case FW_PORT_TYPE_BT_XAUI: 2028 ifmedia_add(media, m | IFM_10G_T, data, NULL); 2029 /* fall through */ 2030 2031 case FW_PORT_TYPE_BT_SGMII: 2032 ifmedia_add(media, m | IFM_1000_T, data, NULL); 2033 ifmedia_add(media, m | IFM_100_TX, data, NULL); 2034 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL); 2035 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 2036 break; 2037 2038 case FW_PORT_TYPE_CX4: 2039 ifmedia_add(media, m | IFM_10G_CX4, data, NULL); 2040 ifmedia_set(media, m | IFM_10G_CX4); 2041 break; 2042 2043 case FW_PORT_TYPE_SFP: 2044 case FW_PORT_TYPE_FIBER_XFI: 2045 case FW_PORT_TYPE_FIBER_XAUI: 2046 switch (pi->mod_type) { 2047 2048 case FW_PORT_MOD_TYPE_LR: 2049 ifmedia_add(media, m | IFM_10G_LR, data, NULL); 2050 ifmedia_set(media, m | IFM_10G_LR); 2051 break; 2052 2053 case FW_PORT_MOD_TYPE_SR: 2054 ifmedia_add(media, m | IFM_10G_SR, data, NULL); 2055 ifmedia_set(media, m | IFM_10G_SR); 2056 break; 2057 2058 case FW_PORT_MOD_TYPE_LRM: 2059 ifmedia_add(media, m | IFM_10G_LRM, data, NULL); 2060 ifmedia_set(media, m | IFM_10G_LRM); 2061 break; 2062 2063 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 2064 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 2065 ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL); 2066 ifmedia_set(media, m | IFM_10G_TWINAX); 2067 break; 2068 2069 case FW_PORT_MOD_TYPE_NONE: 2070 m &= ~IFM_FDX; 2071 ifmedia_add(media, m | IFM_NONE, data, NULL); 2072 ifmedia_set(media, m | IFM_NONE); 2073 break; 2074 2075 case FW_PORT_MOD_TYPE_NA: 2076 case FW_PORT_MOD_TYPE_ER: 2077 default: 2078 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 2079 ifmedia_set(media, m | IFM_UNKNOWN); 2080 break; 2081 } 2082 break; 2083 2084 case FW_PORT_TYPE_KX4: 2085 case FW_PORT_TYPE_KX: 2086 case FW_PORT_TYPE_KR: 2087 default: 2088 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 2089 ifmedia_set(media, m | IFM_UNKNOWN); 2090 break; 2091 } 2092 2093 PORT_UNLOCK(pi); 2094 } 2095 2096 #define FW_MAC_EXACT_CHUNK 7 2097 2098 /* 2099 * Program the port's XGMAC based on parameters in ifnet. The caller also 2100 * indicates which parameters should be programmed (the rest are left alone). 
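 *
 * A minimal sketch of a caller that changed only the MTU (illustrative; the
 * real callers, e.g. cxgbe_init_synchronized(), use XGMAC_ALL):
 *
 *	PORT_LOCK(pi);
 *	rc = update_mac_settings(pi, XGMAC_MTU);
 *	PORT_UNLOCK(pi);
 *
 * The port lock is mandatory; see the PORT_LOCK_ASSERT_OWNED in the function.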
2101 */ 2102 static int 2103 update_mac_settings(struct port_info *pi, int flags) 2104 { 2105 int rc; 2106 struct ifnet *ifp = pi->ifp; 2107 struct adapter *sc = pi->adapter; 2108 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 2109 2110 PORT_LOCK_ASSERT_OWNED(pi); 2111 KASSERT(flags, ("%s: not told what to update.", __func__)); 2112 2113 if (flags & XGMAC_MTU) 2114 mtu = ifp->if_mtu; 2115 2116 if (flags & XGMAC_PROMISC) 2117 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 2118 2119 if (flags & XGMAC_ALLMULTI) 2120 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 2121 2122 if (flags & XGMAC_VLANEX) 2123 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; 2124 2125 rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1, 2126 vlanex, false); 2127 if (rc) { 2128 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc); 2129 return (rc); 2130 } 2131 2132 if (flags & XGMAC_UCADDR) { 2133 uint8_t ucaddr[ETHER_ADDR_LEN]; 2134 2135 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 2136 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt, 2137 ucaddr, true, true); 2138 if (rc < 0) { 2139 rc = -rc; 2140 if_printf(ifp, "change_mac failed: %d\n", rc); 2141 return (rc); 2142 } else { 2143 pi->xact_addr_filt = rc; 2144 rc = 0; 2145 } 2146 } 2147 2148 if (flags & XGMAC_MCADDRS) { 2149 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; 2150 int del = 1; 2151 uint64_t hash = 0; 2152 struct ifmultiaddr *ifma; 2153 int i = 0, j; 2154 2155 if_maddr_rlock(ifp); 2156 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2157 if (ifma->ifma_addr->sa_family != AF_LINK) 2158 continue; 2159 mcaddr[i++] = 2160 LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 2161 2162 if (i == FW_MAC_EXACT_CHUNK) { 2163 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid, 2164 del, i, mcaddr, NULL, &hash, 0); 2165 if (rc < 0) { 2166 rc = -rc; 2167 for (j = 0; j < i; j++) { 2168 if_printf(ifp, 2169 "failed to add mc address" 2170 " %02x:%02x:%02x:" 2171 "%02x:%02x:%02x rc=%d\n", 2172 mcaddr[j][0], mcaddr[j][1], 2173 mcaddr[j][2], mcaddr[j][3], 2174 mcaddr[j][4], mcaddr[j][5], 2175 rc); 2176 } 2177 goto mcfail; 2178 } 2179 del = 0; 2180 i = 0; 2181 } 2182 } 2183 if (i > 0) { 2184 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid, 2185 del, i, mcaddr, NULL, &hash, 0); 2186 if (rc < 0) { 2187 rc = -rc; 2188 for (j = 0; j < i; j++) { 2189 if_printf(ifp, 2190 "failed to add mc address" 2191 " %02x:%02x:%02x:" 2192 "%02x:%02x:%02x rc=%d\n", 2193 mcaddr[j][0], mcaddr[j][1], 2194 mcaddr[j][2], mcaddr[j][3], 2195 mcaddr[j][4], mcaddr[j][5], 2196 rc); 2197 } 2198 goto mcfail; 2199 } 2200 } 2201 2202 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0); 2203 if (rc != 0) 2204 if_printf(ifp, "failed to set mc address hash: %d", rc); 2205 mcfail: 2206 if_maddr_runlock(ifp); 2207 } 2208 2209 return (rc); 2210 } 2211 2212 static int 2213 cxgbe_init_locked(struct port_info *pi) 2214 { 2215 struct adapter *sc = pi->adapter; 2216 int rc = 0; 2217 2218 ADAPTER_LOCK_ASSERT_OWNED(sc); 2219 2220 while (!IS_DOOMED(pi) && IS_BUSY(sc)) { 2221 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4init", 0)) { 2222 rc = EINTR; 2223 goto done; 2224 } 2225 } 2226 if (IS_DOOMED(pi)) { 2227 rc = ENXIO; 2228 goto done; 2229 } 2230 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 2231 2232 /* Give up the adapter lock, port init code can sleep. 
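	 * SET_BUSY/CLR_BUSY and the mtx_sleep() on &sc->flags above act as a
	 * hand-rolled sleepable lock: only one thread runs the init path at a
	 * time, and wakeup_one() below hands the adapter to the next waiter.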
*/ 2233 SET_BUSY(sc); 2234 ADAPTER_UNLOCK(sc); 2235 2236 rc = cxgbe_init_synchronized(pi); 2237 2238 done: 2239 ADAPTER_LOCK(sc); 2240 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 2241 CLR_BUSY(sc); 2242 wakeup_one(&sc->flags); 2243 ADAPTER_UNLOCK(sc); 2244 return (rc); 2245 } 2246 2247 static int 2248 cxgbe_init_synchronized(struct port_info *pi) 2249 { 2250 struct adapter *sc = pi->adapter; 2251 struct ifnet *ifp = pi->ifp; 2252 int rc = 0; 2253 2254 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2255 2256 if (isset(&sc->open_device_map, pi->port_id)) { 2257 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, 2258 ("mismatch between open_device_map and if_drv_flags")); 2259 return (0); /* already running */ 2260 } 2261 2262 if (!(sc->flags & FULL_INIT_DONE) && 2263 ((rc = adapter_full_init(sc)) != 0)) 2264 return (rc); /* error message displayed already */ 2265 2266 if (!(pi->flags & PORT_INIT_DONE) && 2267 ((rc = port_full_init(pi)) != 0)) 2268 return (rc); /* error message displayed already */ 2269 2270 PORT_LOCK(pi); 2271 rc = update_mac_settings(pi, XGMAC_ALL); 2272 PORT_UNLOCK(pi); 2273 if (rc) 2274 goto done; /* error message displayed already */ 2275 2276 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg); 2277 if (rc != 0) { 2278 if_printf(ifp, "start_link failed: %d\n", rc); 2279 goto done; 2280 } 2281 2282 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true); 2283 if (rc != 0) { 2284 if_printf(ifp, "enable_vi failed: %d\n", rc); 2285 goto done; 2286 } 2287 2288 /* all ok */ 2289 setbit(&sc->open_device_map, pi->port_id); 2290 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2291 2292 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 2293 done: 2294 if (rc != 0) 2295 cxgbe_uninit_synchronized(pi); 2296 2297 return (rc); 2298 } 2299 2300 static int 2301 cxgbe_uninit_locked(struct port_info *pi) 2302 { 2303 struct adapter *sc = pi->adapter; 2304 int rc; 2305 2306 ADAPTER_LOCK_ASSERT_OWNED(sc); 2307 2308 while (!IS_DOOMED(pi) && IS_BUSY(sc)) { 2309 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4uninit", 0)) { 2310 rc = EINTR; 2311 goto done; 2312 } 2313 } 2314 if (IS_DOOMED(pi)) { 2315 rc = ENXIO; 2316 goto done; 2317 } 2318 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 2319 SET_BUSY(sc); 2320 ADAPTER_UNLOCK(sc); 2321 2322 rc = cxgbe_uninit_synchronized(pi); 2323 2324 ADAPTER_LOCK(sc); 2325 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 2326 CLR_BUSY(sc); 2327 wakeup_one(&sc->flags); 2328 done: 2329 ADAPTER_UNLOCK(sc); 2330 return (rc); 2331 } 2332 2333 /* 2334 * Idempotent. 2335 */ 2336 static int 2337 cxgbe_uninit_synchronized(struct port_info *pi) 2338 { 2339 struct adapter *sc = pi->adapter; 2340 struct ifnet *ifp = pi->ifp; 2341 int rc; 2342 2343 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2344 2345 /* 2346 * Disable the VI so that all its data in either direction is discarded 2347 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz 2348 * tick) intact as the TP can deliver negative advice or data that it's 2349 * holding in its RAM (for an offloaded connection) even after the VI is 2350 * disabled. 
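	 *
	 * Keeping this function idempotent (see the comment above) also lets
	 * cxgbe_init_synchronized() call it to unwind a partially completed
	 * init.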
2351 */ 2352 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false); 2353 if (rc) { 2354 if_printf(ifp, "disable_vi failed: %d\n", rc); 2355 return (rc); 2356 } 2357 2358 clrbit(&sc->open_device_map, pi->port_id); 2359 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2360 2361 pi->link_cfg.link_ok = 0; 2362 pi->link_cfg.speed = 0; 2363 t4_os_link_changed(sc, pi->port_id, 0); 2364 2365 return (0); 2366 } 2367 2368 #define T4_ALLOC_IRQ(sc, irq, rid, handler, arg, name) do { \ 2369 rc = t4_alloc_irq(sc, irq, rid, handler, arg, name); \ 2370 if (rc != 0) \ 2371 goto done; \ 2372 } while (0) 2373 2374 static int 2375 adapter_full_init(struct adapter *sc) 2376 { 2377 int rc, i, rid, p, q; 2378 char s[8]; 2379 struct irq *irq; 2380 struct port_info *pi; 2381 struct sge_rxq *rxq; 2382 #ifdef TCP_OFFLOAD 2383 struct sge_ofld_rxq *ofld_rxq; 2384 #endif 2385 2386 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2387 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 2388 ("%s: FULL_INIT_DONE already", __func__)); 2389 2390 /* 2391 * queues that belong to the adapter (not any particular port). 2392 */ 2393 rc = t4_setup_adapter_queues(sc); 2394 if (rc != 0) 2395 goto done; 2396 2397 for (i = 0; i < ARRAY_SIZE(sc->tq); i++) { 2398 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 2399 taskqueue_thread_enqueue, &sc->tq[i]); 2400 if (sc->tq[i] == NULL) { 2401 device_printf(sc->dev, 2402 "failed to allocate task queue %d\n", i); 2403 rc = ENOMEM; 2404 goto done; 2405 } 2406 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 2407 device_get_nameunit(sc->dev), i); 2408 } 2409 2410 /* 2411 * Setup interrupts. 2412 */ 2413 irq = &sc->irq[0]; 2414 rid = sc->intr_type == INTR_INTX ? 0 : 1; 2415 if (sc->intr_count == 1) { 2416 KASSERT(!(sc->flags & INTR_DIRECT), 2417 ("%s: single interrupt && INTR_DIRECT?", __func__)); 2418 2419 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_all, sc, "all"); 2420 } else { 2421 /* Multiple interrupts. */ 2422 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 2423 ("%s: too few intr.", __func__)); 2424 2425 /* The first one is always error intr */ 2426 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_err, sc, "err"); 2427 irq++; 2428 rid++; 2429 2430 /* The second one is always the firmware event queue */ 2431 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt"); 2432 irq++; 2433 rid++; 2434 2435 /* 2436 * Note that if INTR_DIRECT is not set then either the NIC rx 2437 * queues or (exclusive or) the TOE rx queueus will be taking 2438 * direct interrupts. 2439 * 2440 * There is no need to check for is_offload(sc) as nofldrxq 2441 * will be 0 if offload is disabled. 2442 */ 2443 for_each_port(sc, p) { 2444 pi = sc->port[p]; 2445 2446 #ifdef TCP_OFFLOAD 2447 /* 2448 * Skip over the NIC queues if they aren't taking direct 2449 * interrupts. 2450 */ 2451 if (!(sc->flags & INTR_DIRECT) && 2452 pi->nofldrxq > pi->nrxq) 2453 goto ofld_queues; 2454 #endif 2455 rxq = &sc->sge.rxq[pi->first_rxq]; 2456 for (q = 0; q < pi->nrxq; q++, rxq++) { 2457 snprintf(s, sizeof(s), "%d.%d", p, q); 2458 T4_ALLOC_IRQ(sc, irq, rid, t4_intr, rxq, s); 2459 irq++; 2460 rid++; 2461 } 2462 2463 #ifdef TCP_OFFLOAD 2464 /* 2465 * Skip over the offload queues if they aren't taking 2466 * direct interrupts. 
2467 */ 2468 if (!(sc->flags & INTR_DIRECT)) 2469 continue; 2470 ofld_queues: 2471 ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq]; 2472 for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) { 2473 snprintf(s, sizeof(s), "%d,%d", p, q); 2474 T4_ALLOC_IRQ(sc, irq, rid, t4_intr, ofld_rxq, s); 2475 irq++; 2476 rid++; 2477 } 2478 #endif 2479 } 2480 } 2481 2482 t4_intr_enable(sc); 2483 sc->flags |= FULL_INIT_DONE; 2484 done: 2485 if (rc != 0) 2486 adapter_full_uninit(sc); 2487 2488 return (rc); 2489 } 2490 #undef T4_ALLOC_IRQ 2491 2492 static int 2493 adapter_full_uninit(struct adapter *sc) 2494 { 2495 int i; 2496 2497 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2498 2499 t4_teardown_adapter_queues(sc); 2500 2501 for (i = 0; i < sc->intr_count; i++) 2502 t4_free_irq(sc, &sc->irq[i]); 2503 2504 for (i = 0; i < ARRAY_SIZE(sc->tq) && sc->tq[i]; i++) { 2505 taskqueue_free(sc->tq[i]); 2506 sc->tq[i] = NULL; 2507 } 2508 2509 sc->flags &= ~FULL_INIT_DONE; 2510 2511 return (0); 2512 } 2513 2514 static int 2515 port_full_init(struct port_info *pi) 2516 { 2517 struct adapter *sc = pi->adapter; 2518 struct ifnet *ifp = pi->ifp; 2519 uint16_t *rss; 2520 struct sge_rxq *rxq; 2521 int rc, i; 2522 2523 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2524 KASSERT((pi->flags & PORT_INIT_DONE) == 0, 2525 ("%s: PORT_INIT_DONE already", __func__)); 2526 2527 sysctl_ctx_init(&pi->ctx); 2528 pi->flags |= PORT_SYSCTL_CTX; 2529 2530 /* 2531 * Allocate tx/rx/fl queues for this port. 2532 */ 2533 rc = t4_setup_port_queues(pi); 2534 if (rc != 0) 2535 goto done; /* error message displayed already */ 2536 2537 /* 2538 * Setup RSS for this port. 2539 */ 2540 rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE, 2541 M_ZERO | M_WAITOK); 2542 for_each_rxq(pi, i, rxq) { 2543 rss[i] = rxq->iq.abs_id; 2544 } 2545 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, 2546 pi->rss_size, rss, pi->nrxq); 2547 free(rss, M_CXGBE); 2548 if (rc != 0) { 2549 if_printf(ifp, "rss_config failed: %d\n", rc); 2550 goto done; 2551 } 2552 2553 pi->flags |= PORT_INIT_DONE; 2554 done: 2555 if (rc != 0) 2556 port_full_uninit(pi); 2557 2558 return (rc); 2559 } 2560 2561 /* 2562 * Idempotent. 2563 */ 2564 static int 2565 port_full_uninit(struct port_info *pi) 2566 { 2567 struct adapter *sc = pi->adapter; 2568 int i; 2569 struct sge_rxq *rxq; 2570 struct sge_txq *txq; 2571 #ifdef TCP_OFFLOAD 2572 struct sge_ofld_rxq *ofld_rxq; 2573 struct sge_wrq *ofld_txq; 2574 #endif 2575 2576 if (pi->flags & PORT_INIT_DONE) { 2577 2578 /* Need to quiesce queues. XXX: ctrl queues? */ 2579 2580 for_each_txq(pi, i, txq) { 2581 quiesce_eq(sc, &txq->eq); 2582 } 2583 2584 #ifdef TCP_OFFLOAD 2585 for_each_ofld_txq(pi, i, ofld_txq) { 2586 quiesce_eq(sc, &ofld_txq->eq); 2587 } 2588 #endif 2589 2590 for_each_rxq(pi, i, rxq) { 2591 quiesce_iq(sc, &rxq->iq); 2592 quiesce_fl(sc, &rxq->fl); 2593 } 2594 2595 #ifdef TCP_OFFLOAD 2596 for_each_ofld_rxq(pi, i, ofld_rxq) { 2597 quiesce_iq(sc, &ofld_rxq->iq); 2598 quiesce_fl(sc, &ofld_rxq->fl); 2599 } 2600 #endif 2601 } 2602 2603 t4_teardown_port_queues(pi); 2604 pi->flags &= ~PORT_INIT_DONE; 2605 2606 return (0); 2607 } 2608 2609 static void 2610 quiesce_eq(struct adapter *sc, struct sge_eq *eq) 2611 { 2612 EQ_LOCK(eq); 2613 eq->flags |= EQ_DOOMED; 2614 2615 /* 2616 * Wait for the response to a credit flush if one's 2617 * pending. 
2618 */ 2619 while (eq->flags & EQ_CRFLUSHED) 2620 mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0); 2621 EQ_UNLOCK(eq); 2622 2623 callout_drain(&eq->tx_callout); /* XXX: iffy */ 2624 pause("callout", 10); /* Still iffy */ 2625 2626 taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task); 2627 } 2628 2629 static void 2630 quiesce_iq(struct adapter *sc, struct sge_iq *iq) 2631 { 2632 (void) sc; /* unused */ 2633 2634 /* Synchronize with the interrupt handler */ 2635 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 2636 pause("iqfree", 1); 2637 } 2638 2639 static void 2640 quiesce_fl(struct adapter *sc, struct sge_fl *fl) 2641 { 2642 mtx_lock(&sc->sfl_lock); 2643 FL_LOCK(fl); 2644 fl->flags |= FL_DOOMED; 2645 FL_UNLOCK(fl); 2646 mtx_unlock(&sc->sfl_lock); 2647 2648 callout_drain(&sc->sfl_callout); 2649 KASSERT((fl->flags & FL_STARVING) == 0, 2650 ("%s: still starving", __func__)); 2651 } 2652 2653 static int 2654 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 2655 driver_intr_t *handler, void *arg, char *name) 2656 { 2657 int rc; 2658 2659 irq->rid = rid; 2660 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 2661 RF_SHAREABLE | RF_ACTIVE); 2662 if (irq->res == NULL) { 2663 device_printf(sc->dev, 2664 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 2665 return (ENOMEM); 2666 } 2667 2668 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 2669 NULL, handler, arg, &irq->tag); 2670 if (rc != 0) { 2671 device_printf(sc->dev, 2672 "failed to setup interrupt for rid %d, name %s: %d\n", 2673 rid, name, rc); 2674 } else if (name) 2675 bus_describe_intr(sc->dev, irq->res, irq->tag, name); 2676 2677 return (rc); 2678 } 2679 2680 static int 2681 t4_free_irq(struct adapter *sc, struct irq *irq) 2682 { 2683 if (irq->tag) 2684 bus_teardown_intr(sc->dev, irq->res, irq->tag); 2685 if (irq->res) 2686 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 2687 2688 bzero(irq, sizeof(*irq)); 2689 2690 return (0); 2691 } 2692 2693 static void 2694 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start, 2695 unsigned int end) 2696 { 2697 uint32_t *p = (uint32_t *)(buf + start); 2698 2699 for ( ; start <= end; start += sizeof(uint32_t)) 2700 *p++ = t4_read_reg(sc, start); 2701 } 2702 2703 static void 2704 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 2705 { 2706 int i; 2707 static const unsigned int reg_ranges[] = { 2708 0x1008, 0x1108, 2709 0x1180, 0x11b4, 2710 0x11fc, 0x123c, 2711 0x1300, 0x173c, 2712 0x1800, 0x18fc, 2713 0x3000, 0x30d8, 2714 0x30e0, 0x5924, 2715 0x5960, 0x59d4, 2716 0x5a00, 0x5af8, 2717 0x6000, 0x6098, 2718 0x6100, 0x6150, 2719 0x6200, 0x6208, 2720 0x6240, 0x6248, 2721 0x6280, 0x6338, 2722 0x6370, 0x638c, 2723 0x6400, 0x643c, 2724 0x6500, 0x6524, 2725 0x6a00, 0x6a38, 2726 0x6a60, 0x6a78, 2727 0x6b00, 0x6b84, 2728 0x6bf0, 0x6c84, 2729 0x6cf0, 0x6d84, 2730 0x6df0, 0x6e84, 2731 0x6ef0, 0x6f84, 2732 0x6ff0, 0x7084, 2733 0x70f0, 0x7184, 2734 0x71f0, 0x7284, 2735 0x72f0, 0x7384, 2736 0x73f0, 0x7450, 2737 0x7500, 0x7530, 2738 0x7600, 0x761c, 2739 0x7680, 0x76cc, 2740 0x7700, 0x7798, 2741 0x77c0, 0x77fc, 2742 0x7900, 0x79fc, 2743 0x7b00, 0x7c38, 2744 0x7d00, 0x7efc, 2745 0x8dc0, 0x8e1c, 2746 0x8e30, 0x8e78, 2747 0x8ea0, 0x8f6c, 2748 0x8fc0, 0x9074, 2749 0x90fc, 0x90fc, 2750 0x9400, 0x9458, 2751 0x9600, 0x96bc, 2752 0x9800, 0x9808, 2753 0x9820, 0x983c, 2754 0x9850, 0x9864, 2755 0x9c00, 0x9c6c, 2756 0x9c80, 0x9cec, 2757 0x9d00, 0x9d6c, 2758 0x9d80, 0x9dec, 2759 0x9e00, 0x9e6c, 2760 0x9e80, 
0x9eec, 2761 0x9f00, 0x9f6c, 2762 0x9f80, 0x9fec, 2763 0xd004, 0xd03c, 2764 0xdfc0, 0xdfe0, 2765 0xe000, 0xea7c, 2766 0xf000, 0x11190, 2767 0x19040, 0x1906c, 2768 0x19078, 0x19080, 2769 0x1908c, 0x19124, 2770 0x19150, 0x191b0, 2771 0x191d0, 0x191e8, 2772 0x19238, 0x1924c, 2773 0x193f8, 0x19474, 2774 0x19490, 0x194f8, 2775 0x19800, 0x19f30, 2776 0x1a000, 0x1a06c, 2777 0x1a0b0, 0x1a120, 2778 0x1a128, 0x1a138, 2779 0x1a190, 0x1a1c4, 2780 0x1a1fc, 0x1a1fc, 2781 0x1e040, 0x1e04c, 2782 0x1e284, 0x1e28c, 2783 0x1e2c0, 0x1e2c0, 2784 0x1e2e0, 0x1e2e0, 2785 0x1e300, 0x1e384, 2786 0x1e3c0, 0x1e3c8, 2787 0x1e440, 0x1e44c, 2788 0x1e684, 0x1e68c, 2789 0x1e6c0, 0x1e6c0, 2790 0x1e6e0, 0x1e6e0, 2791 0x1e700, 0x1e784, 2792 0x1e7c0, 0x1e7c8, 2793 0x1e840, 0x1e84c, 2794 0x1ea84, 0x1ea8c, 2795 0x1eac0, 0x1eac0, 2796 0x1eae0, 0x1eae0, 2797 0x1eb00, 0x1eb84, 2798 0x1ebc0, 0x1ebc8, 2799 0x1ec40, 0x1ec4c, 2800 0x1ee84, 0x1ee8c, 2801 0x1eec0, 0x1eec0, 2802 0x1eee0, 0x1eee0, 2803 0x1ef00, 0x1ef84, 2804 0x1efc0, 0x1efc8, 2805 0x1f040, 0x1f04c, 2806 0x1f284, 0x1f28c, 2807 0x1f2c0, 0x1f2c0, 2808 0x1f2e0, 0x1f2e0, 2809 0x1f300, 0x1f384, 2810 0x1f3c0, 0x1f3c8, 2811 0x1f440, 0x1f44c, 2812 0x1f684, 0x1f68c, 2813 0x1f6c0, 0x1f6c0, 2814 0x1f6e0, 0x1f6e0, 2815 0x1f700, 0x1f784, 2816 0x1f7c0, 0x1f7c8, 2817 0x1f840, 0x1f84c, 2818 0x1fa84, 0x1fa8c, 2819 0x1fac0, 0x1fac0, 2820 0x1fae0, 0x1fae0, 2821 0x1fb00, 0x1fb84, 2822 0x1fbc0, 0x1fbc8, 2823 0x1fc40, 0x1fc4c, 2824 0x1fe84, 0x1fe8c, 2825 0x1fec0, 0x1fec0, 2826 0x1fee0, 0x1fee0, 2827 0x1ff00, 0x1ff84, 2828 0x1ffc0, 0x1ffc8, 2829 0x20000, 0x2002c, 2830 0x20100, 0x2013c, 2831 0x20190, 0x201c8, 2832 0x20200, 0x20318, 2833 0x20400, 0x20528, 2834 0x20540, 0x20614, 2835 0x21000, 0x21040, 2836 0x2104c, 0x21060, 2837 0x210c0, 0x210ec, 2838 0x21200, 0x21268, 2839 0x21270, 0x21284, 2840 0x212fc, 0x21388, 2841 0x21400, 0x21404, 2842 0x21500, 0x21518, 2843 0x2152c, 0x2153c, 2844 0x21550, 0x21554, 2845 0x21600, 0x21600, 2846 0x21608, 0x21628, 2847 0x21630, 0x2163c, 2848 0x21700, 0x2171c, 2849 0x21780, 0x2178c, 2850 0x21800, 0x21c38, 2851 0x21c80, 0x21d7c, 2852 0x21e00, 0x21e04, 2853 0x22000, 0x2202c, 2854 0x22100, 0x2213c, 2855 0x22190, 0x221c8, 2856 0x22200, 0x22318, 2857 0x22400, 0x22528, 2858 0x22540, 0x22614, 2859 0x23000, 0x23040, 2860 0x2304c, 0x23060, 2861 0x230c0, 0x230ec, 2862 0x23200, 0x23268, 2863 0x23270, 0x23284, 2864 0x232fc, 0x23388, 2865 0x23400, 0x23404, 2866 0x23500, 0x23518, 2867 0x2352c, 0x2353c, 2868 0x23550, 0x23554, 2869 0x23600, 0x23600, 2870 0x23608, 0x23628, 2871 0x23630, 0x2363c, 2872 0x23700, 0x2371c, 2873 0x23780, 0x2378c, 2874 0x23800, 0x23c38, 2875 0x23c80, 0x23d7c, 2876 0x23e00, 0x23e04, 2877 0x24000, 0x2402c, 2878 0x24100, 0x2413c, 2879 0x24190, 0x241c8, 2880 0x24200, 0x24318, 2881 0x24400, 0x24528, 2882 0x24540, 0x24614, 2883 0x25000, 0x25040, 2884 0x2504c, 0x25060, 2885 0x250c0, 0x250ec, 2886 0x25200, 0x25268, 2887 0x25270, 0x25284, 2888 0x252fc, 0x25388, 2889 0x25400, 0x25404, 2890 0x25500, 0x25518, 2891 0x2552c, 0x2553c, 2892 0x25550, 0x25554, 2893 0x25600, 0x25600, 2894 0x25608, 0x25628, 2895 0x25630, 0x2563c, 2896 0x25700, 0x2571c, 2897 0x25780, 0x2578c, 2898 0x25800, 0x25c38, 2899 0x25c80, 0x25d7c, 2900 0x25e00, 0x25e04, 2901 0x26000, 0x2602c, 2902 0x26100, 0x2613c, 2903 0x26190, 0x261c8, 2904 0x26200, 0x26318, 2905 0x26400, 0x26528, 2906 0x26540, 0x26614, 2907 0x27000, 0x27040, 2908 0x2704c, 0x27060, 2909 0x270c0, 0x270ec, 2910 0x27200, 0x27268, 2911 0x27270, 0x27284, 2912 0x272fc, 0x27388, 2913 0x27400, 0x27404, 2914 0x27500, 0x27518, 2915 0x2752c, 
0x2753c, 2916 0x27550, 0x27554, 2917 0x27600, 0x27600, 2918 0x27608, 0x27628, 2919 0x27630, 0x2763c, 2920 0x27700, 0x2771c, 2921 0x27780, 0x2778c, 2922 0x27800, 0x27c38, 2923 0x27c80, 0x27d7c, 2924 0x27e00, 0x27e04 2925 }; 2926 2927 regs->version = 4 | (sc->params.rev << 10); 2928 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2) 2929 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]); 2930 } 2931 2932 static void 2933 cxgbe_tick(void *arg) 2934 { 2935 struct port_info *pi = arg; 2936 struct ifnet *ifp = pi->ifp; 2937 struct sge_txq *txq; 2938 int i, drops; 2939 struct port_stats *s = &pi->stats; 2940 2941 PORT_LOCK(pi); 2942 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2943 PORT_UNLOCK(pi); 2944 return; /* without scheduling another callout */ 2945 } 2946 2947 t4_get_port_stats(pi->adapter, pi->tx_chan, s); 2948 2949 ifp->if_opackets = s->tx_frames - s->tx_pause; 2950 ifp->if_ipackets = s->rx_frames - s->rx_pause; 2951 ifp->if_obytes = s->tx_octets - s->tx_pause * 64; 2952 ifp->if_ibytes = s->rx_octets - s->rx_pause * 64; 2953 ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause; 2954 ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause; 2955 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + 2956 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 + 2957 s->rx_trunc3; 2958 2959 drops = s->tx_drop; 2960 for_each_txq(pi, i, txq) 2961 drops += txq->br->br_drops; 2962 ifp->if_snd.ifq_drops = drops; 2963 2964 ifp->if_oerrors = s->tx_error_frames; 2965 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long + 2966 s->rx_fcs_err + s->rx_len_err; 2967 2968 callout_schedule(&pi->tick, hz); 2969 PORT_UNLOCK(pi); 2970 } 2971 2972 static void 2973 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 2974 { 2975 struct ifnet *vlan; 2976 2977 if (arg != ifp) 2978 return; 2979 2980 vlan = VLAN_DEVAT(ifp, vid); 2981 VLAN_SETCOOKIE(vlan, ifp); 2982 } 2983 2984 static int 2985 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 2986 { 2987 2988 #ifdef INVARIANTS 2989 panic("%s: opcode 0x%02x on iq %p with payload %p", 2990 __func__, rss->opcode, iq, m); 2991 #else 2992 log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n", 2993 __func__, rss->opcode, iq, m); 2994 m_freem(m); 2995 #endif 2996 return (EDOOFUS); 2997 } 2998 2999 int 3000 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h) 3001 { 3002 uintptr_t *loc, new; 3003 3004 if (opcode >= ARRAY_SIZE(sc->cpl_handler)) 3005 return (EINVAL); 3006 3007 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled; 3008 loc = (uintptr_t *) &sc->cpl_handler[opcode]; 3009 atomic_store_rel_ptr(loc, new); 3010 3011 return (0); 3012 } 3013 3014 static int 3015 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl) 3016 { 3017 3018 #ifdef INVARIANTS 3019 panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl); 3020 #else 3021 log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n", 3022 __func__, iq, ctrl); 3023 #endif 3024 return (EDOOFUS); 3025 } 3026 3027 int 3028 t4_register_an_handler(struct adapter *sc, an_handler_t h) 3029 { 3030 uintptr_t *loc, new; 3031 3032 new = h ? 
(uintptr_t)h : (uintptr_t)an_not_handled; 3033 loc = (uintptr_t *) &sc->an_handler; 3034 atomic_store_rel_ptr(loc, new); 3035 3036 return (0); 3037 } 3038 3039 static int 3040 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl) 3041 { 3042 __be64 *r = __DECONST(__be64 *, rpl); 3043 struct cpl_fw6_msg *cpl = member2struct(cpl_fw6_msg, data, r); 3044 3045 #ifdef INVARIANTS 3046 panic("%s: fw_msg type %d", __func__, cpl->type); 3047 #else 3048 log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type); 3049 #endif 3050 return (EDOOFUS); 3051 } 3052 3053 int 3054 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h) 3055 { 3056 uintptr_t *loc, new; 3057 3058 if (type >= ARRAY_SIZE(sc->fw_msg_handler)) 3059 return (EINVAL); 3060 3061 new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled; 3062 loc = (uintptr_t *) &sc->fw_msg_handler[type]; 3063 atomic_store_rel_ptr(loc, new); 3064 3065 return (0); 3066 } 3067 3068 static int 3069 t4_sysctls(struct adapter *sc) 3070 { 3071 struct sysctl_ctx_list *ctx; 3072 struct sysctl_oid *oid; 3073 struct sysctl_oid_list *children, *c0; 3074 static char *caps[] = { 3075 "\20\1PPP\2QFC\3DCBX", /* caps[0] linkcaps */ 3076 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL", /* caps[1] niccaps */ 3077 "\20\1TOE", /* caps[2] toecaps */ 3078 "\20\1RDDP\2RDMAC", /* caps[3] rdmacaps */ 3079 "\20\1INITIATOR_PDU\2TARGET_PDU" /* caps[4] iscsicaps */ 3080 "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD" 3081 "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD", 3082 "\20\1INITIATOR\2TARGET\3CTRL_OFLD" /* caps[5] fcoecaps */ 3083 }; 3084 3085 ctx = device_get_sysctl_ctx(sc->dev); 3086 3087 /* 3088 * dev.t4nex.X. 3089 */ 3090 oid = device_get_sysctl_tree(sc->dev); 3091 c0 = children = SYSCTL_CHILDREN(oid); 3092 3093 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, 3094 &sc->params.nports, 0, "# of ports"); 3095 3096 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 3097 &sc->params.rev, 0, "chip hardware revision"); 3098 3099 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 3100 CTLFLAG_RD, &sc->fw_version, 0, "firmware version"); 3101 3102 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", 3103 CTLFLAG_RD, &t4_cfg_file, 0, "configuration file"); 3104 3105 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, 3106 &sc->cfcsum, 0, "config file checksum"); 3107 3108 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps", 3109 CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps, 3110 sysctl_bitfield, "A", "available link capabilities"); 3111 3112 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps", 3113 CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps, 3114 sysctl_bitfield, "A", "available NIC capabilities"); 3115 3116 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps", 3117 CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps, 3118 sysctl_bitfield, "A", "available TCP offload capabilities"); 3119 3120 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps", 3121 CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps, 3122 sysctl_bitfield, "A", "available RDMA capabilities"); 3123 3124 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps", 3125 CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps, 3126 sysctl_bitfield, "A", "available iSCSI capabilities"); 3127 3128 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps", 3129 CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps, 3130 sysctl_bitfield, "A", "available FCoE capabilities"); 3131 3132 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, 3133 &sc->params.vpd.cclk, 0, "core clock frequency 
(in KHz)"); 3134 3135 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 3136 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val, 3137 sizeof(sc->sge.timer_val), sysctl_int_array, "A", 3138 "interrupt holdoff timer values (us)"); 3139 3140 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 3141 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val, 3142 sizeof(sc->sge.counter_val), sysctl_int_array, "A", 3143 "interrupt holdoff packet counter values"); 3144 3145 #ifdef SBUF_DRAIN 3146 /* 3147 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 3148 */ 3149 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 3150 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 3151 "logs and miscellaneous information"); 3152 children = SYSCTL_CHILDREN(oid); 3153 3154 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 3155 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3156 sysctl_cctrl, "A", "congestion control"); 3157 3158 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 3159 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3160 sysctl_cpl_stats, "A", "CPL statistics"); 3161 3162 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 3163 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3164 sysctl_ddp_stats, "A", "DDP statistics"); 3165 3166 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 3167 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3168 sysctl_devlog, "A", "firmware's device log"); 3169 3170 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 3171 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3172 sysctl_fcoe_stats, "A", "FCoE statistics"); 3173 3174 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 3175 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3176 sysctl_hw_sched, "A", "hardware scheduler "); 3177 3178 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 3179 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3180 sysctl_l2t, "A", "hardware L2 table"); 3181 3182 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 3183 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3184 sysctl_lb_stats, "A", "loopback statistics"); 3185 3186 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 3187 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3188 sysctl_meminfo, "A", "memory regions"); 3189 3190 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 3191 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3192 sysctl_path_mtus, "A", "path MTUs"); 3193 3194 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 3195 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3196 sysctl_pm_stats, "A", "PM statistics"); 3197 3198 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 3199 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3200 sysctl_rdma_stats, "A", "RDMA statistics"); 3201 3202 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 3203 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3204 sysctl_tcp_stats, "A", "TCP statistics"); 3205 3206 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 3207 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3208 sysctl_tids, "A", "TID information"); 3209 3210 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 3211 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3212 sysctl_tp_err_stats, "A", "TP error statistics"); 3213 3214 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 3215 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 3216 sysctl_tx_rate, "A", "Tx rate"); 3217 #endif 3218 3219 #ifdef TCP_OFFLOAD 3220 if (is_offload(sc)) { 3221 /* 3222 * dev.t4nex.X.toe. 
3223 */ 3224 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 3225 NULL, "TOE parameters"); 3226 children = SYSCTL_CHILDREN(oid); 3227 3228 sc->tt.sndbuf = 256 * 1024; 3229 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 3230 &sc->tt.sndbuf, 0, "max hardware send buffer size"); 3231 3232 sc->tt.ddp = 0; 3233 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 3234 &sc->tt.ddp, 0, "DDP allowed"); 3235 3236 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5)); 3237 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW, 3238 &sc->tt.indsz, 0, "DDP max indicate size allowed"); 3239 3240 sc->tt.ddp_thres = 3241 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)); 3242 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW, 3243 &sc->tt.ddp_thres, 0, "DDP threshold"); 3244 } 3245 #endif 3246 3247 3248 return (0); 3249 } 3250 3251 static int 3252 cxgbe_sysctls(struct port_info *pi) 3253 { 3254 struct sysctl_ctx_list *ctx; 3255 struct sysctl_oid *oid; 3256 struct sysctl_oid_list *children; 3257 3258 ctx = device_get_sysctl_ctx(pi->dev); 3259 3260 /* 3261 * dev.cxgbe.X. 3262 */ 3263 oid = device_get_sysctl_tree(pi->dev); 3264 children = SYSCTL_CHILDREN(oid); 3265 3266 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 3267 &pi->nrxq, 0, "# of rx queues"); 3268 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 3269 &pi->ntxq, 0, "# of tx queues"); 3270 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 3271 &pi->first_rxq, 0, "index of first rx queue"); 3272 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 3273 &pi->first_txq, 0, "index of first tx queue"); 3274 3275 #ifdef TCP_OFFLOAD 3276 if (is_offload(pi->adapter)) { 3277 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, 3278 &pi->nofldrxq, 0, 3279 "# of rx queues for offloaded TCP connections"); 3280 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 3281 &pi->nofldtxq, 0, 3282 "# of tx queues for offloaded TCP connections"); 3283 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", 3284 CTLFLAG_RD, &pi->first_ofld_rxq, 0, 3285 "index of first TOE rx queue"); 3286 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 3287 CTLFLAG_RD, &pi->first_ofld_txq, 0, 3288 "index of first TOE tx queue"); 3289 } 3290 #endif 3291 3292 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 3293 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I", 3294 "holdoff timer index"); 3295 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 3296 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I", 3297 "holdoff packet counter index"); 3298 3299 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 3300 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I", 3301 "rx queue size"); 3302 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 3303 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I", 3304 "tx queue size"); 3305 3306 /* 3307 * dev.cxgbe.X.stats. 
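	 *
	 * Most of these read an MPS counter register on demand (see
	 * sysctl_handle_t4_reg64) and are always current; the buffer-group
	 * overflow/truncation counters at the end come from the cached
	 * port_stats and may lag by up to a second.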
3308 */ 3309 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 3310 NULL, "port statistics"); 3311 children = SYSCTL_CHILDREN(oid); 3312 3313 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 3314 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 3315 CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \ 3316 sysctl_handle_t4_reg64, "QU", desc) 3317 3318 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 3319 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 3320 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 3321 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 3322 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 3323 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 3324 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 3325 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 3326 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 3327 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 3328 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 3329 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 3330 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 3331 "# of tx frames in this range", 3332 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 3333 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 3334 "# of tx frames in this range", 3335 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 3336 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 3337 "# of tx frames in this range", 3338 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 3339 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 3340 "# of tx frames in this range", 3341 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 3342 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 3343 "# of tx frames in this range", 3344 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 3345 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518", 3346 "# of tx frames in this range", 3347 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 3348 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", 3349 "# of tx frames in this range", 3350 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 3351 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 3352 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 3353 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 3354 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 3355 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 3356 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 3357 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 3358 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 3359 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 3360 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 3361 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 3362 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 3363 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 3364 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 3365 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 3366 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 3367 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted", 3368 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 3369 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 3370 
PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 3371 3372 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 3373 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 3374 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 3375 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 3376 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 3377 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 3378 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 3379 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 3380 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 3381 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 3382 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 3383 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 3384 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 3385 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 3386 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 3387 "# of frames received with bad FCS", 3388 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 3389 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 3390 "# of frames received with length error", 3391 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 3392 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 3393 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 3394 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 3395 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 3396 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 3397 "# of rx frames in this range", 3398 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 3399 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 3400 "# of rx frames in this range", 3401 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 3402 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 3403 "# of rx frames in this range", 3404 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 3405 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 3406 "# of rx frames in this range", 3407 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 3408 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 3409 "# of rx frames in this range", 3410 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 3411 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 3412 "# of rx frames in this range", 3413 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 3414 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 3415 "# of rx frames in this range", 3416 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 3417 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 3418 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 3419 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 3420 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 3421 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 3422 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 3423 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 3424 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 3425 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 3426 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 3427 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 3428 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 3429 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 3430 PORT_REG(pi->tx_chan, 
	    A_MPS_PORT_STAT_RX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
	    &pi->stats.name, desc)

	/* We get these from port_stats and they may be stale by up to 1s */
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
	    "# drops due to buffer-group 0 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
	    "# drops due to buffer-group 1 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
	    "# drops due to buffer-group 2 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
	    "# drops due to buffer-group 3 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
	    "# of buffer-group 0 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
	    "# of buffer-group 1 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
	    "# of buffer-group 2 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
	    "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

	return (0);
}

static int
sysctl_int_array(SYSCTL_HANDLER_ARGS)
{
	int rc, *i;
	struct sbuf sb;

	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
	for (i = arg1; arg2; arg2 -= sizeof(int), i++)
		sbuf_printf(&sb, "%d ", *i);
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);
	return (rc);
}

static int
sysctl_bitfield(SYSCTL_HANDLER_ARGS)
{
	int rc;
	struct sbuf *sb;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	int idx, rc, i;

	idx = pi->tmr_idx;

	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (idx < 0 || idx >= SGE_NTIMERS)
		return (EINVAL);

	ADAPTER_LOCK(sc);
	rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
	if (rc == 0) {
		struct sge_rxq *rxq;
		uint8_t v;

		v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
		for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
			atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
			rxq->iq.intr_params = v;
#endif
		}
		pi->tmr_idx = idx;
	}

	ADAPTER_UNLOCK(sc);
	return (rc);
}

static int
sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	int idx, rc;

	idx = pi->pktc_idx;

	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (idx < -1 || idx >= SGE_NCOUNTERS)
		return (EINVAL);

	ADAPTER_LOCK(sc);
	rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ?
EBUSY : 0); 3557 if (rc == 0 && pi->flags & PORT_INIT_DONE) 3558 rc = EBUSY; /* cannot be changed once the queues are created */ 3559 3560 if (rc == 0) 3561 pi->pktc_idx = idx; 3562 3563 ADAPTER_UNLOCK(sc); 3564 return (rc); 3565 } 3566 3567 static int 3568 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 3569 { 3570 struct port_info *pi = arg1; 3571 struct adapter *sc = pi->adapter; 3572 int qsize, rc; 3573 3574 qsize = pi->qsize_rxq; 3575 3576 rc = sysctl_handle_int(oidp, &qsize, 0, req); 3577 if (rc != 0 || req->newptr == NULL) 3578 return (rc); 3579 3580 if (qsize < 128 || (qsize & 7)) 3581 return (EINVAL); 3582 3583 ADAPTER_LOCK(sc); 3584 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 3585 if (rc == 0 && pi->flags & PORT_INIT_DONE) 3586 rc = EBUSY; /* cannot be changed once the queues are created */ 3587 3588 if (rc == 0) 3589 pi->qsize_rxq = qsize; 3590 3591 ADAPTER_UNLOCK(sc); 3592 return (rc); 3593 } 3594 3595 static int 3596 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 3597 { 3598 struct port_info *pi = arg1; 3599 struct adapter *sc = pi->adapter; 3600 int qsize, rc; 3601 3602 qsize = pi->qsize_txq; 3603 3604 rc = sysctl_handle_int(oidp, &qsize, 0, req); 3605 if (rc != 0 || req->newptr == NULL) 3606 return (rc); 3607 3608 if (qsize < 128) 3609 return (EINVAL); 3610 3611 ADAPTER_LOCK(sc); 3612 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 3613 if (rc == 0 && pi->flags & PORT_INIT_DONE) 3614 rc = EBUSY; /* cannot be changed once the queues are created */ 3615 3616 if (rc == 0) 3617 pi->qsize_txq = qsize; 3618 3619 ADAPTER_UNLOCK(sc); 3620 return (rc); 3621 } 3622 3623 static int 3624 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 3625 { 3626 struct adapter *sc = arg1; 3627 int reg = arg2; 3628 uint64_t val; 3629 3630 val = t4_read_reg64(sc, reg); 3631 3632 return (sysctl_handle_64(oidp, &val, 0, req)); 3633 } 3634 3635 #ifdef SBUF_DRAIN 3636 static int 3637 sysctl_cctrl(SYSCTL_HANDLER_ARGS) 3638 { 3639 struct adapter *sc = arg1; 3640 struct sbuf *sb; 3641 int rc, i; 3642 uint16_t incr[NMTUS][NCCTRL_WIN]; 3643 static const char *dec_fac[] = { 3644 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", 3645 "0.9375" 3646 }; 3647 3648 rc = sysctl_wire_old_buffer(req, 0); 3649 if (rc != 0) 3650 return (rc); 3651 3652 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 3653 if (sb == NULL) 3654 return (ENOMEM); 3655 3656 t4_read_cong_tbl(sc, incr); 3657 3658 for (i = 0; i < NCCTRL_WIN; ++i) { 3659 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, 3660 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], 3661 incr[5][i], incr[6][i], incr[7][i]); 3662 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", 3663 incr[8][i], incr[9][i], incr[10][i], incr[11][i], 3664 incr[12][i], incr[13][i], incr[14][i], incr[15][i], 3665 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); 3666 } 3667 3668 rc = sbuf_finish(sb); 3669 sbuf_delete(sb); 3670 3671 return (rc); 3672 } 3673 3674 static int 3675 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 3676 { 3677 struct adapter *sc = arg1; 3678 struct sbuf *sb; 3679 int rc; 3680 struct tp_cpl_stats stats; 3681 3682 rc = sysctl_wire_old_buffer(req, 0); 3683 if (rc != 0) 3684 return (rc); 3685 3686 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 3687 if (sb == NULL) 3688 return (ENOMEM); 3689 3690 t4_tp_get_cpl_stats(sc, &stats); 3691 3692 sbuf_printf(sb, " channel 0 channel 1 channel 2 " 3693 "channel 3\n"); 3694 sbuf_printf(sb, "CPL requests: %10u %10u %10u %10u\n", 3695 stats.req[0], stats.req[1], stats.req[2], stats.req[3]); 3696 
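	/*
	 * The last line below has no trailing newline, like the other
	 * sbuf-based sysctl handlers in this file.
	 */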
sbuf_printf(sb, "CPL responses: %10u %10u %10u %10u", 3697 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); 3698 3699 rc = sbuf_finish(sb); 3700 sbuf_delete(sb); 3701 3702 return (rc); 3703 } 3704 3705 static int 3706 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) 3707 { 3708 struct adapter *sc = arg1; 3709 struct sbuf *sb; 3710 int rc; 3711 struct tp_usm_stats stats; 3712 3713 rc = sysctl_wire_old_buffer(req, 0); 3714 if (rc != 0) 3715 return(rc); 3716 3717 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 3718 if (sb == NULL) 3719 return (ENOMEM); 3720 3721 t4_get_usm_stats(sc, &stats); 3722 3723 sbuf_printf(sb, "Frames: %u\n", stats.frames); 3724 sbuf_printf(sb, "Octets: %ju\n", stats.octets); 3725 sbuf_printf(sb, "Drops: %u", stats.drops); 3726 3727 rc = sbuf_finish(sb); 3728 sbuf_delete(sb); 3729 3730 return (rc); 3731 } 3732 3733 const char *devlog_level_strings[] = { 3734 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 3735 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 3736 [FW_DEVLOG_LEVEL_ERR] = "ERR", 3737 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 3738 [FW_DEVLOG_LEVEL_INFO] = "INFO", 3739 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 3740 }; 3741 3742 const char *devlog_facility_strings[] = { 3743 [FW_DEVLOG_FACILITY_CORE] = "CORE", 3744 [FW_DEVLOG_FACILITY_SCHED] = "SCHED", 3745 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 3746 [FW_DEVLOG_FACILITY_RES] = "RES", 3747 [FW_DEVLOG_FACILITY_HW] = "HW", 3748 [FW_DEVLOG_FACILITY_FLR] = "FLR", 3749 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 3750 [FW_DEVLOG_FACILITY_PHY] = "PHY", 3751 [FW_DEVLOG_FACILITY_MAC] = "MAC", 3752 [FW_DEVLOG_FACILITY_PORT] = "PORT", 3753 [FW_DEVLOG_FACILITY_VI] = "VI", 3754 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 3755 [FW_DEVLOG_FACILITY_ACL] = "ACL", 3756 [FW_DEVLOG_FACILITY_TM] = "TM", 3757 [FW_DEVLOG_FACILITY_QFC] = "QFC", 3758 [FW_DEVLOG_FACILITY_DCB] = "DCB", 3759 [FW_DEVLOG_FACILITY_ETH] = "ETH", 3760 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 3761 [FW_DEVLOG_FACILITY_RI] = "RI", 3762 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 3763 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 3764 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 3765 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE" 3766 }; 3767 3768 static int 3769 sysctl_devlog(SYSCTL_HANDLER_ARGS) 3770 { 3771 struct adapter *sc = arg1; 3772 struct devlog_params *dparams = &sc->params.devlog; 3773 struct fw_devlog_e *buf, *e; 3774 int i, j, rc, nentries, first = 0; 3775 struct sbuf *sb; 3776 uint64_t ftstamp = UINT64_MAX; 3777 3778 if (dparams->start == 0) 3779 return (ENXIO); 3780 3781 nentries = dparams->size / sizeof(struct fw_devlog_e); 3782 3783 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 3784 if (buf == NULL) 3785 return (ENOMEM); 3786 3787 rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size, 3788 (void *)buf); 3789 if (rc != 0) 3790 goto done; 3791 3792 for (i = 0; i < nentries; i++) { 3793 e = &buf[i]; 3794 3795 if (e->timestamp == 0) 3796 break; /* end */ 3797 3798 e->timestamp = be64toh(e->timestamp); 3799 e->seqno = be32toh(e->seqno); 3800 for (j = 0; j < 8; j++) 3801 e->params[j] = be32toh(e->params[j]); 3802 3803 if (e->timestamp < ftstamp) { 3804 ftstamp = e->timestamp; 3805 first = i; 3806 } 3807 } 3808 3809 if (buf[first].timestamp == 0) 3810 goto done; /* nothing in the log */ 3811 3812 rc = sysctl_wire_old_buffer(req, 0); 3813 if (rc != 0) 3814 goto done; 3815 3816 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 3817 if (sb == NULL) { 3818 rc = ENOMEM; 3819 goto done; 3820 } 3821 sbuf_printf(sb, "%10s %15s %8s %8s %s\n", 3822 "Seq#", "Tstamp", "Level", "Facility", "Message"); 3823 3824 i = first; 3825 do { 
3826 e = &buf[i]; 3827 if (e->timestamp == 0) 3828 break; /* end */ 3829 3830 sbuf_printf(sb, "%10d %15ju %8s %8s ", 3831 e->seqno, e->timestamp, 3832 (e->level < ARRAY_SIZE(devlog_level_strings) ? 3833 devlog_level_strings[e->level] : "UNKNOWN"), 3834 (e->facility < ARRAY_SIZE(devlog_facility_strings) ? 3835 devlog_facility_strings[e->facility] : "UNKNOWN")); 3836 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 3837 e->params[2], e->params[3], e->params[4], 3838 e->params[5], e->params[6], e->params[7]); 3839 3840 if (++i == nentries) 3841 i = 0; 3842 } while (i != first); 3843 3844 rc = sbuf_finish(sb); 3845 sbuf_delete(sb); 3846 done: 3847 free(buf, M_CXGBE); 3848 return (rc); 3849 } 3850 3851 static int 3852 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) 3853 { 3854 struct adapter *sc = arg1; 3855 struct sbuf *sb; 3856 int rc; 3857 struct tp_fcoe_stats stats[4]; 3858 3859 rc = sysctl_wire_old_buffer(req, 0); 3860 if (rc != 0) 3861 return (rc); 3862 3863 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 3864 if (sb == NULL) 3865 return (ENOMEM); 3866 3867 t4_get_fcoe_stats(sc, 0, &stats[0]); 3868 t4_get_fcoe_stats(sc, 1, &stats[1]); 3869 t4_get_fcoe_stats(sc, 2, &stats[2]); 3870 t4_get_fcoe_stats(sc, 3, &stats[3]); 3871 3872 sbuf_printf(sb, " channel 0 channel 1 " 3873 "channel 2 channel 3\n"); 3874 sbuf_printf(sb, "octetsDDP: %16ju %16ju %16ju %16ju\n", 3875 stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP, 3876 stats[3].octetsDDP); 3877 sbuf_printf(sb, "framesDDP: %16u %16u %16u %16u\n", stats[0].framesDDP, 3878 stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP); 3879 sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u", 3880 stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop, 3881 stats[3].framesDrop); 3882 3883 rc = sbuf_finish(sb); 3884 sbuf_delete(sb); 3885 3886 return (rc); 3887 } 3888 3889 static int 3890 sysctl_hw_sched(SYSCTL_HANDLER_ARGS) 3891 { 3892 struct adapter *sc = arg1; 3893 struct sbuf *sb; 3894 int rc, i; 3895 unsigned int map, kbps, ipg, mode; 3896 unsigned int pace_tab[NTX_SCHED]; 3897 3898 rc = sysctl_wire_old_buffer(req, 0); 3899 if (rc != 0) 3900 return (rc); 3901 3902 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 3903 if (sb == NULL) 3904 return (ENOMEM); 3905 3906 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); 3907 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); 3908 t4_read_pace_tbl(sc, pace_tab); 3909 3910 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " 3911 "Class IPG (0.1 ns) Flow IPG (us)"); 3912 3913 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 3914 t4_get_tx_sched(sc, i, &kbps, &ipg); 3915 sbuf_printf(sb, "\n %u %-5s %u ", i, 3916 (mode & (1 << i)) ? 
"flow" : "class", map & 3); 3917 if (kbps) 3918 sbuf_printf(sb, "%9u ", kbps); 3919 else 3920 sbuf_printf(sb, " disabled "); 3921 3922 if (ipg) 3923 sbuf_printf(sb, "%13u ", ipg); 3924 else 3925 sbuf_printf(sb, " disabled "); 3926 3927 if (pace_tab[i]) 3928 sbuf_printf(sb, "%10u", pace_tab[i]); 3929 else 3930 sbuf_printf(sb, " disabled"); 3931 } 3932 3933 rc = sbuf_finish(sb); 3934 sbuf_delete(sb); 3935 3936 return (rc); 3937 } 3938 3939 static int 3940 sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 3941 { 3942 struct adapter *sc = arg1; 3943 struct sbuf *sb; 3944 int rc, i, j; 3945 uint64_t *p0, *p1; 3946 struct lb_port_stats s[2]; 3947 static const char *stat_name[] = { 3948 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 3949 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 3950 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 3951 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 3952 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 3953 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 3954 "BG2FramesTrunc:", "BG3FramesTrunc:" 3955 }; 3956 3957 rc = sysctl_wire_old_buffer(req, 0); 3958 if (rc != 0) 3959 return (rc); 3960 3961 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 3962 if (sb == NULL) 3963 return (ENOMEM); 3964 3965 memset(s, 0, sizeof(s)); 3966 3967 for (i = 0; i < 4; i += 2) { 3968 t4_get_lb_stats(sc, i, &s[0]); 3969 t4_get_lb_stats(sc, i + 1, &s[1]); 3970 3971 p0 = &s[0].octets; 3972 p1 = &s[1].octets; 3973 sbuf_printf(sb, "%s Loopback %u" 3974 " Loopback %u", i == 0 ? "" : "\n", i, i + 1); 3975 3976 for (j = 0; j < ARRAY_SIZE(stat_name); j++) 3977 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 3978 *p0++, *p1++); 3979 } 3980 3981 rc = sbuf_finish(sb); 3982 sbuf_delete(sb); 3983 3984 return (rc); 3985 } 3986 3987 struct mem_desc { 3988 unsigned int base; 3989 unsigned int limit; 3990 unsigned int idx; 3991 }; 3992 3993 static int 3994 mem_desc_cmp(const void *a, const void *b) 3995 { 3996 return ((const struct mem_desc *)a)->base - 3997 ((const struct mem_desc *)b)->base; 3998 } 3999 4000 static void 4001 mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 4002 unsigned int to) 4003 { 4004 unsigned int size; 4005 4006 size = to - from + 1; 4007 if (size == 0) 4008 return; 4009 4010 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 4011 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 4012 } 4013 4014 static int 4015 sysctl_meminfo(SYSCTL_HANDLER_ARGS) 4016 { 4017 struct adapter *sc = arg1; 4018 struct sbuf *sb; 4019 int rc, i, n; 4020 uint32_t lo, hi; 4021 static const char *memory[] = { "EDC0:", "EDC1:", "MC:" }; 4022 static const char *region[] = { 4023 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", 4024 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", 4025 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", 4026 "TDDP region:", "TPT region:", "STAG region:", "RQ region:", 4027 "RQUDP region:", "PBL region:", "TXPBL region:", "ULPRX state:", 4028 "ULPTX state:", "On-chip queues:" 4029 }; 4030 struct mem_desc avail[3]; 4031 struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */ 4032 struct mem_desc *md = mem; 4033 4034 rc = sysctl_wire_old_buffer(req, 0); 4035 if (rc != 0) 4036 return (rc); 4037 4038 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 4039 if (sb == NULL) 4040 return (ENOMEM); 4041 4042 for (i = 0; i < ARRAY_SIZE(mem); i++) { 4043 mem[i].limit = 0; 4044 mem[i].idx = i; 4045 } 4046 4047 /* Find and sort the 
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
	unsigned int size;

	size = to - from + 1;
	if (size == 0)
		return;

	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
}

static int
sysctl_meminfo(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n;
	uint32_t lo, hi;
	static const char *memory[] = { "EDC0:", "EDC1:", "MC:" };
	static const char *region[] = {
		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
		"RQUDP region:", "PBL region:", "TXPBL region:", "ULPRX state:",
		"ULPTX state:", "On-chip queues:"
	};
	struct mem_desc avail[3];
	struct mem_desc mem[ARRAY_SIZE(region) + 3];	/* up to 3 holes */
	struct mem_desc *md = mem;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	for (i = 0; i < ARRAY_SIZE(mem); i++) {
		mem[i].limit = 0;
		mem[i].idx = i;
	}

	/* Find and sort the populated memory ranges */
	i = 0;
	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (lo & F_EDRAM0_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		avail[i].base = G_EDRAM0_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
		avail[i].idx = 0;
		i++;
	}
	if (lo & F_EDRAM1_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		avail[i].base = G_EDRAM1_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
		avail[i].idx = 1;
		i++;
	}
	if (lo & F_EXT_MEM_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
		avail[i].idx = 2;
		i++;
	}
	if (i == 0) {			/* no memory available */
		sbuf_delete(sb);	/* don't leak the sbuf */
		return (0);
	}
	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);

	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);

	/* the next few have explicit upper bounds */
	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
	md->limit = md->base - 1 +
	    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
	    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
	md++;

	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
	md->limit = md->base - 1 +
	    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
	    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
	md++;

	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
	} else {
		md->base = 0;
		md->idx = ARRAY_SIZE(region);	/* hide it */
	}
	md++;

#define ulp_region(reg) \
	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)

	ulp_region(RX_ISCSI);
	ulp_region(RX_TDDP);
	ulp_region(TX_TPT);
	ulp_region(RX_STAG);
	ulp_region(RX_RQ);
	ulp_region(RX_RQUDP);
	ulp_region(RX_PBL);
	ulp_region(TX_PBL);
#undef ulp_region

	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
	md->limit = md->base + sc->tids.ntids - 1;
	md++;
	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
	md->limit = md->base + sc->tids.ntids - 1;
	md++;

	md->base = sc->vres.ocq.start;
	if (sc->vres.ocq.size)
		md->limit = md->base + sc->vres.ocq.size - 1;
	else
		md->idx = ARRAY_SIZE(region);	/* hide it */
	md++;

	/* add any address-space holes, there can be up to 3 */
	for (n = 0; n < i - 1; n++)
		if (avail[n].limit < avail[n + 1].base)
			(md++)->base = avail[n].limit;
	if (avail[n].limit)
		(md++)->base = avail[n].limit;

	n = md - mem;
	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);

	for (lo = 0; lo < i; lo++)
		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
		    avail[lo].limit - 1);

	sbuf_printf(sb, "\n");
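	/*
	 * A region read from a *_BASE register above has no explicit limit;
	 * treat it as extending to the start of the next region (or to the
	 * end of the address space for the last one).  Entries whose idx was
	 * pushed past the region[] table are holes and are skipped.
	 */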
	for (i = 0; i < n; i++) {
		if (mem[i].idx >= ARRAY_SIZE(region))
			continue;	/* skip holes */
		if (!mem[i].limit)
			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
		mem_region_show(sb, region[mem[i].idx], mem[i].base,
		    mem[i].limit);
	}

	sbuf_printf(sb, "\n");
	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
	mem_region_show(sb, "uP RAM:", lo, hi);

	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
	mem_region_show(sb, "uP Extmem2:", lo, hi);

	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
	    G_PMRXMAXPAGE(lo),
	    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
	    (lo & F_PMRXNUMCHN) ? 2 : 1);

	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
	    G_PMTXMAXPAGE(lo),
	    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
	    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
	sbuf_printf(sb, "%u p-structs\n",
	    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));

	for (i = 0; i < 4; i++) {
		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
		    i, G_USED(lo), G_ALLOC(lo));
	}
	for (i = 0; i < 4; i++) {
		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
		sbuf_printf(sb,
		    "\nLoopback %d using %u pages out of %u allocated",
		    i, G_USED(lo), G_ALLOC(lo));
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	uint16_t mtus[NMTUS];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_mtu_tbl(sc, mtus, NULL);

	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
	    mtus[14], mtus[15]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
	uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
	static const char *pm_stats[] = {
		"Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
	t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);

	sbuf_printf(sb, "                Tx count            Tx cycles    "
	    "Rx count            Rx cycles");
	for (i = 0; i < PM_NSTATS; i++)
		sbuf_printf(sb, "\n%-13s %10u %20ju  %10u %20ju",
		    pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
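/* RQE deferral counters kept by TP for offloaded RDMA traffic. */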
static int
sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_rdma_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_rdma_stats(sc, &stats);
	sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
	sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_tcp_stats v4, v6;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_tcp_stats(sc, &v4, &v6);
	sbuf_printf(sb,
	    "                                IP                 IPv6\n");
	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
	    v4.tcpOutRsts, v6.tcpOutRsts);
	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
	    v4.tcpInSegs, v6.tcpInSegs);
	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
	    v4.tcpOutSegs, v6.tcpOutSegs);
	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
	    v4.tcpRetransSegs, v6.tcpRetransSegs);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tid_info *t = &sc->tids;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	if (t->natids) {
		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
		    t->atids_in_use);
	}

	if (t->ntids) {
		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

			if (b) {
				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			} else {
				sbuf_printf(sb, "TID range: %u-%u",
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			}
		} else
			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
		sbuf_printf(sb, ", in use: %u\n",
		    atomic_load_acq_int(&t->tids_in_use));
	}

	if (t->nstids) {
		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
		    t->stid_base + t->nstids - 1, t->stids_in_use);
	}

	if (t->nftids) {
		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
		    t->ftid_base + t->nftids - 1);
	}

	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
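/* Per-channel error counters kept by TP, plus a few global offload drops. */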
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_err_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_err_stats(sc, &stats);

	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
	    "channel 3\n");
	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
	    stats.macInErrs[3]);
	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
	    stats.hdrInErrs[3]);
	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
	    stats.tcpInErrs[3]);
	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
	    stats.tcp6InErrs[3]);
	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
	    stats.tnlCongDrops[3]);
	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
	    stats.tnlTxDrops[3]);
	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
	    stats.ofldNoNeigh, stats.ofldCongDefer);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	u64 nrate[NCHAN], orate[NCHAN];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_get_chan_txrate(sc, nrate, orate);
	sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
	    "channel 3\n");
	sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
	    nrate[0], nrate[1], nrate[2], nrate[3]);
	sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
	    orate[0], orate[1], orate[2], orate[3]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
#endif
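/*
 * (Re)start transmission on an Ethernet tx queue: finish off any mbuf held
 * over from an earlier attempt (txq->m) before dequeuing fresh work from the
 * queue's buf_ring.
 */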
static inline void
txq_start(struct ifnet *ifp, struct sge_txq *txq)
{
	struct buf_ring *br;
	struct mbuf *m;

	TXQ_LOCK_ASSERT_OWNED(txq);

	br = txq->br;
	m = txq->m ? txq->m : drbr_dequeue(ifp, br);
	if (m)
		t4_eth_tx(ifp, txq, m);
}

void
t4_tx_callout(void *arg)
{
	struct sge_eq *eq = arg;
	struct adapter *sc;

	if (EQ_TRYLOCK(eq) == 0)
		goto reschedule;

	if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
		EQ_UNLOCK(eq);
reschedule:
		if (__predict_true(!(eq->flags & EQ_DOOMED)))
			callout_schedule(&eq->tx_callout, 1);
		return;
	}

	EQ_LOCK_ASSERT_OWNED(eq);

	if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {

		if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
			struct sge_txq *txq = arg;
			struct port_info *pi = txq->ifp->if_softc;

			sc = pi->adapter;
		} else {
			struct sge_wrq *wrq = arg;

			sc = wrq->adapter;
		}

		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
	}

	EQ_UNLOCK(eq);
}

void
t4_tx_task(void *arg, int count)
{
	struct sge_eq *eq = arg;

	EQ_LOCK(eq);
	if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
		struct sge_txq *txq = arg;

		txq_start(txq->ifp, txq);
	} else {
		struct sge_wrq *wrq = arg;

		t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
	}
	EQ_UNLOCK(eq);
}

static uint32_t
fconf_to_mode(uint32_t fconf)
{
	uint32_t mode;

	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;

	if (fconf & F_FRAGMENTATION)
		mode |= T4_FILTER_IP_FRAGMENT;

	if (fconf & F_MPSHITTYPE)
		mode |= T4_FILTER_MPS_HIT_TYPE;

	if (fconf & F_MACMATCH)
		mode |= T4_FILTER_MAC_IDX;

	if (fconf & F_ETHERTYPE)
		mode |= T4_FILTER_ETH_TYPE;

	if (fconf & F_PROTOCOL)
		mode |= T4_FILTER_IP_PROTO;

	if (fconf & F_TOS)
		mode |= T4_FILTER_IP_TOS;

	if (fconf & F_VLAN)
		mode |= T4_FILTER_VLAN;

	if (fconf & F_VNIC_ID)
		mode |= T4_FILTER_VNIC;

	if (fconf & F_PORT)
		mode |= T4_FILTER_PORT;

	if (fconf & F_FCOE)
		mode |= T4_FILTER_FCoE;

	return (mode);
}

static uint32_t
mode_to_fconf(uint32_t mode)
{
	uint32_t fconf = 0;

	if (mode & T4_FILTER_IP_FRAGMENT)
		fconf |= F_FRAGMENTATION;

	if (mode & T4_FILTER_MPS_HIT_TYPE)
		fconf |= F_MPSHITTYPE;

	if (mode & T4_FILTER_MAC_IDX)
		fconf |= F_MACMATCH;

	if (mode & T4_FILTER_ETH_TYPE)
		fconf |= F_ETHERTYPE;

	if (mode & T4_FILTER_IP_PROTO)
		fconf |= F_PROTOCOL;

	if (mode & T4_FILTER_IP_TOS)
		fconf |= F_TOS;

	if (mode & T4_FILTER_VLAN)
		fconf |= F_VLAN;

	if (mode & T4_FILTER_VNIC)
		fconf |= F_VNIC_ID;

	if (mode & T4_FILTER_PORT)
		fconf |= F_PORT;

	if (mode & T4_FILTER_FCoE)
		fconf |= F_FCOE;

	return (fconf);
}
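/*
 * The fconf bits that a filter specification requires to be part of the
 * global filter mode.  set_filter uses this to reject a spec that matches on
 * fields the current mode doesn't include.
 */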
static uint32_t
fspec_to_fconf(struct t4_filter_specification *fs)
{
	uint32_t fconf = 0;

	if (fs->val.frag || fs->mask.frag)
		fconf |= F_FRAGMENTATION;

	if (fs->val.matchtype || fs->mask.matchtype)
		fconf |= F_MPSHITTYPE;

	if (fs->val.macidx || fs->mask.macidx)
		fconf |= F_MACMATCH;

	if (fs->val.ethtype || fs->mask.ethtype)
		fconf |= F_ETHERTYPE;

	if (fs->val.proto || fs->mask.proto)
		fconf |= F_PROTOCOL;

	if (fs->val.tos || fs->mask.tos)
		fconf |= F_TOS;

	if (fs->val.vlan_vld || fs->mask.vlan_vld)
		fconf |= F_VLAN;

	if (fs->val.vnic_vld || fs->mask.vnic_vld)
		fconf |= F_VNIC_ID;

	if (fs->val.iport || fs->mask.iport)
		fconf |= F_PORT;

	if (fs->val.fcoe || fs->mask.fcoe)
		fconf |= F_FCOE;

	return (fconf);
}

static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	uint32_t fconf;

	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
	    A_TP_VLAN_PRI_MAP);

	if (sc->filter_mode != fconf) {
		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
		    device_get_nameunit(sc->dev), sc->filter_mode, fconf);
		sc->filter_mode = fconf;
	}

	*mode = fconf_to_mode(sc->filter_mode);

	return (0);
}

static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	uint32_t fconf;
	int rc;

	fconf = mode_to_fconf(mode);

	ADAPTER_LOCK(sc);
	if (IS_BUSY(sc)) {
		rc = EAGAIN;
		goto done;
	}

	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (sc->offload_map) {
		rc = EBUSY;
		goto done;
	}
#endif

#ifdef notyet
	rc = -t4_set_filter_mode(sc, fconf);
	if (rc == 0)
		sc->filter_mode = fconf;
#else
	rc = ENOTSUP;
#endif

done:
	ADAPTER_UNLOCK(sc);
	return (rc);
}

static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
	uint32_t tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	uint64_t hits;

	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0),
	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0));
	hits = t4_read_reg64(sc, MEMWIN0_BASE + 16);

	return (be64toh(hits));
}
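/*
 * Returns the first valid filter at or after t->idx, or sets t->idx to
 * 0xffffffff if there is none.  Backs the CHELSIO_T4_GET_FILTER ioctl.
 */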
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	if (IS_BUSY(sc))
		return (EAGAIN);

	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		return (0);
	}

	f = &sc->tids.ftid_tab[t->idx];
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			t->idx = i;
			t->l2tidx = f->l2t ? f->l2t->idx : 0;
			t->smtidx = f->smtidx;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			return (0);
		}
	}

	t->idx = 0xffffffff;
	return (0);
}

static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0)
		return (ENOTSUP);

	if (!(sc->flags & FULL_INIT_DONE))
		return (EAGAIN);

	if (t->idx >= nfilters)
		return (EINVAL);

	/* Validate against the global filter mode */
	if ((sc->filter_mode | fspec_to_fconf(&t->fs)) != sc->filter_mode)
		return (E2BIG);

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports)
		return (EINVAL);

	if (t->fs.val.iport >= nports)
		return (EINVAL);

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq)
		return (EINVAL);

	/* IPv6 filter idx must be 4 aligned */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters))
		return (EINVAL);

	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL)
			return (ENOMEM);
	}

	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid)
			return (EBUSY);
		if (f->locked)
			return (EPERM);

		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	return set_filter_wr(sc, t->idx);
}

static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	if (IS_BUSY(sc))
		return (EAGAIN);

	nfilters = sc->tids.nftids;

	if (nfilters == 0)
		return (ENOTSUP);

	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters)
		return (EINVAL);

	if (!(sc->flags & FULL_INIT_DONE))
		return (EAGAIN);

	f = &sc->tids.ftid_tab[t->idx];

	if (f->pending)
		return (EBUSY);
	if (f->locked)
		return (EPERM);

	if (f->valid) {
		t->fs = f->fs;	/* extra info for the caller */
		return del_filter_wr(sc, t->idx);
	}

	return (0);
}

static void
clear_filter(struct filter_entry *f)
{
	if (f->l2t)
		t4_l2t_release(f->l2t);

	bzero(f, sizeof (*f));
}
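/*
 * Filters are normally programmed from userland (e.g. by cxgbetool) through
 * the ioctls handled below.  A minimal sketch, with made-up values, of what
 * such a caller might do to drop all traffic to TCP port 80 with filter 0
 * (fd is a descriptor for the adapter's character device):
 *
 *	struct t4_filter t;
 *
 *	memset(&t, 0, sizeof(t));
 *	t.idx = 0;
 *	t.fs.val.dport = 80;
 *	t.fs.mask.dport = 0xffff;
 *	t.fs.action = FILTER_DROP;
 *	ioctl(fd, CHELSIO_T4_SET_FILTER, &t);
 */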
static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);

	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	f->pending = 1;
	sc->tids.ftids_in_use++;

	t4_wrq_tx(sc, wr);
	return (0);
}
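/*
 * Filter deletion, like creation, is asynchronous: the work request is sent
 * to the firmware here and the entry stays "pending" until t4_filter_rpl
 * sees the firmware's reply.
 */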
static int
del_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);
	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);

	f->pending = 1;
	t4_wrq_tx(sc, wr);
	return (0);
}

int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	if (idx >= sc->tids.ftid_base &&
	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
		unsigned int rc = G_COOKIE(rpl->cookie);
		struct filter_entry *f = &sc->tids.ftid_tab[idx];

		ADAPTER_LOCK(sc);
		if (rc == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;	/* asynchronous setup completed */
			f->valid = 1;
		} else {
			if (rc != FW_FILTER_WR_FLT_DELETED) {
				/* Add or delete failed, display an error */
				log(LOG_ERR,
				    "filter %u setup failed with error %u\n",
				    idx, rc);
			}

			clear_filter(f);
			sc->tids.ftids_in_use--;
		}
		ADAPTER_UNLOCK(sc);
	}

	return (0);
}

static int
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
{
	int rc = EINVAL;

	if (cntxt->cid > M_CTXTQID)
		return (rc);

	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
		return (rc);

	if (sc->flags & FW_OK) {
		ADAPTER_LOCK(sc);	/* Avoid parallel t4_wr_mbox */
		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
		    &cntxt->data[0]);
		ADAPTER_UNLOCK(sc);
	}

	if (rc != 0) {
		/* Read via firmware failed or wasn't even attempted */
		rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id,
		    &cntxt->data[0]);
	}

	return (rc);
}
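/*
 * Backs the CHELSIO_T4_GET_MEM ioctl: copies out an arbitrary range of
 * adapter memory by sliding PCIe memory window 2 across the requested region
 * one aperture at a time.
 */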
static int
read_card_mem(struct adapter *sc, struct t4_mem_range *mr)
{
	uint32_t base, size, lo, hi, win, off, remaining, i, n;
	uint32_t *buf, *b;
	int rc;

	/* reads are in multiples of 32 bits */
	if (mr->addr & 3 || mr->len & 3 || mr->len == 0)
		return (EINVAL);

	/*
	 * We don't want to deal with potential holes so we mandate that the
	 * requested region must lie entirely within one of the 3 memories.
	 */
	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (lo & F_EDRAM0_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		base = G_EDRAM0_BASE(hi) << 20;
		size = G_EDRAM0_SIZE(hi) << 20;
		if (size > 0 &&
		    mr->addr >= base && mr->addr < base + size &&
		    mr->addr + mr->len <= base + size)
			goto proceed;
	}
	if (lo & F_EDRAM1_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		base = G_EDRAM1_BASE(hi) << 20;
		size = G_EDRAM1_SIZE(hi) << 20;
		if (size > 0 &&
		    mr->addr >= base && mr->addr < base + size &&
		    mr->addr + mr->len <= base + size)
			goto proceed;
	}
	if (lo & F_EXT_MEM_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		base = G_EXT_MEM_BASE(hi) << 20;
		size = G_EXT_MEM_SIZE(hi) << 20;
		if (size > 0 &&
		    mr->addr >= base && mr->addr < base + size &&
		    mr->addr + mr->len <= base + size)
			goto proceed;
	}
	return (ENXIO);

proceed:
	buf = b = malloc(mr->len, M_CXGBE, M_WAITOK);

	/*
	 * Position the PCIe window (we use memwin2) to the 16B aligned area
	 * just at/before the requested region.
	 */
	win = mr->addr & ~0xf;
	off = mr->addr - win;	/* offset of the requested region in the win */
	remaining = mr->len;

	while (remaining) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
		t4_read_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));

		/* number of bytes that we'll copy in the inner loop */
		n = min(remaining, MEMWIN2_APERTURE - off);

		for (i = 0; i < n; i += 4, remaining -= 4)
			*b++ = t4_read_reg(sc, MEMWIN2_BASE + off + i);

		win += MEMWIN2_APERTURE;
		off = 0;
	}

	rc = copyout(buf, mr->data, mr->len);
	free(buf, M_CXGBE);

	return (rc);
}
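/*
 * The t4_os_* functions below are the FreeBSD-specific services that the
 * OS-independent code in common/ expects its host driver to provide.
 */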
int
t4_os_find_pci_capability(struct adapter *sc, int cap)
{
	int i;

	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
}

int
t4_os_pci_save_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_save(dev, dinfo, 0);
	return (0);
}

int
t4_os_pci_restore_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_restore(dev, dinfo);
	return (0);
}

void
t4_os_portmod_changed(const struct adapter *sc, int idx)
{
	struct port_info *pi = sc->port[idx];
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		if_printf(pi->ifp, "transceiver unplugged.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		if_printf(pi->ifp, "unknown transceiver inserted.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
	else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str)) {
		if_printf(pi->ifp, "%s transceiver inserted.\n",
		    mod_str[pi->mod_type]);
	} else {
		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
		    pi->mod_type);
	}
}

void
t4_os_link_changed(struct adapter *sc, int idx, int link_stat)
{
	struct port_info *pi = sc->port[idx];
	struct ifnet *ifp = pi->ifp;

	if (link_stat) {
		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
		if_link_state_change(ifp, LINK_STATE_UP);
	} else
		if_link_state_change(ifp, LINK_STATE_DOWN);
}
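/*
 * Example (hypothetical) use of t4_iterate: count all T4 adapters in the
 * system with a trivial callback.
 *
 *	static void
 *	count_adapters(struct adapter *sc, void *arg)
 *	{
 *
 *		(*(int *)arg)++;
 *	}
 *	...
 *	int n = 0;
 *	t4_iterate(count_adapters, &n);
 */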
void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	mtx_lock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	mtx_unlock(&t4_list_lock);
}

static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{
	return (0);
}

static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{
	return (0);
}

static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = T4_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		ADAPTER_LOCK(sc);
		rc = get_filter(sc, (struct t4_filter *)data);
		ADAPTER_UNLOCK(sc);
		break;
	case CHELSIO_T4_SET_FILTER:
		ADAPTER_LOCK(sc);
		rc = set_filter(sc, (struct t4_filter *)data);
		ADAPTER_UNLOCK(sc);
		break;
	case CHELSIO_T4_DEL_FILTER:
		ADAPTER_LOCK(sc);
		rc = del_filter(sc, (struct t4_filter *)data);
		ADAPTER_UNLOCK(sc);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW: {
		struct t4_data *fw = (struct t4_data *)data;
		uint8_t *fw_data;

		if (sc->flags & FULL_INIT_DONE)
			return (EBUSY);

		fw_data = malloc(fw->len, M_CXGBE, M_NOWAIT);
		if (fw_data == NULL)
			return (ENOMEM);

		rc = copyin(fw->data, fw_data, fw->len);
		if (rc == 0)
			rc = -t4_load_fw(sc, fw_data, fw->len);

		free(fw_data, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, (struct t4_mem_range *)data);
		break;
	default:
		rc = EINVAL;
	}

	return (rc);
}

#ifdef TCP_OFFLOAD
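/*
 * Enable/disable TOE on a port.  Enabling it for the first time activates the
 * TOM upper layer driver (t4_tom must already be loaded) and then marks the
 * port in the adapter's offload map.
 */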
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		if (!(sc->flags & FULL_INIT_DONE)) {
			log(LOG_WARNING,
			    "You must enable a cxgbe interface first\n");
			return (EAGAIN);
		}

		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);
	} else {
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}

/*
 * Add an upper layer driver to the global list.
 */
int
t4_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	mtx_lock(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}

	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
	ui->refcount = 0;
done:
	mtx_unlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	mtx_lock(&t4_uld_list_lock);

	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	mtx_unlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_activate_uld(struct adapter *sc, int id)
{
	int rc = EAGAIN;
	struct uld_info *ui;

	mtx_lock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->activate(sc);
			if (rc == 0)
				ui->refcount++;
			goto done;
		}
	}
done:
	mtx_unlock(&t4_uld_list_lock);

	return (rc);
}

int
t4_deactivate_uld(struct adapter *sc, int id)
{
	int rc = EINVAL;
	struct uld_info *ui;

	mtx_lock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0)
				ui->refcount--;
			goto done;
		}
	}
done:
	mtx_unlock(&t4_uld_list_lock);

	return (rc);
}
#endif
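/*
 * Example loader.conf(5) entries that would override the defaults computed
 * below; the names come from the TUNABLE_* declarations at the top of this
 * file (the values here are arbitrary):
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.holdoff_timer_idx_10G="2"
 *	hw.cxgbe.config_file="myconfig"
 */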
/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	if (t4_ntxq10g < 1)
		t4_ntxq10g = min(nc, NTXQ_10G);

	if (t4_ntxq1g < 1)
		t4_ntxq1g = min(nc, NTXQ_1G);

	if (t4_nrxq10g < 1)
		t4_nrxq10g = min(nc, NRXQ_10G);

	if (t4_nrxq1g < 1)
		t4_nrxq1g = min(nc, NRXQ_1G);

#ifdef TCP_OFFLOAD
	if (t4_nofldtxq10g < 1)
		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

	if (t4_nofldtxq1g < 1)
		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

	if (t4_nofldrxq10g < 1)
		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

	if (t4_nofldrxq1g < 1)
		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;
#endif

	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}

static int
t4_mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;

	switch (cmd) {
	case MOD_LOAD:
		t4_sge_modload();
		mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
		SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
		mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
		SLIST_INIT(&t4_uld_list);
#endif
		tweak_tunables();
		break;

	case MOD_UNLOAD:
#ifdef TCP_OFFLOAD
		mtx_lock(&t4_uld_list_lock);
		if (!SLIST_EMPTY(&t4_uld_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_uld_list_lock);
			break;
		}
		mtx_unlock(&t4_uld_list_lock);
		mtx_destroy(&t4_uld_list_lock);
#endif
		mtx_lock(&t4_list_lock);
		if (!SLIST_EMPTY(&t4_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_list_lock);
			break;
		}
		mtx_unlock(&t4_list_lock);
		mtx_destroy(&t4_list_lock);
		break;
	}

	return (rc);
}

static devclass_t t4_devclass;
static devclass_t cxgbe_devclass;

DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0);
MODULE_VERSION(t4nex, 1);

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);