1 /************************************************************************** 2 SPDX-License-Identifier: BSD-2-Clause 3 4 Copyright (c) 2007-2009, Chelsio Inc. 5 All rights reserved. 6 7 Redistribution and use in source and binary forms, with or without 8 modification, are permitted provided that the following conditions are met: 9 10 1. Redistributions of source code must retain the above copyright notice, 11 this list of conditions and the following disclaimer. 12 13 2. Neither the name of the Chelsio Corporation nor the names of its 14 contributors may be used to endorse or promote products derived from 15 this software without specific prior written permission. 16 17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 21 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 POSSIBILITY OF SUCH DAMAGE. 28 29 ***************************************************************************/ 30 31 #include <sys/cdefs.h> 32 #include "opt_inet.h" 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/bus.h> 38 #include <sys/module.h> 39 #include <sys/pciio.h> 40 #include <sys/conf.h> 41 #include <machine/bus.h> 42 #include <machine/resource.h> 43 #include <sys/ktr.h> 44 #include <sys/rman.h> 45 #include <sys/ioccom.h> 46 #include <sys/mbuf.h> 47 #include <sys/linker.h> 48 #include <sys/firmware.h> 49 #include <sys/socket.h> 50 #include <sys/sockio.h> 51 #include <sys/smp.h> 52 #include <sys/sysctl.h> 53 #include <sys/syslog.h> 54 #include <sys/queue.h> 55 #include <sys/taskqueue.h> 56 #include <sys/proc.h> 57 58 #include <net/bpf.h> 59 #include <net/debugnet.h> 60 #include <net/ethernet.h> 61 #include <net/if.h> 62 #include <net/if_var.h> 63 #include <net/if_arp.h> 64 #include <net/if_dl.h> 65 #include <net/if_media.h> 66 #include <net/if_types.h> 67 #include <net/if_vlan_var.h> 68 69 #include <netinet/in_systm.h> 70 #include <netinet/in.h> 71 #include <netinet/if_ether.h> 72 #include <netinet/ip.h> 73 #include <netinet/ip.h> 74 #include <netinet/tcp.h> 75 #include <netinet/udp.h> 76 77 #include <dev/pci/pcireg.h> 78 #include <dev/pci/pcivar.h> 79 #include <dev/pci/pci_private.h> 80 81 #include <cxgb_include.h> 82 83 #ifdef PRIV_SUPPORTED 84 #include <sys/priv.h> 85 #endif 86 87 static int cxgb_setup_interrupts(adapter_t *); 88 static void cxgb_teardown_interrupts(adapter_t *); 89 static void cxgb_init(void *); 90 static int cxgb_init_locked(struct port_info *); 91 static int cxgb_uninit_locked(struct port_info *); 92 static int cxgb_uninit_synchronized(struct port_info *); 93 static int cxgb_ioctl(if_t, unsigned long, caddr_t); 94 static int cxgb_media_change(if_t); 95 static int cxgb_ifm_type(int); 96 static void cxgb_build_medialist(struct port_info *); 97 static void cxgb_media_status(if_t, struct ifmediareq *); 98 static uint64_t cxgb_get_counter(if_t, ift_counter); 99 static int setup_sge_qsets(adapter_t *); 100 
static void cxgb_async_intr(void *); 101 static void cxgb_tick_handler(void *, int); 102 static void cxgb_tick(void *); 103 static void link_check_callout(void *); 104 static void check_link_status(void *, int); 105 static void setup_rss(adapter_t *sc); 106 static int alloc_filters(struct adapter *); 107 static int setup_hw_filters(struct adapter *); 108 static int set_filter(struct adapter *, int, const struct filter_info *); 109 static inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int, 110 unsigned int, u64, u64); 111 static inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int, 112 unsigned int, u64, u64); 113 #ifdef TCP_OFFLOAD 114 static int cpl_not_handled(struct sge_qset *, struct rsp_desc *, struct mbuf *); 115 #endif 116 117 /* Attachment glue for the PCI controller end of the device. Each port of 118 * the device is attached separately, as defined later. 119 */ 120 static int cxgb_controller_probe(device_t); 121 static int cxgb_controller_attach(device_t); 122 static int cxgb_controller_detach(device_t); 123 static void cxgb_free(struct adapter *); 124 static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start, 125 unsigned int end); 126 static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf); 127 static int cxgb_get_regs_len(void); 128 static void touch_bars(device_t dev); 129 static void cxgb_update_mac_settings(struct port_info *p); 130 #ifdef TCP_OFFLOAD 131 static int toe_capability(struct port_info *, int); 132 #endif 133 134 /* Table for probing the cards. The desc field isn't actually used */ 135 struct cxgb_ident { 136 uint16_t vendor; 137 uint16_t device; 138 int index; 139 char *desc; 140 } cxgb_identifiers[] = { 141 {PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"}, 142 {PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"}, 143 {PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"}, 144 {PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"}, 145 {PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"}, 146 {PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"}, 147 {PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"}, 148 {PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"}, 149 {PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"}, 150 {PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"}, 151 {PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"}, 152 {PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"}, 153 {PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"}, 154 {PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"}, 155 {0, 0, 0, NULL} 156 }; 157 158 static device_method_t cxgb_controller_methods[] = { 159 DEVMETHOD(device_probe, cxgb_controller_probe), 160 DEVMETHOD(device_attach, cxgb_controller_attach), 161 DEVMETHOD(device_detach, cxgb_controller_detach), 162 163 DEVMETHOD_END 164 }; 165 166 static driver_t cxgb_controller_driver = { 167 "cxgbc", 168 cxgb_controller_methods, 169 sizeof(struct adapter) 170 }; 171 172 static int cxgbc_mod_event(module_t, int, void *); 173 174 DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgbc_mod_event, NULL); 175 MODULE_PNP_INFO("U16:vendor;U16:device", pci, cxgbc, cxgb_identifiers, 176 nitems(cxgb_identifiers) - 1); 177 MODULE_VERSION(cxgbc, 1); 178 MODULE_DEPEND(cxgbc, firmware, 1, 1, 1); 179 180 /* 181 * Attachment glue for the ports. Attachment is done directly to the 182 * controller device. 
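 * Each port attaches as a "cxgb" child of the "cxgbc" controller and also
 * exposes a character device (cxgb_cdevsw) for the driver's extension ioctls.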
183 */ 184 static int cxgb_port_probe(device_t); 185 static int cxgb_port_attach(device_t); 186 static int cxgb_port_detach(device_t); 187 188 static device_method_t cxgb_port_methods[] = { 189 DEVMETHOD(device_probe, cxgb_port_probe), 190 DEVMETHOD(device_attach, cxgb_port_attach), 191 DEVMETHOD(device_detach, cxgb_port_detach), 192 { 0, 0 } 193 }; 194 195 static driver_t cxgb_port_driver = { 196 "cxgb", 197 cxgb_port_methods, 198 0 199 }; 200 201 static d_ioctl_t cxgb_extension_ioctl; 202 static d_open_t cxgb_extension_open; 203 static d_close_t cxgb_extension_close; 204 205 static struct cdevsw cxgb_cdevsw = { 206 .d_version = D_VERSION, 207 .d_flags = 0, 208 .d_open = cxgb_extension_open, 209 .d_close = cxgb_extension_close, 210 .d_ioctl = cxgb_extension_ioctl, 211 .d_name = "cxgb", 212 }; 213 214 DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, 0, 0); 215 MODULE_VERSION(cxgb, 1); 216 217 DEBUGNET_DEFINE(cxgb); 218 219 static struct mtx t3_list_lock; 220 static SLIST_HEAD(, adapter) t3_list; 221 #ifdef TCP_OFFLOAD 222 static struct mtx t3_uld_list_lock; 223 static SLIST_HEAD(, uld_info) t3_uld_list; 224 #endif 225 226 /* 227 * The driver uses the best interrupt scheme available on a platform in the 228 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which 229 * of these schemes the driver may consider as follows: 230 * 231 * msi = 2: choose from among all three options 232 * msi = 1 : only consider MSI and pin interrupts 233 * msi = 0: force pin interrupts 234 */ 235 static int msi_allowed = 2; 236 237 SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 238 "CXGB driver parameters"); 239 SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0, 240 "MSI-X, MSI, INTx selector"); 241 242 /* 243 * The driver uses an auto-queue algorithm by default. 
244 * To disable it and force a single queue-set per port, use multiq = 0 245 */ 246 static int multiq = 1; 247 SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0, 248 "use min(ncpus/ports, 8) queue-sets per port"); 249 250 /* 251 * By default the driver will not update the firmware unless 252 * it was compiled against a newer version 253 * 254 */ 255 static int force_fw_update = 0; 256 SYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0, 257 "update firmware even if up to date"); 258 259 int cxgb_use_16k_clusters = -1; 260 SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN, 261 &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue "); 262 263 static int nfilters = -1; 264 SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN, 265 &nfilters, 0, "max number of entries in the filter table"); 266 267 enum { 268 MAX_TXQ_ENTRIES = 16384, 269 MAX_CTRL_TXQ_ENTRIES = 1024, 270 MAX_RSPQ_ENTRIES = 16384, 271 MAX_RX_BUFFERS = 16384, 272 MAX_RX_JUMBO_BUFFERS = 16384, 273 MIN_TXQ_ENTRIES = 4, 274 MIN_CTRL_TXQ_ENTRIES = 4, 275 MIN_RSPQ_ENTRIES = 32, 276 MIN_FL_ENTRIES = 32, 277 MIN_FL_JUMBO_ENTRIES = 32 278 }; 279 280 struct filter_info { 281 u32 sip; 282 u32 sip_mask; 283 u32 dip; 284 u16 sport; 285 u16 dport; 286 u32 vlan:12; 287 u32 vlan_prio:3; 288 u32 mac_hit:1; 289 u32 mac_idx:4; 290 u32 mac_vld:1; 291 u32 pkt_type:2; 292 u32 report_filter_id:1; 293 u32 pass:1; 294 u32 rss:1; 295 u32 qset:3; 296 u32 locked:1; 297 u32 valid:1; 298 }; 299 300 enum { FILTER_NO_VLAN_PRI = 7 }; 301 302 #define EEPROM_MAGIC 0x38E2F10C 303 304 #define PORT_MASK ((1 << MAX_NPORTS) - 1) 305 306 307 static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset); 308 309 310 static __inline char 311 t3rev2char(struct adapter *adapter) 312 { 313 char rev = 'z'; 314 315 switch(adapter->params.rev) { 316 case T3_REV_A: 317 rev = 'a'; 318 break; 319 case T3_REV_B: 320 case T3_REV_B2: 321 rev = 'b'; 322 break; 323 case T3_REV_C: 324 rev = 'c'; 325 break; 326 } 327 return rev; 328 } 329 330 static struct cxgb_ident * 331 cxgb_get_ident(device_t dev) 332 { 333 struct cxgb_ident *id; 334 335 for (id = cxgb_identifiers; id->desc != NULL; id++) { 336 if ((id->vendor == pci_get_vendor(dev)) && 337 (id->device == pci_get_device(dev))) { 338 return (id); 339 } 340 } 341 return (NULL); 342 } 343 344 static const struct adapter_info * 345 cxgb_get_adapter_info(device_t dev) 346 { 347 struct cxgb_ident *id; 348 const struct adapter_info *ai; 349 350 id = cxgb_get_ident(dev); 351 if (id == NULL) 352 return (NULL); 353 354 ai = t3_get_adapter_info(id->index); 355 356 return (ai); 357 } 358 359 static int 360 cxgb_controller_probe(device_t dev) 361 { 362 const struct adapter_info *ai; 363 const char *ports; 364 int nports; 365 366 ai = cxgb_get_adapter_info(dev); 367 if (ai == NULL) 368 return (ENXIO); 369 370 nports = ai->nports0 + ai->nports1; 371 if (nports == 1) 372 ports = "port"; 373 else 374 ports = "ports"; 375 376 device_set_descf(dev, "%s, %d %s", ai->desc, nports, ports); 377 return (BUS_PROBE_DEFAULT); 378 } 379 380 #define FW_FNAME "cxgb_t3fw" 381 #define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom" 382 #define TPSRAM_NAME "cxgb_t3%c_protocol_sram" 383 384 static int 385 upgrade_fw(adapter_t *sc) 386 { 387 const struct firmware *fw; 388 int status; 389 u32 vers; 390 391 if ((fw = firmware_get(FW_FNAME)) == NULL) { 392 device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME); 393 return (ENOENT); 394 } else 395 device_printf(sc->dev, 
"installing firmware on card\n"); 396 status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize); 397 398 if (status != 0) { 399 device_printf(sc->dev, "failed to install firmware: %d\n", 400 status); 401 } else { 402 t3_get_fw_version(sc, &vers); 403 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d", 404 G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers), 405 G_FW_VERSION_MICRO(vers)); 406 } 407 408 firmware_put(fw, FIRMWARE_UNLOAD); 409 410 return (status); 411 } 412 413 /* 414 * The cxgb_controller_attach function is responsible for the initial 415 * bringup of the device. Its responsibilities include: 416 * 417 * 1. Determine if the device supports MSI or MSI-X. 418 * 2. Allocate bus resources so that we can access the Base Address Register 419 * 3. Create and initialize mutexes for the controller and its control 420 * logic such as SGE and MDIO. 421 * 4. Call hardware specific setup routine for the adapter as a whole. 422 * 5. Allocate the BAR for doing MSI-X. 423 * 6. Setup the line interrupt iff MSI-X is not supported. 424 * 7. Create the driver's taskq. 425 * 8. Start one task queue service thread. 426 * 9. Check if the firmware and SRAM are up-to-date. They will be 427 * auto-updated later (before FULL_INIT_DONE), if required. 428 * 10. Create a child device for each MAC (port) 429 * 11. Initialize T3 private state. 430 * 12. Trigger the LED 431 * 13. Setup offload iff supported. 432 * 14. Reset/restart the tick callout. 433 * 15. Attach sysctls 434 * 435 * NOTE: Any modification or deviation from this list MUST be reflected in 436 * the above comment. Failure to do so will result in problems on various 437 * error conditions including link flapping. 438 */ 439 static int 440 cxgb_controller_attach(device_t dev) 441 { 442 device_t child; 443 const struct adapter_info *ai; 444 struct adapter *sc; 445 int i, error = 0; 446 uint32_t vers; 447 int port_qsets = 1; 448 int msi_needed, reg; 449 450 sc = device_get_softc(dev); 451 sc->dev = dev; 452 sc->msi_count = 0; 453 ai = cxgb_get_adapter_info(dev); 454 455 snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d", 456 device_get_unit(dev)); 457 ADAPTER_LOCK_INIT(sc, sc->lockbuf); 458 459 snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d", 460 device_get_unit(dev)); 461 snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d", 462 device_get_unit(dev)); 463 snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d", 464 device_get_unit(dev)); 465 466 MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN); 467 MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF); 468 MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF); 469 470 mtx_lock(&t3_list_lock); 471 SLIST_INSERT_HEAD(&t3_list, sc, link); 472 mtx_unlock(&t3_list_lock); 473 474 /* find the PCIe link width and set max read request to 4KB*/ 475 if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { 476 uint16_t lnk; 477 478 lnk = pci_read_config(dev, reg + PCIER_LINK_STA, 2); 479 sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4; 480 if (sc->link_width < 8 && 481 (ai->caps & SUPPORTED_10000baseT_Full)) { 482 device_printf(sc->dev, 483 "PCIe x%d Link, expect reduced performance\n", 484 sc->link_width); 485 } 486 487 pci_set_max_read_req(dev, 4096); 488 } 489 490 touch_bars(dev); 491 pci_enable_busmaster(dev); 492 /* 493 * Allocate the registers and make them available to the driver. 
494 * The registers that we care about for NIC mode are in BAR 0 495 */ 496 sc->regs_rid = PCIR_BAR(0); 497 if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 498 &sc->regs_rid, RF_ACTIVE)) == NULL) { 499 device_printf(dev, "Cannot allocate BAR region 0\n"); 500 error = ENXIO; 501 goto out; 502 } 503 504 sc->bt = rman_get_bustag(sc->regs_res); 505 sc->bh = rman_get_bushandle(sc->regs_res); 506 sc->mmio_len = rman_get_size(sc->regs_res); 507 508 for (i = 0; i < MAX_NPORTS; i++) 509 sc->port[i].adapter = sc; 510 511 if (t3_prep_adapter(sc, ai, 1) < 0) { 512 printf("prep adapter failed\n"); 513 error = ENODEV; 514 goto out; 515 } 516 517 sc->udbs_rid = PCIR_BAR(2); 518 sc->udbs_res = NULL; 519 if (is_offload(sc) && 520 ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 521 &sc->udbs_rid, RF_ACTIVE)) == NULL)) { 522 device_printf(dev, "Cannot allocate BAR region 1\n"); 523 error = ENXIO; 524 goto out; 525 } 526 527 /* Allocate the BAR for doing MSI-X. If it succeeds, try to allocate 528 * enough messages for the queue sets. If that fails, try falling 529 * back to MSI. If that fails, then try falling back to the legacy 530 * interrupt pin model. 531 */ 532 sc->msix_regs_rid = 0x20; 533 if ((msi_allowed >= 2) && 534 (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 535 &sc->msix_regs_rid, RF_ACTIVE)) != NULL) { 536 537 if (multiq) 538 port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus); 539 msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1; 540 541 if (pci_msix_count(dev) == 0 || 542 (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 || 543 sc->msi_count != msi_needed) { 544 device_printf(dev, "alloc msix failed - " 545 "msi_count=%d, msi_needed=%d, err=%d; " 546 "will try MSI\n", sc->msi_count, 547 msi_needed, error); 548 sc->msi_count = 0; 549 port_qsets = 1; 550 pci_release_msi(dev); 551 bus_release_resource(dev, SYS_RES_MEMORY, 552 sc->msix_regs_rid, sc->msix_regs_res); 553 sc->msix_regs_res = NULL; 554 } else { 555 sc->flags |= USING_MSIX; 556 sc->cxgb_intr = cxgb_async_intr; 557 device_printf(dev, 558 "using MSI-X interrupts (%u vectors)\n", 559 sc->msi_count); 560 } 561 } 562 563 if ((msi_allowed >= 1) && (sc->msi_count == 0)) { 564 sc->msi_count = 1; 565 if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) { 566 device_printf(dev, "alloc msi failed - " 567 "err=%d; will try INTx\n", error); 568 sc->msi_count = 0; 569 port_qsets = 1; 570 pci_release_msi(dev); 571 } else { 572 sc->flags |= USING_MSI; 573 sc->cxgb_intr = t3_intr_msi; 574 device_printf(dev, "using MSI interrupts\n"); 575 } 576 } 577 if (sc->msi_count == 0) { 578 device_printf(dev, "using line interrupts\n"); 579 sc->cxgb_intr = t3b_intr; 580 } 581 582 /* Create a private taskqueue thread for handling driver events */ 583 sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT, 584 taskqueue_thread_enqueue, &sc->tq); 585 if (sc->tq == NULL) { 586 device_printf(dev, "failed to allocate controller task queue\n"); 587 goto out; 588 } 589 590 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq", 591 device_get_nameunit(dev)); 592 TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc); 593 594 595 /* Create a periodic callout for checking adapter status */ 596 callout_init(&sc->cxgb_tick_ch, 1); 597 598 if (t3_check_fw_version(sc) < 0 || force_fw_update) { 599 /* 600 * Warn user that a firmware update will be attempted in init. 
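 * The update itself is performed later, from cxgb_up(), before
 * FULL_INIT_DONE is set.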
601 */ 602 device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n", 603 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO); 604 sc->flags &= ~FW_UPTODATE; 605 } else { 606 sc->flags |= FW_UPTODATE; 607 } 608 609 if (t3_check_tpsram_version(sc) < 0) { 610 /* 611 * Warn user that a firmware update will be attempted in init. 612 */ 613 device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n", 614 t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); 615 sc->flags &= ~TPS_UPTODATE; 616 } else { 617 sc->flags |= TPS_UPTODATE; 618 } 619 620 /* 621 * Create a child device for each MAC. The ethernet attachment 622 * will be done in these children. 623 */ 624 for (i = 0; i < (sc)->params.nports; i++) { 625 struct port_info *pi; 626 627 if ((child = device_add_child(dev, "cxgb", -1)) == NULL) { 628 device_printf(dev, "failed to add child port\n"); 629 error = EINVAL; 630 goto out; 631 } 632 pi = &sc->port[i]; 633 pi->adapter = sc; 634 pi->nqsets = port_qsets; 635 pi->first_qset = i*port_qsets; 636 pi->port_id = i; 637 pi->tx_chan = i >= ai->nports0; 638 pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i; 639 sc->rxpkt_map[pi->txpkt_intf] = i; 640 sc->port[i].tx_chan = i >= ai->nports0; 641 sc->portdev[i] = child; 642 device_set_softc(child, pi); 643 } 644 if ((error = bus_generic_attach(dev)) != 0) 645 goto out; 646 647 /* initialize sge private state */ 648 t3_sge_init_adapter(sc); 649 650 t3_led_ready(sc); 651 652 error = t3_get_fw_version(sc, &vers); 653 if (error) 654 goto out; 655 656 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d", 657 G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers), 658 G_FW_VERSION_MICRO(vers)); 659 660 device_set_descf(dev, "%s %sNIC\t E/C: %s S/N: %s", 661 ai->desc, is_offload(sc) ? "R" : "", 662 sc->params.vpd.ec, sc->params.vpd.sn); 663 664 snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x", 665 sc->params.vpd.port_type[0], sc->params.vpd.port_type[1], 666 sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]); 667 668 device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]); 669 callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc); 670 t3_add_attach_sysctls(sc); 671 672 #ifdef TCP_OFFLOAD 673 for (i = 0; i < NUM_CPL_HANDLERS; i++) 674 sc->cpl_handler[i] = cpl_not_handled; 675 #endif 676 677 t3_intr_clear(sc); 678 error = cxgb_setup_interrupts(sc); 679 out: 680 if (error) 681 cxgb_free(sc); 682 683 return (error); 684 } 685 686 /* 687 * The cxgb_controller_detach routine is called with the device is 688 * unloaded from the system. 689 */ 690 691 static int 692 cxgb_controller_detach(device_t dev) 693 { 694 struct adapter *sc; 695 696 sc = device_get_softc(dev); 697 698 cxgb_free(sc); 699 700 return (0); 701 } 702 703 /* 704 * The cxgb_free() is called by the cxgb_controller_detach() routine 705 * to tear down the structures that were built up in 706 * cxgb_controller_attach(), and should be the final piece of work 707 * done when fully unloading the driver. 708 * 709 * 710 * 1. Shutting down the threads started by the cxgb_controller_attach() 711 * routine. 712 * 2. Stopping the lower level device and all callouts (cxgb_down_locked()). 713 * 3. Detaching all of the port devices created during the 714 * cxgb_controller_attach() routine. 715 * 4. Removing the device children created via cxgb_controller_attach(). 716 * 5. Releasing PCI resources associated with the device. 717 * 6. Turning off the offload support, iff it was turned on. 718 * 7. 
Destroying the mutexes created in cxgb_controller_attach(). 719 * 720 */ 721 static void 722 cxgb_free(struct adapter *sc) 723 { 724 int i, nqsets = 0; 725 726 ADAPTER_LOCK(sc); 727 sc->flags |= CXGB_SHUTDOWN; 728 ADAPTER_UNLOCK(sc); 729 730 /* 731 * Make sure all child devices are gone. 732 */ 733 bus_generic_detach(sc->dev); 734 for (i = 0; i < (sc)->params.nports; i++) { 735 if (sc->portdev[i] && 736 device_delete_child(sc->dev, sc->portdev[i]) != 0) 737 device_printf(sc->dev, "failed to delete child port\n"); 738 nqsets += sc->port[i].nqsets; 739 } 740 741 /* 742 * At this point, it is as if cxgb_port_detach has run on all ports, and 743 * cxgb_down has run on the adapter. All interrupts have been silenced, 744 * all open devices have been closed. 745 */ 746 KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)", 747 __func__, sc->open_device_map)); 748 for (i = 0; i < sc->params.nports; i++) { 749 KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!", 750 __func__, i)); 751 } 752 753 /* 754 * Finish off the adapter's callouts. 755 */ 756 callout_drain(&sc->cxgb_tick_ch); 757 callout_drain(&sc->sge_timer_ch); 758 759 /* 760 * Release resources grabbed under FULL_INIT_DONE by cxgb_up. The 761 * sysctls are cleaned up by the kernel linker. 762 */ 763 if (sc->flags & FULL_INIT_DONE) { 764 t3_free_sge_resources(sc, nqsets); 765 sc->flags &= ~FULL_INIT_DONE; 766 } 767 768 /* 769 * Release all interrupt resources. 770 */ 771 cxgb_teardown_interrupts(sc); 772 if (sc->flags & (USING_MSI | USING_MSIX)) { 773 device_printf(sc->dev, "releasing msi message(s)\n"); 774 pci_release_msi(sc->dev); 775 } else { 776 device_printf(sc->dev, "no msi message to release\n"); 777 } 778 779 if (sc->msix_regs_res != NULL) { 780 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid, 781 sc->msix_regs_res); 782 } 783 784 /* 785 * Free the adapter's taskqueue. 786 */ 787 if (sc->tq != NULL) { 788 taskqueue_free(sc->tq); 789 sc->tq = NULL; 790 } 791 792 free(sc->filters, M_DEVBUF); 793 t3_sge_free(sc); 794 795 if (sc->udbs_res != NULL) 796 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid, 797 sc->udbs_res); 798 799 if (sc->regs_res != NULL) 800 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid, 801 sc->regs_res); 802 803 MTX_DESTROY(&sc->mdio_lock); 804 MTX_DESTROY(&sc->sge.reg_lock); 805 MTX_DESTROY(&sc->elmer_lock); 806 mtx_lock(&t3_list_lock); 807 SLIST_REMOVE(&t3_list, sc, adapter, link); 808 mtx_unlock(&t3_list_lock); 809 ADAPTER_LOCK_DEINIT(sc); 810 } 811 812 /** 813 * setup_sge_qsets - configure SGE Tx/Rx/response queues 814 * @sc: the controller softc 815 * 816 * Determines how many sets of SGE queues to use and initializes them. 817 * We support multiple queue sets per port if we have MSI-X, otherwise 818 * just one queue set per port. 819 */ 820 static int 821 setup_sge_qsets(adapter_t *sc) 822 { 823 int i, j, err, irq_idx = 0, qset_idx = 0; 824 u_int ntxq = SGE_TXQ_PER_SET; 825 826 if ((err = t3_sge_alloc(sc)) != 0) { 827 device_printf(sc->dev, "t3_sge_alloc returned %d\n", err); 828 return (err); 829 } 830 831 if (sc->params.rev > 0 && !(sc->flags & USING_MSI)) 832 irq_idx = -1; 833 834 for (i = 0; i < (sc)->params.nports; i++) { 835 struct port_info *pi = &sc->port[i]; 836 837 for (j = 0; j < pi->nqsets; j++, qset_idx++) { 838 err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports, 839 (sc->flags & USING_MSIX) ? 
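 /* MSI-X vector 0 is the adapter's async/error interrupt, so qset vectors start at 1 */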
qset_idx + 1 : irq_idx, 840 &sc->params.sge.qset[qset_idx], ntxq, pi); 841 if (err) { 842 t3_free_sge_resources(sc, qset_idx); 843 device_printf(sc->dev, 844 "t3_sge_alloc_qset failed with %d\n", err); 845 return (err); 846 } 847 } 848 } 849 850 sc->nqsets = qset_idx; 851 852 return (0); 853 } 854 855 static void 856 cxgb_teardown_interrupts(adapter_t *sc) 857 { 858 int i; 859 860 for (i = 0; i < SGE_QSETS; i++) { 861 if (sc->msix_intr_tag[i] == NULL) { 862 863 /* Should have been setup fully or not at all */ 864 KASSERT(sc->msix_irq_res[i] == NULL && 865 sc->msix_irq_rid[i] == 0, 866 ("%s: half-done interrupt (%d).", __func__, i)); 867 868 continue; 869 } 870 871 bus_teardown_intr(sc->dev, sc->msix_irq_res[i], 872 sc->msix_intr_tag[i]); 873 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i], 874 sc->msix_irq_res[i]); 875 876 sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL; 877 sc->msix_irq_rid[i] = 0; 878 } 879 880 if (sc->intr_tag) { 881 KASSERT(sc->irq_res != NULL, 882 ("%s: half-done interrupt.", __func__)); 883 884 bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag); 885 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid, 886 sc->irq_res); 887 888 sc->irq_res = sc->intr_tag = NULL; 889 sc->irq_rid = 0; 890 } 891 } 892 893 static int 894 cxgb_setup_interrupts(adapter_t *sc) 895 { 896 struct resource *res; 897 void *tag; 898 int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX); 899 900 sc->irq_rid = intr_flag ? 1 : 0; 901 sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid, 902 RF_SHAREABLE | RF_ACTIVE); 903 if (sc->irq_res == NULL) { 904 device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n", 905 intr_flag, sc->irq_rid); 906 err = EINVAL; 907 sc->irq_rid = 0; 908 } else { 909 err = bus_setup_intr(sc->dev, sc->irq_res, 910 INTR_MPSAFE | INTR_TYPE_NET, NULL, 911 sc->cxgb_intr, sc, &sc->intr_tag); 912 913 if (err) { 914 device_printf(sc->dev, 915 "Cannot set up interrupt (%x, %u, %d)\n", 916 intr_flag, sc->irq_rid, err); 917 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid, 918 sc->irq_res); 919 sc->irq_res = sc->intr_tag = NULL; 920 sc->irq_rid = 0; 921 } 922 } 923 924 /* That's all for INTx or MSI */ 925 if (!(intr_flag & USING_MSIX) || err) 926 return (err); 927 928 bus_describe_intr(sc->dev, sc->irq_res, sc->intr_tag, "err"); 929 for (i = 0; i < sc->msi_count - 1; i++) { 930 rid = i + 2; 931 res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid, 932 RF_SHAREABLE | RF_ACTIVE); 933 if (res == NULL) { 934 device_printf(sc->dev, "Cannot allocate interrupt " 935 "for message %d\n", rid); 936 err = EINVAL; 937 break; 938 } 939 940 err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET, 941 NULL, t3_intr_msix, &sc->sge.qs[i], &tag); 942 if (err) { 943 device_printf(sc->dev, "Cannot set up interrupt " 944 "for message %d (%d)\n", rid, err); 945 bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res); 946 break; 947 } 948 949 sc->msix_irq_rid[i] = rid; 950 sc->msix_irq_res[i] = res; 951 sc->msix_intr_tag[i] = tag; 952 bus_describe_intr(sc->dev, res, tag, "qs%d", i); 953 } 954 955 if (err) 956 cxgb_teardown_interrupts(sc); 957 958 return (err); 959 } 960 961 962 static int 963 cxgb_port_probe(device_t dev) 964 { 965 struct port_info *p; 966 const char *desc; 967 968 p = device_get_softc(dev); 969 desc = p->phy.desc; 970 device_set_descf(dev, "Port %d %s", p->port_id, desc); 971 return (0); 972 } 973 974 975 static int 976 cxgb_makedev(struct port_info *pi) 977 { 978 979 pi->port_cdev = make_dev(&cxgb_cdevsw, 
if_getdunit(pi->ifp), 980 UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp)); 981 982 if (pi->port_cdev == NULL) 983 return (ENOMEM); 984 985 pi->port_cdev->si_drv1 = (void *)pi; 986 987 return (0); 988 } 989 990 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \ 991 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \ 992 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6) 993 #define CXGB_CAP_ENABLE CXGB_CAP 994 995 static int 996 cxgb_port_attach(device_t dev) 997 { 998 struct port_info *p; 999 if_t ifp; 1000 int err; 1001 struct adapter *sc; 1002 1003 p = device_get_softc(dev); 1004 sc = p->adapter; 1005 snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d", 1006 device_get_unit(device_get_parent(dev)), p->port_id); 1007 PORT_LOCK_INIT(p, p->lockbuf); 1008 1009 callout_init(&p->link_check_ch, 1); 1010 TASK_INIT(&p->link_check_task, 0, check_link_status, p); 1011 1012 /* Allocate an ifnet object and set it up */ 1013 ifp = p->ifp = if_alloc(IFT_ETHER); 1014 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1015 if_setinitfn(ifp, cxgb_init); 1016 if_setsoftc(ifp, p); 1017 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 1018 if_setioctlfn(ifp, cxgb_ioctl); 1019 if_settransmitfn(ifp, cxgb_transmit); 1020 if_setqflushfn(ifp, cxgb_qflush); 1021 if_setgetcounterfn(ifp, cxgb_get_counter); 1022 1023 if_setcapabilities(ifp, CXGB_CAP); 1024 #ifdef TCP_OFFLOAD 1025 if (is_offload(sc)) 1026 if_setcapabilitiesbit(ifp, IFCAP_TOE4, 0); 1027 #endif 1028 if_setcapenable(ifp, CXGB_CAP_ENABLE); 1029 if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO | 1030 CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 1031 if_sethwtsomax(ifp, IP_MAXPACKET); 1032 if_sethwtsomaxsegcount(ifp, 36); 1033 if_sethwtsomaxsegsize(ifp, 65536); 1034 1035 /* 1036 * Disable TSO on 4-port - it isn't supported by the firmware. 1037 */ 1038 if (sc->params.nports > 2) { 1039 if_setcapabilitiesbit(ifp, 0, IFCAP_TSO | IFCAP_VLAN_HWTSO); 1040 if_setcapenablebit(ifp, 0, IFCAP_TSO | IFCAP_VLAN_HWTSO); 1041 if_sethwassistbits(ifp, 0, CSUM_TSO); 1042 } 1043 1044 /* Create a list of media supported by this port */ 1045 ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change, 1046 cxgb_media_status); 1047 cxgb_build_medialist(p); 1048 1049 ether_ifattach(ifp, p->hw_addr); 1050 1051 /* Attach driver debugnet methods. */ 1052 DEBUGNET_SET(ifp, cxgb); 1053 1054 #ifdef DEFAULT_JUMBO 1055 if (sc->params.nports <= 2) 1056 if_setmtu(ifp, ETHERMTU_JUMBO); 1057 #endif 1058 if ((err = cxgb_makedev(p)) != 0) { 1059 printf("makedev failed %d\n", err); 1060 return (err); 1061 } 1062 1063 t3_sge_init_port(p); 1064 1065 return (err); 1066 } 1067 1068 /* 1069 * cxgb_port_detach() is called via the device_detach methods when 1070 * cxgb_free() calls the bus_generic_detach. It is responsible for 1071 * removing the device from the view of the kernel, i.e. from all 1072 * interfaces lists etc. This routine is only called when the driver is 1073 * being unloaded, not when the link goes down. 
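 * The port is marked DOOMED first, and the routine waits for any in-progress
 * ioctl or init (IS_BUSY) to finish before tearing the port down.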
1074 */ 1075 static int 1076 cxgb_port_detach(device_t dev) 1077 { 1078 struct port_info *p; 1079 struct adapter *sc; 1080 int i; 1081 1082 p = device_get_softc(dev); 1083 sc = p->adapter; 1084 1085 /* Tell cxgb_ioctl and if_init that the port is going away */ 1086 ADAPTER_LOCK(sc); 1087 SET_DOOMED(p); 1088 wakeup(&sc->flags); 1089 while (IS_BUSY(sc)) 1090 mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0); 1091 SET_BUSY(sc); 1092 ADAPTER_UNLOCK(sc); 1093 1094 if (p->port_cdev != NULL) 1095 destroy_dev(p->port_cdev); 1096 1097 cxgb_uninit_synchronized(p); 1098 ether_ifdetach(p->ifp); 1099 1100 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { 1101 struct sge_qset *qs = &sc->sge.qs[i]; 1102 struct sge_txq *txq = &qs->txq[TXQ_ETH]; 1103 1104 callout_drain(&txq->txq_watchdog); 1105 callout_drain(&txq->txq_timer); 1106 } 1107 1108 PORT_LOCK_DEINIT(p); 1109 if_free(p->ifp); 1110 p->ifp = NULL; 1111 1112 ADAPTER_LOCK(sc); 1113 CLR_BUSY(sc); 1114 wakeup_one(&sc->flags); 1115 ADAPTER_UNLOCK(sc); 1116 return (0); 1117 } 1118 1119 void 1120 t3_fatal_err(struct adapter *sc) 1121 { 1122 u_int fw_status[4]; 1123 1124 if (sc->flags & FULL_INIT_DONE) { 1125 t3_sge_stop(sc); 1126 t3_write_reg(sc, A_XGM_TX_CTRL, 0); 1127 t3_write_reg(sc, A_XGM_RX_CTRL, 0); 1128 t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0); 1129 t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0); 1130 t3_intr_disable(sc); 1131 } 1132 device_printf(sc->dev,"encountered fatal error, operation suspended\n"); 1133 if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status)) 1134 device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n", 1135 fw_status[0], fw_status[1], fw_status[2], fw_status[3]); 1136 } 1137 1138 int 1139 t3_os_find_pci_capability(adapter_t *sc, int cap) 1140 { 1141 device_t dev; 1142 struct pci_devinfo *dinfo; 1143 pcicfgregs *cfg; 1144 uint32_t status; 1145 uint8_t ptr; 1146 1147 dev = sc->dev; 1148 dinfo = device_get_ivars(dev); 1149 cfg = &dinfo->cfg; 1150 1151 status = pci_read_config(dev, PCIR_STATUS, 2); 1152 if (!(status & PCIM_STATUS_CAPPRESENT)) 1153 return (0); 1154 1155 switch (cfg->hdrtype & PCIM_HDRTYPE) { 1156 case 0: 1157 case 1: 1158 ptr = PCIR_CAP_PTR; 1159 break; 1160 case 2: 1161 ptr = PCIR_CAP_PTR_2; 1162 break; 1163 default: 1164 return (0); 1165 break; 1166 } 1167 ptr = pci_read_config(dev, ptr, 1); 1168 1169 while (ptr != 0) { 1170 if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap) 1171 return (ptr); 1172 ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1); 1173 } 1174 1175 return (0); 1176 } 1177 1178 int 1179 t3_os_pci_save_state(struct adapter *sc) 1180 { 1181 device_t dev; 1182 struct pci_devinfo *dinfo; 1183 1184 dev = sc->dev; 1185 dinfo = device_get_ivars(dev); 1186 1187 pci_cfg_save(dev, dinfo, 0); 1188 return (0); 1189 } 1190 1191 int 1192 t3_os_pci_restore_state(struct adapter *sc) 1193 { 1194 device_t dev; 1195 struct pci_devinfo *dinfo; 1196 1197 dev = sc->dev; 1198 dinfo = device_get_ivars(dev); 1199 1200 pci_cfg_restore(dev, dinfo); 1201 return (0); 1202 } 1203 1204 /** 1205 * t3_os_link_changed - handle link status changes 1206 * @sc: the adapter associated with the link change 1207 * @port_id: the port index whose link status has changed 1208 * @link_status: the new status of the link 1209 * @speed: the new speed setting 1210 * @duplex: the new duplex setting 1211 * @fc: the new flow-control setting 1212 * 1213 * This is the OS-dependent handler for link status changes. 
The OS 1214 * neutral handler takes care of most of the processing for these events, 1215 * then calls this handler for any OS-specific processing. 1216 */ 1217 void 1218 t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed, 1219 int duplex, int fc, int mac_was_reset) 1220 { 1221 struct port_info *pi = &adapter->port[port_id]; 1222 if_t ifp = pi->ifp; 1223 1224 /* no race with detach, so ifp should always be good */ 1225 KASSERT(ifp, ("%s: if detached.", __func__)); 1226 1227 /* Reapply mac settings if they were lost due to a reset */ 1228 if (mac_was_reset) { 1229 PORT_LOCK(pi); 1230 cxgb_update_mac_settings(pi); 1231 PORT_UNLOCK(pi); 1232 } 1233 1234 if (link_status) { 1235 if_setbaudrate(ifp, IF_Mbps(speed)); 1236 if_link_state_change(ifp, LINK_STATE_UP); 1237 } else 1238 if_link_state_change(ifp, LINK_STATE_DOWN); 1239 } 1240 1241 /** 1242 * t3_os_phymod_changed - handle PHY module changes 1243 * @phy: the PHY reporting the module change 1244 * @mod_type: new module type 1245 * 1246 * This is the OS-dependent handler for PHY module changes. It is 1247 * invoked when a PHY module is removed or inserted for any OS-specific 1248 * processing. 1249 */ 1250 void t3_os_phymod_changed(struct adapter *adap, int port_id) 1251 { 1252 static const char *mod_str[] = { 1253 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown" 1254 }; 1255 struct port_info *pi = &adap->port[port_id]; 1256 int mod = pi->phy.modtype; 1257 1258 if (mod != pi->media.ifm_cur->ifm_data) 1259 cxgb_build_medialist(pi); 1260 1261 if (mod == phy_modtype_none) 1262 if_printf(pi->ifp, "PHY module unplugged\n"); 1263 else { 1264 KASSERT(mod < ARRAY_SIZE(mod_str), 1265 ("invalid PHY module type %d", mod)); 1266 if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]); 1267 } 1268 } 1269 1270 void 1271 t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]) 1272 { 1273 1274 /* 1275 * The ifnet might not be allocated before this gets called, 1276 * as this is called early on in attach by t3_prep_adapter 1277 * save the address off in the port structure 1278 */ 1279 if (cxgb_debug) 1280 printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":"); 1281 bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN); 1282 } 1283 1284 /* 1285 * Programs the XGMAC based on the settings in the ifnet. These settings 1286 * include MTU, MAC address, mcast addresses, etc. 
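 * Must be called with the port lock held (asserted at the top of the function).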
1287 */ 1288 static void 1289 cxgb_update_mac_settings(struct port_info *p) 1290 { 1291 if_t ifp = p->ifp; 1292 struct t3_rx_mode rm; 1293 struct cmac *mac = &p->mac; 1294 int mtu, hwtagging; 1295 1296 PORT_LOCK_ASSERT_OWNED(p); 1297 1298 bcopy(if_getlladdr(ifp), p->hw_addr, ETHER_ADDR_LEN); 1299 1300 mtu = if_getmtu(ifp); 1301 if (if_getcapenable(ifp) & IFCAP_VLAN_MTU) 1302 mtu += ETHER_VLAN_ENCAP_LEN; 1303 1304 hwtagging = (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0; 1305 1306 t3_mac_set_mtu(mac, mtu); 1307 t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging); 1308 t3_mac_set_address(mac, 0, p->hw_addr); 1309 t3_init_rx_mode(&rm, p); 1310 t3_mac_set_rx_mode(mac, &rm); 1311 } 1312 1313 1314 static int 1315 await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, 1316 unsigned long n) 1317 { 1318 int attempts = 5; 1319 1320 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { 1321 if (!--attempts) 1322 return (ETIMEDOUT); 1323 t3_os_sleep(10); 1324 } 1325 return 0; 1326 } 1327 1328 static int 1329 init_tp_parity(struct adapter *adap) 1330 { 1331 int i; 1332 struct mbuf *m; 1333 struct cpl_set_tcb_field *greq; 1334 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts; 1335 1336 t3_tp_set_offload_mode(adap, 1); 1337 1338 for (i = 0; i < 16; i++) { 1339 struct cpl_smt_write_req *req; 1340 1341 m = m_gethdr(M_WAITOK, MT_DATA); 1342 req = mtod(m, struct cpl_smt_write_req *); 1343 m->m_len = m->m_pkthdr.len = sizeof(*req); 1344 memset(req, 0, sizeof(*req)); 1345 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1346 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i)); 1347 req->iff = i; 1348 t3_mgmt_tx(adap, m); 1349 } 1350 1351 for (i = 0; i < 2048; i++) { 1352 struct cpl_l2t_write_req *req; 1353 1354 m = m_gethdr(M_WAITOK, MT_DATA); 1355 req = mtod(m, struct cpl_l2t_write_req *); 1356 m->m_len = m->m_pkthdr.len = sizeof(*req); 1357 memset(req, 0, sizeof(*req)); 1358 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1359 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i)); 1360 req->params = htonl(V_L2T_W_IDX(i)); 1361 t3_mgmt_tx(adap, m); 1362 } 1363 1364 for (i = 0; i < 2048; i++) { 1365 struct cpl_rte_write_req *req; 1366 1367 m = m_gethdr(M_WAITOK, MT_DATA); 1368 req = mtod(m, struct cpl_rte_write_req *); 1369 m->m_len = m->m_pkthdr.len = sizeof(*req); 1370 memset(req, 0, sizeof(*req)); 1371 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1372 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i)); 1373 req->l2t_idx = htonl(V_L2T_W_IDX(i)); 1374 t3_mgmt_tx(adap, m); 1375 } 1376 1377 m = m_gethdr(M_WAITOK, MT_DATA); 1378 greq = mtod(m, struct cpl_set_tcb_field *); 1379 m->m_len = m->m_pkthdr.len = sizeof(*greq); 1380 memset(greq, 0, sizeof(*greq)); 1381 greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1382 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0)); 1383 greq->mask = htobe64(1); 1384 t3_mgmt_tx(adap, m); 1385 1386 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1); 1387 t3_tp_set_offload_mode(adap, 0); 1388 return (i); 1389 } 1390 1391 /** 1392 * setup_rss - configure Receive Side Steering (per-queue connection demux) 1393 * @adap: the adapter 1394 * 1395 * Sets up RSS to distribute packets to multiple receive queues. We 1396 * configure the RSS CPU lookup table to distribute to the number of HW 1397 * receive queues, and the response queue lookup table to narrow that 1398 * down to the response queues actually configured for each port. 
1399 * We always configure the RSS mapping for two ports since the mapping 1400 * table has plenty of entries. 1401 */ 1402 static void 1403 setup_rss(adapter_t *adap) 1404 { 1405 int i; 1406 u_int nq[2]; 1407 uint8_t cpus[SGE_QSETS + 1]; 1408 uint16_t rspq_map[RSS_TABLE_SIZE]; 1409 1410 for (i = 0; i < SGE_QSETS; ++i) 1411 cpus[i] = i; 1412 cpus[SGE_QSETS] = 0xff; 1413 1414 nq[0] = nq[1] = 0; 1415 for_each_port(adap, i) { 1416 const struct port_info *pi = adap2pinfo(adap, i); 1417 1418 nq[pi->tx_chan] += pi->nqsets; 1419 } 1420 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) { 1421 rspq_map[i] = nq[0] ? i % nq[0] : 0; 1422 rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0; 1423 } 1424 1425 /* Calculate the reverse RSS map table */ 1426 for (i = 0; i < SGE_QSETS; ++i) 1427 adap->rrss_map[i] = 0xff; 1428 for (i = 0; i < RSS_TABLE_SIZE; ++i) 1429 if (adap->rrss_map[rspq_map[i]] == 0xff) 1430 adap->rrss_map[rspq_map[i]] = i; 1431 1432 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN | 1433 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN | 1434 F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, 1435 cpus, rspq_map); 1436 1437 } 1438 static void 1439 send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, 1440 int hi, int port) 1441 { 1442 struct mbuf *m; 1443 struct mngt_pktsched_wr *req; 1444 1445 m = m_gethdr(M_NOWAIT, MT_DATA); 1446 if (m) { 1447 req = mtod(m, struct mngt_pktsched_wr *); 1448 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT)); 1449 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET; 1450 req->sched = sched; 1451 req->idx = qidx; 1452 req->min = lo; 1453 req->max = hi; 1454 req->binding = port; 1455 m->m_len = m->m_pkthdr.len = sizeof(*req); 1456 t3_mgmt_tx(adap, m); 1457 } 1458 } 1459 1460 static void 1461 bind_qsets(adapter_t *sc) 1462 { 1463 int i, j; 1464 1465 for (i = 0; i < (sc)->params.nports; ++i) { 1466 const struct port_info *pi = adap2pinfo(sc, i); 1467 1468 for (j = 0; j < pi->nqsets; ++j) { 1469 send_pktsched_cmd(sc, 1, pi->first_qset + j, -1, 1470 -1, pi->tx_chan); 1471 1472 } 1473 } 1474 } 1475 1476 static void 1477 update_tpeeprom(struct adapter *adap) 1478 { 1479 const struct firmware *tpeeprom; 1480 1481 uint32_t version; 1482 unsigned int major, minor; 1483 int ret, len; 1484 char rev, name[32]; 1485 1486 t3_seeprom_read(adap, TP_SRAM_OFFSET, &version); 1487 1488 major = G_TP_VERSION_MAJOR(version); 1489 minor = G_TP_VERSION_MINOR(version); 1490 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR) 1491 return; 1492 1493 rev = t3rev2char(adap); 1494 snprintf(name, sizeof(name), TPEEPROM_NAME, rev); 1495 1496 tpeeprom = firmware_get(name); 1497 if (tpeeprom == NULL) { 1498 device_printf(adap->dev, 1499 "could not load TP EEPROM: unable to load %s\n", 1500 name); 1501 return; 1502 } 1503 1504 len = tpeeprom->datasize - 4; 1505 1506 ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize); 1507 if (ret) 1508 goto release_tpeeprom; 1509 1510 if (len != TP_SRAM_LEN) { 1511 device_printf(adap->dev, 1512 "%s length is wrong len=%d expected=%d\n", name, 1513 len, TP_SRAM_LEN); 1514 return; 1515 } 1516 1517 ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize, 1518 TP_SRAM_OFFSET); 1519 1520 if (!ret) { 1521 device_printf(adap->dev, 1522 "Protocol SRAM image updated in EEPROM to %d.%d.%d\n", 1523 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); 1524 } else 1525 device_printf(adap->dev, 1526 "Protocol SRAM image update in EEPROM failed\n"); 1527 1528 release_tpeeprom: 1529 firmware_put(tpeeprom, 
FIRMWARE_UNLOAD); 1530 1531 return; 1532 } 1533 1534 static int 1535 update_tpsram(struct adapter *adap) 1536 { 1537 const struct firmware *tpsram; 1538 int ret; 1539 char rev, name[32]; 1540 1541 rev = t3rev2char(adap); 1542 snprintf(name, sizeof(name), TPSRAM_NAME, rev); 1543 1544 update_tpeeprom(adap); 1545 1546 tpsram = firmware_get(name); 1547 if (tpsram == NULL){ 1548 device_printf(adap->dev, "could not load TP SRAM\n"); 1549 return (EINVAL); 1550 } else 1551 device_printf(adap->dev, "updating TP SRAM\n"); 1552 1553 ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize); 1554 if (ret) 1555 goto release_tpsram; 1556 1557 ret = t3_set_proto_sram(adap, tpsram->data); 1558 if (ret) 1559 device_printf(adap->dev, "loading protocol SRAM failed\n"); 1560 1561 release_tpsram: 1562 firmware_put(tpsram, FIRMWARE_UNLOAD); 1563 1564 return ret; 1565 } 1566 1567 /** 1568 * cxgb_up - enable the adapter 1569 * @adap: adapter being enabled 1570 * 1571 * Called when the first port is enabled, this function performs the 1572 * actions necessary to make an adapter operational, such as completing 1573 * the initialization of HW modules, and enabling interrupts. 1574 */ 1575 static int 1576 cxgb_up(struct adapter *sc) 1577 { 1578 int err = 0; 1579 unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS; 1580 1581 KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)", 1582 __func__, sc->open_device_map)); 1583 1584 if ((sc->flags & FULL_INIT_DONE) == 0) { 1585 1586 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1587 1588 if ((sc->flags & FW_UPTODATE) == 0) 1589 if ((err = upgrade_fw(sc))) 1590 goto out; 1591 1592 if ((sc->flags & TPS_UPTODATE) == 0) 1593 if ((err = update_tpsram(sc))) 1594 goto out; 1595 1596 if (is_offload(sc) && nfilters != 0) { 1597 sc->params.mc5.nservers = 0; 1598 1599 if (nfilters < 0) 1600 sc->params.mc5.nfilters = mxf; 1601 else 1602 sc->params.mc5.nfilters = min(nfilters, mxf); 1603 } 1604 1605 err = t3_init_hw(sc, 0); 1606 if (err) 1607 goto out; 1608 1609 t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT); 1610 t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); 1611 1612 err = setup_sge_qsets(sc); 1613 if (err) 1614 goto out; 1615 1616 alloc_filters(sc); 1617 setup_rss(sc); 1618 1619 t3_add_configured_sysctls(sc); 1620 sc->flags |= FULL_INIT_DONE; 1621 } 1622 1623 t3_intr_clear(sc); 1624 t3_sge_start(sc); 1625 t3_intr_enable(sc); 1626 1627 if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) && 1628 is_offload(sc) && init_tp_parity(sc) == 0) 1629 sc->flags |= TP_PARITY_INIT; 1630 1631 if (sc->flags & TP_PARITY_INIT) { 1632 t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR); 1633 t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff); 1634 } 1635 1636 if (!(sc->flags & QUEUES_BOUND)) { 1637 bind_qsets(sc); 1638 setup_hw_filters(sc); 1639 sc->flags |= QUEUES_BOUND; 1640 } 1641 1642 t3_sge_reset_adapter(sc); 1643 out: 1644 return (err); 1645 } 1646 1647 /* 1648 * Called when the last open device is closed. Does NOT undo all of cxgb_up's 1649 * work. Specifically, the resources grabbed under FULL_INIT_DONE are released 1650 * during controller_detach, not here. 1651 */ 1652 static void 1653 cxgb_down(struct adapter *sc) 1654 { 1655 t3_sge_stop(sc); 1656 t3_intr_disable(sc); 1657 } 1658 1659 /* 1660 * if_init for cxgb ports. 
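 * Takes the adapter lock; cxgb_init_locked() releases it before returning.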
1661 */ 1662 static void 1663 cxgb_init(void *arg) 1664 { 1665 struct port_info *p = arg; 1666 struct adapter *sc = p->adapter; 1667 1668 ADAPTER_LOCK(sc); 1669 cxgb_init_locked(p); /* releases adapter lock */ 1670 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1671 } 1672 1673 static int 1674 cxgb_init_locked(struct port_info *p) 1675 { 1676 struct adapter *sc = p->adapter; 1677 if_t ifp = p->ifp; 1678 struct cmac *mac = &p->mac; 1679 int i, rc = 0, may_sleep = 0, gave_up_lock = 0; 1680 1681 ADAPTER_LOCK_ASSERT_OWNED(sc); 1682 1683 while (!IS_DOOMED(p) && IS_BUSY(sc)) { 1684 gave_up_lock = 1; 1685 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) { 1686 rc = EINTR; 1687 goto done; 1688 } 1689 } 1690 if (IS_DOOMED(p)) { 1691 rc = ENXIO; 1692 goto done; 1693 } 1694 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 1695 1696 /* 1697 * The code that runs during one-time adapter initialization can sleep 1698 * so it's important not to hold any locks across it. 1699 */ 1700 may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1; 1701 1702 if (may_sleep) { 1703 SET_BUSY(sc); 1704 gave_up_lock = 1; 1705 ADAPTER_UNLOCK(sc); 1706 } 1707 1708 if (sc->open_device_map == 0 && ((rc = cxgb_up(sc)) != 0)) 1709 goto done; 1710 1711 PORT_LOCK(p); 1712 if (isset(&sc->open_device_map, p->port_id) && 1713 (if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 1714 PORT_UNLOCK(p); 1715 goto done; 1716 } 1717 t3_port_intr_enable(sc, p->port_id); 1718 if (!mac->multiport) 1719 t3_mac_init(mac); 1720 cxgb_update_mac_settings(p); 1721 t3_link_start(&p->phy, mac, &p->link_config); 1722 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); 1723 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 1724 PORT_UNLOCK(p); 1725 1726 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { 1727 struct sge_qset *qs = &sc->sge.qs[i]; 1728 struct sge_txq *txq = &qs->txq[TXQ_ETH]; 1729 1730 callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs, 1731 txq->txq_watchdog.c_cpu); 1732 } 1733 1734 /* all ok */ 1735 setbit(&sc->open_device_map, p->port_id); 1736 callout_reset(&p->link_check_ch, 1737 p->phy.caps & SUPPORTED_LINK_IRQ ? hz * 3 : hz / 4, 1738 link_check_callout, p); 1739 1740 done: 1741 if (may_sleep) { 1742 ADAPTER_LOCK(sc); 1743 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 1744 CLR_BUSY(sc); 1745 } 1746 if (gave_up_lock) 1747 wakeup_one(&sc->flags); 1748 ADAPTER_UNLOCK(sc); 1749 return (rc); 1750 } 1751 1752 static int 1753 cxgb_uninit_locked(struct port_info *p) 1754 { 1755 struct adapter *sc = p->adapter; 1756 int rc; 1757 1758 ADAPTER_LOCK_ASSERT_OWNED(sc); 1759 1760 while (!IS_DOOMED(p) && IS_BUSY(sc)) { 1761 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) { 1762 rc = EINTR; 1763 goto done; 1764 } 1765 } 1766 if (IS_DOOMED(p)) { 1767 rc = ENXIO; 1768 goto done; 1769 } 1770 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 1771 SET_BUSY(sc); 1772 ADAPTER_UNLOCK(sc); 1773 1774 rc = cxgb_uninit_synchronized(p); 1775 1776 ADAPTER_LOCK(sc); 1777 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 1778 CLR_BUSY(sc); 1779 wakeup_one(&sc->flags); 1780 done: 1781 ADAPTER_UNLOCK(sc); 1782 return (rc); 1783 } 1784 1785 /* 1786 * Called on "ifconfig down", and from port_detach 1787 */ 1788 static int 1789 cxgb_uninit_synchronized(struct port_info *pi) 1790 { 1791 struct adapter *sc = pi->adapter; 1792 if_t ifp = pi->ifp; 1793 1794 /* 1795 * taskqueue_drain may cause a deadlock if the adapter lock is held. 
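 * The caller must therefore not hold the adapter lock here (asserted below).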
1796 */ 1797 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1798 1799 /* 1800 * Clear this port's bit from the open device map, and then drain all 1801 * the tasks that can access/manipulate this port's port_info or ifp. 1802 * We disable this port's interrupts here and so the slow/ext 1803 * interrupt tasks won't be enqueued. The tick task will continue to 1804 * be enqueued every second but the runs after this drain will not see 1805 * this port in the open device map. 1806 * 1807 * A well behaved task must take open_device_map into account and ignore 1808 * ports that are not open. 1809 */ 1810 clrbit(&sc->open_device_map, pi->port_id); 1811 t3_port_intr_disable(sc, pi->port_id); 1812 taskqueue_drain(sc->tq, &sc->slow_intr_task); 1813 taskqueue_drain(sc->tq, &sc->tick_task); 1814 1815 callout_drain(&pi->link_check_ch); 1816 taskqueue_drain(sc->tq, &pi->link_check_task); 1817 1818 PORT_LOCK(pi); 1819 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1820 1821 /* disable pause frames */ 1822 t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0); 1823 1824 /* Reset RX FIFO HWM */ 1825 t3_set_reg_field(sc, A_XGM_RXFIFO_CFG + pi->mac.offset, 1826 V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0); 1827 1828 DELAY(100 * 1000); 1829 1830 /* Wait for TXFIFO empty */ 1831 t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset, 1832 F_TXFIFO_EMPTY, 1, 20, 5); 1833 1834 DELAY(100 * 1000); 1835 t3_mac_disable(&pi->mac, MAC_DIRECTION_RX); 1836 1837 pi->phy.ops->power_down(&pi->phy, 1); 1838 1839 PORT_UNLOCK(pi); 1840 1841 pi->link_config.link_ok = 0; 1842 t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0); 1843 1844 if (sc->open_device_map == 0) 1845 cxgb_down(pi->adapter); 1846 1847 return (0); 1848 } 1849 1850 /* 1851 * Mark lro enabled or disabled in all qsets for this port 1852 */ 1853 static int 1854 cxgb_set_lro(struct port_info *p, int enabled) 1855 { 1856 int i; 1857 struct adapter *adp = p->adapter; 1858 struct sge_qset *q; 1859 1860 for (i = 0; i < p->nqsets; i++) { 1861 q = &adp->sge.qs[p->first_qset + i]; 1862 q->lro.enabled = (enabled != 0); 1863 } 1864 return (0); 1865 } 1866 1867 static int 1868 cxgb_ioctl(if_t ifp, unsigned long command, caddr_t data) 1869 { 1870 struct port_info *p = if_getsoftc(ifp); 1871 struct adapter *sc = p->adapter; 1872 struct ifreq *ifr = (struct ifreq *)data; 1873 int flags, error = 0, mtu; 1874 uint32_t mask; 1875 1876 switch (command) { 1877 case SIOCSIFMTU: 1878 ADAPTER_LOCK(sc); 1879 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? 
EBUSY : 0); 1880 if (error) { 1881 fail: 1882 ADAPTER_UNLOCK(sc); 1883 return (error); 1884 } 1885 1886 mtu = ifr->ifr_mtu; 1887 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) { 1888 error = EINVAL; 1889 } else { 1890 if_setmtu(ifp, mtu); 1891 PORT_LOCK(p); 1892 cxgb_update_mac_settings(p); 1893 PORT_UNLOCK(p); 1894 } 1895 ADAPTER_UNLOCK(sc); 1896 break; 1897 case SIOCSIFFLAGS: 1898 ADAPTER_LOCK(sc); 1899 if (IS_DOOMED(p)) { 1900 error = ENXIO; 1901 goto fail; 1902 } 1903 if (if_getflags(ifp) & IFF_UP) { 1904 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1905 flags = p->if_flags; 1906 if (((if_getflags(ifp) ^ flags) & IFF_PROMISC) || 1907 ((if_getflags(ifp) ^ flags) & IFF_ALLMULTI)) { 1908 if (IS_BUSY(sc)) { 1909 error = EBUSY; 1910 goto fail; 1911 } 1912 PORT_LOCK(p); 1913 cxgb_update_mac_settings(p); 1914 PORT_UNLOCK(p); 1915 } 1916 ADAPTER_UNLOCK(sc); 1917 } else 1918 error = cxgb_init_locked(p); 1919 p->if_flags = if_getflags(ifp); 1920 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 1921 error = cxgb_uninit_locked(p); 1922 else 1923 ADAPTER_UNLOCK(sc); 1924 1925 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1926 break; 1927 case SIOCADDMULTI: 1928 case SIOCDELMULTI: 1929 ADAPTER_LOCK(sc); 1930 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 1931 if (error) 1932 goto fail; 1933 1934 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1935 PORT_LOCK(p); 1936 cxgb_update_mac_settings(p); 1937 PORT_UNLOCK(p); 1938 } 1939 ADAPTER_UNLOCK(sc); 1940 1941 break; 1942 case SIOCSIFCAP: 1943 ADAPTER_LOCK(sc); 1944 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 1945 if (error) 1946 goto fail; 1947 1948 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 1949 if (mask & IFCAP_TXCSUM) { 1950 if_togglecapenable(ifp, IFCAP_TXCSUM); 1951 if_togglehwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP); 1952 1953 if (IFCAP_TSO4 & if_getcapenable(ifp) && 1954 !(IFCAP_TXCSUM & if_getcapenable(ifp))) { 1955 mask &= ~IFCAP_TSO4; 1956 if_setcapenablebit(ifp, 0, IFCAP_TSO4); 1957 if_printf(ifp, 1958 "tso4 disabled due to -txcsum.\n"); 1959 } 1960 } 1961 if (mask & IFCAP_TXCSUM_IPV6) { 1962 if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6); 1963 if_togglehwassist(ifp, CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 1964 1965 if (IFCAP_TSO6 & if_getcapenable(ifp) && 1966 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) { 1967 mask &= ~IFCAP_TSO6; 1968 if_setcapenablebit(ifp, 0, IFCAP_TSO6); 1969 if_printf(ifp, 1970 "tso6 disabled due to -txcsum6.\n"); 1971 } 1972 } 1973 if (mask & IFCAP_RXCSUM) 1974 if_togglecapenable(ifp, IFCAP_RXCSUM); 1975 if (mask & IFCAP_RXCSUM_IPV6) 1976 if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6); 1977 1978 /* 1979 * Note that we leave CSUM_TSO alone (it is always set). The 1980 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before 1981 * sending a TSO request our way, so it's sufficient to toggle 1982 * IFCAP_TSOx only. 
1983 */ 1984 if (mask & IFCAP_TSO4) { 1985 if (!(IFCAP_TSO4 & if_getcapenable(ifp)) && 1986 !(IFCAP_TXCSUM & if_getcapenable(ifp))) { 1987 if_printf(ifp, "enable txcsum first.\n"); 1988 error = EAGAIN; 1989 goto fail; 1990 } 1991 if_togglecapenable(ifp, IFCAP_TSO4); 1992 } 1993 if (mask & IFCAP_TSO6) { 1994 if (!(IFCAP_TSO6 & if_getcapenable(ifp)) && 1995 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) { 1996 if_printf(ifp, "enable txcsum6 first.\n"); 1997 error = EAGAIN; 1998 goto fail; 1999 } 2000 if_togglecapenable(ifp, IFCAP_TSO6); 2001 } 2002 if (mask & IFCAP_LRO) { 2003 if_togglecapenable(ifp, IFCAP_LRO); 2004 2005 /* Safe to do this even if cxgb_up not called yet */ 2006 cxgb_set_lro(p, if_getcapenable(ifp) & IFCAP_LRO); 2007 } 2008 #ifdef TCP_OFFLOAD 2009 if (mask & IFCAP_TOE4) { 2010 int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TOE4; 2011 2012 error = toe_capability(p, enable); 2013 if (error == 0) 2014 if_togglecapenable(ifp, mask); 2015 } 2016 #endif 2017 if (mask & IFCAP_VLAN_HWTAGGING) { 2018 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); 2019 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2020 PORT_LOCK(p); 2021 cxgb_update_mac_settings(p); 2022 PORT_UNLOCK(p); 2023 } 2024 } 2025 if (mask & IFCAP_VLAN_MTU) { 2026 if_togglecapenable(ifp, IFCAP_VLAN_MTU); 2027 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2028 PORT_LOCK(p); 2029 cxgb_update_mac_settings(p); 2030 PORT_UNLOCK(p); 2031 } 2032 } 2033 if (mask & IFCAP_VLAN_HWTSO) 2034 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 2035 if (mask & IFCAP_VLAN_HWCSUM) 2036 if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM); 2037 2038 #ifdef VLAN_CAPABILITIES 2039 VLAN_CAPABILITIES(ifp); 2040 #endif 2041 ADAPTER_UNLOCK(sc); 2042 break; 2043 case SIOCSIFMEDIA: 2044 case SIOCGIFMEDIA: 2045 error = ifmedia_ioctl(ifp, ifr, &p->media, command); 2046 break; 2047 default: 2048 error = ether_ioctl(ifp, command, data); 2049 } 2050 2051 return (error); 2052 } 2053 2054 static int 2055 cxgb_media_change(if_t ifp) 2056 { 2057 return (EOPNOTSUPP); 2058 } 2059 2060 /* 2061 * Translates phy->modtype to the correct Ethernet media subtype. 2062 */ 2063 static int 2064 cxgb_ifm_type(int mod) 2065 { 2066 switch (mod) { 2067 case phy_modtype_sr: 2068 return (IFM_10G_SR); 2069 case phy_modtype_lr: 2070 return (IFM_10G_LR); 2071 case phy_modtype_lrm: 2072 return (IFM_10G_LRM); 2073 case phy_modtype_twinax: 2074 return (IFM_10G_TWINAX); 2075 case phy_modtype_twinax_long: 2076 return (IFM_10G_TWINAX_LONG); 2077 case phy_modtype_none: 2078 return (IFM_NONE); 2079 case phy_modtype_unknown: 2080 return (IFM_UNKNOWN); 2081 } 2082 2083 KASSERT(0, ("%s: modtype %d unknown", __func__, mod)); 2084 return (IFM_UNKNOWN); 2085 } 2086 2087 /* 2088 * Rebuilds the ifmedia list for this port, and sets the current media. 
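 *
 * (A descriptive note on when this runs, based on the code visible below:
 * cxgb_media_status() rebuilds the list again whenever it notices that the
 * PHY module type has changed since the list was last built.)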
2089 */ 2090 static void 2091 cxgb_build_medialist(struct port_info *p) 2092 { 2093 struct cphy *phy = &p->phy; 2094 struct ifmedia *media = &p->media; 2095 int mod = phy->modtype; 2096 int m = IFM_ETHER | IFM_FDX; 2097 2098 PORT_LOCK(p); 2099 2100 ifmedia_removeall(media); 2101 if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) { 2102 /* Copper (RJ45) */ 2103 2104 if (phy->caps & SUPPORTED_10000baseT_Full) 2105 ifmedia_add(media, m | IFM_10G_T, mod, NULL); 2106 2107 if (phy->caps & SUPPORTED_1000baseT_Full) 2108 ifmedia_add(media, m | IFM_1000_T, mod, NULL); 2109 2110 if (phy->caps & SUPPORTED_100baseT_Full) 2111 ifmedia_add(media, m | IFM_100_TX, mod, NULL); 2112 2113 if (phy->caps & SUPPORTED_10baseT_Full) 2114 ifmedia_add(media, m | IFM_10_T, mod, NULL); 2115 2116 ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL); 2117 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 2118 2119 } else if (phy->caps & SUPPORTED_TP) { 2120 /* Copper (CX4) */ 2121 2122 KASSERT(phy->caps & SUPPORTED_10000baseT_Full, 2123 ("%s: unexpected cap 0x%x", __func__, phy->caps)); 2124 2125 ifmedia_add(media, m | IFM_10G_CX4, mod, NULL); 2126 ifmedia_set(media, m | IFM_10G_CX4); 2127 2128 } else if (phy->caps & SUPPORTED_FIBRE && 2129 phy->caps & SUPPORTED_10000baseT_Full) { 2130 /* 10G optical (but includes SFP+ twinax) */ 2131 2132 m |= cxgb_ifm_type(mod); 2133 if (IFM_SUBTYPE(m) == IFM_NONE) 2134 m &= ~IFM_FDX; 2135 2136 ifmedia_add(media, m, mod, NULL); 2137 ifmedia_set(media, m); 2138 2139 } else if (phy->caps & SUPPORTED_FIBRE && 2140 phy->caps & SUPPORTED_1000baseT_Full) { 2141 /* 1G optical */ 2142 2143 /* XXX: Lie and claim to be SX, could actually be any 1G-X */ 2144 ifmedia_add(media, m | IFM_1000_SX, mod, NULL); 2145 ifmedia_set(media, m | IFM_1000_SX); 2146 2147 } else { 2148 KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__, 2149 phy->caps)); 2150 } 2151 2152 PORT_UNLOCK(p); 2153 } 2154 2155 static void 2156 cxgb_media_status(if_t ifp, struct ifmediareq *ifmr) 2157 { 2158 struct port_info *p = if_getsoftc(ifp); 2159 struct ifmedia_entry *cur = p->media.ifm_cur; 2160 int speed = p->link_config.speed; 2161 2162 if (cur->ifm_data != p->phy.modtype) { 2163 cxgb_build_medialist(p); 2164 cur = p->media.ifm_cur; 2165 } 2166 2167 ifmr->ifm_status = IFM_AVALID; 2168 if (!p->link_config.link_ok) 2169 return; 2170 2171 ifmr->ifm_status |= IFM_ACTIVE; 2172 2173 /* 2174 * active and current will differ iff current media is autoselect. That 2175 * can happen only for copper RJ45. 
2176 */ 2177 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO) 2178 return; 2179 KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg, 2180 ("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps)); 2181 2182 ifmr->ifm_active = IFM_ETHER | IFM_FDX; 2183 if (speed == SPEED_10000) 2184 ifmr->ifm_active |= IFM_10G_T; 2185 else if (speed == SPEED_1000) 2186 ifmr->ifm_active |= IFM_1000_T; 2187 else if (speed == SPEED_100) 2188 ifmr->ifm_active |= IFM_100_TX; 2189 else if (speed == SPEED_10) 2190 ifmr->ifm_active |= IFM_10_T; 2191 else 2192 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__, 2193 speed)); 2194 } 2195 2196 static uint64_t 2197 cxgb_get_counter(if_t ifp, ift_counter c) 2198 { 2199 struct port_info *pi = if_getsoftc(ifp); 2200 struct adapter *sc = pi->adapter; 2201 struct cmac *mac = &pi->mac; 2202 struct mac_stats *mstats = &mac->stats; 2203 2204 cxgb_refresh_stats(pi); 2205 2206 switch (c) { 2207 case IFCOUNTER_IPACKETS: 2208 return (mstats->rx_frames); 2209 2210 case IFCOUNTER_IERRORS: 2211 return (mstats->rx_jabber + mstats->rx_data_errs + 2212 mstats->rx_sequence_errs + mstats->rx_runt + 2213 mstats->rx_too_long + mstats->rx_mac_internal_errs + 2214 mstats->rx_short + mstats->rx_fcs_errs); 2215 2216 case IFCOUNTER_OPACKETS: 2217 return (mstats->tx_frames); 2218 2219 case IFCOUNTER_OERRORS: 2220 return (mstats->tx_excess_collisions + mstats->tx_underrun + 2221 mstats->tx_len_errs + mstats->tx_mac_internal_errs + 2222 mstats->tx_excess_deferral + mstats->tx_fcs_errs); 2223 2224 case IFCOUNTER_COLLISIONS: 2225 return (mstats->tx_total_collisions); 2226 2227 case IFCOUNTER_IBYTES: 2228 return (mstats->rx_octets); 2229 2230 case IFCOUNTER_OBYTES: 2231 return (mstats->tx_octets); 2232 2233 case IFCOUNTER_IMCASTS: 2234 return (mstats->rx_mcast_frames); 2235 2236 case IFCOUNTER_OMCASTS: 2237 return (mstats->tx_mcast_frames); 2238 2239 case IFCOUNTER_IQDROPS: 2240 return (mstats->rx_cong_drops); 2241 2242 case IFCOUNTER_OQDROPS: { 2243 int i; 2244 uint64_t drops; 2245 2246 drops = 0; 2247 if (sc->flags & FULL_INIT_DONE) { 2248 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) 2249 drops += sc->sge.qs[i].txq[TXQ_ETH].txq_mr->br_drops; 2250 } 2251 2252 return (drops); 2253 2254 } 2255 2256 default: 2257 return (if_get_counter_default(ifp, c)); 2258 } 2259 } 2260 2261 static void 2262 cxgb_async_intr(void *data) 2263 { 2264 adapter_t *sc = data; 2265 2266 t3_write_reg(sc, A_PL_INT_ENABLE0, 0); 2267 (void) t3_read_reg(sc, A_PL_INT_ENABLE0); 2268 taskqueue_enqueue(sc->tq, &sc->slow_intr_task); 2269 } 2270 2271 static void 2272 link_check_callout(void *arg) 2273 { 2274 struct port_info *pi = arg; 2275 struct adapter *sc = pi->adapter; 2276 2277 if (!isset(&sc->open_device_map, pi->port_id)) 2278 return; 2279 2280 taskqueue_enqueue(sc->tq, &pi->link_check_task); 2281 } 2282 2283 static void 2284 check_link_status(void *arg, int pending) 2285 { 2286 struct port_info *pi = arg; 2287 struct adapter *sc = pi->adapter; 2288 2289 if (!isset(&sc->open_device_map, pi->port_id)) 2290 return; 2291 2292 t3_link_changed(sc, pi->port_id); 2293 2294 if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ) || 2295 pi->link_config.link_ok == 0) 2296 callout_reset(&pi->link_check_ch, hz, link_check_callout, pi); 2297 } 2298 2299 void 2300 t3_os_link_intr(struct port_info *pi) 2301 { 2302 /* 2303 * Schedule a link check in the near future. If the link is flapping 2304 * rapidly we'll keep resetting the callout and delaying the check until 2305 * things stabilize a bit. 
2306 */ 2307 callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi); 2308 } 2309 2310 static void 2311 check_t3b2_mac(struct adapter *sc) 2312 { 2313 int i; 2314 2315 if (sc->flags & CXGB_SHUTDOWN) 2316 return; 2317 2318 for_each_port(sc, i) { 2319 struct port_info *p = &sc->port[i]; 2320 int status; 2321 #ifdef INVARIANTS 2322 if_t ifp = p->ifp; 2323 #endif 2324 2325 if (!isset(&sc->open_device_map, p->port_id) || p->link_fault || 2326 !p->link_config.link_ok) 2327 continue; 2328 2329 KASSERT(if_getdrvflags(ifp) & IFF_DRV_RUNNING, 2330 ("%s: state mismatch (drv_flags %x, device_map %x)", 2331 __func__, if_getdrvflags(ifp), sc->open_device_map)); 2332 2333 PORT_LOCK(p); 2334 status = t3b2_mac_watchdog_task(&p->mac); 2335 if (status == 1) 2336 p->mac.stats.num_toggled++; 2337 else if (status == 2) { 2338 struct cmac *mac = &p->mac; 2339 2340 cxgb_update_mac_settings(p); 2341 t3_link_start(&p->phy, mac, &p->link_config); 2342 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); 2343 t3_port_intr_enable(sc, p->port_id); 2344 p->mac.stats.num_resets++; 2345 } 2346 PORT_UNLOCK(p); 2347 } 2348 } 2349 2350 static void 2351 cxgb_tick(void *arg) 2352 { 2353 adapter_t *sc = (adapter_t *)arg; 2354 2355 if (sc->flags & CXGB_SHUTDOWN) 2356 return; 2357 2358 taskqueue_enqueue(sc->tq, &sc->tick_task); 2359 callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc); 2360 } 2361 2362 void 2363 cxgb_refresh_stats(struct port_info *pi) 2364 { 2365 struct timeval tv; 2366 const struct timeval interval = {0, 250000}; /* 250ms */ 2367 2368 getmicrotime(&tv); 2369 timevalsub(&tv, &interval); 2370 if (timevalcmp(&tv, &pi->last_refreshed, <)) 2371 return; 2372 2373 PORT_LOCK(pi); 2374 t3_mac_update_stats(&pi->mac); 2375 PORT_UNLOCK(pi); 2376 getmicrotime(&pi->last_refreshed); 2377 } 2378 2379 static void 2380 cxgb_tick_handler(void *arg, int count) 2381 { 2382 adapter_t *sc = (adapter_t *)arg; 2383 const struct adapter_params *p = &sc->params; 2384 int i; 2385 uint32_t cause, reset; 2386 2387 if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE)) 2388 return; 2389 2390 if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map) 2391 check_t3b2_mac(sc); 2392 2393 cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE | F_FLEMPTY); 2394 if (cause) { 2395 struct sge_qset *qs = &sc->sge.qs[0]; 2396 uint32_t mask, v; 2397 2398 v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00; 2399 2400 mask = 1; 2401 for (i = 0; i < SGE_QSETS; i++) { 2402 if (v & mask) 2403 qs[i].rspq.starved++; 2404 mask <<= 1; 2405 } 2406 2407 mask <<= SGE_QSETS; /* skip RSPQXDISABLED */ 2408 2409 for (i = 0; i < SGE_QSETS * 2; i++) { 2410 if (v & mask) { 2411 qs[i / 2].fl[i % 2].empty++; 2412 } 2413 mask <<= 1; 2414 } 2415 2416 /* clear */ 2417 t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v); 2418 t3_write_reg(sc, A_SG_INT_CAUSE, cause); 2419 } 2420 2421 for (i = 0; i < sc->params.nports; i++) { 2422 struct port_info *pi = &sc->port[i]; 2423 struct cmac *mac = &pi->mac; 2424 2425 if (!isset(&sc->open_device_map, pi->port_id)) 2426 continue; 2427 2428 cxgb_refresh_stats(pi); 2429 2430 if (mac->multiport) 2431 continue; 2432 2433 /* Count rx fifo overflows, once per second */ 2434 cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset); 2435 reset = 0; 2436 if (cause & F_RXFIFO_OVERFLOW) { 2437 mac->stats.rx_fifo_ovfl++; 2438 reset |= F_RXFIFO_OVERFLOW; 2439 } 2440 t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset); 2441 } 2442 } 2443 2444 static void 2445 touch_bars(device_t dev) 2446 { 2447 /* 2448 * Don't enable yet 2449 */ 
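	/*
	 * Descriptive note on the block below: it is compiled out ("&& 0")
	 * and would re-read and rewrite the upper dwords of the 64-bit BARs,
	 * but it uses Linux-style pci_read_config_dword()/
	 * pci_write_config_dword() calls on an undeclared "pdev", so it is
	 * kept only as a reference.
	 */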
2450 #if !defined(__LP64__) && 0 2451 u32 v; 2452 2453 pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v); 2454 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v); 2455 pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v); 2456 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v); 2457 pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v); 2458 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v); 2459 #endif 2460 } 2461 2462 static int 2463 set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset) 2464 { 2465 uint8_t *buf; 2466 int err = 0; 2467 u32 aligned_offset, aligned_len, *p; 2468 struct adapter *adapter = pi->adapter; 2469 2470 2471 aligned_offset = offset & ~3; 2472 aligned_len = (len + (offset & 3) + 3) & ~3; 2473 2474 if (aligned_offset != offset || aligned_len != len) { 2475 buf = malloc(aligned_len, M_DEVBUF, M_WAITOK | M_ZERO); 2476 err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf); 2477 if (!err && aligned_len > 4) 2478 err = t3_seeprom_read(adapter, 2479 aligned_offset + aligned_len - 4, 2480 (u32 *)&buf[aligned_len - 4]); 2481 if (err) 2482 goto out; 2483 memcpy(buf + (offset & 3), data, len); 2484 } else 2485 buf = (uint8_t *)(uintptr_t)data; 2486 2487 err = t3_seeprom_wp(adapter, 0); 2488 if (err) 2489 goto out; 2490 2491 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { 2492 err = t3_seeprom_write(adapter, aligned_offset, *p); 2493 aligned_offset += 4; 2494 } 2495 2496 if (!err) 2497 err = t3_seeprom_wp(adapter, 1); 2498 out: 2499 if (buf != data) 2500 free(buf, M_DEVBUF); 2501 return err; 2502 } 2503 2504 2505 static int 2506 in_range(int val, int lo, int hi) 2507 { 2508 return val < 0 || (val <= hi && val >= lo); 2509 } 2510 2511 static int 2512 cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td) 2513 { 2514 return (0); 2515 } 2516 2517 static int 2518 cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2519 { 2520 return (0); 2521 } 2522 2523 static int 2524 cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, 2525 int fflag, struct thread *td) 2526 { 2527 int mmd, error = 0; 2528 struct port_info *pi = dev->si_drv1; 2529 adapter_t *sc = pi->adapter; 2530 2531 #ifdef PRIV_SUPPORTED 2532 if (priv_check(td, PRIV_DRIVER)) { 2533 if (cxgb_debug) 2534 printf("user does not have access to privileged ioctls\n"); 2535 return (EPERM); 2536 } 2537 #else 2538 if (suser(td)) { 2539 if (cxgb_debug) 2540 printf("user does not have access to privileged ioctls\n"); 2541 return (EPERM); 2542 } 2543 #endif 2544 2545 switch (cmd) { 2546 case CHELSIO_GET_MIIREG: { 2547 uint32_t val; 2548 struct cphy *phy = &pi->phy; 2549 struct ch_mii_data *mid = (struct ch_mii_data *)data; 2550 2551 if (!phy->mdio_read) 2552 return (EOPNOTSUPP); 2553 if (is_10G(sc)) { 2554 mmd = mid->phy_id >> 8; 2555 if (!mmd) 2556 mmd = MDIO_DEV_PCS; 2557 else if (mmd > MDIO_DEV_VEND2) 2558 return (EINVAL); 2559 2560 error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd, 2561 mid->reg_num, &val); 2562 } else 2563 error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0, 2564 mid->reg_num & 0x1f, &val); 2565 if (error == 0) 2566 mid->val_out = val; 2567 break; 2568 } 2569 case CHELSIO_SET_MIIREG: { 2570 struct cphy *phy = &pi->phy; 2571 struct ch_mii_data *mid = (struct ch_mii_data *)data; 2572 2573 if (!phy->mdio_write) 2574 return (EOPNOTSUPP); 2575 if (is_10G(sc)) { 2576 mmd = mid->phy_id >> 8; 2577 if (!mmd) 2578 mmd = MDIO_DEV_PCS; 2579 else if (mmd > MDIO_DEV_VEND2) 2580 return (EINVAL); 2581 2582 error = 
phy->mdio_write(sc, mid->phy_id & 0x1f, 2583 mmd, mid->reg_num, mid->val_in); 2584 } else 2585 error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0, 2586 mid->reg_num & 0x1f, 2587 mid->val_in); 2588 break; 2589 } 2590 case CHELSIO_SETREG: { 2591 struct ch_reg *edata = (struct ch_reg *)data; 2592 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 2593 return (EFAULT); 2594 t3_write_reg(sc, edata->addr, edata->val); 2595 break; 2596 } 2597 case CHELSIO_GETREG: { 2598 struct ch_reg *edata = (struct ch_reg *)data; 2599 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 2600 return (EFAULT); 2601 edata->val = t3_read_reg(sc, edata->addr); 2602 break; 2603 } 2604 case CHELSIO_GET_SGE_CONTEXT: { 2605 struct ch_cntxt *ecntxt = (struct ch_cntxt *)data; 2606 mtx_lock_spin(&sc->sge.reg_lock); 2607 switch (ecntxt->cntxt_type) { 2608 case CNTXT_TYPE_EGRESS: 2609 error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id, 2610 ecntxt->data); 2611 break; 2612 case CNTXT_TYPE_FL: 2613 error = -t3_sge_read_fl(sc, ecntxt->cntxt_id, 2614 ecntxt->data); 2615 break; 2616 case CNTXT_TYPE_RSP: 2617 error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id, 2618 ecntxt->data); 2619 break; 2620 case CNTXT_TYPE_CQ: 2621 error = -t3_sge_read_cq(sc, ecntxt->cntxt_id, 2622 ecntxt->data); 2623 break; 2624 default: 2625 error = EINVAL; 2626 break; 2627 } 2628 mtx_unlock_spin(&sc->sge.reg_lock); 2629 break; 2630 } 2631 case CHELSIO_GET_SGE_DESC: { 2632 struct ch_desc *edesc = (struct ch_desc *)data; 2633 int ret; 2634 if (edesc->queue_num >= SGE_QSETS * 6) 2635 return (EINVAL); 2636 ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6], 2637 edesc->queue_num % 6, edesc->idx, edesc->data); 2638 if (ret < 0) 2639 return (EINVAL); 2640 edesc->size = ret; 2641 break; 2642 } 2643 case CHELSIO_GET_QSET_PARAMS: { 2644 struct qset_params *q; 2645 struct ch_qset_params *t = (struct ch_qset_params *)data; 2646 int q1 = pi->first_qset; 2647 int nqsets = pi->nqsets; 2648 int i; 2649 2650 if (t->qset_idx >= nqsets) 2651 return EINVAL; 2652 2653 i = q1 + t->qset_idx; 2654 q = &sc->params.sge.qset[i]; 2655 t->rspq_size = q->rspq_size; 2656 t->txq_size[0] = q->txq_size[0]; 2657 t->txq_size[1] = q->txq_size[1]; 2658 t->txq_size[2] = q->txq_size[2]; 2659 t->fl_size[0] = q->fl_size; 2660 t->fl_size[1] = q->jumbo_size; 2661 t->polling = q->polling; 2662 t->lro = q->lro; 2663 t->intr_lat = q->coalesce_usecs; 2664 t->cong_thres = q->cong_thres; 2665 t->qnum = i; 2666 2667 if ((sc->flags & FULL_INIT_DONE) == 0) 2668 t->vector = 0; 2669 else if (sc->flags & USING_MSIX) 2670 t->vector = rman_get_start(sc->msix_irq_res[i]); 2671 else 2672 t->vector = rman_get_start(sc->irq_res); 2673 2674 break; 2675 } 2676 case CHELSIO_GET_QSET_NUM: { 2677 struct ch_reg *edata = (struct ch_reg *)data; 2678 edata->val = pi->nqsets; 2679 break; 2680 } 2681 case CHELSIO_LOAD_FW: { 2682 uint8_t *fw_data; 2683 uint32_t vers; 2684 struct ch_mem_range *t = (struct ch_mem_range *)data; 2685 2686 /* 2687 * You're allowed to load a firmware only before FULL_INIT_DONE 2688 * 2689 * FW_UPTODATE is also set so the rest of the initialization 2690 * will not overwrite what was loaded here. This gives you the 2691 * flexibility to load any firmware (and maybe shoot yourself in 2692 * the foot). 
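		 *
		 * The image is copied in from userland and handed to
		 * t3_load_fw() below while the adapter lock is held and no
		 * ports are open.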
2693 */ 2694 2695 ADAPTER_LOCK(sc); 2696 if (sc->open_device_map || sc->flags & FULL_INIT_DONE) { 2697 ADAPTER_UNLOCK(sc); 2698 return (EBUSY); 2699 } 2700 2701 fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT); 2702 if (!fw_data) 2703 error = ENOMEM; 2704 else 2705 error = copyin(t->buf, fw_data, t->len); 2706 2707 if (!error) 2708 error = -t3_load_fw(sc, fw_data, t->len); 2709 2710 if (t3_get_fw_version(sc, &vers) == 0) { 2711 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), 2712 "%d.%d.%d", G_FW_VERSION_MAJOR(vers), 2713 G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers)); 2714 } 2715 2716 if (!error) 2717 sc->flags |= FW_UPTODATE; 2718 2719 free(fw_data, M_DEVBUF); 2720 ADAPTER_UNLOCK(sc); 2721 break; 2722 } 2723 case CHELSIO_LOAD_BOOT: { 2724 uint8_t *boot_data; 2725 struct ch_mem_range *t = (struct ch_mem_range *)data; 2726 2727 boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT); 2728 if (!boot_data) 2729 return ENOMEM; 2730 2731 error = copyin(t->buf, boot_data, t->len); 2732 if (!error) 2733 error = -t3_load_boot(sc, boot_data, t->len); 2734 2735 free(boot_data, M_DEVBUF); 2736 break; 2737 } 2738 case CHELSIO_GET_PM: { 2739 struct ch_pm *m = (struct ch_pm *)data; 2740 struct tp_params *p = &sc->params.tp; 2741 2742 if (!is_offload(sc)) 2743 return (EOPNOTSUPP); 2744 2745 m->tx_pg_sz = p->tx_pg_size; 2746 m->tx_num_pg = p->tx_num_pgs; 2747 m->rx_pg_sz = p->rx_pg_size; 2748 m->rx_num_pg = p->rx_num_pgs; 2749 m->pm_total = p->pmtx_size + p->chan_rx_size * p->nchan; 2750 2751 break; 2752 } 2753 case CHELSIO_SET_PM: { 2754 struct ch_pm *m = (struct ch_pm *)data; 2755 struct tp_params *p = &sc->params.tp; 2756 2757 if (!is_offload(sc)) 2758 return (EOPNOTSUPP); 2759 if (sc->flags & FULL_INIT_DONE) 2760 return (EBUSY); 2761 2762 if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) || 2763 !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1))) 2764 return (EINVAL); /* not power of 2 */ 2765 if (!(m->rx_pg_sz & 0x14000)) 2766 return (EINVAL); /* not 16KB or 64KB */ 2767 if (!(m->tx_pg_sz & 0x1554000)) 2768 return (EINVAL); 2769 if (m->tx_num_pg == -1) 2770 m->tx_num_pg = p->tx_num_pgs; 2771 if (m->rx_num_pg == -1) 2772 m->rx_num_pg = p->rx_num_pgs; 2773 if (m->tx_num_pg % 24 || m->rx_num_pg % 24) 2774 return (EINVAL); 2775 if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size || 2776 m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size) 2777 return (EINVAL); 2778 2779 p->rx_pg_size = m->rx_pg_sz; 2780 p->tx_pg_size = m->tx_pg_sz; 2781 p->rx_num_pgs = m->rx_num_pg; 2782 p->tx_num_pgs = m->tx_num_pg; 2783 break; 2784 } 2785 case CHELSIO_SETMTUTAB: { 2786 struct ch_mtus *m = (struct ch_mtus *)data; 2787 int i; 2788 2789 if (!is_offload(sc)) 2790 return (EOPNOTSUPP); 2791 if (offload_running(sc)) 2792 return (EBUSY); 2793 if (m->nmtus != NMTUS) 2794 return (EINVAL); 2795 if (m->mtus[0] < 81) /* accommodate SACK */ 2796 return (EINVAL); 2797 2798 /* 2799 * MTUs must be in ascending order 2800 */ 2801 for (i = 1; i < NMTUS; ++i) 2802 if (m->mtus[i] < m->mtus[i - 1]) 2803 return (EINVAL); 2804 2805 memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus)); 2806 break; 2807 } 2808 case CHELSIO_GETMTUTAB: { 2809 struct ch_mtus *m = (struct ch_mtus *)data; 2810 2811 if (!is_offload(sc)) 2812 return (EOPNOTSUPP); 2813 2814 memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus)); 2815 m->nmtus = NMTUS; 2816 break; 2817 } 2818 case CHELSIO_GET_MEM: { 2819 struct ch_mem_range *t = (struct ch_mem_range *)data; 2820 struct mc7 *mem; 2821 uint8_t *useraddr; 2822 u64 buf[32]; 2823 2824 /* 2825 * Use these to avoid modifying len/addr 
in the return 2826 * struct 2827 */ 2828 uint32_t len = t->len, addr = t->addr; 2829 2830 if (!is_offload(sc)) 2831 return (EOPNOTSUPP); 2832 if (!(sc->flags & FULL_INIT_DONE)) 2833 return (EIO); /* need the memory controllers */ 2834 if ((addr & 0x7) || (len & 0x7)) 2835 return (EINVAL); 2836 if (t->mem_id == MEM_CM) 2837 mem = &sc->cm; 2838 else if (t->mem_id == MEM_PMRX) 2839 mem = &sc->pmrx; 2840 else if (t->mem_id == MEM_PMTX) 2841 mem = &sc->pmtx; 2842 else 2843 return (EINVAL); 2844 2845 /* 2846 * Version scheme: 2847 * bits 0..9: chip version 2848 * bits 10..15: chip revision 2849 */ 2850 t->version = 3 | (sc->params.rev << 10); 2851 2852 /* 2853 * Read 256 bytes at a time as len can be large and we don't 2854 * want to use huge intermediate buffers. 2855 */ 2856 useraddr = (uint8_t *)t->buf; 2857 while (len) { 2858 unsigned int chunk = min(len, sizeof(buf)); 2859 2860 error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf); 2861 if (error) 2862 return (-error); 2863 if (copyout(buf, useraddr, chunk)) 2864 return (EFAULT); 2865 useraddr += chunk; 2866 addr += chunk; 2867 len -= chunk; 2868 } 2869 break; 2870 } 2871 case CHELSIO_READ_TCAM_WORD: { 2872 struct ch_tcam_word *t = (struct ch_tcam_word *)data; 2873 2874 if (!is_offload(sc)) 2875 return (EOPNOTSUPP); 2876 if (!(sc->flags & FULL_INIT_DONE)) 2877 return (EIO); /* need MC5 */ 2878 return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf); 2879 break; 2880 } 2881 case CHELSIO_SET_TRACE_FILTER: { 2882 struct ch_trace *t = (struct ch_trace *)data; 2883 const struct trace_params *tp; 2884 2885 tp = (const struct trace_params *)&t->sip; 2886 if (t->config_tx) 2887 t3_config_trace_filter(sc, tp, 0, t->invert_match, 2888 t->trace_tx); 2889 if (t->config_rx) 2890 t3_config_trace_filter(sc, tp, 1, t->invert_match, 2891 t->trace_rx); 2892 break; 2893 } 2894 case CHELSIO_SET_PKTSCHED: { 2895 struct ch_pktsched_params *p = (struct ch_pktsched_params *)data; 2896 if (sc->open_device_map == 0) 2897 return (EAGAIN); 2898 send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max, 2899 p->binding); 2900 break; 2901 } 2902 case CHELSIO_IFCONF_GETREGS: { 2903 struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data; 2904 int reglen = cxgb_get_regs_len(); 2905 uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT); 2906 if (buf == NULL) { 2907 return (ENOMEM); 2908 } 2909 if (regs->len > reglen) 2910 regs->len = reglen; 2911 else if (regs->len < reglen) 2912 error = ENOBUFS; 2913 2914 if (!error) { 2915 cxgb_get_regs(sc, regs, buf); 2916 error = copyout(buf, regs->data, reglen); 2917 } 2918 free(buf, M_DEVBUF); 2919 2920 break; 2921 } 2922 case CHELSIO_SET_HW_SCHED: { 2923 struct ch_hw_sched *t = (struct ch_hw_sched *)data; 2924 unsigned int ticks_per_usec = core_ticks_per_usec(sc); 2925 2926 if ((sc->flags & FULL_INIT_DONE) == 0) 2927 return (EAGAIN); /* need TP to be initialized */ 2928 if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) || 2929 !in_range(t->channel, 0, 1) || 2930 !in_range(t->kbps, 0, 10000000) || 2931 !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) || 2932 !in_range(t->flow_ipg, 0, 2933 dack_ticks_to_usec(sc, 0x7ff))) 2934 return (EINVAL); 2935 2936 if (t->kbps >= 0) { 2937 error = t3_config_sched(sc, t->kbps, t->sched); 2938 if (error < 0) 2939 return (-error); 2940 } 2941 if (t->class_ipg >= 0) 2942 t3_set_sched_ipg(sc, t->sched, t->class_ipg); 2943 if (t->flow_ipg >= 0) { 2944 t->flow_ipg *= 1000; /* us -> ns */ 2945 t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1); 2946 } 2947 if (t->mode >= 0) { 2948 int bit = 1 << 
(S_TX_MOD_TIMER_MODE + t->sched); 2949 2950 t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP, 2951 bit, t->mode ? bit : 0); 2952 } 2953 if (t->channel >= 0) 2954 t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP, 2955 1 << t->sched, t->channel << t->sched); 2956 break; 2957 } 2958 case CHELSIO_GET_EEPROM: { 2959 int i; 2960 struct ch_eeprom *e = (struct ch_eeprom *)data; 2961 uint8_t *buf; 2962 2963 if (e->offset & 3 || e->offset >= EEPROMSIZE || 2964 e->len > EEPROMSIZE || e->offset + e->len > EEPROMSIZE) { 2965 return (EINVAL); 2966 } 2967 2968 buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT); 2969 if (buf == NULL) { 2970 return (ENOMEM); 2971 } 2972 e->magic = EEPROM_MAGIC; 2973 for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4) 2974 error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]); 2975 2976 if (!error) 2977 error = copyout(buf + e->offset, e->data, e->len); 2978 2979 free(buf, M_DEVBUF); 2980 break; 2981 } 2982 case CHELSIO_CLEAR_STATS: { 2983 if (!(sc->flags & FULL_INIT_DONE)) 2984 return EAGAIN; 2985 2986 PORT_LOCK(pi); 2987 t3_mac_update_stats(&pi->mac); 2988 memset(&pi->mac.stats, 0, sizeof(pi->mac.stats)); 2989 PORT_UNLOCK(pi); 2990 break; 2991 } 2992 case CHELSIO_GET_UP_LA: { 2993 struct ch_up_la *la = (struct ch_up_la *)data; 2994 uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT); 2995 if (buf == NULL) { 2996 return (ENOMEM); 2997 } 2998 if (la->bufsize < LA_BUFSIZE) 2999 error = ENOBUFS; 3000 3001 if (!error) 3002 error = -t3_get_up_la(sc, &la->stopped, &la->idx, 3003 &la->bufsize, buf); 3004 if (!error) 3005 error = copyout(buf, la->data, la->bufsize); 3006 3007 free(buf, M_DEVBUF); 3008 break; 3009 } 3010 case CHELSIO_GET_UP_IOQS: { 3011 struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data; 3012 uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT); 3013 uint32_t *v; 3014 3015 if (buf == NULL) { 3016 return (ENOMEM); 3017 } 3018 if (ioqs->bufsize < IOQS_BUFSIZE) 3019 error = ENOBUFS; 3020 3021 if (!error) 3022 error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf); 3023 3024 if (!error) { 3025 v = (uint32_t *)buf; 3026 3027 ioqs->ioq_rx_enable = *v++; 3028 ioqs->ioq_tx_enable = *v++; 3029 ioqs->ioq_rx_status = *v++; 3030 ioqs->ioq_tx_status = *v++; 3031 3032 error = copyout(v, ioqs->data, ioqs->bufsize); 3033 } 3034 3035 free(buf, M_DEVBUF); 3036 break; 3037 } 3038 case CHELSIO_SET_FILTER: { 3039 struct ch_filter *f = (struct ch_filter *)data; 3040 struct filter_info *p; 3041 unsigned int nfilters = sc->params.mc5.nfilters; 3042 3043 if (!is_offload(sc)) 3044 return (EOPNOTSUPP); /* No TCAM */ 3045 if (!(sc->flags & FULL_INIT_DONE)) 3046 return (EAGAIN); /* mc5 not setup yet */ 3047 if (nfilters == 0) 3048 return (EBUSY); /* TOE will use TCAM */ 3049 3050 /* sanity checks */ 3051 if (f->filter_id >= nfilters || 3052 (f->val.dip && f->mask.dip != 0xffffffff) || 3053 (f->val.sport && f->mask.sport != 0xffff) || 3054 (f->val.dport && f->mask.dport != 0xffff) || 3055 (f->val.vlan && f->mask.vlan != 0xfff) || 3056 (f->val.vlan_prio && 3057 f->mask.vlan_prio != FILTER_NO_VLAN_PRI) || 3058 (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) || 3059 f->qset >= SGE_QSETS || 3060 sc->rrss_map[f->qset] >= RSS_TABLE_SIZE) 3061 return (EINVAL); 3062 3063 /* Was allocated with M_WAITOK */ 3064 KASSERT(sc->filters, ("filter table NULL\n")); 3065 3066 p = &sc->filters[f->filter_id]; 3067 if (p->locked) 3068 return (EPERM); 3069 3070 bzero(p, sizeof(*p)); 3071 p->sip = f->val.sip; 3072 p->sip_mask = f->mask.sip; 3073 p->dip = f->val.dip; 3074 p->sport = f->val.sport; 3075 p->dport 
= f->val.dport; 3076 p->vlan = f->mask.vlan ? f->val.vlan : 0xfff; 3077 p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) : 3078 FILTER_NO_VLAN_PRI; 3079 p->mac_hit = f->mac_hit; 3080 p->mac_vld = f->mac_addr_idx != 0xffff; 3081 p->mac_idx = f->mac_addr_idx; 3082 p->pkt_type = f->proto; 3083 p->report_filter_id = f->want_filter_id; 3084 p->pass = f->pass; 3085 p->rss = f->rss; 3086 p->qset = f->qset; 3087 3088 error = set_filter(sc, f->filter_id, p); 3089 if (error == 0) 3090 p->valid = 1; 3091 break; 3092 } 3093 case CHELSIO_DEL_FILTER: { 3094 struct ch_filter *f = (struct ch_filter *)data; 3095 struct filter_info *p; 3096 unsigned int nfilters = sc->params.mc5.nfilters; 3097 3098 if (!is_offload(sc)) 3099 return (EOPNOTSUPP); 3100 if (!(sc->flags & FULL_INIT_DONE)) 3101 return (EAGAIN); 3102 if (nfilters == 0 || sc->filters == NULL) 3103 return (EINVAL); 3104 if (f->filter_id >= nfilters) 3105 return (EINVAL); 3106 3107 p = &sc->filters[f->filter_id]; 3108 if (p->locked) 3109 return (EPERM); 3110 if (!p->valid) 3111 return (EFAULT); /* Read "Bad address" as "Bad index" */ 3112 3113 bzero(p, sizeof(*p)); 3114 p->sip = p->sip_mask = 0xffffffff; 3115 p->vlan = 0xfff; 3116 p->vlan_prio = FILTER_NO_VLAN_PRI; 3117 p->pkt_type = 1; 3118 error = set_filter(sc, f->filter_id, p); 3119 break; 3120 } 3121 case CHELSIO_GET_FILTER: { 3122 struct ch_filter *f = (struct ch_filter *)data; 3123 struct filter_info *p; 3124 unsigned int i, nfilters = sc->params.mc5.nfilters; 3125 3126 if (!is_offload(sc)) 3127 return (EOPNOTSUPP); 3128 if (!(sc->flags & FULL_INIT_DONE)) 3129 return (EAGAIN); 3130 if (nfilters == 0 || sc->filters == NULL) 3131 return (EINVAL); 3132 3133 i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1; 3134 for (; i < nfilters; i++) { 3135 p = &sc->filters[i]; 3136 if (!p->valid) 3137 continue; 3138 3139 bzero(f, sizeof(*f)); 3140 3141 f->filter_id = i; 3142 f->val.sip = p->sip; 3143 f->mask.sip = p->sip_mask; 3144 f->val.dip = p->dip; 3145 f->mask.dip = p->dip ? 0xffffffff : 0; 3146 f->val.sport = p->sport; 3147 f->mask.sport = p->sport ? 0xffff : 0; 3148 f->val.dport = p->dport; 3149 f->mask.dport = p->dport ? 0xffff : 0; 3150 f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan; 3151 f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff; 3152 f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ? 3153 0 : p->vlan_prio; 3154 f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ? 3155 0 : FILTER_NO_VLAN_PRI; 3156 f->mac_hit = p->mac_hit; 3157 f->mac_addr_idx = p->mac_vld ? 
p->mac_idx : 0xffff; 3158 f->proto = p->pkt_type; 3159 f->want_filter_id = p->report_filter_id; 3160 f->pass = p->pass; 3161 f->rss = p->rss; 3162 f->qset = p->qset; 3163 3164 break; 3165 } 3166 3167 if (i == nfilters) 3168 f->filter_id = 0xffffffff; 3169 break; 3170 } 3171 default: 3172 return (EOPNOTSUPP); 3173 break; 3174 } 3175 3176 return (error); 3177 } 3178 3179 static __inline void 3180 reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start, 3181 unsigned int end) 3182 { 3183 uint32_t *p = (uint32_t *)(buf + start); 3184 3185 for ( ; start <= end; start += sizeof(uint32_t)) 3186 *p++ = t3_read_reg(ap, start); 3187 } 3188 3189 #define T3_REGMAP_SIZE (3 * 1024) 3190 static int 3191 cxgb_get_regs_len(void) 3192 { 3193 return T3_REGMAP_SIZE; 3194 } 3195 3196 static void 3197 cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf) 3198 { 3199 3200 /* 3201 * Version scheme: 3202 * bits 0..9: chip version 3203 * bits 10..15: chip revision 3204 * bit 31: set for PCIe cards 3205 */ 3206 regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31); 3207 3208 /* 3209 * We skip the MAC statistics registers because they are clear-on-read. 3210 * Also reading multi-register stats would need to synchronize with the 3211 * periodic mac stats accumulation. Hard to justify the complexity. 3212 */ 3213 memset(buf, 0, cxgb_get_regs_len()); 3214 reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN); 3215 reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT); 3216 reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE); 3217 reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA); 3218 reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3); 3219 reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0, 3220 XGM_REG(A_XGM_SERDES_STAT3, 1)); 3221 reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1), 3222 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1)); 3223 } 3224 3225 static int 3226 alloc_filters(struct adapter *sc) 3227 { 3228 struct filter_info *p; 3229 unsigned int nfilters = sc->params.mc5.nfilters; 3230 3231 if (nfilters == 0) 3232 return (0); 3233 3234 p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO); 3235 sc->filters = p; 3236 3237 p = &sc->filters[nfilters - 1]; 3238 p->vlan = 0xfff; 3239 p->vlan_prio = FILTER_NO_VLAN_PRI; 3240 p->pass = p->rss = p->valid = p->locked = 1; 3241 3242 return (0); 3243 } 3244 3245 static int 3246 setup_hw_filters(struct adapter *sc) 3247 { 3248 int i, rc; 3249 unsigned int nfilters = sc->params.mc5.nfilters; 3250 3251 if (!sc->filters) 3252 return (0); 3253 3254 t3_enable_filters(sc); 3255 3256 for (i = rc = 0; i < nfilters && !rc; i++) { 3257 if (sc->filters[i].locked) 3258 rc = set_filter(sc, i, &sc->filters[i]); 3259 } 3260 3261 return (rc); 3262 } 3263 3264 static int 3265 set_filter(struct adapter *sc, int id, const struct filter_info *f) 3266 { 3267 int len; 3268 struct mbuf *m; 3269 struct ulp_txpkt *txpkt; 3270 struct work_request_hdr *wr; 3271 struct cpl_pass_open_req *oreq; 3272 struct cpl_set_tcb_field *sreq; 3273 3274 len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq); 3275 KASSERT(len <= MHLEN, ("filter request too big for an mbuf")); 3276 3277 id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes - 3278 sc->params.mc5.nfilters; 3279 3280 m = m_gethdr(M_WAITOK, MT_DATA); 3281 m->m_len = m->m_pkthdr.len = len; 3282 bzero(mtod(m, char *), len); 3283 3284 wr = mtod(m, struct work_request_hdr *); 3285 wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC); 3286 3287 oreq = (struct 
cpl_pass_open_req *)(wr + 1); 3288 txpkt = (struct ulp_txpkt *)oreq; 3289 txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); 3290 txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8)); 3291 OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id)); 3292 oreq->local_port = htons(f->dport); 3293 oreq->peer_port = htons(f->sport); 3294 oreq->local_ip = htonl(f->dip); 3295 oreq->peer_ip = htonl(f->sip); 3296 oreq->peer_netmask = htonl(f->sip_mask); 3297 oreq->opt0h = 0; 3298 oreq->opt0l = htonl(F_NO_OFFLOAD); 3299 oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) | 3300 V_CONN_POLICY(CPL_CONN_POLICY_FILTER) | 3301 V_VLAN_PRI(f->vlan_prio >> 1) | 3302 V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) | 3303 V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) | 3304 V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4))); 3305 3306 sreq = (struct cpl_set_tcb_field *)(oreq + 1); 3307 set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL, 3308 (f->report_filter_id << 15) | (1 << 23) | 3309 ((u64)f->pass << 35) | ((u64)!f->rss << 36)); 3310 set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1); 3311 t3_mgmt_tx(sc, m); 3312 3313 if (f->pass && !f->rss) { 3314 len = sizeof(*sreq); 3315 m = m_gethdr(M_WAITOK, MT_DATA); 3316 m->m_len = m->m_pkthdr.len = len; 3317 bzero(mtod(m, char *), len); 3318 sreq = mtod(m, struct cpl_set_tcb_field *); 3319 sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 3320 mk_set_tcb_field(sreq, id, 25, 0x3f80000, 3321 (u64)sc->rrss_map[f->qset] << 19); 3322 t3_mgmt_tx(sc, m); 3323 } 3324 return 0; 3325 } 3326 3327 static inline void 3328 mk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid, 3329 unsigned int word, u64 mask, u64 val) 3330 { 3331 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 3332 req->reply = V_NO_REPLY(1); 3333 req->cpu_idx = 0; 3334 req->word = htons(word); 3335 req->mask = htobe64(mask); 3336 req->val = htobe64(val); 3337 } 3338 3339 static inline void 3340 set_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid, 3341 unsigned int word, u64 mask, u64 val) 3342 { 3343 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req; 3344 3345 txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); 3346 txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8)); 3347 mk_set_tcb_field(req, tid, word, mask, val); 3348 } 3349 3350 void 3351 t3_iterate(void (*func)(struct adapter *, void *), void *arg) 3352 { 3353 struct adapter *sc; 3354 3355 mtx_lock(&t3_list_lock); 3356 SLIST_FOREACH(sc, &t3_list, link) { 3357 /* 3358 * func should not make any assumptions about what state sc is 3359 * in - the only guarantee is that sc->sc_lock is a valid lock. 
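		 *
		 * func runs with the global t3_list_lock held for the whole
		 * iteration.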
3360 */ 3361 func(sc, arg); 3362 } 3363 mtx_unlock(&t3_list_lock); 3364 } 3365 3366 #ifdef TCP_OFFLOAD 3367 static int 3368 toe_capability(struct port_info *pi, int enable) 3369 { 3370 int rc; 3371 struct adapter *sc = pi->adapter; 3372 3373 ADAPTER_LOCK_ASSERT_OWNED(sc); 3374 3375 if (!is_offload(sc)) 3376 return (ENODEV); 3377 3378 if (enable) { 3379 if (!(sc->flags & FULL_INIT_DONE)) { 3380 log(LOG_WARNING, 3381 "You must enable a cxgb interface first\n"); 3382 return (EAGAIN); 3383 } 3384 3385 if (isset(&sc->offload_map, pi->port_id)) 3386 return (0); 3387 3388 if (!(sc->flags & TOM_INIT_DONE)) { 3389 rc = t3_activate_uld(sc, ULD_TOM); 3390 if (rc == EAGAIN) { 3391 log(LOG_WARNING, 3392 "You must kldload t3_tom.ko before trying " 3393 "to enable TOE on a cxgb interface.\n"); 3394 } 3395 if (rc != 0) 3396 return (rc); 3397 KASSERT(sc->tom_softc != NULL, 3398 ("%s: TOM activated but softc NULL", __func__)); 3399 KASSERT(sc->flags & TOM_INIT_DONE, 3400 ("%s: TOM activated but flag not set", __func__)); 3401 } 3402 3403 setbit(&sc->offload_map, pi->port_id); 3404 3405 /* 3406 * XXX: Temporary code to allow iWARP to be enabled when TOE is 3407 * enabled on any port. Need to figure out how to enable, 3408 * disable, load, and unload iWARP cleanly. 3409 */ 3410 if (!isset(&sc->offload_map, MAX_NPORTS) && 3411 t3_activate_uld(sc, ULD_IWARP) == 0) 3412 setbit(&sc->offload_map, MAX_NPORTS); 3413 } else { 3414 if (!isset(&sc->offload_map, pi->port_id)) 3415 return (0); 3416 3417 KASSERT(sc->flags & TOM_INIT_DONE, 3418 ("%s: TOM never initialized?", __func__)); 3419 clrbit(&sc->offload_map, pi->port_id); 3420 } 3421 3422 return (0); 3423 } 3424 3425 /* 3426 * Add an upper layer driver to the global list. 3427 */ 3428 int 3429 t3_register_uld(struct uld_info *ui) 3430 { 3431 int rc = 0; 3432 struct uld_info *u; 3433 3434 mtx_lock(&t3_uld_list_lock); 3435 SLIST_FOREACH(u, &t3_uld_list, link) { 3436 if (u->uld_id == ui->uld_id) { 3437 rc = EEXIST; 3438 goto done; 3439 } 3440 } 3441 3442 SLIST_INSERT_HEAD(&t3_uld_list, ui, link); 3443 ui->refcount = 0; 3444 done: 3445 mtx_unlock(&t3_uld_list_lock); 3446 return (rc); 3447 } 3448 3449 int 3450 t3_unregister_uld(struct uld_info *ui) 3451 { 3452 int rc = EINVAL; 3453 struct uld_info *u; 3454 3455 mtx_lock(&t3_uld_list_lock); 3456 3457 SLIST_FOREACH(u, &t3_uld_list, link) { 3458 if (u == ui) { 3459 if (ui->refcount > 0) { 3460 rc = EBUSY; 3461 goto done; 3462 } 3463 3464 SLIST_REMOVE(&t3_uld_list, ui, uld_info, link); 3465 rc = 0; 3466 goto done; 3467 } 3468 } 3469 done: 3470 mtx_unlock(&t3_uld_list_lock); 3471 return (rc); 3472 } 3473 3474 int 3475 t3_activate_uld(struct adapter *sc, int id) 3476 { 3477 int rc = EAGAIN; 3478 struct uld_info *ui; 3479 3480 mtx_lock(&t3_uld_list_lock); 3481 3482 SLIST_FOREACH(ui, &t3_uld_list, link) { 3483 if (ui->uld_id == id) { 3484 rc = ui->activate(sc); 3485 if (rc == 0) 3486 ui->refcount++; 3487 goto done; 3488 } 3489 } 3490 done: 3491 mtx_unlock(&t3_uld_list_lock); 3492 3493 return (rc); 3494 } 3495 3496 int 3497 t3_deactivate_uld(struct adapter *sc, int id) 3498 { 3499 int rc = EINVAL; 3500 struct uld_info *ui; 3501 3502 mtx_lock(&t3_uld_list_lock); 3503 3504 SLIST_FOREACH(ui, &t3_uld_list, link) { 3505 if (ui->uld_id == id) { 3506 rc = ui->deactivate(sc); 3507 if (rc == 0) 3508 ui->refcount--; 3509 goto done; 3510 } 3511 } 3512 done: 3513 mtx_unlock(&t3_uld_list_lock); 3514 3515 return (rc); 3516 } 3517 3518 static int 3519 cpl_not_handled(struct sge_qset *qs __unused, struct rsp_desc *r __unused, 3520 struct mbuf 
*m) 3521 { 3522 m_freem(m); 3523 return (EDOOFUS); 3524 } 3525 3526 int 3527 t3_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h) 3528 { 3529 uintptr_t *loc, new; 3530 3531 if (opcode >= NUM_CPL_HANDLERS) 3532 return (EINVAL); 3533 3534 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled; 3535 loc = (uintptr_t *) &sc->cpl_handler[opcode]; 3536 atomic_store_rel_ptr(loc, new); 3537 3538 return (0); 3539 } 3540 #endif 3541 3542 static int 3543 cxgbc_mod_event(module_t mod, int cmd, void *arg) 3544 { 3545 int rc = 0; 3546 3547 switch (cmd) { 3548 case MOD_LOAD: 3549 mtx_init(&t3_list_lock, "T3 adapters", 0, MTX_DEF); 3550 SLIST_INIT(&t3_list); 3551 #ifdef TCP_OFFLOAD 3552 mtx_init(&t3_uld_list_lock, "T3 ULDs", 0, MTX_DEF); 3553 SLIST_INIT(&t3_uld_list); 3554 #endif 3555 break; 3556 3557 case MOD_UNLOAD: 3558 #ifdef TCP_OFFLOAD 3559 mtx_lock(&t3_uld_list_lock); 3560 if (!SLIST_EMPTY(&t3_uld_list)) { 3561 rc = EBUSY; 3562 mtx_unlock(&t3_uld_list_lock); 3563 break; 3564 } 3565 mtx_unlock(&t3_uld_list_lock); 3566 mtx_destroy(&t3_uld_list_lock); 3567 #endif 3568 mtx_lock(&t3_list_lock); 3569 if (!SLIST_EMPTY(&t3_list)) { 3570 rc = EBUSY; 3571 mtx_unlock(&t3_list_lock); 3572 break; 3573 } 3574 mtx_unlock(&t3_list_lock); 3575 mtx_destroy(&t3_list_lock); 3576 break; 3577 } 3578 3579 return (rc); 3580 } 3581 3582 #ifdef DEBUGNET 3583 static void 3584 cxgb_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize) 3585 { 3586 struct port_info *pi; 3587 adapter_t *adap; 3588 3589 pi = if_getsoftc(ifp); 3590 adap = pi->adapter; 3591 ADAPTER_LOCK(adap); 3592 *nrxr = adap->nqsets; 3593 *ncl = adap->sge.qs[0].fl[1].size; 3594 *clsize = adap->sge.qs[0].fl[1].buf_size; 3595 ADAPTER_UNLOCK(adap); 3596 } 3597 3598 static void 3599 cxgb_debugnet_event(if_t ifp, enum debugnet_ev event) 3600 { 3601 struct port_info *pi; 3602 struct sge_qset *qs; 3603 int i; 3604 3605 pi = if_getsoftc(ifp); 3606 if (event == DEBUGNET_START) 3607 for (i = 0; i < pi->adapter->nqsets; i++) { 3608 qs = &pi->adapter->sge.qs[i]; 3609 3610 /* Need to reinit after debugnet_mbuf_start(). */ 3611 qs->fl[0].zone = zone_pack; 3612 qs->fl[1].zone = zone_clust; 3613 qs->lro.enabled = 0; 3614 } 3615 } 3616 3617 static int 3618 cxgb_debugnet_transmit(if_t ifp, struct mbuf *m) 3619 { 3620 struct port_info *pi; 3621 struct sge_qset *qs; 3622 3623 pi = if_getsoftc(ifp); 3624 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 3625 IFF_DRV_RUNNING) 3626 return (ENOENT); 3627 3628 qs = &pi->adapter->sge.qs[pi->first_qset]; 3629 return (cxgb_debugnet_encap(qs, &m)); 3630 } 3631 3632 static int 3633 cxgb_debugnet_poll(if_t ifp, int count) 3634 { 3635 struct port_info *pi; 3636 adapter_t *adap; 3637 int i; 3638 3639 pi = if_getsoftc(ifp); 3640 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 3641 return (ENOENT); 3642 3643 adap = pi->adapter; 3644 for (i = 0; i < adap->nqsets; i++) 3645 (void)cxgb_debugnet_poll_rx(adap, &adap->sge.qs[i]); 3646 (void)cxgb_debugnet_poll_tx(&adap->sge.qs[pi->first_qset]); 3647 return (0); 3648 } 3649 #endif /* DEBUGNET */ 3650