1 /************************************************************************** 2 SPDX-License-Identifier: BSD-2-Clause 3 4 Copyright (c) 2007-2009, Chelsio Inc. 5 All rights reserved. 6 7 Redistribution and use in source and binary forms, with or without 8 modification, are permitted provided that the following conditions are met: 9 10 1. Redistributions of source code must retain the above copyright notice, 11 this list of conditions and the following disclaimer. 12 13 2. Neither the name of the Chelsio Corporation nor the names of its 14 contributors may be used to endorse or promote products derived from 15 this software without specific prior written permission. 16 17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 21 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 POSSIBILITY OF SUCH DAMAGE. 
28 29 ***************************************************************************/ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include "opt_inet.h" 35 36 #include <sys/param.h> 37 #include <sys/systm.h> 38 #include <sys/kernel.h> 39 #include <sys/bus.h> 40 #include <sys/module.h> 41 #include <sys/pciio.h> 42 #include <sys/conf.h> 43 #include <machine/bus.h> 44 #include <machine/resource.h> 45 #include <sys/ktr.h> 46 #include <sys/rman.h> 47 #include <sys/ioccom.h> 48 #include <sys/mbuf.h> 49 #include <sys/linker.h> 50 #include <sys/firmware.h> 51 #include <sys/socket.h> 52 #include <sys/sockio.h> 53 #include <sys/smp.h> 54 #include <sys/sysctl.h> 55 #include <sys/syslog.h> 56 #include <sys/queue.h> 57 #include <sys/taskqueue.h> 58 #include <sys/proc.h> 59 60 #include <net/bpf.h> 61 #include <net/debugnet.h> 62 #include <net/ethernet.h> 63 #include <net/if.h> 64 #include <net/if_var.h> 65 #include <net/if_arp.h> 66 #include <net/if_dl.h> 67 #include <net/if_media.h> 68 #include <net/if_types.h> 69 #include <net/if_vlan_var.h> 70 71 #include <netinet/in_systm.h> 72 #include <netinet/in.h> 73 #include <netinet/if_ether.h> 74 #include <netinet/ip.h> 75 #include <netinet/ip.h> 76 #include <netinet/tcp.h> 77 #include <netinet/udp.h> 78 79 #include <dev/pci/pcireg.h> 80 #include <dev/pci/pcivar.h> 81 #include <dev/pci/pci_private.h> 82 83 #include <cxgb_include.h> 84 85 #ifdef PRIV_SUPPORTED 86 #include <sys/priv.h> 87 #endif 88 89 static int cxgb_setup_interrupts(adapter_t *); 90 static void cxgb_teardown_interrupts(adapter_t *); 91 static void cxgb_init(void *); 92 static int cxgb_init_locked(struct port_info *); 93 static int cxgb_uninit_locked(struct port_info *); 94 static int cxgb_uninit_synchronized(struct port_info *); 95 static int cxgb_ioctl(if_t, unsigned long, caddr_t); 96 static int cxgb_media_change(if_t); 97 static int cxgb_ifm_type(int); 98 static void cxgb_build_medialist(struct port_info *); 99 static void cxgb_media_status(if_t, struct 
ifmediareq *); 100 static uint64_t cxgb_get_counter(if_t, ift_counter); 101 static int setup_sge_qsets(adapter_t *); 102 static void cxgb_async_intr(void *); 103 static void cxgb_tick_handler(void *, int); 104 static void cxgb_tick(void *); 105 static void link_check_callout(void *); 106 static void check_link_status(void *, int); 107 static void setup_rss(adapter_t *sc); 108 static int alloc_filters(struct adapter *); 109 static int setup_hw_filters(struct adapter *); 110 static int set_filter(struct adapter *, int, const struct filter_info *); 111 static inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int, 112 unsigned int, u64, u64); 113 static inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int, 114 unsigned int, u64, u64); 115 #ifdef TCP_OFFLOAD 116 static int cpl_not_handled(struct sge_qset *, struct rsp_desc *, struct mbuf *); 117 #endif 118 119 /* Attachment glue for the PCI controller end of the device. Each port of 120 * the device is attached separately, as defined later. 121 */ 122 static int cxgb_controller_probe(device_t); 123 static int cxgb_controller_attach(device_t); 124 static int cxgb_controller_detach(device_t); 125 static void cxgb_free(struct adapter *); 126 static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start, 127 unsigned int end); 128 static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf); 129 static int cxgb_get_regs_len(void); 130 static void touch_bars(device_t dev); 131 static void cxgb_update_mac_settings(struct port_info *p); 132 #ifdef TCP_OFFLOAD 133 static int toe_capability(struct port_info *, int); 134 #endif 135 136 /* Table for probing the cards. 
The desc field isn't actually used */
struct cxgb_ident {
	uint16_t	vendor;	/* PCI vendor ID to match */
	uint16_t	device;	/* PCI device ID to match */
	int		index;	/* index passed to t3_get_adapter_info() */
	char		*desc;	/* board name; also serves as end-of-table marker */
} cxgb_identifiers[] = {
	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
	{PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
	{PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
	{0, 0, 0, NULL}	/* sentinel: NULL desc terminates the table */
};

/* newbus method table for the controller (parent) device. */
static device_method_t cxgb_controller_methods[] = {
	DEVMETHOD(device_probe,		cxgb_controller_probe),
	DEVMETHOD(device_attach,	cxgb_controller_attach),
	DEVMETHOD(device_detach,	cxgb_controller_detach),

	DEVMETHOD_END
};

static driver_t cxgb_controller_driver = {
	"cxgbc",
	cxgb_controller_methods,
	sizeof(struct adapter)	/* softc is the whole adapter state */
};

static int cxgbc_mod_event(module_t, int, void *);

DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgbc_mod_event, NULL);
/* -1 below excludes the NULL sentinel entry from the PNP match table. */
MODULE_PNP_INFO("U16:vendor;U16:device", pci, cxgbc, cxgb_identifiers,
    nitems(cxgb_identifiers) - 1);
MODULE_VERSION(cxgbc, 1);
MODULE_DEPEND(cxgbc, firmware, 1, 1, 1);

/*
 * Attachment glue for the ports.  Attachment is done directly to the
 * controller device.
 */
static int cxgb_port_probe(device_t);
static int cxgb_port_attach(device_t);
static int cxgb_port_detach(device_t);

/* newbus method table for the per-port child devices. */
static device_method_t cxgb_port_methods[] = {
	DEVMETHOD(device_probe,		cxgb_port_probe),
	DEVMETHOD(device_attach,	cxgb_port_attach),
	DEVMETHOD(device_detach,	cxgb_port_detach),
	{ 0, 0 }
};

static driver_t cxgb_port_driver = {
	"cxgb",
	cxgb_port_methods,
	0	/* softc is supplied by the parent via device_set_softc() */
};

static d_ioctl_t cxgb_extension_ioctl;
static d_open_t cxgb_extension_open;
static d_close_t cxgb_extension_close;

/* Character device interface for the per-port management node. */
static struct cdevsw cxgb_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	cxgb_extension_open,
	.d_close =	cxgb_extension_close,
	.d_ioctl =	cxgb_extension_ioctl,
	.d_name =	"cxgb",
};

DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, 0, 0);
MODULE_VERSION(cxgb, 1);

DEBUGNET_DEFINE(cxgb);

/* Global list of all attached adapters, protected by t3_list_lock. */
static struct mtx t3_list_lock;
static SLIST_HEAD(, adapter) t3_list;
#ifdef TCP_OFFLOAD
static struct mtx t3_uld_list_lock;
static SLIST_HEAD(, uld_info) t3_uld_list;
#endif

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1 : only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi_allowed = 2;

SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "CXGB driver parameters");
SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
    "MSI-X, MSI, INTx selector");

/*
 * The driver uses an auto-queue algorithm by default.
246 * To disable it and force a single queue-set per port, use multiq = 0 247 */ 248 static int multiq = 1; 249 SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0, 250 "use min(ncpus/ports, 8) queue-sets per port"); 251 252 /* 253 * By default the driver will not update the firmware unless 254 * it was compiled against a newer version 255 * 256 */ 257 static int force_fw_update = 0; 258 SYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0, 259 "update firmware even if up to date"); 260 261 int cxgb_use_16k_clusters = -1; 262 SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN, 263 &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue "); 264 265 static int nfilters = -1; 266 SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN, 267 &nfilters, 0, "max number of entries in the filter table"); 268 269 enum { 270 MAX_TXQ_ENTRIES = 16384, 271 MAX_CTRL_TXQ_ENTRIES = 1024, 272 MAX_RSPQ_ENTRIES = 16384, 273 MAX_RX_BUFFERS = 16384, 274 MAX_RX_JUMBO_BUFFERS = 16384, 275 MIN_TXQ_ENTRIES = 4, 276 MIN_CTRL_TXQ_ENTRIES = 4, 277 MIN_RSPQ_ENTRIES = 32, 278 MIN_FL_ENTRIES = 32, 279 MIN_FL_JUMBO_ENTRIES = 32 280 }; 281 282 struct filter_info { 283 u32 sip; 284 u32 sip_mask; 285 u32 dip; 286 u16 sport; 287 u16 dport; 288 u32 vlan:12; 289 u32 vlan_prio:3; 290 u32 mac_hit:1; 291 u32 mac_idx:4; 292 u32 mac_vld:1; 293 u32 pkt_type:2; 294 u32 report_filter_id:1; 295 u32 pass:1; 296 u32 rss:1; 297 u32 qset:3; 298 u32 locked:1; 299 u32 valid:1; 300 }; 301 302 enum { FILTER_NO_VLAN_PRI = 7 }; 303 304 #define EEPROM_MAGIC 0x38E2F10C 305 306 #define PORT_MASK ((1 << MAX_NPORTS) - 1) 307 308 309 static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset); 310 311 312 static __inline char 313 t3rev2char(struct adapter *adapter) 314 { 315 char rev = 'z'; 316 317 switch(adapter->params.rev) { 318 case T3_REV_A: 319 rev = 'a'; 320 break; 321 case T3_REV_B: 322 case T3_REV_B2: 323 rev = 'b'; 324 break; 325 
case T3_REV_C: 326 rev = 'c'; 327 break; 328 } 329 return rev; 330 } 331 332 static struct cxgb_ident * 333 cxgb_get_ident(device_t dev) 334 { 335 struct cxgb_ident *id; 336 337 for (id = cxgb_identifiers; id->desc != NULL; id++) { 338 if ((id->vendor == pci_get_vendor(dev)) && 339 (id->device == pci_get_device(dev))) { 340 return (id); 341 } 342 } 343 return (NULL); 344 } 345 346 static const struct adapter_info * 347 cxgb_get_adapter_info(device_t dev) 348 { 349 struct cxgb_ident *id; 350 const struct adapter_info *ai; 351 352 id = cxgb_get_ident(dev); 353 if (id == NULL) 354 return (NULL); 355 356 ai = t3_get_adapter_info(id->index); 357 358 return (ai); 359 } 360 361 static int 362 cxgb_controller_probe(device_t dev) 363 { 364 const struct adapter_info *ai; 365 char *ports, buf[80]; 366 int nports; 367 368 ai = cxgb_get_adapter_info(dev); 369 if (ai == NULL) 370 return (ENXIO); 371 372 nports = ai->nports0 + ai->nports1; 373 if (nports == 1) 374 ports = "port"; 375 else 376 ports = "ports"; 377 378 snprintf(buf, sizeof(buf), "%s, %d %s", ai->desc, nports, ports); 379 device_set_desc_copy(dev, buf); 380 return (BUS_PROBE_DEFAULT); 381 } 382 383 #define FW_FNAME "cxgb_t3fw" 384 #define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom" 385 #define TPSRAM_NAME "cxgb_t3%c_protocol_sram" 386 387 static int 388 upgrade_fw(adapter_t *sc) 389 { 390 const struct firmware *fw; 391 int status; 392 u32 vers; 393 394 if ((fw = firmware_get(FW_FNAME)) == NULL) { 395 device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME); 396 return (ENOENT); 397 } else 398 device_printf(sc->dev, "installing firmware on card\n"); 399 status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize); 400 401 if (status != 0) { 402 device_printf(sc->dev, "failed to install firmware: %d\n", 403 status); 404 } else { 405 t3_get_fw_version(sc, &vers); 406 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d", 407 G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers), 408 
G_FW_VERSION_MICRO(vers)); 409 } 410 411 firmware_put(fw, FIRMWARE_UNLOAD); 412 413 return (status); 414 } 415 416 /* 417 * The cxgb_controller_attach function is responsible for the initial 418 * bringup of the device. Its responsibilities include: 419 * 420 * 1. Determine if the device supports MSI or MSI-X. 421 * 2. Allocate bus resources so that we can access the Base Address Register 422 * 3. Create and initialize mutexes for the controller and its control 423 * logic such as SGE and MDIO. 424 * 4. Call hardware specific setup routine for the adapter as a whole. 425 * 5. Allocate the BAR for doing MSI-X. 426 * 6. Setup the line interrupt iff MSI-X is not supported. 427 * 7. Create the driver's taskq. 428 * 8. Start one task queue service thread. 429 * 9. Check if the firmware and SRAM are up-to-date. They will be 430 * auto-updated later (before FULL_INIT_DONE), if required. 431 * 10. Create a child device for each MAC (port) 432 * 11. Initialize T3 private state. 433 * 12. Trigger the LED 434 * 13. Setup offload iff supported. 435 * 14. Reset/restart the tick callout. 436 * 15. Attach sysctls 437 * 438 * NOTE: Any modification or deviation from this list MUST be reflected in 439 * the above comment. Failure to do so will result in problems on various 440 * error conditions including link flapping. 
 */
static int
cxgb_controller_attach(device_t dev)
{
	device_t child;
	const struct adapter_info *ai;
	struct adapter *sc;
	int i, error = 0;
	uint32_t vers;
	int port_qsets = 1;
	int msi_needed, reg;
	char buf[80];

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->msi_count = 0;
	ai = cxgb_get_adapter_info(dev);

	snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
	    device_get_unit(dev));
	ADAPTER_LOCK_INIT(sc, sc->lockbuf);

	snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
	    device_get_unit(dev));
	snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
	    device_get_unit(dev));
	snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
	    device_get_unit(dev));

	/* reg_lock is a spin mutex (MTX_SPIN); the others are sleep mutexes. */
	MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
	MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
	MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);

	/* Register this adapter on the global list. */
	mtx_lock(&t3_list_lock);
	SLIST_INSERT_HEAD(&t3_list, sc, link);
	mtx_unlock(&t3_list_lock);

	/* find the PCIe link width and set max read request to 4KB*/
	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
		uint16_t lnk;

		lnk = pci_read_config(dev, reg + PCIER_LINK_STA, 2);
		sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4;
		/* A 10G board on a narrow (< x8) link cannot run at line rate. */
		if (sc->link_width < 8 &&
		    (ai->caps & SUPPORTED_10000baseT_Full)) {
			device_printf(sc->dev,
			    "PCIe x%d Link, expect reduced performance\n",
			    sc->link_width);
		}

		pci_set_max_read_req(dev, 4096);
	}

	touch_bars(dev);
	pci_enable_busmaster(dev);
	/*
	 * Allocate the registers and make them available to the driver.
	 * The registers that we care about for NIC mode are in BAR 0
	 */
	sc->regs_rid = PCIR_BAR(0);
	if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE)) == NULL) {
		device_printf(dev, "Cannot allocate BAR region 0\n");
		error = ENXIO;
		goto out;
	}

	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);

	for (i = 0; i < MAX_NPORTS; i++)
		sc->port[i].adapter = sc;

	if (t3_prep_adapter(sc, ai, 1) < 0) {
		printf("prep adapter failed\n");
		error = ENODEV;
		goto out;
	}

	/* BAR 2 holds the user doorbells; only needed in offload mode. */
	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = NULL;
	if (is_offload(sc) &&
	    ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
		device_printf(dev, "Cannot allocate BAR region 1\n");
		error = ENXIO;
		goto out;
	}

	/* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
	 * enough messages for the queue sets.  If that fails, try falling
	 * back to MSI.  If that fails, then try falling back to the legacy
	 * interrupt pin model.
	 */
	sc->msix_regs_rid = 0x20;
	if ((msi_allowed >= 2) &&
	    (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {

		if (multiq)
			port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
		/* One vector per queue set, plus one for async events. */
		msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;

		if (pci_msix_count(dev) == 0 ||
		    (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
		    sc->msi_count != msi_needed) {
			device_printf(dev, "alloc msix failed - "
			    "msi_count=%d, msi_needed=%d, err=%d; "
			    "will try MSI\n", sc->msi_count,
			    msi_needed, error);
			sc->msi_count = 0;
			port_qsets = 1;
			pci_release_msi(dev);
			bus_release_resource(dev, SYS_RES_MEMORY,
			    sc->msix_regs_rid, sc->msix_regs_res);
			sc->msix_regs_res = NULL;
		} else {
			sc->flags |= USING_MSIX;
			sc->cxgb_intr = cxgb_async_intr;
			device_printf(dev,
			    "using MSI-X interrupts (%u vectors)\n",
			    sc->msi_count);
		}
	}

	if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
		sc->msi_count = 1;
		if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
			device_printf(dev, "alloc msi failed - "
			    "err=%d; will try INTx\n", error);
			sc->msi_count = 0;
			port_qsets = 1;
			pci_release_msi(dev);
		} else {
			sc->flags |= USING_MSI;
			sc->cxgb_intr = t3_intr_msi;
			device_printf(dev, "using MSI interrupts\n");
		}
	}
	if (sc->msi_count == 0) {
		/* Last resort: legacy INTx. */
		device_printf(dev, "using line interrupts\n");
		sc->cxgb_intr = t3b_intr;
	}

	/* Create a private taskqueue thread for handling driver events */
	sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->tq);
	if (sc->tq == NULL) {
		device_printf(dev, "failed to allocate controller task queue\n");
		goto out;
	}

	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(dev));
	TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);

	/* Create a periodic callout for checking adapter status */
	callout_init(&sc->cxgb_tick_ch, 1);

	if (t3_check_fw_version(sc) < 0 || force_fw_update) {
		/*
		 * Warn user that a firmware update will be attempted in init.
		 */
		device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
		    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
		sc->flags &= ~FW_UPTODATE;
	} else {
		sc->flags |= FW_UPTODATE;
	}

	if (t3_check_tpsram_version(sc) < 0) {
		/*
		 * Warn user that a firmware update will be attempted in init.
		 */
		device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
		    t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
		sc->flags &= ~TPS_UPTODATE;
	} else {
		sc->flags |= TPS_UPTODATE;
	}

	/*
	 * Create a child device for each MAC.  The ethernet attachment
	 * will be done in these children.
	 */
	for (i = 0; i < (sc)->params.nports; i++) {
		struct port_info *pi;

		if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
			device_printf(dev, "failed to add child port\n");
			error = EINVAL;
			goto out;
		}
		pi = &sc->port[i];
		pi->adapter = sc;
		pi->nqsets = port_qsets;
		pi->first_qset = i*port_qsets;
		pi->port_id = i;
		pi->tx_chan = i >= ai->nports0;
		pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
		sc->rxpkt_map[pi->txpkt_intf] = i;
		/* NOTE(review): redundant with pi->tx_chan above (pi aliases sc->port[i]). */
		sc->port[i].tx_chan = i >= ai->nports0;
		sc->portdev[i] = child;
		device_set_softc(child, pi);
	}
	if ((error = bus_generic_attach(dev)) != 0)
		goto out;

	/* initialize sge private state */
	t3_sge_init_adapter(sc);

	t3_led_ready(sc);

	error = t3_get_fw_version(sc, &vers);
	if (error)
		goto out;

	snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
	    G_FW_VERSION_MICRO(vers));

	snprintf(buf, sizeof(buf), "%s %sNIC\t E/C: %s S/N: %s",
	    ai->desc, is_offload(sc) ? "R" : "",
	    sc->params.vpd.ec, sc->params.vpd.sn);
	device_set_desc_copy(dev, buf);

	snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
	    sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
	    sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);

	device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
	t3_add_attach_sysctls(sc);

#ifdef TCP_OFFLOAD
	for (i = 0; i < NUM_CPL_HANDLERS; i++)
		sc->cpl_handler[i] = cpl_not_handled;
#endif

	t3_intr_clear(sc);
	error = cxgb_setup_interrupts(sc);
out:
	/* cxgb_free() handles partial teardown on any attach failure. */
	if (error)
		cxgb_free(sc);

	return (error);
}

/*
 * The cxgb_controller_detach routine is called with the device is
 * unloaded from the system.
 */
static int
cxgb_controller_detach(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);

	/* All the real teardown work is done in cxgb_free(). */
	cxgb_free(sc);

	return (0);
}

/*
 * The cxgb_free() is called by the cxgb_controller_detach() routine
 * to tear down the structures that were built up in
 * cxgb_controller_attach(), and should be the final piece of work
 * done when fully unloading the driver.
 *
 *
 *  1.
Shutting down the threads started by the cxgb_controller_attach()
 *     routine.
 *  2. Stopping the lower level device and all callouts (cxgb_down_locked()).
 *  3. Detaching all of the port devices created during the
 *     cxgb_controller_attach() routine.
 *  4. Removing the device children created via cxgb_controller_attach().
 *  5. Releasing PCI resources associated with the device.
 *  6. Turning off the offload support, iff it was turned on.
 *  7. Destroying the mutexes created in cxgb_controller_attach().
 *
 */
static void
cxgb_free(struct adapter *sc)
{
	int i, nqsets = 0;

	/* Mark the adapter as going away before detaching anything. */
	ADAPTER_LOCK(sc);
	sc->flags |= CXGB_SHUTDOWN;
	ADAPTER_UNLOCK(sc);

	/*
	 * Make sure all child devices are gone.
	 */
	bus_generic_detach(sc->dev);
	for (i = 0; i < (sc)->params.nports; i++) {
		if (sc->portdev[i] &&
		    device_delete_child(sc->dev, sc->portdev[i]) != 0)
			device_printf(sc->dev, "failed to delete child port\n");
		/* Tally queue sets so we can free the right number below. */
		nqsets += sc->port[i].nqsets;
	}

	/*
	 * At this point, it is as if cxgb_port_detach has run on all ports, and
	 * cxgb_down has run on the adapter.  All interrupts have been silenced,
	 * all open devices have been closed.
	 */
	KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
	    __func__, sc->open_device_map));
	for (i = 0; i < sc->params.nports; i++) {
		KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
		    __func__, i));
	}

	/*
	 * Finish off the adapter's callouts.
	 */
	callout_drain(&sc->cxgb_tick_ch);
	callout_drain(&sc->sge_timer_ch);

	/*
	 * Release resources grabbed under FULL_INIT_DONE by cxgb_up.  The
	 * sysctls are cleaned up by the kernel linker.
	 */
	if (sc->flags & FULL_INIT_DONE) {
		t3_free_sge_resources(sc, nqsets);
		sc->flags &= ~FULL_INIT_DONE;
	}

	/*
	 * Release all interrupt resources.
	 */
	cxgb_teardown_interrupts(sc);
	if (sc->flags & (USING_MSI | USING_MSIX)) {
		device_printf(sc->dev, "releasing msi message(s)\n");
		pci_release_msi(sc->dev);
	} else {
		device_printf(sc->dev, "no msi message to release\n");
	}

	if (sc->msix_regs_res != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
		    sc->msix_regs_res);
	}

	/*
	 * Free the adapter's taskqueue.
	 */
	if (sc->tq != NULL) {
		taskqueue_free(sc->tq);
		sc->tq = NULL;
	}

	free(sc->filters, M_DEVBUF);
	t3_sge_free(sc);

	if (sc->udbs_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->regs_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	/* Locks go last; nothing that uses them may still be running. */
	MTX_DESTROY(&sc->mdio_lock);
	MTX_DESTROY(&sc->sge.reg_lock);
	MTX_DESTROY(&sc->elmer_lock);
	mtx_lock(&t3_list_lock);
	SLIST_REMOVE(&t3_list, sc, adapter, link);
	mtx_unlock(&t3_list_lock);
	ADAPTER_LOCK_DEINIT(sc);
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@sc: the controller softc
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int
setup_sge_qsets(adapter_t *sc)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	u_int ntxq = SGE_TXQ_PER_SET;

	if ((err = t3_sge_alloc(sc)) != 0) {
		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
		return (err);
	}

	/* irq_idx -1 disables per-qset IRQ indexing when not using MSI. */
	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
		irq_idx = -1;

	for (i = 0; i < (sc)->params.nports; i++) {
		struct port_info *pi = &sc->port[i];

		for (j = 0; j < pi->nqsets; j++, qset_idx++) {
			/* With MSI-X each qset gets vector qset_idx + 1
			 * (vector 0 is the async/error interrupt). */
			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
			    (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
			    &sc->params.sge.qset[qset_idx], ntxq, pi);
			if (err) {
				t3_free_sge_resources(sc, qset_idx);
				device_printf(sc->dev,
				    "t3_sge_alloc_qset failed with %d\n", err);
				return (err);
			}
		}
	}

	sc->nqsets = qset_idx;

	return (0);
}

/*
 * Release every interrupt handler and IRQ resource set up by
 * cxgb_setup_interrupts().  Safe to call on a partially set up adapter.
 */
static void
cxgb_teardown_interrupts(adapter_t *sc)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		if (sc->msix_intr_tag[i] == NULL) {

			/* Should have been setup fully or not at all */
			KASSERT(sc->msix_irq_res[i] == NULL &&
			    sc->msix_irq_rid[i] == 0,
			    ("%s: half-done interrupt (%d).", __func__, i));

			continue;
		}

		bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
		    sc->msix_intr_tag[i]);
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
		    sc->msix_irq_res[i]);

		sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
		sc->msix_irq_rid[i] = 0;
	}

	if (sc->intr_tag) {
		KASSERT(sc->irq_res != NULL,
		    ("%s: half-done interrupt.", __func__));

		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq_res);

		sc->irq_res = sc->intr_tag = NULL;
		sc->irq_rid = 0;
	}
}

/*
 * Allocate and hook up the adapter's interrupt(s): one shared handler for
 * INTx/MSI (or the MSI-X async vector), plus one vector per queue set
 * when MSI-X is in use.  On any failure everything is torn back down.
 */
static int
cxgb_setup_interrupts(adapter_t *sc)
{
	struct resource *res;
	void *tag;
	int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);

	/* rid 0 is the INTx line; MSI/MSI-X messages start at rid 1. */
	sc->irq_rid = intr_flag ? 1 : 0;
	sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
		    intr_flag, sc->irq_rid);
		err = EINVAL;
		sc->irq_rid = 0;
	} else {
		err = bus_setup_intr(sc->dev, sc->irq_res,
		    INTR_MPSAFE | INTR_TYPE_NET, NULL,
		    sc->cxgb_intr, sc, &sc->intr_tag);

		if (err) {
			device_printf(sc->dev,
			    "Cannot set up interrupt (%x, %u, %d)\n",
			    intr_flag, sc->irq_rid, err);
			bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
			    sc->irq_res);
			sc->irq_res = sc->intr_tag = NULL;
			sc->irq_rid = 0;
		}
	}

	/* That's all for INTx or MSI */
	if (!(intr_flag & USING_MSIX) || err)
		return (err);

	bus_describe_intr(sc->dev, sc->irq_res, sc->intr_tag, "err");
	/* Remaining MSI-X vectors (rids 2..msi_count) serve the qsets. */
	for (i = 0; i < sc->msi_count - 1; i++) {
		rid = i + 2;
		res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (res == NULL) {
			device_printf(sc->dev, "Cannot allocate interrupt "
			    "for message %d\n", rid);
			err = EINVAL;
			break;
		}

		err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
		    NULL, t3_intr_msix, &sc->sge.qs[i], &tag);
		if (err) {
			device_printf(sc->dev, "Cannot set up interrupt "
			    "for message %d (%d)\n", rid, err);
			bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
			break;
		}

		sc->msix_irq_rid[i] = rid;
		sc->msix_irq_res[i] = res;
		sc->msix_intr_tag[i] = tag;
		bus_describe_intr(sc->dev, res, tag, "qs%d", i);
	}

	if (err)
		cxgb_teardown_interrupts(sc);

	return (err);
}

/* newbus probe for a port child: publish "Port <n> <phy description>". */
static int
cxgb_port_probe(device_t dev)
{
	struct port_info *p;
	char buf[80];
	const char *desc;

	p = device_get_softc(dev);
	desc = p->phy.desc;
	snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
	device_set_desc_copy(dev, buf);
	return (0);
}

/*
 * Create the /dev management node for a port; the port_info pointer is
 * stashed in si_drv1 for the cdev's ioctl handler.
 */
static int
cxgb_makedev(struct port_info *pi)
{

	pi->port_cdev = make_dev(&cxgb_cdevsw, if_getdunit(pi->ifp),
	    UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp));

	if (pi->port_cdev == NULL)
		return (ENOMEM);

	pi->port_cdev->si_drv1 = (void *)pi;

	return (0);
}

/* Interface capabilities advertised (and enabled by default) per port. */
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
#define CXGB_CAP_ENABLE CXGB_CAP

/*
 * newbus attach for a port child: set up the ifnet, its capabilities,
 * the management cdev and the supported media list.
 */
static int
cxgb_port_attach(device_t dev)
{
	struct port_info *p;
	if_t ifp;
	int err;
	struct adapter *sc;

	p = device_get_softc(dev);
	sc = p->adapter;
	snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
	    device_get_unit(device_get_parent(dev)), p->port_id);
	PORT_LOCK_INIT(p, p->lockbuf);

	callout_init(&p->link_check_ch, 1);
	TASK_INIT(&p->link_check_task, 0, check_link_status, p);

	/* Allocate an ifnet object and set it up */
	ifp = p->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setinitfn(ifp, cxgb_init);
	if_setsoftc(ifp, p);
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, cxgb_ioctl);
	if_settransmitfn(ifp, cxgb_transmit);
	if_setqflushfn(ifp, cxgb_qflush);
	if_setgetcounterfn(ifp, cxgb_get_counter);

	if_setcapabilities(ifp, CXGB_CAP);
#ifdef TCP_OFFLOAD
	if (is_offload(sc))
		if_setcapabilitiesbit(ifp, IFCAP_TOE4, 0);
#endif
	if_setcapenable(ifp, CXGB_CAP_ENABLE);
	if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
	if_sethwtsomax(ifp, IP_MAXPACKET);
	if_sethwtsomaxsegcount(ifp, 36);
	if_sethwtsomaxsegsize(ifp, 65536);

	/*
	 * Disable TSO on 4-port - it isn't supported by the firmware.
	 */
	if (sc->params.nports > 2) {
		if_setcapabilitiesbit(ifp, 0, IFCAP_TSO | IFCAP_VLAN_HWTSO);
		if_setcapenablebit(ifp, 0, IFCAP_TSO | IFCAP_VLAN_HWTSO);
		if_sethwassistbits(ifp, 0, CSUM_TSO);
	}

	ether_ifattach(ifp, p->hw_addr);

	/* Attach driver debugnet methods. */
	DEBUGNET_SET(ifp, cxgb);

#ifdef DEFAULT_JUMBO
	if (sc->params.nports <= 2)
		if_setmtu(ifp, ETHERMTU_JUMBO);
#endif
	if ((err = cxgb_makedev(p)) != 0) {
		printf("makedev failed %d\n", err);
		return (err);
	}

	/* Create a list of media supported by this port */
	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
	    cxgb_media_status);
	cxgb_build_medialist(p);

	t3_sge_init_port(p);

	return (err);
}

/*
 * cxgb_port_detach() is called via the device_detach methods when
 * cxgb_free() calls the bus_generic_detach.  It is responsible for
 * removing the device from the view of the kernel, i.e. from all
 * interfaces lists etc.  This routine is only called when the driver is
 * being unloaded, not when the link goes down.
1086 */ 1087 static int 1088 cxgb_port_detach(device_t dev) 1089 { 1090 struct port_info *p; 1091 struct adapter *sc; 1092 int i; 1093 1094 p = device_get_softc(dev); 1095 sc = p->adapter; 1096 1097 /* Tell cxgb_ioctl and if_init that the port is going away */ 1098 ADAPTER_LOCK(sc); 1099 SET_DOOMED(p); 1100 wakeup(&sc->flags); 1101 while (IS_BUSY(sc)) 1102 mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0); 1103 SET_BUSY(sc); 1104 ADAPTER_UNLOCK(sc); 1105 1106 if (p->port_cdev != NULL) 1107 destroy_dev(p->port_cdev); 1108 1109 cxgb_uninit_synchronized(p); 1110 ether_ifdetach(p->ifp); 1111 1112 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { 1113 struct sge_qset *qs = &sc->sge.qs[i]; 1114 struct sge_txq *txq = &qs->txq[TXQ_ETH]; 1115 1116 callout_drain(&txq->txq_watchdog); 1117 callout_drain(&txq->txq_timer); 1118 } 1119 1120 PORT_LOCK_DEINIT(p); 1121 if_free(p->ifp); 1122 p->ifp = NULL; 1123 1124 ADAPTER_LOCK(sc); 1125 CLR_BUSY(sc); 1126 wakeup_one(&sc->flags); 1127 ADAPTER_UNLOCK(sc); 1128 return (0); 1129 } 1130 1131 void 1132 t3_fatal_err(struct adapter *sc) 1133 { 1134 u_int fw_status[4]; 1135 1136 if (sc->flags & FULL_INIT_DONE) { 1137 t3_sge_stop(sc); 1138 t3_write_reg(sc, A_XGM_TX_CTRL, 0); 1139 t3_write_reg(sc, A_XGM_RX_CTRL, 0); 1140 t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0); 1141 t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0); 1142 t3_intr_disable(sc); 1143 } 1144 device_printf(sc->dev,"encountered fatal error, operation suspended\n"); 1145 if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status)) 1146 device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n", 1147 fw_status[0], fw_status[1], fw_status[2], fw_status[3]); 1148 } 1149 1150 int 1151 t3_os_find_pci_capability(adapter_t *sc, int cap) 1152 { 1153 device_t dev; 1154 struct pci_devinfo *dinfo; 1155 pcicfgregs *cfg; 1156 uint32_t status; 1157 uint8_t ptr; 1158 1159 dev = sc->dev; 1160 dinfo = device_get_ivars(dev); 1161 cfg = &dinfo->cfg; 1162 1163 status = pci_read_config(dev, 
PCIR_STATUS, 2); 1164 if (!(status & PCIM_STATUS_CAPPRESENT)) 1165 return (0); 1166 1167 switch (cfg->hdrtype & PCIM_HDRTYPE) { 1168 case 0: 1169 case 1: 1170 ptr = PCIR_CAP_PTR; 1171 break; 1172 case 2: 1173 ptr = PCIR_CAP_PTR_2; 1174 break; 1175 default: 1176 return (0); 1177 break; 1178 } 1179 ptr = pci_read_config(dev, ptr, 1); 1180 1181 while (ptr != 0) { 1182 if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap) 1183 return (ptr); 1184 ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1); 1185 } 1186 1187 return (0); 1188 } 1189 1190 int 1191 t3_os_pci_save_state(struct adapter *sc) 1192 { 1193 device_t dev; 1194 struct pci_devinfo *dinfo; 1195 1196 dev = sc->dev; 1197 dinfo = device_get_ivars(dev); 1198 1199 pci_cfg_save(dev, dinfo, 0); 1200 return (0); 1201 } 1202 1203 int 1204 t3_os_pci_restore_state(struct adapter *sc) 1205 { 1206 device_t dev; 1207 struct pci_devinfo *dinfo; 1208 1209 dev = sc->dev; 1210 dinfo = device_get_ivars(dev); 1211 1212 pci_cfg_restore(dev, dinfo); 1213 return (0); 1214 } 1215 1216 /** 1217 * t3_os_link_changed - handle link status changes 1218 * @sc: the adapter associated with the link change 1219 * @port_id: the port index whose link status has changed 1220 * @link_status: the new status of the link 1221 * @speed: the new speed setting 1222 * @duplex: the new duplex setting 1223 * @fc: the new flow-control setting 1224 * 1225 * This is the OS-dependent handler for link status changes. The OS 1226 * neutral handler takes care of most of the processing for these events, 1227 * then calls this handler for any OS-specific processing. 
 */
void
t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
    int duplex, int fc, int mac_was_reset)
{
	struct port_info *pi = &adapter->port[port_id];
	if_t ifp = pi->ifp;

	/* no race with detach, so ifp should always be good */
	KASSERT(ifp, ("%s: if detached.", __func__));

	/* Reapply mac settings if they were lost due to a reset */
	if (mac_was_reset) {
		PORT_LOCK(pi);
		cxgb_update_mac_settings(pi);
		PORT_UNLOCK(pi);
	}

	/* Push the new state into the ifnet for the stack to see. */
	if (link_status) {
		if_setbaudrate(ifp, IF_Mbps(speed));
		if_link_state_change(ifp, LINK_STATE_UP);
	} else
		if_link_state_change(ifp, LINK_STATE_DOWN);
}

/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@phy: the PHY reporting the module change
 *	@mod_type: new module type
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown"
	};
	struct port_info *pi = &adap->port[port_id];
	int mod = pi->phy.modtype;

	/* Rebuild the media list if the module type no longer matches it. */
	if (mod != pi->media.ifm_cur->ifm_data)
		cxgb_build_medialist(pi);

	if (mod == phy_modtype_none)
		if_printf(pi->ifp, "PHY module unplugged\n");
	else {
		KASSERT(mod < ARRAY_SIZE(mod_str),
		    ("invalid PHY module type %d", mod));
		if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
	}
}

/* Record a port's MAC address in the port structure (see comment below). */
void
t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
{

	/*
	 * The ifnet might not be allocated before this gets called,
	 * as this is called early on in attach by t3_prep_adapter
	 * save the address off in the port structure
	 */
	if (cxgb_debug)
		printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
	bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
}

/*
 * Programs the XGMAC based on the settings in the ifnet.  These settings
 * include MTU, MAC address, mcast addresses, etc.
 */
static void
cxgb_update_mac_settings(struct port_info *p)
{
	if_t ifp = p->ifp;
	struct t3_rx_mode rm;
	struct cmac *mac = &p->mac;
	int mtu, hwtagging;

	PORT_LOCK_ASSERT_OWNED(p);

	bcopy(if_getlladdr(ifp), p->hw_addr, ETHER_ADDR_LEN);

	/* Leave room for a VLAN tag if HW tagging of oversize frames is on. */
	mtu = if_getmtu(ifp);
	if (if_getcapenable(ifp) & IFCAP_VLAN_MTU)
		mtu += ETHER_VLAN_ENCAP_LEN;

	hwtagging = (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0;

	t3_mac_set_mtu(mac, mtu);
	t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
	t3_mac_set_address(mac, 0, p->hw_addr);
	t3_init_rx_mode(&rm, p);
	t3_mac_set_rx_mode(mac, &rm);
}


/*
 * Poll response queue 0 until it has absorbed 'n' management replies on
 * top of 'init_cnt', sleeping 10ms between checks.  Returns 0 on success
 * or ETIMEDOUT after 5 attempts.
 */
static int
await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
    unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return (ETIMEDOUT);
		t3_os_sleep(10);
	}
	return 0;
}

/*
 * Initialize TP memory parity by writing every SMT (16), L2T (2048) and
 * routing table (2048) entry plus one TCB field via management-channel
 * CPL messages, then wait for all 4113 replies.  Offload mode is enabled
 * for the duration and restored on exit.  Returns 0 or ETIMEDOUT.
 */
static int
init_tp_parity(struct adapter *adap)
{
	int i;
	struct mbuf *m;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	/* Write all 16 SMT entries. */
	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		m = m_gethdr(M_WAITOK, MT_DATA);
		req = mtod(m, struct cpl_smt_write_req *);
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		memset(req, 0, sizeof(*req));
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, m);
	}

	/* Write all 2048 L2T entries. */
	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		m = m_gethdr(M_WAITOK, MT_DATA);
		req = mtod(m, struct cpl_l2t_write_req *);
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		memset(req, 0, sizeof(*req));
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, m);
	}

	/* Write all 2048 routing table entries. */
	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		m = m_gethdr(M_WAITOK, MT_DATA);
		req = mtod(m, struct cpl_rte_write_req *);
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		memset(req, 0, sizeof(*req));
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, m);
	}

	/* One SET_TCB_FIELD message rounds out the sequence. */
	m = m_gethdr(M_WAITOK, MT_DATA);
	greq = mtod(m, struct cpl_set_tcb_field *);
	m->m_len = m->m_pkthdr.len = sizeof(*greq);
	memset(greq, 0, sizeof(*greq));
	greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = htobe64(1);
	t3_mgmt_tx(adap, m);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	t3_tp_set_offload_mode(adap, 0);
	return (i);
}

/**
 *	setup_rss - configure Receive Side Steering (per-queue connection demux)
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
1413 */ 1414 static void 1415 setup_rss(adapter_t *adap) 1416 { 1417 int i; 1418 u_int nq[2]; 1419 uint8_t cpus[SGE_QSETS + 1]; 1420 uint16_t rspq_map[RSS_TABLE_SIZE]; 1421 1422 for (i = 0; i < SGE_QSETS; ++i) 1423 cpus[i] = i; 1424 cpus[SGE_QSETS] = 0xff; 1425 1426 nq[0] = nq[1] = 0; 1427 for_each_port(adap, i) { 1428 const struct port_info *pi = adap2pinfo(adap, i); 1429 1430 nq[pi->tx_chan] += pi->nqsets; 1431 } 1432 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) { 1433 rspq_map[i] = nq[0] ? i % nq[0] : 0; 1434 rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0; 1435 } 1436 1437 /* Calculate the reverse RSS map table */ 1438 for (i = 0; i < SGE_QSETS; ++i) 1439 adap->rrss_map[i] = 0xff; 1440 for (i = 0; i < RSS_TABLE_SIZE; ++i) 1441 if (adap->rrss_map[rspq_map[i]] == 0xff) 1442 adap->rrss_map[rspq_map[i]] = i; 1443 1444 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN | 1445 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN | 1446 F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, 1447 cpus, rspq_map); 1448 1449 } 1450 static void 1451 send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, 1452 int hi, int port) 1453 { 1454 struct mbuf *m; 1455 struct mngt_pktsched_wr *req; 1456 1457 m = m_gethdr(M_NOWAIT, MT_DATA); 1458 if (m) { 1459 req = mtod(m, struct mngt_pktsched_wr *); 1460 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT)); 1461 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET; 1462 req->sched = sched; 1463 req->idx = qidx; 1464 req->min = lo; 1465 req->max = hi; 1466 req->binding = port; 1467 m->m_len = m->m_pkthdr.len = sizeof(*req); 1468 t3_mgmt_tx(adap, m); 1469 } 1470 } 1471 1472 static void 1473 bind_qsets(adapter_t *sc) 1474 { 1475 int i, j; 1476 1477 for (i = 0; i < (sc)->params.nports; ++i) { 1478 const struct port_info *pi = adap2pinfo(sc, i); 1479 1480 for (j = 0; j < pi->nqsets; ++j) { 1481 send_pktsched_cmd(sc, 1, pi->first_qset + j, -1, 1482 -1, pi->tx_chan); 1483 1484 } 1485 } 1486 } 1487 1488 static 
void 1489 update_tpeeprom(struct adapter *adap) 1490 { 1491 const struct firmware *tpeeprom; 1492 1493 uint32_t version; 1494 unsigned int major, minor; 1495 int ret, len; 1496 char rev, name[32]; 1497 1498 t3_seeprom_read(adap, TP_SRAM_OFFSET, &version); 1499 1500 major = G_TP_VERSION_MAJOR(version); 1501 minor = G_TP_VERSION_MINOR(version); 1502 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR) 1503 return; 1504 1505 rev = t3rev2char(adap); 1506 snprintf(name, sizeof(name), TPEEPROM_NAME, rev); 1507 1508 tpeeprom = firmware_get(name); 1509 if (tpeeprom == NULL) { 1510 device_printf(adap->dev, 1511 "could not load TP EEPROM: unable to load %s\n", 1512 name); 1513 return; 1514 } 1515 1516 len = tpeeprom->datasize - 4; 1517 1518 ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize); 1519 if (ret) 1520 goto release_tpeeprom; 1521 1522 if (len != TP_SRAM_LEN) { 1523 device_printf(adap->dev, 1524 "%s length is wrong len=%d expected=%d\n", name, 1525 len, TP_SRAM_LEN); 1526 return; 1527 } 1528 1529 ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize, 1530 TP_SRAM_OFFSET); 1531 1532 if (!ret) { 1533 device_printf(adap->dev, 1534 "Protocol SRAM image updated in EEPROM to %d.%d.%d\n", 1535 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); 1536 } else 1537 device_printf(adap->dev, 1538 "Protocol SRAM image update in EEPROM failed\n"); 1539 1540 release_tpeeprom: 1541 firmware_put(tpeeprom, FIRMWARE_UNLOAD); 1542 1543 return; 1544 } 1545 1546 static int 1547 update_tpsram(struct adapter *adap) 1548 { 1549 const struct firmware *tpsram; 1550 int ret; 1551 char rev, name[32]; 1552 1553 rev = t3rev2char(adap); 1554 snprintf(name, sizeof(name), TPSRAM_NAME, rev); 1555 1556 update_tpeeprom(adap); 1557 1558 tpsram = firmware_get(name); 1559 if (tpsram == NULL){ 1560 device_printf(adap->dev, "could not load TP SRAM\n"); 1561 return (EINVAL); 1562 } else 1563 device_printf(adap->dev, "updating TP SRAM\n"); 1564 1565 ret = 
t3_check_tpsram(adap, tpsram->data, tpsram->datasize); 1566 if (ret) 1567 goto release_tpsram; 1568 1569 ret = t3_set_proto_sram(adap, tpsram->data); 1570 if (ret) 1571 device_printf(adap->dev, "loading protocol SRAM failed\n"); 1572 1573 release_tpsram: 1574 firmware_put(tpsram, FIRMWARE_UNLOAD); 1575 1576 return ret; 1577 } 1578 1579 /** 1580 * cxgb_up - enable the adapter 1581 * @adap: adapter being enabled 1582 * 1583 * Called when the first port is enabled, this function performs the 1584 * actions necessary to make an adapter operational, such as completing 1585 * the initialization of HW modules, and enabling interrupts. 1586 */ 1587 static int 1588 cxgb_up(struct adapter *sc) 1589 { 1590 int err = 0; 1591 unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS; 1592 1593 KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)", 1594 __func__, sc->open_device_map)); 1595 1596 if ((sc->flags & FULL_INIT_DONE) == 0) { 1597 1598 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1599 1600 if ((sc->flags & FW_UPTODATE) == 0) 1601 if ((err = upgrade_fw(sc))) 1602 goto out; 1603 1604 if ((sc->flags & TPS_UPTODATE) == 0) 1605 if ((err = update_tpsram(sc))) 1606 goto out; 1607 1608 if (is_offload(sc) && nfilters != 0) { 1609 sc->params.mc5.nservers = 0; 1610 1611 if (nfilters < 0) 1612 sc->params.mc5.nfilters = mxf; 1613 else 1614 sc->params.mc5.nfilters = min(nfilters, mxf); 1615 } 1616 1617 err = t3_init_hw(sc, 0); 1618 if (err) 1619 goto out; 1620 1621 t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT); 1622 t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); 1623 1624 err = setup_sge_qsets(sc); 1625 if (err) 1626 goto out; 1627 1628 alloc_filters(sc); 1629 setup_rss(sc); 1630 1631 t3_add_configured_sysctls(sc); 1632 sc->flags |= FULL_INIT_DONE; 1633 } 1634 1635 t3_intr_clear(sc); 1636 t3_sge_start(sc); 1637 t3_intr_enable(sc); 1638 1639 if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) && 1640 is_offload(sc) && init_tp_parity(sc) == 0) 
1641 sc->flags |= TP_PARITY_INIT; 1642 1643 if (sc->flags & TP_PARITY_INIT) { 1644 t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR); 1645 t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff); 1646 } 1647 1648 if (!(sc->flags & QUEUES_BOUND)) { 1649 bind_qsets(sc); 1650 setup_hw_filters(sc); 1651 sc->flags |= QUEUES_BOUND; 1652 } 1653 1654 t3_sge_reset_adapter(sc); 1655 out: 1656 return (err); 1657 } 1658 1659 /* 1660 * Called when the last open device is closed. Does NOT undo all of cxgb_up's 1661 * work. Specifically, the resources grabbed under FULL_INIT_DONE are released 1662 * during controller_detach, not here. 1663 */ 1664 static void 1665 cxgb_down(struct adapter *sc) 1666 { 1667 t3_sge_stop(sc); 1668 t3_intr_disable(sc); 1669 } 1670 1671 /* 1672 * if_init for cxgb ports. 1673 */ 1674 static void 1675 cxgb_init(void *arg) 1676 { 1677 struct port_info *p = arg; 1678 struct adapter *sc = p->adapter; 1679 1680 ADAPTER_LOCK(sc); 1681 cxgb_init_locked(p); /* releases adapter lock */ 1682 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1683 } 1684 1685 static int 1686 cxgb_init_locked(struct port_info *p) 1687 { 1688 struct adapter *sc = p->adapter; 1689 if_t ifp = p->ifp; 1690 struct cmac *mac = &p->mac; 1691 int i, rc = 0, may_sleep = 0, gave_up_lock = 0; 1692 1693 ADAPTER_LOCK_ASSERT_OWNED(sc); 1694 1695 while (!IS_DOOMED(p) && IS_BUSY(sc)) { 1696 gave_up_lock = 1; 1697 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) { 1698 rc = EINTR; 1699 goto done; 1700 } 1701 } 1702 if (IS_DOOMED(p)) { 1703 rc = ENXIO; 1704 goto done; 1705 } 1706 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 1707 1708 /* 1709 * The code that runs during one-time adapter initialization can sleep 1710 * so it's important not to hold any locks across it. 1711 */ 1712 may_sleep = sc->flags & FULL_INIT_DONE ? 
0 : 1; 1713 1714 if (may_sleep) { 1715 SET_BUSY(sc); 1716 gave_up_lock = 1; 1717 ADAPTER_UNLOCK(sc); 1718 } 1719 1720 if (sc->open_device_map == 0 && ((rc = cxgb_up(sc)) != 0)) 1721 goto done; 1722 1723 PORT_LOCK(p); 1724 if (isset(&sc->open_device_map, p->port_id) && 1725 (if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 1726 PORT_UNLOCK(p); 1727 goto done; 1728 } 1729 t3_port_intr_enable(sc, p->port_id); 1730 if (!mac->multiport) 1731 t3_mac_init(mac); 1732 cxgb_update_mac_settings(p); 1733 t3_link_start(&p->phy, mac, &p->link_config); 1734 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); 1735 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 1736 PORT_UNLOCK(p); 1737 1738 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { 1739 struct sge_qset *qs = &sc->sge.qs[i]; 1740 struct sge_txq *txq = &qs->txq[TXQ_ETH]; 1741 1742 callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs, 1743 txq->txq_watchdog.c_cpu); 1744 } 1745 1746 /* all ok */ 1747 setbit(&sc->open_device_map, p->port_id); 1748 callout_reset(&p->link_check_ch, 1749 p->phy.caps & SUPPORTED_LINK_IRQ ? 
hz * 3 : hz / 4, 1750 link_check_callout, p); 1751 1752 done: 1753 if (may_sleep) { 1754 ADAPTER_LOCK(sc); 1755 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 1756 CLR_BUSY(sc); 1757 } 1758 if (gave_up_lock) 1759 wakeup_one(&sc->flags); 1760 ADAPTER_UNLOCK(sc); 1761 return (rc); 1762 } 1763 1764 static int 1765 cxgb_uninit_locked(struct port_info *p) 1766 { 1767 struct adapter *sc = p->adapter; 1768 int rc; 1769 1770 ADAPTER_LOCK_ASSERT_OWNED(sc); 1771 1772 while (!IS_DOOMED(p) && IS_BUSY(sc)) { 1773 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) { 1774 rc = EINTR; 1775 goto done; 1776 } 1777 } 1778 if (IS_DOOMED(p)) { 1779 rc = ENXIO; 1780 goto done; 1781 } 1782 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 1783 SET_BUSY(sc); 1784 ADAPTER_UNLOCK(sc); 1785 1786 rc = cxgb_uninit_synchronized(p); 1787 1788 ADAPTER_LOCK(sc); 1789 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 1790 CLR_BUSY(sc); 1791 wakeup_one(&sc->flags); 1792 done: 1793 ADAPTER_UNLOCK(sc); 1794 return (rc); 1795 } 1796 1797 /* 1798 * Called on "ifconfig down", and from port_detach 1799 */ 1800 static int 1801 cxgb_uninit_synchronized(struct port_info *pi) 1802 { 1803 struct adapter *sc = pi->adapter; 1804 if_t ifp = pi->ifp; 1805 1806 /* 1807 * taskqueue_drain may cause a deadlock if the adapter lock is held. 1808 */ 1809 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1810 1811 /* 1812 * Clear this port's bit from the open device map, and then drain all 1813 * the tasks that can access/manipulate this port's port_info or ifp. 1814 * We disable this port's interrupts here and so the slow/ext 1815 * interrupt tasks won't be enqueued. The tick task will continue to 1816 * be enqueued every second but the runs after this drain will not see 1817 * this port in the open device map. 1818 * 1819 * A well behaved task must take open_device_map into account and ignore 1820 * ports that are not open. 
1821 */ 1822 clrbit(&sc->open_device_map, pi->port_id); 1823 t3_port_intr_disable(sc, pi->port_id); 1824 taskqueue_drain(sc->tq, &sc->slow_intr_task); 1825 taskqueue_drain(sc->tq, &sc->tick_task); 1826 1827 callout_drain(&pi->link_check_ch); 1828 taskqueue_drain(sc->tq, &pi->link_check_task); 1829 1830 PORT_LOCK(pi); 1831 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1832 1833 /* disable pause frames */ 1834 t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0); 1835 1836 /* Reset RX FIFO HWM */ 1837 t3_set_reg_field(sc, A_XGM_RXFIFO_CFG + pi->mac.offset, 1838 V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0); 1839 1840 DELAY(100 * 1000); 1841 1842 /* Wait for TXFIFO empty */ 1843 t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset, 1844 F_TXFIFO_EMPTY, 1, 20, 5); 1845 1846 DELAY(100 * 1000); 1847 t3_mac_disable(&pi->mac, MAC_DIRECTION_RX); 1848 1849 pi->phy.ops->power_down(&pi->phy, 1); 1850 1851 PORT_UNLOCK(pi); 1852 1853 pi->link_config.link_ok = 0; 1854 t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0); 1855 1856 if (sc->open_device_map == 0) 1857 cxgb_down(pi->adapter); 1858 1859 return (0); 1860 } 1861 1862 /* 1863 * Mark lro enabled or disabled in all qsets for this port 1864 */ 1865 static int 1866 cxgb_set_lro(struct port_info *p, int enabled) 1867 { 1868 int i; 1869 struct adapter *adp = p->adapter; 1870 struct sge_qset *q; 1871 1872 for (i = 0; i < p->nqsets; i++) { 1873 q = &adp->sge.qs[p->first_qset + i]; 1874 q->lro.enabled = (enabled != 0); 1875 } 1876 return (0); 1877 } 1878 1879 static int 1880 cxgb_ioctl(if_t ifp, unsigned long command, caddr_t data) 1881 { 1882 struct port_info *p = if_getsoftc(ifp); 1883 struct adapter *sc = p->adapter; 1884 struct ifreq *ifr = (struct ifreq *)data; 1885 int flags, error = 0, mtu; 1886 uint32_t mask; 1887 1888 switch (command) { 1889 case SIOCSIFMTU: 1890 ADAPTER_LOCK(sc); 1891 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? 
EBUSY : 0); 1892 if (error) { 1893 fail: 1894 ADAPTER_UNLOCK(sc); 1895 return (error); 1896 } 1897 1898 mtu = ifr->ifr_mtu; 1899 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) { 1900 error = EINVAL; 1901 } else { 1902 if_setmtu(ifp, mtu); 1903 PORT_LOCK(p); 1904 cxgb_update_mac_settings(p); 1905 PORT_UNLOCK(p); 1906 } 1907 ADAPTER_UNLOCK(sc); 1908 break; 1909 case SIOCSIFFLAGS: 1910 ADAPTER_LOCK(sc); 1911 if (IS_DOOMED(p)) { 1912 error = ENXIO; 1913 goto fail; 1914 } 1915 if (if_getflags(ifp) & IFF_UP) { 1916 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1917 flags = p->if_flags; 1918 if (((if_getflags(ifp) ^ flags) & IFF_PROMISC) || 1919 ((if_getflags(ifp) ^ flags) & IFF_ALLMULTI)) { 1920 if (IS_BUSY(sc)) { 1921 error = EBUSY; 1922 goto fail; 1923 } 1924 PORT_LOCK(p); 1925 cxgb_update_mac_settings(p); 1926 PORT_UNLOCK(p); 1927 } 1928 ADAPTER_UNLOCK(sc); 1929 } else 1930 error = cxgb_init_locked(p); 1931 p->if_flags = if_getflags(ifp); 1932 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 1933 error = cxgb_uninit_locked(p); 1934 else 1935 ADAPTER_UNLOCK(sc); 1936 1937 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1938 break; 1939 case SIOCADDMULTI: 1940 case SIOCDELMULTI: 1941 ADAPTER_LOCK(sc); 1942 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); 1943 if (error) 1944 goto fail; 1945 1946 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1947 PORT_LOCK(p); 1948 cxgb_update_mac_settings(p); 1949 PORT_UNLOCK(p); 1950 } 1951 ADAPTER_UNLOCK(sc); 1952 1953 break; 1954 case SIOCSIFCAP: 1955 ADAPTER_LOCK(sc); 1956 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? 
EBUSY : 0); 1957 if (error) 1958 goto fail; 1959 1960 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 1961 if (mask & IFCAP_TXCSUM) { 1962 if_togglecapenable(ifp, IFCAP_TXCSUM); 1963 if_togglehwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP); 1964 1965 if (IFCAP_TSO4 & if_getcapenable(ifp) && 1966 !(IFCAP_TXCSUM & if_getcapenable(ifp))) { 1967 mask &= ~IFCAP_TSO4; 1968 if_setcapenablebit(ifp, 0, IFCAP_TSO4); 1969 if_printf(ifp, 1970 "tso4 disabled due to -txcsum.\n"); 1971 } 1972 } 1973 if (mask & IFCAP_TXCSUM_IPV6) { 1974 if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6); 1975 if_togglehwassist(ifp, CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 1976 1977 if (IFCAP_TSO6 & if_getcapenable(ifp) && 1978 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) { 1979 mask &= ~IFCAP_TSO6; 1980 if_setcapenablebit(ifp, 0, IFCAP_TSO6); 1981 if_printf(ifp, 1982 "tso6 disabled due to -txcsum6.\n"); 1983 } 1984 } 1985 if (mask & IFCAP_RXCSUM) 1986 if_togglecapenable(ifp, IFCAP_RXCSUM); 1987 if (mask & IFCAP_RXCSUM_IPV6) 1988 if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6); 1989 1990 /* 1991 * Note that we leave CSUM_TSO alone (it is always set). The 1992 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before 1993 * sending a TSO request our way, so it's sufficient to toggle 1994 * IFCAP_TSOx only. 
1995 */ 1996 if (mask & IFCAP_TSO4) { 1997 if (!(IFCAP_TSO4 & if_getcapenable(ifp)) && 1998 !(IFCAP_TXCSUM & if_getcapenable(ifp))) { 1999 if_printf(ifp, "enable txcsum first.\n"); 2000 error = EAGAIN; 2001 goto fail; 2002 } 2003 if_togglecapenable(ifp, IFCAP_TSO4); 2004 } 2005 if (mask & IFCAP_TSO6) { 2006 if (!(IFCAP_TSO6 & if_getcapenable(ifp)) && 2007 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) { 2008 if_printf(ifp, "enable txcsum6 first.\n"); 2009 error = EAGAIN; 2010 goto fail; 2011 } 2012 if_togglecapenable(ifp, IFCAP_TSO6); 2013 } 2014 if (mask & IFCAP_LRO) { 2015 if_togglecapenable(ifp, IFCAP_LRO); 2016 2017 /* Safe to do this even if cxgb_up not called yet */ 2018 cxgb_set_lro(p, if_getcapenable(ifp) & IFCAP_LRO); 2019 } 2020 #ifdef TCP_OFFLOAD 2021 if (mask & IFCAP_TOE4) { 2022 int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TOE4; 2023 2024 error = toe_capability(p, enable); 2025 if (error == 0) 2026 if_togglecapenable(ifp, mask); 2027 } 2028 #endif 2029 if (mask & IFCAP_VLAN_HWTAGGING) { 2030 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); 2031 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2032 PORT_LOCK(p); 2033 cxgb_update_mac_settings(p); 2034 PORT_UNLOCK(p); 2035 } 2036 } 2037 if (mask & IFCAP_VLAN_MTU) { 2038 if_togglecapenable(ifp, IFCAP_VLAN_MTU); 2039 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2040 PORT_LOCK(p); 2041 cxgb_update_mac_settings(p); 2042 PORT_UNLOCK(p); 2043 } 2044 } 2045 if (mask & IFCAP_VLAN_HWTSO) 2046 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 2047 if (mask & IFCAP_VLAN_HWCSUM) 2048 if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM); 2049 2050 #ifdef VLAN_CAPABILITIES 2051 VLAN_CAPABILITIES(ifp); 2052 #endif 2053 ADAPTER_UNLOCK(sc); 2054 break; 2055 case SIOCSIFMEDIA: 2056 case SIOCGIFMEDIA: 2057 error = ifmedia_ioctl(ifp, ifr, &p->media, command); 2058 break; 2059 default: 2060 error = ether_ioctl(ifp, command, data); 2061 } 2062 2063 return (error); 2064 } 2065 2066 static int 2067 cxgb_media_change(if_t ifp) 2068 { 2069 return 
(EOPNOTSUPP); 2070 } 2071 2072 /* 2073 * Translates phy->modtype to the correct Ethernet media subtype. 2074 */ 2075 static int 2076 cxgb_ifm_type(int mod) 2077 { 2078 switch (mod) { 2079 case phy_modtype_sr: 2080 return (IFM_10G_SR); 2081 case phy_modtype_lr: 2082 return (IFM_10G_LR); 2083 case phy_modtype_lrm: 2084 return (IFM_10G_LRM); 2085 case phy_modtype_twinax: 2086 return (IFM_10G_TWINAX); 2087 case phy_modtype_twinax_long: 2088 return (IFM_10G_TWINAX_LONG); 2089 case phy_modtype_none: 2090 return (IFM_NONE); 2091 case phy_modtype_unknown: 2092 return (IFM_UNKNOWN); 2093 } 2094 2095 KASSERT(0, ("%s: modtype %d unknown", __func__, mod)); 2096 return (IFM_UNKNOWN); 2097 } 2098 2099 /* 2100 * Rebuilds the ifmedia list for this port, and sets the current media. 2101 */ 2102 static void 2103 cxgb_build_medialist(struct port_info *p) 2104 { 2105 struct cphy *phy = &p->phy; 2106 struct ifmedia *media = &p->media; 2107 int mod = phy->modtype; 2108 int m = IFM_ETHER | IFM_FDX; 2109 2110 PORT_LOCK(p); 2111 2112 ifmedia_removeall(media); 2113 if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) { 2114 /* Copper (RJ45) */ 2115 2116 if (phy->caps & SUPPORTED_10000baseT_Full) 2117 ifmedia_add(media, m | IFM_10G_T, mod, NULL); 2118 2119 if (phy->caps & SUPPORTED_1000baseT_Full) 2120 ifmedia_add(media, m | IFM_1000_T, mod, NULL); 2121 2122 if (phy->caps & SUPPORTED_100baseT_Full) 2123 ifmedia_add(media, m | IFM_100_TX, mod, NULL); 2124 2125 if (phy->caps & SUPPORTED_10baseT_Full) 2126 ifmedia_add(media, m | IFM_10_T, mod, NULL); 2127 2128 ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL); 2129 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 2130 2131 } else if (phy->caps & SUPPORTED_TP) { 2132 /* Copper (CX4) */ 2133 2134 KASSERT(phy->caps & SUPPORTED_10000baseT_Full, 2135 ("%s: unexpected cap 0x%x", __func__, phy->caps)); 2136 2137 ifmedia_add(media, m | IFM_10G_CX4, mod, NULL); 2138 ifmedia_set(media, m | IFM_10G_CX4); 2139 2140 } else if (phy->caps & 
SUPPORTED_FIBRE &&
	    phy->caps & SUPPORTED_10000baseT_Full) {
		/* 10G optical (but includes SFP+ twinax) */

		m |= cxgb_ifm_type(mod);
		if (IFM_SUBTYPE(m) == IFM_NONE)
			m &= ~IFM_FDX;

		ifmedia_add(media, m, mod, NULL);
		ifmedia_set(media, m);

	} else if (phy->caps & SUPPORTED_FIBRE &&
	    phy->caps & SUPPORTED_1000baseT_Full) {
		/* 1G optical */

		/* XXX: Lie and claim to be SX, could actually be any 1G-X */
		ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
		ifmedia_set(media, m | IFM_1000_SX);

	} else {
		KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
		    phy->caps));
	}

	PORT_UNLOCK(p);
}

/*
 * ifmedia status callback: report link validity/activity and, for
 * autoselect (copper RJ45) media, map the negotiated speed onto the
 * matching twisted-pair active media type.
 */
static void
cxgb_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct port_info *p = if_getsoftc(ifp);
	struct ifmedia_entry *cur = p->media.ifm_cur;
	int speed = p->link_config.speed;

	/* Rebuild the media list if the transceiver module has changed. */
	if (cur->ifm_data != p->phy.modtype) {
		cxgb_build_medialist(p);
		cur = p->media.ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!p->link_config.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/*
	 * active and current will differ iff current media is autoselect. That
	 * can happen only for copper RJ45.
	 */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;
	KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
	    ("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
		    speed));
}

/*
 * if_get_counter method: derive interface counters from the port's MAC
 * statistics block.  cxgb_refresh_stats() rate-limits the hardware
 * readback (at most once every 250ms).
 */
static uint64_t
cxgb_get_counter(if_t ifp, ift_counter c)
{
	struct port_info *pi = if_getsoftc(ifp);
	struct adapter *sc = pi->adapter;
	struct cmac *mac = &pi->mac;
	struct mac_stats *mstats = &mac->stats;

	cxgb_refresh_stats(pi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (mstats->rx_frames);

	case IFCOUNTER_IERRORS:
		return (mstats->rx_jabber + mstats->rx_data_errs +
		    mstats->rx_sequence_errs + mstats->rx_runt +
		    mstats->rx_too_long + mstats->rx_mac_internal_errs +
		    mstats->rx_short + mstats->rx_fcs_errs);

	case IFCOUNTER_OPACKETS:
		return (mstats->tx_frames);

	case IFCOUNTER_OERRORS:
		return (mstats->tx_excess_collisions + mstats->tx_underrun +
		    mstats->tx_len_errs + mstats->tx_mac_internal_errs +
		    mstats->tx_excess_deferral + mstats->tx_fcs_errs);

	case IFCOUNTER_COLLISIONS:
		return (mstats->tx_total_collisions);

	case IFCOUNTER_IBYTES:
		return (mstats->rx_octets);

	case IFCOUNTER_OBYTES:
		return (mstats->tx_octets);

	case IFCOUNTER_IMCASTS:
		return (mstats->rx_mcast_frames);

	case IFCOUNTER_OMCASTS:
		return (mstats->tx_mcast_frames);

	case IFCOUNTER_IQDROPS:
		return (mstats->rx_cong_drops);

	case IFCOUNTER_OQDROPS: {
		int i;
		uint64_t drops;

		/* Sum the buf_ring drops of every tx queue of this port. */
		drops = 0;
		if (sc->flags & FULL_INIT_DONE) {
			for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
				drops += sc->sge.qs[i].txq[TXQ_ETH].txq_mr->br_drops;
		}

		return (drops);

	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}

/*
 * Slow-path (async/error) interrupt handler: mask further PL interrupts
 * (the read-back flushes the posted write) and defer the real work to the
 * slow_intr_task taskqueue.
 */
static void
cxgb_async_intr(void *data)
{
	adapter_t *sc = data;

	t3_write_reg(sc, A_PL_INT_ENABLE0, 0);
	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
}

/* Callout: queue the link-check task, but only while the port is open. */
static void
link_check_callout(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (!isset(&sc->open_device_map, pi->port_id))
		return;

	taskqueue_enqueue(sc->tq, &pi->link_check_task);
}

/*
 * Taskqueue handler: process a link state change, and keep polling at 1s
 * intervals while a link fault is pending, the PHY cannot interrupt on
 * link changes, or the link is down.
 */
static void
check_link_status(void *arg, int pending)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (!isset(&sc->open_device_map, pi->port_id))
		return;

	t3_link_changed(sc, pi->port_id);

	if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ) ||
	    pi->link_config.link_ok == 0)
		callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
}

void
t3_os_link_intr(struct port_info *pi)
{
	/*
	 * Schedule a link check in the near future. If the link is flapping
	 * rapidly we'll keep resetting the callout and delaying the check until
	 * things stabilize a bit.
*/
	callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi);
}

/*
 * T3B2 workaround: run the MAC watchdog on every open, fault-free,
 * link-up port, and when the watchdog reports the MAC wedged (status 2)
 * bring it back up: reapply settings, restart the link, re-enable rx/tx
 * and the port interrupt.
 */
static void
check_t3b2_mac(struct adapter *sc)
{
	int i;

	if (sc->flags & CXGB_SHUTDOWN)
		return;

	for_each_port(sc, i) {
		struct port_info *p = &sc->port[i];
		int status;
#ifdef INVARIANTS
		if_t ifp = p->ifp;
#endif

		if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
		    !p->link_config.link_ok)
			continue;

		KASSERT(if_getdrvflags(ifp) & IFF_DRV_RUNNING,
		    ("%s: state mismatch (drv_flags %x, device_map %x)",
		    __func__, if_getdrvflags(ifp), sc->open_device_map));

		PORT_LOCK(p);
		status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			/* MAC is wedged; reset it from scratch. */
			struct cmac *mac = &p->mac;

			cxgb_update_mac_settings(p);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(sc, p->port_id);
			p->mac.stats.num_resets++;
		}
		PORT_UNLOCK(p);
	}
}

/* 1Hz callout: defer the periodic work to tick_task and rearm. */
static void
cxgb_tick(void *arg)
{
	adapter_t *sc = (adapter_t *)arg;

	if (sc->flags & CXGB_SHUTDOWN)
		return;

	taskqueue_enqueue(sc->tq, &sc->tick_task);
	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
}

/*
 * Pull MAC statistics from the hardware into pi->mac.stats, at most once
 * every 250ms; callers asking more often get the cached values.
 */
void
cxgb_refresh_stats(struct port_info *pi)
{
	struct timeval tv;
	const struct timeval interval = {0, 250000};	/* 250ms */

	getmicrotime(&tv);
	timevalsub(&tv, &interval);
	if (timevalcmp(&tv, &pi->last_refreshed, <))
		return;

	PORT_LOCK(pi);
	t3_mac_update_stats(&pi->mac);
	PORT_UNLOCK(pi);
	getmicrotime(&pi->last_refreshed);
}

/*
 * Periodic (1Hz, runs from tick_task) housekeeping: T3B2 MAC watchdog,
 * SGE response-queue starvation / free-list-empty accounting, per-port
 * stats refresh, and rx FIFO overflow counting.
 */
static void
cxgb_tick_handler(void *arg, int count)
{
	adapter_t *sc = (adapter_t *)arg;
	const struct adapter_params *p = &sc->params;
	int i;
	uint32_t cause, reset;

	if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
		return;

	if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
		check_t3b2_mac(sc);

	cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE | F_FLEMPTY);
	if (cause) {
		struct sge_qset *qs = &sc->sge.qs[0];
		uint32_t mask, v;

		v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00;

		/* One starvation bit per response queue. */
		mask = 1;
		for (i = 0; i < SGE_QSETS; i++) {
			if (v & mask)
				qs[i].rspq.starved++;
			mask <<= 1;
		}

		mask <<= SGE_QSETS; /* skip RSPQXDISABLED */

		/* Two free lists per qset. */
		for (i = 0; i < SGE_QSETS * 2; i++) {
			if (v & mask) {
				qs[i / 2].fl[i % 2].empty++;
			}
			mask <<= 1;
		}

		/* clear */
		t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v);
		t3_write_reg(sc, A_SG_INT_CAUSE, cause);
	}

	for (i = 0; i < sc->params.nports; i++) {
		struct port_info *pi = &sc->port[i];
		struct cmac *mac = &pi->mac;

		if (!isset(&sc->open_device_map, pi->port_id))
			continue;

		cxgb_refresh_stats(pi);

		if (mac->multiport)
			continue;

		/* Count rx fifo overflows, once per second */
		cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}
		t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
	}
}

/*
 * Placeholder for a 32-bit BAR rewrite workaround.  The body is compiled
 * out ("&& 0") and references Linux-style identifiers (pdev,
 * pci_read_config_dword); it would not compile on FreeBSD if enabled
 * as-is.
 */
static void
touch_bars(device_t dev)
{
	/*
	 * Don't enable yet
	 */
#if !defined(__LP64__) && 0
	u32 v;

	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
#endif
}

/*
 * Write `len` bytes at byte offset `offset` into the adapter's serial
 * EEPROM.  Unaligned spans are handled read-modify-write: the boundary
 * words are read first, the payload is spliced in, and full 4-byte words
 * are written back with write-protect lifted for the duration.
 * Returns 0 or an error number.
 */
static int
set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
{
	uint8_t *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = pi->adapter;

	/* Round the span out to whole 4-byte EEPROM words. */
	aligned_offset = offset & ~3;
	aligned_len = (len + (offset & 3) + 3) & ~3;

	if (aligned_offset != offset || aligned_len != len) {
		/* Unaligned: read the boundary words, then splice in data. */
		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
		/*
		 * NOTE(review): malloc(9) with M_WAITOK cannot return NULL,
		 * so this check is dead code.
		 */
		if (!buf)
			return (ENOMEM);
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
			    aligned_offset + aligned_len - 4,
			    (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (offset & 3), data, len);
	} else
		buf = (uint8_t *)(uintptr_t)data;

	/* Lift write protection for the duration of the update. */
	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	/* Only free the bounce buffer, not the caller's data. */
	if (buf != data)
		free(buf, M_DEVBUF);
	return err;
}

/*
 * Range-check helper for ioctl parameters.  Note the deliberate twist:
 * negative values mean "not specified / leave unchanged" and always pass.
 */
static int
in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}

/* cdev open handler for the control device: nothing to do. */
static int
cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td)
{
	return (0);
}

/* cdev close handler for the control device: nothing to do. */
static int
cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

/*
 * Control-device ioctl handler implementing the CHELSIO_* management
 * commands (register/MII access, firmware load, offload tunables,
 * filters, diagnostics).  All commands are privileged.
 */
static int
cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
    int fflag, struct thread *td)
{
	int mmd, error = 0;
	struct port_info *pi = dev->si_drv1;
	adapter_t *sc = pi->adapter;

#ifdef PRIV_SUPPORTED
	if (priv_check(td, PRIV_DRIVER)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#else
	if (suser(td)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#endif

	switch (cmd) {
	case CHELSIO_GET_MIIREG: {
		uint32_t val;
		struct cphy *phy = &pi->phy;
		struct ch_mii_data *mid = (struct ch_mii_data *)data;

		if (!phy->mdio_read)
			return (EOPNOTSUPP);
		if (is_10G(sc)) {
			/* Clause 45: MMD (device) number in phy_id bits 8+. */
			mmd = mid->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return (EINVAL);

			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
			    mid->reg_num, &val);
		} else
			error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
			    mid->reg_num & 0x1f, &val);
		if (error == 0)
			mid->val_out = val;
		break;
	}
	case CHELSIO_SET_MIIREG: {
		struct cphy *phy = &pi->phy;
		struct ch_mii_data *mid = (struct ch_mii_data *)data;

		if (!phy->mdio_write)
			return (EOPNOTSUPP);
		if (is_10G(sc)) {
			mmd = mid->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return (EINVAL);

			error = phy->mdio_write(sc, mid->phy_id & 0x1f,
			    mmd, mid->reg_num, mid->val_in);
		} else
			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
			    mid->reg_num & 0x1f,
			    mid->val_in);
		break;
	}
	case CHELSIO_SETREG: {
		struct ch_reg *edata = (struct ch_reg *)data;
		/* Register must be 32-bit aligned and inside the BAR. */
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);
		t3_write_reg(sc, edata->addr, edata->val);
		break;
	}
	case CHELSIO_GETREG: {
		struct ch_reg *edata = (struct ch_reg *)data;
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);
		edata->val = t3_read_reg(sc, edata->addr);
		break;
	}
	case CHELSIO_GET_SGE_CONTEXT: {
		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
		/* reg_lock serializes the indirect SGE context registers. */
		mtx_lock_spin(&sc->sge.reg_lock);
		switch (ecntxt->cntxt_type) {
		case CNTXT_TYPE_EGRESS:
			error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_FL:
			error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_RSP:
			error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_CQ:
			error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		default:
			error = EINVAL;
			break;
		}
		mtx_unlock_spin(&sc->sge.reg_lock);
		break;
	}
	case CHELSIO_GET_SGE_DESC: {
		struct ch_desc *edesc = (struct ch_desc *)data;
		int ret;
		/* 6 rings per qset, hence queue_num = qset * 6 + ring. */
		if (edesc->queue_num >= SGE_QSETS * 6)
			return (EINVAL);
		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
		    edesc->queue_num % 6, edesc->idx, edesc->data);
		if (ret < 0)
			return (EINVAL);
		edesc->size = ret;
		break;
	}
	case CHELSIO_GET_QSET_PARAMS: {
		struct qset_params *q;
		struct ch_qset_params *t = (struct ch_qset_params *)data;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		/* qset_idx is relative to this port's first qset. */
		if (t->qset_idx >= nqsets)
			return EINVAL;

		i = q1 + t->qset_idx;
		q = &sc->params.sge.qset[i];
		t->rspq_size = q->rspq_size;
		t->txq_size[0] = q->txq_size[0];
		t->txq_size[1] = q->txq_size[1];
		t->txq_size[2] = q->txq_size[2];
		t->fl_size[0] = q->fl_size;
		t->fl_size[1] = q->jumbo_size;
		t->polling = q->polling;
		t->lro = q->lro;
		t->intr_lat = q->coalesce_usecs;
		t->cong_thres = q->cong_thres;
		t->qnum = i;

		/* Report the IRQ vector only once interrupts are set up. */
		if ((sc->flags & FULL_INIT_DONE) == 0)
			t->vector = 0;
		else if (sc->flags & USING_MSIX)
			t->vector = rman_get_start(sc->msix_irq_res[i]);
		else
			t->vector = rman_get_start(sc->irq_res);

		break;
	}
	case CHELSIO_GET_QSET_NUM: {
		struct ch_reg *edata = (struct ch_reg *)data;
		edata->val = pi->nqsets;
		break;
	}
	case CHELSIO_LOAD_FW: {
uint8_t *fw_data;
		uint32_t vers;
		struct ch_mem_range *t = (struct ch_mem_range *)data;

		/*
		 * You're allowed to load a firmware only before FULL_INIT_DONE
		 *
		 * FW_UPTODATE is also set so the rest of the initialization
		 * will not overwrite what was loaded here. This gives you the
		 * flexibility to load any firmware (and maybe shoot yourself in
		 * the foot).
		 */

		ADAPTER_LOCK(sc);
		if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
			ADAPTER_UNLOCK(sc);
			return (EBUSY);
		}

		/*
		 * NOTE(review): t->len is caller-controlled; M_NOWAIT means
		 * an oversized request fails with ENOMEM instead of blocking.
		 */
		fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
		if (!fw_data)
			error = ENOMEM;
		else
			error = copyin(t->buf, fw_data, t->len);

		if (!error)
			error = -t3_load_fw(sc, fw_data, t->len);

		/* Refresh the cached version string regardless of outcome. */
		if (t3_get_fw_version(sc, &vers) == 0) {
			snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
			    "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
			    G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
		}

		if (!error)
			sc->flags |= FW_UPTODATE;

		free(fw_data, M_DEVBUF);	/* free(9) of NULL is a no-op */
		ADAPTER_UNLOCK(sc);
		break;
	}
	case CHELSIO_LOAD_BOOT: {
		uint8_t *boot_data;
		struct ch_mem_range *t = (struct ch_mem_range *)data;

		boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
		if (!boot_data)
			return ENOMEM;

		error = copyin(t->buf, boot_data, t->len);
		if (!error)
			error = -t3_load_boot(sc, boot_data, t->len);

		free(boot_data, M_DEVBUF);
		break;
	}
	case CHELSIO_GET_PM: {
		struct ch_pm *m = (struct ch_pm *)data;
		struct tp_params *p = &sc->params.tp;

		if (!is_offload(sc))
			return (EOPNOTSUPP);

		m->tx_pg_sz = p->tx_pg_size;
		m->tx_num_pg = p->tx_num_pgs;
		m->rx_pg_sz = p->rx_pg_size;
		m->rx_num_pg = p->rx_num_pgs;
		m->pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;

		break;
	}
	case CHELSIO_SET_PM: {
		struct ch_pm *m = (struct ch_pm *)data;
		struct tp_params *p = &sc->params.tp;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (sc->flags & FULL_INIT_DONE)
			return (EBUSY);

		if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
		    !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
			return (EINVAL);	/* not power of 2 */
		if (!(m->rx_pg_sz & 0x14000))
			return (EINVAL);	/* not 16KB or 64KB */
		if (!(m->tx_pg_sz & 0x1554000))
			return (EINVAL);
		/* -1 means "keep the current value". */
		if (m->tx_num_pg == -1)
			m->tx_num_pg = p->tx_num_pgs;
		if (m->rx_num_pg == -1)
			m->rx_num_pg = p->rx_num_pgs;
		if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
			return (EINVAL);
		if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
		    m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
			return (EINVAL);

		p->rx_pg_size = m->rx_pg_sz;
		p->tx_pg_size = m->tx_pg_sz;
		p->rx_num_pgs = m->rx_num_pg;
		p->tx_num_pgs = m->tx_num_pg;
		break;
	}
	case CHELSIO_SETMTUTAB: {
		struct ch_mtus *m = (struct ch_mtus *)data;
		int i;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (offload_running(sc))
			return (EBUSY);
		if (m->nmtus != NMTUS)
			return (EINVAL);
		if (m->mtus[0] < 81)		/* accommodate SACK */
			return (EINVAL);

		/*
		 * MTUs must be in ascending order
		 */
		for (i = 1; i < NMTUS; ++i)
			if (m->mtus[i] < m->mtus[i - 1])
				return (EINVAL);

		memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
		break;
	}
	case CHELSIO_GETMTUTAB: {
		struct ch_mtus *m = (struct ch_mtus *)data;

		if (!is_offload(sc))
			return (EOPNOTSUPP);

		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
		m->nmtus = NMTUS;
		break;
	}
	case CHELSIO_GET_MEM: {
		struct ch_mem_range *t = (struct ch_mem_range *)data;
		struct mc7 *mem;
		uint8_t *useraddr;
		u64 buf[32];

		/*
		 * Use these to avoid modifying len/addr in the return
		 * struct
*/
		uint32_t len = t->len, addr = t->addr;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EIO);		/* need the memory controllers */
		if ((addr & 0x7) || (len & 0x7))
			return (EINVAL);
		if (t->mem_id == MEM_CM)
			mem = &sc->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &sc->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &sc->pmtx;
		else
			return (EINVAL);

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t->version = 3 | (sc->params.rev << 10);

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr = (uint8_t *)t->buf;
		while (len) {
			unsigned int chunk = min(len, sizeof(buf));

			error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
			if (error)
				return (-error);
			if (copyout(buf, useraddr, chunk))
				return (EFAULT);
			useraddr += chunk;
			addr += chunk;
			len -= chunk;
		}
		break;
	}
	case CHELSIO_READ_TCAM_WORD: {
		struct ch_tcam_word *t = (struct ch_tcam_word *)data;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EIO);		/* need MC5 */
		return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
		/* NOTE(review): this break is unreachable. */
		break;
	}
	case CHELSIO_SET_TRACE_FILTER: {
		struct ch_trace *t = (struct ch_trace *)data;
		const struct trace_params *tp;

		/* The trace parameters start at t->sip in the user struct. */
		tp = (const struct trace_params *)&t->sip;
		if (t->config_tx)
			t3_config_trace_filter(sc, tp, 0, t->invert_match,
			    t->trace_tx);
		if (t->config_rx)
			t3_config_trace_filter(sc, tp, 1, t->invert_match,
			    t->trace_rx);
		break;
	}
	case CHELSIO_SET_PKTSCHED: {
		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
		if (sc->open_device_map == 0)
			return (EAGAIN);
		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
		    p->binding);
		break;
	}
	case CHELSIO_IFCONF_GETREGS: {
		struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
		int reglen = cxgb_get_regs_len();
		uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
		if (buf == NULL) {
			return (ENOMEM);
		}
		if (regs->len > reglen)
			regs->len = reglen;
		else if (regs->len < reglen)
			error = ENOBUFS;

		if (!error) {
			cxgb_get_regs(sc, regs, buf);
			error = copyout(buf, regs->data, reglen);
		}
		free(buf, M_DEVBUF);

		break;
	}
	case CHELSIO_SET_HW_SCHED: {
		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
		unsigned int ticks_per_usec = core_ticks_per_usec(sc);

		if ((sc->flags & FULL_INIT_DONE) == 0)
			return (EAGAIN);	/* need TP to be initialized */
		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
		    !in_range(t->channel, 0, 1) ||
		    !in_range(t->kbps, 0, 10000000) ||
		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
		    !in_range(t->flow_ipg, 0,
			dack_ticks_to_usec(sc, 0x7ff)))
			return (EINVAL);

		/* Negative fields mean "leave unchanged" (see in_range()). */
		if (t->kbps >= 0) {
			error = t3_config_sched(sc, t->kbps, t->sched);
			if (error < 0)
				return (-error);
		}
		if (t->class_ipg >= 0)
			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
		if (t->flow_ipg >= 0) {
			t->flow_ipg *= 1000;	/* us -> ns */
			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
		}
		if (t->mode >= 0) {
			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);

			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
			    bit, t->mode ?
bit : 0);
		}
		if (t->channel >= 0)
			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
			    1 << t->sched, t->channel << t->sched);
		break;
	}
	case CHELSIO_GET_EEPROM: {
		int i;
		struct ch_eeprom *e = (struct ch_eeprom *)data;
		uint8_t *buf;

		/* Offset must be word-aligned and the span inside the part. */
		if (e->offset & 3 || e->offset >= EEPROMSIZE ||
		    e->len > EEPROMSIZE || e->offset + e->len > EEPROMSIZE) {
			return (EINVAL);
		}

		buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);
		if (buf == NULL) {
			return (ENOMEM);
		}
		e->magic = EEPROM_MAGIC;
		/* Read whole 4-byte words covering the requested range. */
		for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
			error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);

		if (!error)
			error = copyout(buf + e->offset, e->data, e->len);

		free(buf, M_DEVBUF);
		break;
	}
	case CHELSIO_CLEAR_STATS: {
		if (!(sc->flags & FULL_INIT_DONE))
			return EAGAIN;

		/* Fold in any pending hardware counters, then zero all. */
		PORT_LOCK(pi);
		t3_mac_update_stats(&pi->mac);
		memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
		PORT_UNLOCK(pi);
		break;
	}
	case CHELSIO_GET_UP_LA: {
		struct ch_up_la *la = (struct ch_up_la *)data;
		uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT);
		if (buf == NULL) {
			return (ENOMEM);
		}
		if (la->bufsize < LA_BUFSIZE)
			error = ENOBUFS;

		if (!error)
			error = -t3_get_up_la(sc, &la->stopped, &la->idx,
			    &la->bufsize, buf);
		if (!error)
			error = copyout(buf, la->data, la->bufsize);

		free(buf, M_DEVBUF);
		break;
	}
	case CHELSIO_GET_UP_IOQS: {
		struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data;
		uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT);
		uint32_t *v;

		if (buf == NULL) {
			return (ENOMEM);
		}
		if (ioqs->bufsize < IOQS_BUFSIZE)
			error = ENOBUFS;

		if (!error)
			error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);

		if (!error) {
			v = (uint32_t *)buf;

			/* First four words are the global enable/status. */
			ioqs->ioq_rx_enable = *v++;
			ioqs->ioq_tx_enable = *v++;
			ioqs->ioq_rx_status = *v++;
			ioqs->ioq_tx_status = *v++;

			error = copyout(v, ioqs->data, ioqs->bufsize);
		}

		free(buf, M_DEVBUF);
		break;
	}
	case CHELSIO_SET_FILTER: {
		struct ch_filter *f = (struct ch_filter *)data;
		struct filter_info *p;
		unsigned int nfilters = sc->params.mc5.nfilters;

		if (!is_offload(sc))
			return (EOPNOTSUPP);	/* No TCAM */
		if (!(sc->flags & FULL_INIT_DONE))
			return (EAGAIN);	/* mc5 not setup yet */
		if (nfilters == 0)
			return (EBUSY);		/* TOE will use TCAM */

		/* sanity checks */
		if (f->filter_id >= nfilters ||
		    (f->val.dip && f->mask.dip != 0xffffffff) ||
		    (f->val.sport && f->mask.sport != 0xffff) ||
		    (f->val.dport && f->mask.dport != 0xffff) ||
		    (f->val.vlan && f->mask.vlan != 0xfff) ||
		    (f->val.vlan_prio &&
			f->mask.vlan_prio != FILTER_NO_VLAN_PRI) ||
		    (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) ||
		    f->qset >= SGE_QSETS ||
		    sc->rrss_map[f->qset] >= RSS_TABLE_SIZE)
			return (EINVAL);

		/* Was allocated with M_WAITOK */
		KASSERT(sc->filters, ("filter table NULL\n"));

		p = &sc->filters[f->filter_id];
		if (p->locked)
			return (EPERM);

		/* Wildcarded fields get their match-all encodings. */
		bzero(p, sizeof(*p));
		p->sip = f->val.sip;
		p->sip_mask = f->mask.sip;
		p->dip = f->val.dip;
		p->sport = f->val.sport;
		p->dport = f->val.dport;
		p->vlan = f->mask.vlan ? f->val.vlan : 0xfff;
		p->vlan_prio = f->mask.vlan_prio ?
(f->val.vlan_prio & 6) :
		    FILTER_NO_VLAN_PRI;
		p->mac_hit = f->mac_hit;
		p->mac_vld = f->mac_addr_idx != 0xffff;
		p->mac_idx = f->mac_addr_idx;
		p->pkt_type = f->proto;
		p->report_filter_id = f->want_filter_id;
		p->pass = f->pass;
		p->rss = f->rss;
		p->qset = f->qset;

		error = set_filter(sc, f->filter_id, p);
		if (error == 0)
			p->valid = 1;
		break;
	}
	case CHELSIO_DEL_FILTER: {
		struct ch_filter *f = (struct ch_filter *)data;
		struct filter_info *p;
		unsigned int nfilters = sc->params.mc5.nfilters;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EAGAIN);
		if (nfilters == 0 || sc->filters == NULL)
			return (EINVAL);
		if (f->filter_id >= nfilters)
			return (EINVAL);

		p = &sc->filters[f->filter_id];
		if (p->locked)
			return (EPERM);
		if (!p->valid)
			return (EFAULT); /* Read "Bad address" as "Bad index" */

		/* Reprogram the slot as a match-nothing filter. */
		bzero(p, sizeof(*p));
		p->sip = p->sip_mask = 0xffffffff;
		p->vlan = 0xfff;
		p->vlan_prio = FILTER_NO_VLAN_PRI;
		p->pkt_type = 1;
		error = set_filter(sc, f->filter_id, p);
		break;
	}
	case CHELSIO_GET_FILTER: {
		struct ch_filter *f = (struct ch_filter *)data;
		struct filter_info *p;
		unsigned int i, nfilters = sc->params.mc5.nfilters;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EAGAIN);
		if (nfilters == 0 || sc->filters == NULL)
			return (EINVAL);

		/*
		 * Return the first valid filter after the given id;
		 * an id of 0xffffffff starts the scan at the beginning.
		 */
		i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1;
		for (; i < nfilters; i++) {
			p = &sc->filters[i];
			if (!p->valid)
				continue;

			bzero(f, sizeof(*f));

			f->filter_id = i;
			f->val.sip = p->sip;
			f->mask.sip = p->sip_mask;
			f->val.dip = p->dip;
			f->mask.dip = p->dip ? 0xffffffff : 0;
			f->val.sport = p->sport;
			f->mask.sport = p->sport ? 0xffff : 0;
			f->val.dport = p->dport;
			f->mask.dport = p->dport ? 0xffff : 0;
			f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan;
			f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff;
			f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
			    0 : p->vlan_prio;
			f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
			    0 : FILTER_NO_VLAN_PRI;
			f->mac_hit = p->mac_hit;
			f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff;
			f->proto = p->pkt_type;
			f->want_filter_id = p->report_filter_id;
			f->pass = p->pass;
			f->rss = p->rss;
			f->qset = p->qset;

			break;
		}

		/* 0xffffffff signals "no more filters" to the caller. */
		if (i == nfilters)
			f->filter_id = 0xffffffff;
		break;
	}
	default:
		return (EOPNOTSUPP);
		break;
	}

	return (error);
}

/*
 * Dump registers [start, end] (byte offsets, inclusive) into buf at the
 * same offsets.
 */
static __inline void
reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
    unsigned int end)
{
	uint32_t *p = (uint32_t *)(buf + start);

	for ( ; start <= end; start += sizeof(uint32_t))
		*p++ = t3_read_reg(ap, start);
}

#define T3_REGMAP_SIZE (3 * 1024)
/* Size of the buffer cxgb_get_regs() fills. */
static int
cxgb_get_regs_len(void)
{
	return T3_REGMAP_SIZE;
}

/* Produce the register dump for the CHELSIO_IFCONF_GETREGS ioctl. */
static void
cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
{

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	/*
	 * NOTE(review): is_pcie(sc) << 31 left-shifts into the sign bit of
	 * a signed int, which is undefined behavior in C; consider
	 * ((uint32_t)is_pcie(sc) << 31).
	 */
	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation. Hard to justify the complexity.
*/
	memset(buf, 0, cxgb_get_regs_len());
	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
	    XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
	    XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

/*
 * Allocate the host-side filter table.  The last entry is initialized as
 * a locked, match-everything, pass+rss filter.
 */
static int
alloc_filters(struct adapter *sc)
{
	struct filter_info *p;
	unsigned int nfilters = sc->params.mc5.nfilters;

	if (nfilters == 0)
		return (0);

	p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO);
	sc->filters = p;

	p = &sc->filters[nfilters - 1];
	p->vlan = 0xfff;
	p->vlan_prio = FILTER_NO_VLAN_PRI;
	p->pass = p->rss = p->valid = p->locked = 1;

	return (0);
}

/*
 * Enable hardware filtering and program every locked filter into the
 * hardware; stops at the first failure.
 */
static int
setup_hw_filters(struct adapter *sc)
{
	int i, rc;
	unsigned int nfilters = sc->params.mc5.nfilters;

	if (!sc->filters)
		return (0);

	t3_enable_filters(sc);

	for (i = rc = 0; i < nfilters && !rc; i++) {
		if (sc->filters[i].locked)
			rc = set_filter(sc, i, &sc->filters[i]);
	}

	return (rc);
}

/*
 * Program one filter into the TCAM via a management work request: a
 * CPL_PASS_OPEN_REQ plus two CPL_SET_TCB_FIELDs in one mbuf, and for
 * pass + no-rss filters a follow-up request that binds the filter to its
 * qset.  Always returns 0.
 */
static int
set_filter(struct adapter *sc, int id, const struct filter_info *f)
{
	int len;
	struct mbuf *m;
	struct ulp_txpkt *txpkt;
	struct work_request_hdr *wr;
	struct cpl_pass_open_req *oreq;
	struct cpl_set_tcb_field *sreq;

	len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq);
	KASSERT(len <= MHLEN, ("filter request too big for an mbuf"));

	/* Translate the filter index into its TCAM server-entry id. */
	id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes -
	    sc->params.mc5.nfilters;

	m = m_gethdr(M_WAITOK, MT_DATA);
	m->m_len = m->m_pkthdr.len = len;
	bzero(mtod(m, char *), len);

	wr = mtod(m, struct work_request_hdr *);
	wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC);

	oreq = (struct cpl_pass_open_req *)(wr + 1);
	txpkt = (struct ulp_txpkt *)oreq;
	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8));
	OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id));
	oreq->local_port = htons(f->dport);
	oreq->peer_port = htons(f->sport);
	oreq->local_ip = htonl(f->dip);
	oreq->peer_ip = htonl(f->sip);
	oreq->peer_netmask = htonl(f->sip_mask);
	oreq->opt0h = 0;
	oreq->opt0l = htonl(F_NO_OFFLOAD);
	oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) |
	    V_CONN_POLICY(CPL_CONN_POLICY_FILTER) |
	    V_VLAN_PRI(f->vlan_prio >> 1) |
	    V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) |
	    V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) |
	    V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4)));

	sreq = (struct cpl_set_tcb_field *)(oreq + 1);
	set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL,
	    (f->report_filter_id << 15) | (1 << 23) |
	    ((u64)f->pass << 35) | ((u64)!f->rss << 36));
	set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1);
	t3_mgmt_tx(sc, m);

	if (f->pass && !f->rss) {
		/* Qset is bound explicitly when RSS is not used. */
		len = sizeof(*sreq);
		m = m_gethdr(M_WAITOK, MT_DATA);
		m->m_len = m->m_pkthdr.len = len;
		bzero(mtod(m, char *), len);
		sreq = mtod(m, struct cpl_set_tcb_field *);
		sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		mk_set_tcb_field(sreq, id, 25, 0x3f80000,
		    (u64)sc->rrss_map[f->qset] << 19);
		t3_mgmt_tx(sc, m);
	}
	return 0;
}

/* Fill in a CPL_SET_TCB_FIELD request; no reply is requested. */
static inline void
mk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid,
    unsigned int word, u64 mask, u64 val)
{
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply =
V_NO_REPLY(1);
	req->cpu_idx = 0;
	req->word = htons(word);
	req->mask = htobe64(mask);
	req->val = htobe64(val);
}

/* Wrap a CPL_SET_TCB_FIELD in a ULP_TXPKT header for a management WR. */
static inline void
set_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid,
    unsigned int word, u64 mask, u64 val)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;

	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8));
	mk_set_tcb_field(req, tid, word, mask, val);
}

/* Invoke func on every adapter on the global list, under t3_list_lock. */
void
t3_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	mtx_lock(&t3_list_lock);
	SLIST_FOREACH(sc, &t3_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	mtx_unlock(&t3_list_lock);
}

#ifdef TCP_OFFLOAD
/*
 * Enable or disable TOE on a port, activating the TOM upper-layer driver
 * (and, as a stopgap, iWARP) on first use.  Called with the adapter lock
 * held.
 */
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		if (!(sc->flags & FULL_INIT_DONE)) {
			log(LOG_WARNING,
			    "You must enable a cxgb interface first\n");
			return (EAGAIN);
		}

		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t3_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t3_tom.ko before trying "
				    "to enable TOE on a cxgb interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);

		/*
		 * XXX: Temporary code to allow iWARP to be enabled when TOE is
		 * enabled on any port. Need to figure out how to enable,
		 * disable, load, and unload iWARP cleanly.
		 */
		if (!isset(&sc->offload_map, MAX_NPORTS) &&
		    t3_activate_uld(sc, ULD_IWARP) == 0)
			setbit(&sc->offload_map, MAX_NPORTS);
	} else {
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}

/*
 * Add an upper layer driver to the global list.
 */
int
t3_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	mtx_lock(&t3_uld_list_lock);
	SLIST_FOREACH(u, &t3_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}

	SLIST_INSERT_HEAD(&t3_uld_list, ui, link);
	ui->refcount = 0;
done:
	mtx_unlock(&t3_uld_list_lock);
	return (rc);
}

/* Remove a ULD from the global list; EBUSY while it is still in use. */
int
t3_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	mtx_lock(&t3_uld_list_lock);

	SLIST_FOREACH(u, &t3_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t3_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	mtx_unlock(&t3_uld_list_lock);
	return (rc);
}

/* Activate the ULD with the given id; EAGAIN if it is not registered. */
int
t3_activate_uld(struct adapter *sc, int id)
{
	int rc = EAGAIN;
	struct uld_info *ui;

	mtx_lock(&t3_uld_list_lock);

	SLIST_FOREACH(ui, &t3_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->activate(sc);
			if (rc == 0)
				ui->refcount++;
			goto done;
		}
	}
done:
	mtx_unlock(&t3_uld_list_lock);

	return (rc);
}

/* Deactivate the ULD with the given id; EINVAL if it is not registered. */
int
t3_deactivate_uld(struct adapter *sc, int id)
{
	int rc = EINVAL;
	struct uld_info *ui;
3515 3516 mtx_lock(&t3_uld_list_lock); 3517 3518 SLIST_FOREACH(ui, &t3_uld_list, link) { 3519 if (ui->uld_id == id) { 3520 rc = ui->deactivate(sc); 3521 if (rc == 0) 3522 ui->refcount--; 3523 goto done; 3524 } 3525 } 3526 done: 3527 mtx_unlock(&t3_uld_list_lock); 3528 3529 return (rc); 3530 } 3531 3532 static int 3533 cpl_not_handled(struct sge_qset *qs __unused, struct rsp_desc *r __unused, 3534 struct mbuf *m) 3535 { 3536 m_freem(m); 3537 return (EDOOFUS); 3538 } 3539 3540 int 3541 t3_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h) 3542 { 3543 uintptr_t *loc, new; 3544 3545 if (opcode >= NUM_CPL_HANDLERS) 3546 return (EINVAL); 3547 3548 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled; 3549 loc = (uintptr_t *) &sc->cpl_handler[opcode]; 3550 atomic_store_rel_ptr(loc, new); 3551 3552 return (0); 3553 } 3554 #endif 3555 3556 static int 3557 cxgbc_mod_event(module_t mod, int cmd, void *arg) 3558 { 3559 int rc = 0; 3560 3561 switch (cmd) { 3562 case MOD_LOAD: 3563 mtx_init(&t3_list_lock, "T3 adapters", 0, MTX_DEF); 3564 SLIST_INIT(&t3_list); 3565 #ifdef TCP_OFFLOAD 3566 mtx_init(&t3_uld_list_lock, "T3 ULDs", 0, MTX_DEF); 3567 SLIST_INIT(&t3_uld_list); 3568 #endif 3569 break; 3570 3571 case MOD_UNLOAD: 3572 #ifdef TCP_OFFLOAD 3573 mtx_lock(&t3_uld_list_lock); 3574 if (!SLIST_EMPTY(&t3_uld_list)) { 3575 rc = EBUSY; 3576 mtx_unlock(&t3_uld_list_lock); 3577 break; 3578 } 3579 mtx_unlock(&t3_uld_list_lock); 3580 mtx_destroy(&t3_uld_list_lock); 3581 #endif 3582 mtx_lock(&t3_list_lock); 3583 if (!SLIST_EMPTY(&t3_list)) { 3584 rc = EBUSY; 3585 mtx_unlock(&t3_list_lock); 3586 break; 3587 } 3588 mtx_unlock(&t3_list_lock); 3589 mtx_destroy(&t3_list_lock); 3590 break; 3591 } 3592 3593 return (rc); 3594 } 3595 3596 #ifdef DEBUGNET 3597 static void 3598 cxgb_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize) 3599 { 3600 struct port_info *pi; 3601 adapter_t *adap; 3602 3603 pi = if_getsoftc(ifp); 3604 adap = pi->adapter; 3605 ADAPTER_LOCK(adap); 3606 
*nrxr = adap->nqsets; 3607 *ncl = adap->sge.qs[0].fl[1].size; 3608 *clsize = adap->sge.qs[0].fl[1].buf_size; 3609 ADAPTER_UNLOCK(adap); 3610 } 3611 3612 static void 3613 cxgb_debugnet_event(if_t ifp, enum debugnet_ev event) 3614 { 3615 struct port_info *pi; 3616 struct sge_qset *qs; 3617 int i; 3618 3619 pi = if_getsoftc(ifp); 3620 if (event == DEBUGNET_START) 3621 for (i = 0; i < pi->adapter->nqsets; i++) { 3622 qs = &pi->adapter->sge.qs[i]; 3623 3624 /* Need to reinit after debugnet_mbuf_start(). */ 3625 qs->fl[0].zone = zone_pack; 3626 qs->fl[1].zone = zone_clust; 3627 qs->lro.enabled = 0; 3628 } 3629 } 3630 3631 static int 3632 cxgb_debugnet_transmit(if_t ifp, struct mbuf *m) 3633 { 3634 struct port_info *pi; 3635 struct sge_qset *qs; 3636 3637 pi = if_getsoftc(ifp); 3638 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 3639 IFF_DRV_RUNNING) 3640 return (ENOENT); 3641 3642 qs = &pi->adapter->sge.qs[pi->first_qset]; 3643 return (cxgb_debugnet_encap(qs, &m)); 3644 } 3645 3646 static int 3647 cxgb_debugnet_poll(if_t ifp, int count) 3648 { 3649 struct port_info *pi; 3650 adapter_t *adap; 3651 int i; 3652 3653 pi = if_getsoftc(ifp); 3654 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 3655 return (ENOENT); 3656 3657 adap = pi->adapter; 3658 for (i = 0; i < adap->nqsets; i++) 3659 (void)cxgb_debugnet_poll_rx(adap, &adap->sge.qs[i]); 3660 (void)cxgb_debugnet_poll_tx(&adap->sge.qs[pi->first_qset]); 3661 return (0); 3662 } 3663 #endif /* DEBUGNET */ 3664