1 /* 2 * This file and its contents are supplied under the terms of the 3 * Common Development and Distribution License ("CDDL"), version 1.0. 4 * You may only use this file in accordance with the terms of version 5 * 1.0 of the CDDL. 6 * 7 * A full copy of the text of the CDDL should have accompanied this 8 * source. A copy of the CDDL is also available via the Internet at 9 * http://www.illumos.org/license/CDDL. 10 */ 11 12 /* 13 * This file is part of the Chelsio T4 support code. 14 * 15 * Copyright (C) 2010-2013 Chelsio Communications. All rights reserved. 16 * 17 * This program is distributed in the hope that it will be useful, but WITHOUT 18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 19 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this 20 * release for licensing terms and conditions. 21 */ 22 23 #include <sys/ddi.h> 24 #include <sys/sunddi.h> 25 #include <sys/sunndi.h> 26 #include <sys/modctl.h> 27 #include <sys/conf.h> 28 #include <sys/devops.h> 29 #include <sys/pci.h> 30 #include <sys/atomic.h> 31 #include <sys/types.h> 32 #include <sys/file.h> 33 #include <sys/errno.h> 34 #include <sys/open.h> 35 #include <sys/cred.h> 36 #include <sys/stat.h> 37 #include <sys/mkdev.h> 38 #include <sys/queue.h> 39 #include <sys/containerof.h> 40 #include <sys/sensors.h> 41 42 #include "version.h" 43 #include "common/common.h" 44 #include "common/t4_msg.h" 45 #include "common/t4_regs.h" 46 #include "firmware/t4_fw.h" 47 #include "firmware/t4_cfg.h" 48 #include "firmware/t5_fw.h" 49 #include "firmware/t5_cfg.h" 50 #include "firmware/t6_fw.h" 51 #include "firmware/t6_cfg.h" 52 #include "t4_l2t.h" 53 54 static int t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp); 55 static int t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp); 56 static int t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp, 57 int *rp); 58 struct cb_ops t4_cb_ops = { 59 .cb_open = t4_cb_open, 60 .cb_close = t4_cb_close, 61 .cb_strategy = nodev, 62 .cb_print = nodev, 63 .cb_dump = nodev, 64 .cb_read = nodev, 65 .cb_write = nodev, 66 .cb_ioctl = t4_cb_ioctl, 67 .cb_devmap = nodev, 68 .cb_mmap = nodev, 69 .cb_segmap = nodev, 70 .cb_chpoll = nochpoll, 71 .cb_prop_op = ddi_prop_op, 72 .cb_flag = D_MP, 73 .cb_rev = CB_REV, 74 .cb_aread = nodev, 75 .cb_awrite = nodev 76 }; 77 78 static int t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, 79 void *arg, void *result); 80 static int t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, 81 void *arg, dev_info_t **cdipp); 82 static int t4_bus_unconfig(dev_info_t *dip, uint_t flags, 83 ddi_bus_config_op_t op, void *arg); 84 struct bus_ops t4_bus_ops = { 85 .busops_rev = BUSO_REV, 86 .bus_ctl = t4_bus_ctl, 87 .bus_prop_op = ddi_bus_prop_op, 88 .bus_config = t4_bus_config, 89 .bus_unconfig = t4_bus_unconfig, 90 }; 91 92 static int t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, 93 void **rp); 94 static int t4_devo_probe(dev_info_t *dip); 95 static int t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 96 static int t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 97 static int t4_devo_quiesce(dev_info_t *dip); 98 struct dev_ops t4_dev_ops = { 99 .devo_rev = DEVO_REV, 100 .devo_getinfo = t4_devo_getinfo, 101 .devo_identify = nulldev, 102 .devo_probe = t4_devo_probe, 103 .devo_attach = t4_devo_attach, 104 .devo_detach = t4_devo_detach, 105 .devo_reset = nodev, 106 .devo_cb_ops = &t4_cb_ops, 107 .devo_bus_ops = &t4_bus_ops, 108 .devo_quiesce = &t4_devo_quiesce, 109 }; 110 111 static struct modldrv modldrv = { 112 .drv_modops = &mod_driverops, 113 .drv_linkinfo = "Chelsio T4 nexus " DRV_VERSION, 114 .drv_dev_ops = &t4_dev_ops 115 }; 116 117 static struct modlinkage modlinkage = { 118 .ml_rev = MODREV_1, 119 .ml_linkage = {&modldrv, NULL}, 120 }; 121 122 void *t4_list; 123 124 struct intrs_and_queues { 125 int intr_type; /* DDI_INTR_TYPE_* */ 126 int nirq; /* Number of vectors */ 127 int intr_fwd; /* Interrupts forwarded */ 128 int ntxq10g; /* # of NIC txq's for each 10G port */ 129 int nrxq10g; /* # of NIC rxq's for each 10G port */ 130 int ntxq1g; /* # of NIC txq's for each 1G port */ 131 int nrxq1g; /* # of NIC rxq's for each 1G port */ 132 #ifdef TCP_OFFLOAD_ENABLE 133 int nofldtxq10g; /* # of TOE txq's for each 10G port */ 134 int nofldrxq10g; /* # of TOE rxq's for each 10G port */ 135 int nofldtxq1g; /* # of TOE txq's for each 1G port */ 136 int nofldrxq1g; /* # of TOE rxq's for each 1G port */ 137 #endif 138 }; 139 140 struct fw_info fi[3]; 141 142 static int cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, 143 mblk_t *m); 144 static int fw_msg_not_handled(struct adapter *, const __be64 *); 145 int t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h); 146 static unsigned int getpf(struct adapter *sc); 147 static int prep_firmware(struct adapter *sc); 148 static int upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma); 149 static int partition_resources(struct adapter *sc); 150 static int adap__pre_init_tweaks(struct adapter *sc); 151 static int get_params__pre_init(struct adapter *sc); 152 static int get_params__post_init(struct adapter *sc); 153 static int set_params__post_init(struct adapter *); 154 static void setup_memwin(struct adapter *sc); 155 static int validate_mt_off_len(struct adapter *, int, uint32_t, int, 156 uint32_t *); 157 void memwin_info(struct adapter *, int, uint32_t *, uint32_t *); 158 uint32_t position_memwin(struct adapter *, int, uint32_t); 159 static int prop_lookup_int_array(struct adapter *sc, char *name, int *data, 160 uint_t count); 161 static int prop_lookup_int_array(struct adapter *sc, char *name, int *data, 162 uint_t count); 163 static int init_driver_props(struct adapter *sc, struct driver_properties *p); 164 static int remove_extra_props(struct adapter *sc, int n10g, int n1g); 165 static int cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, 166 struct intrs_and_queues *iaq); 167 static int add_child_node(struct adapter *sc, int idx); 168 static int remove_child_node(struct adapter *sc, int idx); 169 static kstat_t *setup_kstats(struct adapter *sc); 170 static kstat_t *setup_wc_kstats(struct adapter *); 171 static int update_wc_kstats(kstat_t *, int); 172 #ifdef TCP_OFFLOAD_ENABLE 173 static int toe_capability(struct port_info *pi, int enable); 174 static int activate_uld(struct adapter *sc, int id, struct uld_softc *usc); 175 static int deactivate_uld(struct uld_softc *usc); 176 #endif 177 static kmutex_t t4_adapter_list_lock; 178 static SLIST_HEAD(, adapter) t4_adapter_list; 179 #ifdef TCP_OFFLOAD_ENABLE 180 static kmutex_t t4_uld_list_lock; 181 static SLIST_HEAD(, uld_info) t4_uld_list; 182 #endif 183 184 static int t4_temperature_read(void *, sensor_ioctl_scalar_t *); 185 static int t4_voltage_read(void *, sensor_ioctl_scalar_t *); 186 static const ksensor_ops_t t4_temp_ops = { 187 .kso_kind = ksensor_kind_temperature, 188 .kso_scalar = t4_temperature_read 189 }; 190 191 static const ksensor_ops_t t4_volt_ops = { 192 .kso_kind = ksensor_kind_voltage, 193 .kso_scalar = t4_voltage_read 194 }; 195 196 static int t4_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *); 197 static int t4_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t, 198 ddi_ufm_image_t *); 199 static int t4_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t, 200 ddi_ufm_slot_t *); 201 static ddi_ufm_ops_t t4_ufm_ops = { 202 .ddi_ufm_op_fill_image = t4_ufm_fill_image, 203 .ddi_ufm_op_fill_slot = t4_ufm_fill_slot, 204 .ddi_ufm_op_getcaps = t4_ufm_getcaps 205 }; 206 207 int 208 _init(void) 209 { 210 int rc; 211 212 rc = ddi_soft_state_init(&t4_list, sizeof (struct adapter), 0); 213 if (rc != 0) 214 return (rc); 215 216 rc = mod_install(&modlinkage); 217 if (rc != 0) 218 ddi_soft_state_fini(&t4_list); 219 220 mutex_init(&t4_adapter_list_lock, NULL, MUTEX_DRIVER, NULL); 221 SLIST_INIT(&t4_adapter_list); 222 223 #ifdef TCP_OFFLOAD_ENABLE 224 mutex_init(&t4_uld_list_lock, NULL, MUTEX_DRIVER, NULL); 225 SLIST_INIT(&t4_uld_list); 226 #endif 227 228 return (rc); 229 } 230 231 int 232 _fini(void) 233 { 234 int rc; 235 236 rc = mod_remove(&modlinkage); 237 if (rc != 0) 238 return (rc); 239 240 ddi_soft_state_fini(&t4_list); 241 return (0); 242 } 243 244 int 245 _info(struct modinfo *mi) 246 { 247 return (mod_info(&modlinkage, mi)); 248 } 249 250 /* ARGSUSED */ 251 static int 252 t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **rp) 253 { 254 struct adapter *sc; 255 minor_t minor; 256 257 minor = getminor((dev_t)arg); /* same as instance# in our case */ 258 259 if (cmd == DDI_INFO_DEVT2DEVINFO) { 260 sc = ddi_get_soft_state(t4_list, minor); 261 if (sc == NULL) 262 return (DDI_FAILURE); 263 264 ASSERT(sc->dev == (dev_t)arg); 265 *rp = (void *)sc->dip; 266 } else if (cmd == DDI_INFO_DEVT2INSTANCE) 267 *rp = (void *) (unsigned long) minor; 268 else 269 ASSERT(0); 270 271 return (DDI_SUCCESS); 272 } 273 274 static int 275 t4_devo_probe(dev_info_t *dip) 276 { 277 int rc, id, *reg; 278 uint_t n, pf; 279 280 id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 281 "device-id", 0xffff); 282 if (id == 0xffff) 283 return (DDI_PROBE_DONTCARE); 284 285 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 286 "reg", ®, &n); 287 if (rc != DDI_SUCCESS) 288 return (DDI_PROBE_DONTCARE); 289 290 pf = PCI_REG_FUNC_G(reg[0]); 291 ddi_prop_free(reg); 292 293 /* Prevent driver attachment on any PF except 0 on the FPGA */ 294 if (id == 0xa000 && pf != 0) 295 return (DDI_PROBE_FAILURE); 296 297 return (DDI_PROBE_DONTCARE); 298 } 299 300 static int 301 t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 302 { 303 struct adapter *sc = NULL; 304 struct sge *s; 305 int i, instance, rc = DDI_SUCCESS, rqidx, tqidx, q; 306 int irq = 0, nxg, n100g, n40g, n25g, n10g, n1g; 307 #ifdef TCP_OFFLOAD_ENABLE 308 int ofld_rqidx, ofld_tqidx; 309 #endif 310 char name[16]; 311 struct driver_properties *prp; 312 struct intrs_and_queues iaq; 313 ddi_device_acc_attr_t da = { 314 .devacc_attr_version = DDI_DEVICE_ATTR_V0, 315 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC, 316 .devacc_attr_dataorder = DDI_UNORDERED_OK_ACC 317 }; 318 ddi_device_acc_attr_t da1 = { 319 .devacc_attr_version = DDI_DEVICE_ATTR_V0, 320 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC, 321 .devacc_attr_dataorder = DDI_MERGING_OK_ACC 322 }; 323 324 if (cmd != DDI_ATTACH) 325 return (DDI_FAILURE); 326 327 /* 328 * Allocate space for soft state. 329 */ 330 instance = ddi_get_instance(dip); 331 rc = ddi_soft_state_zalloc(t4_list, instance); 332 if (rc != DDI_SUCCESS) { 333 cxgb_printf(dip, CE_WARN, 334 "failed to allocate soft state: %d", rc); 335 return (DDI_FAILURE); 336 } 337 338 sc = ddi_get_soft_state(t4_list, instance); 339 sc->dip = dip; 340 sc->dev = makedevice(ddi_driver_major(dip), instance); 341 mutex_init(&sc->lock, NULL, MUTEX_DRIVER, NULL); 342 cv_init(&sc->cv, NULL, CV_DRIVER, NULL); 343 mutex_init(&sc->sfl_lock, NULL, MUTEX_DRIVER, NULL); 344 TAILQ_INIT(&sc->sfl); 345 346 mutex_enter(&t4_adapter_list_lock); 347 SLIST_INSERT_HEAD(&t4_adapter_list, sc, link); 348 mutex_exit(&t4_adapter_list_lock); 349 350 sc->pf = getpf(sc); 351 if (sc->pf > 8) { 352 rc = EINVAL; 353 cxgb_printf(dip, CE_WARN, 354 "failed to determine PCI PF# of device"); 355 goto done; 356 } 357 sc->mbox = sc->pf; 358 359 /* Initialize the driver properties */ 360 prp = &sc->props; 361 (void)init_driver_props(sc, prp); 362 363 /* 364 * Enable access to the PCI config space. 365 */ 366 rc = pci_config_setup(dip, &sc->pci_regh); 367 if (rc != DDI_SUCCESS) { 368 cxgb_printf(dip, CE_WARN, 369 "failed to enable PCI config space access: %d", rc); 370 goto done; 371 } 372 373 /* TODO: Set max read request to 4K */ 374 375 /* 376 * Enable MMIO access. 377 */ 378 rc = ddi_regs_map_setup(dip, 1, &sc->regp, 0, 0, &da, &sc->regh); 379 if (rc != DDI_SUCCESS) { 380 cxgb_printf(dip, CE_WARN, 381 "failed to map device registers: %d", rc); 382 goto done; 383 } 384 385 (void) memset(sc->chan_map, 0xff, sizeof (sc->chan_map)); 386 387 /* 388 * Initialize cpl handler. 389 */ 390 for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++) { 391 sc->cpl_handler[i] = cpl_not_handled; 392 } 393 394 for (i = 0; i < ARRAY_SIZE(sc->fw_msg_handler); i++) { 395 sc->fw_msg_handler[i] = fw_msg_not_handled; 396 } 397 398 for (i = 0; i < NCHAN; i++) { 399 (void) snprintf(name, sizeof (name), "%s-%d", 400 "reclaim", i); 401 sc->tq[i] = ddi_taskq_create(sc->dip, 402 name, 1, TASKQ_DEFAULTPRI, 0); 403 404 if (sc->tq[i] == NULL) { 405 cxgb_printf(dip, CE_WARN, 406 "failed to create task queues"); 407 rc = DDI_FAILURE; 408 goto done; 409 } 410 } 411 412 /* 413 * Prepare the adapter for operation. 414 */ 415 rc = -t4_prep_adapter(sc, false); 416 if (rc != 0) { 417 cxgb_printf(dip, CE_WARN, "failed to prepare adapter: %d", rc); 418 goto done; 419 } 420 421 /* 422 * Enable BAR1 access. 423 */ 424 sc->doorbells |= DOORBELL_KDB; 425 rc = ddi_regs_map_setup(dip, 2, &sc->reg1p, 0, 0, &da1, &sc->reg1h); 426 if (rc != DDI_SUCCESS) { 427 cxgb_printf(dip, CE_WARN, 428 "failed to map BAR1 device registers: %d", rc); 429 goto done; 430 } else { 431 if (is_t5(sc->params.chip)) { 432 sc->doorbells |= DOORBELL_UDB; 433 if (prp->wc) { 434 /* 435 * Enable write combining on BAR2. This is the 436 * userspace doorbell BAR and is split into 128B 437 * (UDBS_SEG_SIZE) doorbell regions, each associated 438 * with an egress queue. The first 64B has the doorbell 439 * and the second 64B can be used to submit a tx work 440 * request with an implicit doorbell. 441 */ 442 sc->doorbells &= ~DOORBELL_UDB; 443 sc->doorbells |= (DOORBELL_WCWR | 444 DOORBELL_UDBWC); 445 t4_write_reg(sc, A_SGE_STAT_CFG, 446 V_STATSOURCE_T5(7) | V_STATMODE(0)); 447 } 448 } 449 } 450 451 /* 452 * Do this really early. Note that minor number = instance. 453 */ 454 (void) snprintf(name, sizeof (name), "%s,%d", T4_NEXUS_NAME, instance); 455 rc = ddi_create_minor_node(dip, name, S_IFCHR, instance, 456 DDI_NT_NEXUS, 0); 457 if (rc != DDI_SUCCESS) { 458 cxgb_printf(dip, CE_WARN, 459 "failed to create device node: %d", rc); 460 rc = DDI_SUCCESS; /* carry on */ 461 } 462 463 /* Do this early. Memory window is required for loading config file. */ 464 setup_memwin(sc); 465 466 /* Prepare the firmware for operation */ 467 rc = prep_firmware(sc); 468 if (rc != 0) 469 goto done; /* error message displayed already */ 470 471 rc = adap__pre_init_tweaks(sc); 472 if (rc != 0) 473 goto done; 474 475 rc = get_params__pre_init(sc); 476 if (rc != 0) 477 goto done; /* error message displayed already */ 478 479 t4_sge_init(sc); 480 481 if (sc->flags & MASTER_PF) { 482 /* get basic stuff going */ 483 rc = -t4_fw_initialize(sc, sc->mbox); 484 if (rc != 0) { 485 cxgb_printf(sc->dip, CE_WARN, 486 "early init failed: %d.\n", rc); 487 goto done; 488 } 489 } 490 491 rc = get_params__post_init(sc); 492 if (rc != 0) 493 goto done; /* error message displayed already */ 494 495 rc = set_params__post_init(sc); 496 if (rc != 0) 497 goto done; /* error message displayed already */ 498 499 /* 500 * TODO: This is the place to call t4_set_filter_mode() 501 */ 502 503 /* tweak some settings */ 504 t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) | 505 V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) | 506 V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9)); 507 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); 508 509 /* 510 * Work-around for bug 2619 511 * Set DisableVlan field in TP_RSS_CONFIG_VRT register so that the 512 * VLAN tag extraction is disabled. 513 */ 514 t4_set_reg_field(sc, A_TP_RSS_CONFIG_VRT, F_DISABLEVLAN, F_DISABLEVLAN); 515 516 /* Store filter mode */ 517 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1, 518 A_TP_VLAN_PRI_MAP); 519 520 /* 521 * First pass over all the ports - allocate VIs and initialize some 522 * basic parameters like mac address, port type, etc. We also figure 523 * out whether a port is 10G or 1G and use that information when 524 * calculating how many interrupts to attempt to allocate. 525 */ 526 n100g = n40g = n25g = n10g = n1g = 0; 527 for_each_port(sc, i) { 528 struct port_info *pi; 529 530 pi = kmem_zalloc(sizeof (*pi), KM_SLEEP); 531 sc->port[i] = pi; 532 533 /* These must be set before t4_port_init */ 534 pi->adapter = sc; 535 /* LINTED: E_ASSIGN_NARROW_CONV */ 536 pi->port_id = i; 537 } 538 539 /* Allocate the vi and initialize parameters like mac addr */ 540 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0); 541 if (rc) { 542 cxgb_printf(dip, CE_WARN, 543 "unable to initialize port: %d", rc); 544 goto done; 545 } 546 547 for_each_port(sc, i) { 548 struct port_info *pi = sc->port[i]; 549 550 mutex_init(&pi->lock, NULL, MUTEX_DRIVER, NULL); 551 pi->mtu = ETHERMTU; 552 553 if (is_100G_port(pi)) { 554 n100g++; 555 pi->tmr_idx = prp->tmr_idx_10g; 556 pi->pktc_idx = prp->pktc_idx_10g; 557 } else if (is_40G_port(pi)) { 558 n40g++; 559 pi->tmr_idx = prp->tmr_idx_10g; 560 pi->pktc_idx = prp->pktc_idx_10g; 561 } else if (is_25G_port(pi)) { 562 n25g++; 563 pi->tmr_idx = prp->tmr_idx_10g; 564 pi->pktc_idx = prp->pktc_idx_10g; 565 } else if (is_10G_port(pi)) { 566 n10g++; 567 pi->tmr_idx = prp->tmr_idx_10g; 568 pi->pktc_idx = prp->pktc_idx_10g; 569 } else { 570 n1g++; 571 pi->tmr_idx = prp->tmr_idx_1g; 572 pi->pktc_idx = prp->pktc_idx_1g; 573 } 574 575 pi->xact_addr_filt = -1; 576 t4_mc_init(pi); 577 578 setbit(&sc->registered_device_map, i); 579 } 580 581 nxg = n10g + n25g + n40g + n100g; 582 (void) remove_extra_props(sc, nxg, n1g); 583 584 if (sc->registered_device_map == 0) { 585 cxgb_printf(dip, CE_WARN, "no usable ports"); 586 rc = DDI_FAILURE; 587 goto done; 588 } 589 590 rc = cfg_itype_and_nqueues(sc, nxg, n1g, &iaq); 591 if (rc != 0) 592 goto done; /* error message displayed already */ 593 594 sc->intr_type = iaq.intr_type; 595 sc->intr_count = iaq.nirq; 596 597 if (sc->props.multi_rings && (sc->intr_type != DDI_INTR_TYPE_MSIX)) { 598 sc->props.multi_rings = 0; 599 cxgb_printf(dip, CE_WARN, 600 "Multiple rings disabled as interrupt type is not MSI-X"); 601 } 602 603 if (sc->props.multi_rings && iaq.intr_fwd) { 604 sc->props.multi_rings = 0; 605 cxgb_printf(dip, CE_WARN, 606 "Multiple rings disabled as interrupts are forwarded"); 607 } 608 609 if (!sc->props.multi_rings) { 610 iaq.ntxq10g = 1; 611 iaq.ntxq1g = 1; 612 } 613 s = &sc->sge; 614 s->nrxq = nxg * iaq.nrxq10g + n1g * iaq.nrxq1g; 615 s->ntxq = nxg * iaq.ntxq10g + n1g * iaq.ntxq1g; 616 s->neq = s->ntxq + s->nrxq; /* the fl in an rxq is an eq */ 617 #ifdef TCP_OFFLOAD_ENABLE 618 /* control queues, 1 per port + 1 mgmtq */ 619 s->neq += sc->params.nports + 1; 620 #endif 621 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */ 622 if (iaq.intr_fwd != 0) 623 sc->flags |= INTR_FWD; 624 #ifdef TCP_OFFLOAD_ENABLE 625 if (is_offload(sc) != 0) { 626 627 s->nofldrxq = nxg * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g; 628 s->nofldtxq = nxg * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g; 629 s->neq += s->nofldtxq + s->nofldrxq; 630 s->niq += s->nofldrxq; 631 632 s->ofld_rxq = kmem_zalloc(s->nofldrxq * 633 sizeof (struct sge_ofld_rxq), KM_SLEEP); 634 s->ofld_txq = kmem_zalloc(s->nofldtxq * 635 sizeof (struct sge_wrq), KM_SLEEP); 636 s->ctrlq = kmem_zalloc(sc->params.nports * 637 sizeof (struct sge_wrq), KM_SLEEP); 638 639 } 640 #endif 641 s->rxq = kmem_zalloc(s->nrxq * sizeof (struct sge_rxq), KM_SLEEP); 642 s->txq = kmem_zalloc(s->ntxq * sizeof (struct sge_txq), KM_SLEEP); 643 s->iqmap = kmem_zalloc(s->niq * sizeof (struct sge_iq *), KM_SLEEP); 644 s->eqmap = kmem_zalloc(s->neq * sizeof (struct sge_eq *), KM_SLEEP); 645 646 sc->intr_handle = kmem_zalloc(sc->intr_count * 647 sizeof (ddi_intr_handle_t), KM_SLEEP); 648 649 /* 650 * Second pass over the ports. This time we know the number of rx and 651 * tx queues that each port should get. 652 */ 653 rqidx = tqidx = 0; 654 #ifdef TCP_OFFLOAD_ENABLE 655 ofld_rqidx = ofld_tqidx = 0; 656 #endif 657 for_each_port(sc, i) { 658 struct port_info *pi = sc->port[i]; 659 660 if (pi == NULL) 661 continue; 662 663 t4_mc_cb_init(pi); 664 /* LINTED: E_ASSIGN_NARROW_CONV */ 665 pi->first_rxq = rqidx; 666 /* LINTED: E_ASSIGN_NARROW_CONV */ 667 pi->nrxq = (is_10XG_port(pi)) ? iaq.nrxq10g 668 : iaq.nrxq1g; 669 /* LINTED: E_ASSIGN_NARROW_CONV */ 670 pi->first_txq = tqidx; 671 /* LINTED: E_ASSIGN_NARROW_CONV */ 672 pi->ntxq = (is_10XG_port(pi)) ? iaq.ntxq10g 673 : iaq.ntxq1g; 674 675 rqidx += pi->nrxq; 676 tqidx += pi->ntxq; 677 678 #ifdef TCP_OFFLOAD_ENABLE 679 if (is_offload(sc) != 0) { 680 /* LINTED: E_ASSIGN_NARROW_CONV */ 681 pi->first_ofld_rxq = ofld_rqidx; 682 pi->nofldrxq = max(1, pi->nrxq / 4); 683 684 /* LINTED: E_ASSIGN_NARROW_CONV */ 685 pi->first_ofld_txq = ofld_tqidx; 686 pi->nofldtxq = max(1, pi->ntxq / 2); 687 688 ofld_rqidx += pi->nofldrxq; 689 ofld_tqidx += pi->nofldtxq; 690 } 691 #endif 692 693 /* 694 * Enable hw checksumming and LSO for all ports by default. 695 * They can be disabled using ndd (hw_csum and hw_lso). 696 */ 697 pi->features |= (CXGBE_HW_CSUM | CXGBE_HW_LSO); 698 } 699 700 #ifdef TCP_OFFLOAD_ENABLE 701 sc->l2t = t4_init_l2t(sc); 702 #endif 703 704 /* 705 * Setup Interrupts. 706 */ 707 708 i = 0; 709 rc = ddi_intr_alloc(dip, sc->intr_handle, sc->intr_type, 0, 710 sc->intr_count, &i, DDI_INTR_ALLOC_STRICT); 711 if (rc != DDI_SUCCESS) { 712 cxgb_printf(dip, CE_WARN, 713 "failed to allocate %d interrupt(s) of type %d: %d, %d", 714 sc->intr_count, sc->intr_type, rc, i); 715 goto done; 716 } 717 ASSERT(sc->intr_count == i); /* allocation was STRICT */ 718 (void) ddi_intr_get_cap(sc->intr_handle[0], &sc->intr_cap); 719 (void) ddi_intr_get_pri(sc->intr_handle[0], &sc->intr_pri); 720 if (sc->intr_count == 1) { 721 ASSERT(sc->flags & INTR_FWD); 722 (void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_all, sc, 723 &s->fwq); 724 } else { 725 /* Multiple interrupts. The first one is always error intr */ 726 (void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_err, sc, 727 NULL); 728 irq++; 729 730 /* The second one is always the firmware event queue */ 731 (void) ddi_intr_add_handler(sc->intr_handle[1], t4_intr, sc, 732 &s->fwq); 733 irq++; 734 /* 735 * Note that if INTR_FWD is set then either the NIC rx 736 * queues or (exclusive or) the TOE rx queueus will be taking 737 * direct interrupts. 738 * 739 * There is no need to check for is_offload(sc) as nofldrxq 740 * will be 0 if offload is disabled. 741 */ 742 for_each_port(sc, i) { 743 struct port_info *pi = sc->port[i]; 744 struct sge_rxq *rxq; 745 #ifdef TCP_OFFLOAD_ENABLE 746 struct sge_ofld_rxq *ofld_rxq; 747 748 /* 749 * Skip over the NIC queues if they aren't taking direct 750 * interrupts. 751 */ 752 if ((sc->flags & INTR_FWD) && 753 pi->nofldrxq > pi->nrxq) 754 goto ofld_queues; 755 #endif 756 rxq = &s->rxq[pi->first_rxq]; 757 for (q = 0; q < pi->nrxq; q++, rxq++) { 758 (void) ddi_intr_add_handler( 759 sc->intr_handle[irq], t4_intr, sc, 760 &rxq->iq); 761 irq++; 762 } 763 764 #ifdef TCP_OFFLOAD_ENABLE 765 /* 766 * Skip over the offload queues if they aren't taking 767 * direct interrupts. 768 */ 769 if ((sc->flags & INTR_FWD)) 770 continue; 771 ofld_queues: 772 ofld_rxq = &s->ofld_rxq[pi->first_ofld_rxq]; 773 for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) { 774 (void) ddi_intr_add_handler( 775 sc->intr_handle[irq], t4_intr, sc, 776 &ofld_rxq->iq); 777 irq++; 778 } 779 #endif 780 } 781 782 } 783 sc->flags |= INTR_ALLOCATED; 784 785 if ((rc = ksensor_create_scalar_pcidev(dip, SENSOR_KIND_TEMPERATURE, 786 &t4_temp_ops, sc, "temp", &sc->temp_sensor)) != 0) { 787 cxgb_printf(dip, CE_WARN, "failed to create temperature " 788 "sensor: %d", rc); 789 rc = DDI_FAILURE; 790 goto done; 791 } 792 793 if ((rc = ksensor_create_scalar_pcidev(dip, SENSOR_KIND_VOLTAGE, 794 &t4_volt_ops, sc, "vdd", &sc->volt_sensor)) != 0) { 795 cxgb_printf(dip, CE_WARN, "failed to create voltage " 796 "sensor: %d", rc); 797 rc = DDI_FAILURE; 798 goto done; 799 } 800 801 802 if ((rc = ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &t4_ufm_ops, 803 &sc->ufm_hdl, sc)) != 0) { 804 cxgb_printf(dip, CE_WARN, "failed to enable UFM ops: %d", rc); 805 rc = DDI_FAILURE; 806 goto done; 807 } 808 ddi_ufm_update(sc->ufm_hdl); 809 ddi_report_dev(dip); 810 811 /* 812 * Hardware/Firmware/etc. Version/Revision IDs. 813 */ 814 t4_dump_version_info(sc); 815 816 if (n100g) { 817 cxgb_printf(dip, CE_NOTE, 818 "%dx100G (%d rxq, %d txq total) %d %s.", 819 n100g, rqidx, tqidx, sc->intr_count, 820 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" : 821 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" : 822 "fixed interrupt"); 823 } else if (n40g) { 824 cxgb_printf(dip, CE_NOTE, 825 "%dx40G (%d rxq, %d txq total) %d %s.", 826 n40g, rqidx, tqidx, sc->intr_count, 827 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" : 828 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" : 829 "fixed interrupt"); 830 } else if (n25g) { 831 cxgb_printf(dip, CE_NOTE, 832 "%dx25G (%d rxq, %d txq total) %d %s.", 833 n25g, rqidx, tqidx, sc->intr_count, 834 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" : 835 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" : 836 "fixed interrupt"); 837 } else if (n10g && n1g) { 838 cxgb_printf(dip, CE_NOTE, 839 "%dx10G %dx1G (%d rxq, %d txq total) %d %s.", 840 n10g, n1g, rqidx, tqidx, sc->intr_count, 841 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" : 842 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" : 843 "fixed interrupt"); 844 } else { 845 cxgb_printf(dip, CE_NOTE, 846 "%dx%sG (%d rxq, %d txq per port) %d %s.", 847 n10g ? n10g : n1g, 848 n10g ? "10" : "1", 849 n10g ? iaq.nrxq10g : iaq.nrxq1g, 850 n10g ? iaq.ntxq10g : iaq.ntxq1g, 851 sc->intr_count, 852 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" : 853 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" : 854 "fixed interrupt"); 855 } 856 857 sc->ksp = setup_kstats(sc); 858 sc->ksp_stat = setup_wc_kstats(sc); 859 sc->params.drv_memwin = MEMWIN_NIC; 860 861 done: 862 if (rc != DDI_SUCCESS) { 863 (void) t4_devo_detach(dip, DDI_DETACH); 864 865 /* rc may have errno style errors or DDI errors */ 866 rc = DDI_FAILURE; 867 } 868 869 return (rc); 870 } 871 872 static int 873 t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 874 { 875 int instance, i; 876 struct adapter *sc; 877 struct port_info *pi; 878 struct sge *s; 879 880 if (cmd != DDI_DETACH) 881 return (DDI_FAILURE); 882 883 instance = ddi_get_instance(dip); 884 sc = ddi_get_soft_state(t4_list, instance); 885 if (sc == NULL) 886 return (DDI_SUCCESS); 887 888 if (sc->flags & FULL_INIT_DONE) { 889 t4_intr_disable(sc); 890 for_each_port(sc, i) { 891 pi = sc->port[i]; 892 if (pi && pi->flags & PORT_INIT_DONE) 893 (void) port_full_uninit(pi); 894 } 895 (void) adapter_full_uninit(sc); 896 } 897 898 /* Safe to call no matter what */ 899 if (sc->ufm_hdl != NULL) { 900 ddi_ufm_fini(sc->ufm_hdl); 901 sc->ufm_hdl = NULL; 902 } 903 (void) ksensor_remove(dip, KSENSOR_ALL_IDS); 904 ddi_prop_remove_all(dip); 905 ddi_remove_minor_node(dip, NULL); 906 907 for (i = 0; i < NCHAN; i++) { 908 if (sc->tq[i]) { 909 ddi_taskq_wait(sc->tq[i]); 910 ddi_taskq_destroy(sc->tq[i]); 911 } 912 } 913 914 if (sc->ksp != NULL) 915 kstat_delete(sc->ksp); 916 if (sc->ksp_stat != NULL) 917 kstat_delete(sc->ksp_stat); 918 919 s = &sc->sge; 920 if (s->rxq != NULL) 921 kmem_free(s->rxq, s->nrxq * sizeof (struct sge_rxq)); 922 #ifdef TCP_OFFLOAD_ENABLE 923 if (s->ofld_txq != NULL) 924 kmem_free(s->ofld_txq, s->nofldtxq * sizeof (struct sge_wrq)); 925 if (s->ofld_rxq != NULL) 926 kmem_free(s->ofld_rxq, 927 s->nofldrxq * sizeof (struct sge_ofld_rxq)); 928 if (s->ctrlq != NULL) 929 kmem_free(s->ctrlq, 930 sc->params.nports * sizeof (struct sge_wrq)); 931 #endif 932 if (s->txq != NULL) 933 kmem_free(s->txq, s->ntxq * sizeof (struct sge_txq)); 934 if (s->iqmap != NULL) 935 kmem_free(s->iqmap, s->niq * sizeof (struct sge_iq *)); 936 if (s->eqmap != NULL) 937 kmem_free(s->eqmap, s->neq * sizeof (struct sge_eq *)); 938 939 if (s->rxbuf_cache != NULL) 940 rxbuf_cache_destroy(s->rxbuf_cache); 941 942 if (sc->flags & INTR_ALLOCATED) { 943 for (i = 0; i < sc->intr_count; i++) { 944 (void) ddi_intr_remove_handler(sc->intr_handle[i]); 945 (void) ddi_intr_free(sc->intr_handle[i]); 946 } 947 sc->flags &= ~INTR_ALLOCATED; 948 } 949 950 if (sc->intr_handle != NULL) { 951 kmem_free(sc->intr_handle, 952 sc->intr_count * sizeof (*sc->intr_handle)); 953 } 954 955 for_each_port(sc, i) { 956 pi = sc->port[i]; 957 if (pi != NULL) { 958 mutex_destroy(&pi->lock); 959 kmem_free(pi, sizeof (*pi)); 960 clrbit(&sc->registered_device_map, i); 961 } 962 } 963 964 if (sc->flags & FW_OK) 965 (void) t4_fw_bye(sc, sc->mbox); 966 967 if (sc->reg1h != NULL) 968 ddi_regs_map_free(&sc->reg1h); 969 970 if (sc->regh != NULL) 971 ddi_regs_map_free(&sc->regh); 972 973 if (sc->pci_regh != NULL) 974 pci_config_teardown(&sc->pci_regh); 975 976 mutex_enter(&t4_adapter_list_lock); 977 SLIST_REMOVE_HEAD(&t4_adapter_list, link); 978 mutex_exit(&t4_adapter_list_lock); 979 980 mutex_destroy(&sc->lock); 981 cv_destroy(&sc->cv); 982 mutex_destroy(&sc->sfl_lock); 983 984 #ifdef DEBUG 985 bzero(sc, sizeof (*sc)); 986 #endif 987 ddi_soft_state_free(t4_list, instance); 988 989 return (DDI_SUCCESS); 990 } 991 992 static int 993 t4_devo_quiesce(dev_info_t *dip) 994 { 995 int instance; 996 struct adapter *sc; 997 998 instance = ddi_get_instance(dip); 999 sc = ddi_get_soft_state(t4_list, instance); 1000 if (sc == NULL) 1001 return (DDI_SUCCESS); 1002 1003 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 1004 t4_intr_disable(sc); 1005 t4_write_reg(sc, A_PL_RST, F_PIORSTMODE | F_PIORST); 1006 1007 return (DDI_SUCCESS); 1008 } 1009 1010 static int 1011 t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, void *arg, 1012 void *result) 1013 { 1014 char s[4]; 1015 struct port_info *pi; 1016 dev_info_t *child = (dev_info_t *)arg; 1017 1018 switch (op) { 1019 case DDI_CTLOPS_REPORTDEV: 1020 pi = ddi_get_parent_data(rdip); 1021 pi->instance = ddi_get_instance(dip); 1022 pi->child_inst = ddi_get_instance(rdip); 1023 cmn_err(CE_CONT, "?%s%d is port %s on %s%d\n", 1024 ddi_node_name(rdip), ddi_get_instance(rdip), 1025 ddi_get_name_addr(rdip), ddi_driver_name(dip), 1026 ddi_get_instance(dip)); 1027 return (DDI_SUCCESS); 1028 1029 case DDI_CTLOPS_INITCHILD: 1030 pi = ddi_get_parent_data(child); 1031 if (pi == NULL) 1032 return (DDI_NOT_WELL_FORMED); 1033 (void) snprintf(s, sizeof (s), "%d", pi->port_id); 1034 ddi_set_name_addr(child, s); 1035 return (DDI_SUCCESS); 1036 1037 case DDI_CTLOPS_UNINITCHILD: 1038 ddi_set_name_addr(child, NULL); 1039 return (DDI_SUCCESS); 1040 1041 case DDI_CTLOPS_ATTACH: 1042 case DDI_CTLOPS_DETACH: 1043 return (DDI_SUCCESS); 1044 1045 default: 1046 return (ddi_ctlops(dip, rdip, op, arg, result)); 1047 } 1048 } 1049 1050 static int 1051 t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, void *arg, 1052 dev_info_t **cdipp) 1053 { 1054 int instance, i; 1055 struct adapter *sc; 1056 1057 instance = ddi_get_instance(dip); 1058 sc = ddi_get_soft_state(t4_list, instance); 1059 1060 if (op == BUS_CONFIG_ONE) { 1061 char *c; 1062 1063 /* 1064 * arg is something like "cxgb@0" where 0 is the port_id hanging 1065 * off this nexus. 1066 */ 1067 1068 c = arg; 1069 while (*(c + 1)) 1070 c++; 1071 1072 /* There should be exactly 1 digit after '@' */ 1073 if (*(c - 1) != '@') 1074 return (NDI_FAILURE); 1075 1076 i = *c - '0'; 1077 1078 if (add_child_node(sc, i) != 0) 1079 return (NDI_FAILURE); 1080 1081 flags |= NDI_ONLINE_ATTACH; 1082 1083 } else if (op == BUS_CONFIG_ALL || op == BUS_CONFIG_DRIVER) { 1084 /* Allocate and bind all child device nodes */ 1085 for_each_port(sc, i) 1086 (void) add_child_node(sc, i); 1087 flags |= NDI_ONLINE_ATTACH; 1088 } 1089 1090 return (ndi_busop_bus_config(dip, flags, op, arg, cdipp, 0)); 1091 } 1092 1093 static int 1094 t4_bus_unconfig(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, 1095 void *arg) 1096 { 1097 int instance, i, rc; 1098 struct adapter *sc; 1099 1100 instance = ddi_get_instance(dip); 1101 sc = ddi_get_soft_state(t4_list, instance); 1102 1103 if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ALL || 1104 op == BUS_UNCONFIG_DRIVER) 1105 flags |= NDI_UNCONFIG; 1106 1107 rc = ndi_busop_bus_unconfig(dip, flags, op, arg); 1108 if (rc != 0) 1109 return (rc); 1110 1111 if (op == BUS_UNCONFIG_ONE) { 1112 char *c; 1113 1114 c = arg; 1115 while (*(c + 1)) 1116 c++; 1117 1118 if (*(c - 1) != '@') 1119 return (NDI_SUCCESS); 1120 1121 i = *c - '0'; 1122 1123 rc = remove_child_node(sc, i); 1124 1125 } else if (op == BUS_UNCONFIG_ALL || op == BUS_UNCONFIG_DRIVER) { 1126 1127 for_each_port(sc, i) 1128 (void) remove_child_node(sc, i); 1129 } 1130 1131 return (rc); 1132 } 1133 1134 /* ARGSUSED */ 1135 static int 1136 t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp) 1137 { 1138 struct adapter *sc; 1139 1140 if (otyp != OTYP_CHR) 1141 return (EINVAL); 1142 1143 sc = ddi_get_soft_state(t4_list, getminor(*devp)); 1144 if (sc == NULL) 1145 return (ENXIO); 1146 1147 return (atomic_cas_uint(&sc->open, 0, EBUSY)); 1148 } 1149 1150 /* ARGSUSED */ 1151 static int 1152 t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp) 1153 { 1154 struct adapter *sc; 1155 1156 sc = ddi_get_soft_state(t4_list, getminor(dev)); 1157 if (sc == NULL) 1158 return (EINVAL); 1159 1160 (void) atomic_swap_uint(&sc->open, 0); 1161 return (0); 1162 } 1163 1164 /* ARGSUSED */ 1165 static int 1166 t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp, int *rp) 1167 { 1168 int instance; 1169 struct adapter *sc; 1170 void *data = (void *)d; 1171 1172 if (crgetuid(credp) != 0) 1173 return (EPERM); 1174 1175 instance = getminor(dev); 1176 sc = ddi_get_soft_state(t4_list, instance); 1177 if (sc == NULL) 1178 return (EINVAL); 1179 1180 return (t4_ioctl(sc, cmd, data, mode)); 1181 } 1182 1183 static unsigned int 1184 getpf(struct adapter *sc) 1185 { 1186 int rc, *data; 1187 uint_t n, pf; 1188 1189 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip, 1190 DDI_PROP_DONTPASS, "reg", &data, &n); 1191 if (rc != DDI_SUCCESS) { 1192 cxgb_printf(sc->dip, CE_WARN, 1193 "failed to lookup \"reg\" property: %d", rc); 1194 return (0xff); 1195 } 1196 1197 pf = PCI_REG_FUNC_G(data[0]); 1198 ddi_prop_free(data); 1199 1200 return (pf); 1201 } 1202 1203 1204 static struct fw_info * 1205 find_fw_info(int chip) 1206 { 1207 u32 i; 1208 1209 fi[0].chip = CHELSIO_T4; 1210 fi[0].fw_hdr.chip = FW_HDR_CHIP_T4; 1211 fi[0].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T4)); 1212 fi[0].fw_hdr.intfver_nic = FW_INTFVER(T4, NIC); 1213 fi[0].fw_hdr.intfver_vnic = FW_INTFVER(T4, VNIC); 1214 fi[0].fw_hdr.intfver_ofld = FW_INTFVER(T4, OFLD); 1215 fi[0].fw_hdr.intfver_ri = FW_INTFVER(T4, RI); 1216 fi[0].fw_hdr.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU); 1217 fi[0].fw_hdr.intfver_iscsi = FW_INTFVER(T4, ISCSI); 1218 fi[0].fw_hdr.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU); 1219 fi[0].fw_hdr.intfver_fcoe = FW_INTFVER(T4, FCOE); 1220 1221 fi[1].chip = CHELSIO_T5; 1222 fi[1].fw_hdr.chip = FW_HDR_CHIP_T5; 1223 fi[1].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T5)); 1224 fi[1].fw_hdr.intfver_nic = FW_INTFVER(T5, NIC); 1225 fi[1].fw_hdr.intfver_vnic = FW_INTFVER(T5, VNIC); 1226 fi[1].fw_hdr.intfver_ofld = FW_INTFVER(T5, OFLD); 1227 fi[1].fw_hdr.intfver_ri = FW_INTFVER(T5, RI); 1228 fi[1].fw_hdr.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU); 1229 fi[1].fw_hdr.intfver_iscsi = FW_INTFVER(T5, ISCSI); 1230 fi[1].fw_hdr.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU); 1231 fi[1].fw_hdr.intfver_fcoe = FW_INTFVER(T5, FCOE); 1232 1233 fi[2].chip = CHELSIO_T6; 1234 fi[2].fw_hdr.chip = FW_HDR_CHIP_T6; 1235 fi[2].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T6)); 1236 fi[2].fw_hdr.intfver_nic = FW_INTFVER(T6, NIC); 1237 fi[2].fw_hdr.intfver_vnic = FW_INTFVER(T6, VNIC); 1238 fi[2].fw_hdr.intfver_ofld = FW_INTFVER(T6, OFLD); 1239 fi[2].fw_hdr.intfver_ri = FW_INTFVER(T6, RI); 1240 fi[2].fw_hdr.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU); 1241 fi[2].fw_hdr.intfver_iscsi = FW_INTFVER(T6, ISCSI); 1242 fi[2].fw_hdr.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU); 1243 fi[2].fw_hdr.intfver_fcoe = FW_INTFVER(T6, FCOE); 1244 1245 for (i = 0; i < ARRAY_SIZE(fi); i++) { 1246 if (fi[i].chip == chip) 1247 return &fi[i]; 1248 } 1249 1250 return NULL; 1251 } 1252 1253 /* 1254 * Install a compatible firmware (if required), establish contact with it, 1255 * become the master, and reset the device. 1256 */ 1257 static int 1258 prep_firmware(struct adapter *sc) 1259 { 1260 int rc; 1261 int fw_size; 1262 int reset = 1; 1263 enum dev_state state; 1264 unsigned char *fw_data; 1265 struct fw_info *fw_info; 1266 struct fw_hdr *card_fw; 1267 1268 struct driver_properties *p = &sc->props; 1269 1270 /* Contact firmware, request master */ 1271 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state); 1272 if (rc < 0) { 1273 rc = -rc; 1274 cxgb_printf(sc->dip, CE_WARN, 1275 "failed to connect to the firmware: %d.", rc); 1276 return (rc); 1277 } 1278 1279 if (rc == sc->mbox) 1280 sc->flags |= MASTER_PF; 1281 1282 /* We may need FW version info for later reporting */ 1283 t4_get_version_info(sc); 1284 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(sc->params.chip)); 1285 /* allocate memory to read the header of the firmware on the 1286 * card 1287 */ 1288 if (!fw_info) { 1289 cxgb_printf(sc->dip, CE_WARN, 1290 "unable to look up firmware information for chip %d.\n", 1291 CHELSIO_CHIP_VERSION(sc->params.chip)); 1292 return EINVAL; 1293 } 1294 card_fw = kmem_zalloc(sizeof(*card_fw), KM_SLEEP); 1295 if(!card_fw) { 1296 cxgb_printf(sc->dip, CE_WARN, 1297 "Memory allocation for card FW header failed\n"); 1298 return ENOMEM; 1299 } 1300 switch(CHELSIO_CHIP_VERSION(sc->params.chip)) { 1301 case CHELSIO_T4: 1302 fw_data = t4fw_data; 1303 fw_size = t4fw_size; 1304 break; 1305 case CHELSIO_T5: 1306 fw_data = t5fw_data; 1307 fw_size = t5fw_size; 1308 break; 1309 case CHELSIO_T6: 1310 fw_data = t6fw_data; 1311 fw_size = t6fw_size; 1312 break; 1313 default: 1314 cxgb_printf(sc->dip, CE_WARN, "Adapter type not supported\n"); 1315 kmem_free(card_fw, sizeof(*card_fw)); 1316 return EINVAL; 1317 } 1318 1319 rc = -t4_prep_fw(sc, fw_info, fw_data, fw_size, card_fw, 1320 p->t4_fw_install, state, &reset); 1321 1322 kmem_free(card_fw, sizeof(*card_fw)); 1323 1324 if (rc != 0) { 1325 cxgb_printf(sc->dip, CE_WARN, 1326 "failed to install firmware: %d", rc); 1327 return (rc); 1328 } else { 1329 /* refresh */ 1330 (void) t4_check_fw_version(sc); 1331 } 1332 1333 /* Reset device */ 1334 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST); 1335 if (rc != 0) { 1336 cxgb_printf(sc->dip, CE_WARN, 1337 "firmware reset failed: %d.", rc); 1338 if (rc != ETIMEDOUT && rc != EIO) 1339 (void) t4_fw_bye(sc, sc->mbox); 1340 return (rc); 1341 } 1342 1343 /* Partition adapter resources as specified in the config file. */ 1344 if (sc->flags & MASTER_PF) { 1345 /* Handle default vs special T4 config file */ 1346 1347 rc = partition_resources(sc); 1348 if (rc != 0) 1349 goto err; /* error message displayed already */ 1350 } 1351 1352 sc->flags |= FW_OK; 1353 return (0); 1354 err: 1355 return (rc); 1356 1357 } 1358 1359 static const struct memwin t4_memwin[] = { 1360 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 1361 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 1362 { MEMWIN2_BASE, MEMWIN2_APERTURE } 1363 }; 1364 1365 static const struct memwin t5_memwin[] = { 1366 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 1367 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 1368 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 1369 }; 1370 1371 #define FW_PARAM_DEV(param) \ 1372 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 1373 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 1374 #define FW_PARAM_PFVF(param) \ 1375 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 1376 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 1377 1378 /* 1379 * Verify that the memory range specified by the memtype/offset/len pair is 1380 * valid and lies entirely within the memtype specified. The global address of 1381 * the start of the range is returned in addr. 1382 */ 1383 int 1384 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len, 1385 uint32_t *addr) 1386 { 1387 uint32_t em, addr_len, maddr, mlen; 1388 1389 /* Memory can only be accessed in naturally aligned 4 byte units */ 1390 if (off & 3 || len & 3 || len == 0) 1391 return (EINVAL); 1392 1393 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 1394 switch (mtype) { 1395 case MEM_EDC0: 1396 if (!(em & F_EDRAM0_ENABLE)) 1397 return (EINVAL); 1398 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 1399 maddr = G_EDRAM0_BASE(addr_len) << 20; 1400 mlen = G_EDRAM0_SIZE(addr_len) << 20; 1401 break; 1402 case MEM_EDC1: 1403 if (!(em & F_EDRAM1_ENABLE)) 1404 return (EINVAL); 1405 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 1406 maddr = G_EDRAM1_BASE(addr_len) << 20; 1407 mlen = G_EDRAM1_SIZE(addr_len) << 20; 1408 break; 1409 case MEM_MC: 1410 if (!(em & F_EXT_MEM_ENABLE)) 1411 return (EINVAL); 1412 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 1413 maddr = G_EXT_MEM_BASE(addr_len) << 20; 1414 mlen = G_EXT_MEM_SIZE(addr_len) << 20; 1415 break; 1416 case MEM_MC1: 1417 if (is_t4(sc->params.chip) || !(em & F_EXT_MEM1_ENABLE)) 1418 return (EINVAL); 1419 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 1420 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 1421 mlen = G_EXT_MEM1_SIZE(addr_len) << 20; 1422 break; 1423 default: 1424 return (EINVAL); 1425 } 1426 1427 if (mlen > 0 && off < mlen && off + len <= mlen) { 1428 *addr = maddr + off; /* global address */ 1429 return (0); 1430 } 1431 1432 return (EFAULT); 1433 } 1434 1435 void 1436 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture) 1437 { 1438 const struct memwin *mw; 1439 1440 if (is_t4(sc->params.chip)) { 1441 mw = &t4_memwin[win]; 1442 } else { 1443 mw = &t5_memwin[win]; 1444 } 1445 1446 if (base != NULL) 1447 *base = mw->base; 1448 if (aperture != NULL) 1449 *aperture = mw->aperture; 1450 } 1451 1452 /* 1453 * Upload configuration file to card's memory. 1454 */ 1455 static int 1456 upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma) 1457 { 1458 int rc = 0, cflen; 1459 u_int i, n; 1460 uint32_t param, val, addr, mtype, maddr; 1461 uint32_t off, mw_base, mw_aperture; 1462 const uint32_t *cfdata; 1463 1464 /* Figure out where the firmware wants us to upload it. */ 1465 param = FW_PARAM_DEV(CF); 1466 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 1467 if (rc != 0) { 1468 /* Firmwares without config file support will fail this way */ 1469 cxgb_printf(sc->dip, CE_WARN, 1470 "failed to query config file location: %d.\n", rc); 1471 return (rc); 1472 } 1473 *mt = mtype = G_FW_PARAMS_PARAM_Y(val); 1474 *ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16; 1475 1476 switch (CHELSIO_CHIP_VERSION(sc->params.chip)) { 1477 case CHELSIO_T4: 1478 cflen = t4cfg_size & ~3; 1479 /* LINTED: E_BAD_PTR_CAST_ALIGN */ 1480 cfdata = (const uint32_t *)t4cfg_data; 1481 break; 1482 case CHELSIO_T5: 1483 cflen = t5cfg_size & ~3; 1484 /* LINTED: E_BAD_PTR_CAST_ALIGN */ 1485 cfdata = (const uint32_t *)t5cfg_data; 1486 break; 1487 case CHELSIO_T6: 1488 cflen = t6cfg_size & ~3; 1489 /* LINTED: E_BAD_PTR_CAST_ALIGN */ 1490 cfdata = (const uint32_t *)t6cfg_data; 1491 break; 1492 default: 1493 cxgb_printf(sc->dip, CE_WARN, 1494 "Invalid Adapter detected\n"); 1495 return EINVAL; 1496 } 1497 1498 if (cflen > FLASH_CFG_MAX_SIZE) { 1499 cxgb_printf(sc->dip, CE_WARN, 1500 "config file too long (%d, max allowed is %d). ", 1501 cflen, FLASH_CFG_MAX_SIZE); 1502 return (EFBIG); 1503 } 1504 1505 rc = validate_mt_off_len(sc, mtype, maddr, cflen, &addr); 1506 if (rc != 0) { 1507 1508 cxgb_printf(sc->dip, CE_WARN, 1509 "%s: addr (%d/0x%x) or len %d is not valid: %d. " 1510 "Will try to use the config on the card, if any.\n", 1511 __func__, mtype, maddr, cflen, rc); 1512 return (EFAULT); 1513 } 1514 1515 memwin_info(sc, 2, &mw_base, &mw_aperture); 1516 while (cflen) { 1517 off = position_memwin(sc, 2, addr); 1518 n = min(cflen, mw_aperture - off); 1519 for (i = 0; i < n; i += 4) 1520 t4_write_reg(sc, mw_base + off + i, *cfdata++); 1521 cflen -= n; 1522 addr += n; 1523 } 1524 1525 return (rc); 1526 } 1527 1528 /* 1529 * Partition chip resources for use between various PFs, VFs, etc. This is done 1530 * by uploading the firmware configuration file to the adapter and instructing 1531 * the firmware to process it. 1532 */ 1533 static int 1534 partition_resources(struct adapter *sc) 1535 { 1536 int rc; 1537 struct fw_caps_config_cmd caps; 1538 uint32_t mtype, maddr, finicsum, cfcsum; 1539 1540 rc = upload_config_file(sc, &mtype, &maddr); 1541 if (rc != 0) { 1542 mtype = FW_MEMTYPE_CF_FLASH; 1543 maddr = t4_flash_cfg_addr(sc); 1544 } 1545 1546 bzero(&caps, sizeof (caps)); 1547 caps.op_to_write = BE_32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1548 F_FW_CMD_REQUEST | F_FW_CMD_READ); 1549 caps.cfvalid_to_len16 = BE_32(F_FW_CAPS_CONFIG_CMD_CFVALID | 1550 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 1551 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps)); 1552 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps); 1553 if (rc != 0) { 1554 cxgb_printf(sc->dip, CE_WARN, 1555 "failed to pre-process config file: %d.\n", rc); 1556 return (rc); 1557 } 1558 1559 finicsum = ntohl(caps.finicsum); 1560 cfcsum = ntohl(caps.cfcsum); 1561 if (finicsum != cfcsum) { 1562 cxgb_printf(sc->dip, CE_WARN, 1563 "WARNING: config file checksum mismatch: %08x %08x\n", 1564 finicsum, cfcsum); 1565 } 1566 sc->cfcsum = cfcsum; 1567 1568 /* TODO: Need to configure this correctly */ 1569 caps.toecaps = htons(FW_CAPS_CONFIG_TOE); 1570 caps.iscsicaps = 0; 1571 caps.rdmacaps = 0; 1572 caps.fcoecaps = 0; 1573 /* TODO: Disable VNIC cap for now */ 1574 caps.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); 1575 1576 caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1577 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 1578 caps.cfvalid_to_len16 = htonl(FW_LEN16(caps)); 1579 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), NULL); 1580 if (rc != 0) { 1581 cxgb_printf(sc->dip, CE_WARN, 1582 "failed to process config file: %d.\n", rc); 1583 return (rc); 1584 } 1585 1586 return (0); 1587 } 1588 1589 /* 1590 * Tweak configuration based on module parameters, etc. Most of these have 1591 * defaults assigned to them by Firmware Configuration Files (if we're using 1592 * them) but need to be explicitly set if we're using hard-coded 1593 * initialization. But even in the case of using Firmware Configuration 1594 * Files, we'd like to expose the ability to change these via module 1595 * parameters so these are essentially common tweaks/settings for 1596 * Configuration Files and hard-coded initialization ... 1597 */ 1598 static int 1599 adap__pre_init_tweaks(struct adapter *sc) 1600 { 1601 int rx_dma_offset = 2; /* Offset of RX packets into DMA buffers */ 1602 1603 /* 1604 * Fix up various Host-Dependent Parameters like Page Size, Cache 1605 * Line Size, etc. The firmware default is for a 4KB Page Size and 1606 * 64B Cache Line Size ... 1607 */ 1608 (void) t4_fixup_host_params_compat(sc, PAGE_SIZE, CACHE_LINE, T5_LAST_REV); 1609 1610 t4_set_reg_field(sc, A_SGE_CONTROL, 1611 V_PKTSHIFT(M_PKTSHIFT), V_PKTSHIFT(rx_dma_offset)); 1612 1613 return 0; 1614 } 1615 /* 1616 * Retrieve parameters that are needed (or nice to have) prior to calling 1617 * t4_sge_init and t4_fw_initialize. 1618 */ 1619 static int 1620 get_params__pre_init(struct adapter *sc) 1621 { 1622 int rc; 1623 uint32_t param[2], val[2]; 1624 struct fw_devlog_cmd cmd; 1625 struct devlog_params *dlog = &sc->params.devlog; 1626 1627 /* 1628 * Grab the raw VPD parameters. 1629 */ 1630 rc = -t4_get_raw_vpd_params(sc, &sc->params.vpd); 1631 if (rc != 0) { 1632 cxgb_printf(sc->dip, CE_WARN, 1633 "failed to query VPD parameters (pre_init): %d.\n", rc); 1634 return (rc); 1635 } 1636 1637 param[0] = FW_PARAM_DEV(PORTVEC); 1638 param[1] = FW_PARAM_DEV(CCLK); 1639 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 1640 if (rc != 0) { 1641 cxgb_printf(sc->dip, CE_WARN, 1642 "failed to query parameters (pre_init): %d.\n", rc); 1643 return (rc); 1644 } 1645 1646 sc->params.portvec = val[0]; 1647 sc->params.nports = 0; 1648 while (val[0]) { 1649 sc->params.nports++; 1650 val[0] &= val[0] - 1; 1651 } 1652 1653 sc->params.vpd.cclk = val[1]; 1654 1655 /* Read device log parameters. */ 1656 bzero(&cmd, sizeof (cmd)); 1657 cmd.op_to_write = htonl(V_FW_CMD_OP(FW_DEVLOG_CMD) | 1658 F_FW_CMD_REQUEST | F_FW_CMD_READ); 1659 cmd.retval_len16 = htonl(FW_LEN16(cmd)); 1660 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof (cmd), &cmd); 1661 if (rc != 0) { 1662 cxgb_printf(sc->dip, CE_WARN, 1663 "failed to get devlog parameters: %d.\n", rc); 1664 bzero(dlog, sizeof (*dlog)); 1665 rc = 0; /* devlog isn't critical for device operation */ 1666 } else { 1667 val[0] = ntohl(cmd.memtype_devlog_memaddr16_devlog); 1668 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]); 1669 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4; 1670 dlog->size = ntohl(cmd.memsize_devlog); 1671 } 1672 1673 return (rc); 1674 } 1675 1676 /* 1677 * Retrieve various parameters that are of interest to the driver. The device 1678 * has been initialized by the firmware at this point. 1679 */ 1680 static int 1681 get_params__post_init(struct adapter *sc) 1682 { 1683 int rc; 1684 uint32_t param[7], val[7]; 1685 struct fw_caps_config_cmd caps; 1686 1687 param[0] = FW_PARAM_PFVF(IQFLINT_START); 1688 param[1] = FW_PARAM_PFVF(EQ_START); 1689 param[2] = FW_PARAM_PFVF(FILTER_START); 1690 param[3] = FW_PARAM_PFVF(FILTER_END); 1691 param[4] = FW_PARAM_PFVF(L2T_START); 1692 param[5] = FW_PARAM_PFVF(L2T_END); 1693 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 1694 if (rc != 0) { 1695 cxgb_printf(sc->dip, CE_WARN, 1696 "failed to query parameters (post_init): %d.\n", rc); 1697 return (rc); 1698 } 1699 1700 /* LINTED: E_ASSIGN_NARROW_CONV */ 1701 sc->sge.iq_start = val[0]; 1702 sc->sge.eq_start = val[1]; 1703 sc->tids.ftid_base = val[2]; 1704 sc->tids.nftids = val[3] - val[2] + 1; 1705 sc->vres.l2t.start = val[4]; 1706 sc->vres.l2t.size = val[5] - val[4] + 1; 1707 1708 /* get capabilites */ 1709 bzero(&caps, sizeof (caps)); 1710 caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1711 F_FW_CMD_REQUEST | F_FW_CMD_READ); 1712 caps.cfvalid_to_len16 = htonl(FW_LEN16(caps)); 1713 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps); 1714 if (rc != 0) { 1715 cxgb_printf(sc->dip, CE_WARN, 1716 "failed to get card capabilities: %d.\n", rc); 1717 return (rc); 1718 } 1719 1720 if (caps.toecaps != 0) { 1721 /* query offload-related parameters */ 1722 param[0] = FW_PARAM_DEV(NTID); 1723 param[1] = FW_PARAM_PFVF(SERVER_START); 1724 param[2] = FW_PARAM_PFVF(SERVER_END); 1725 param[3] = FW_PARAM_PFVF(TDDP_START); 1726 param[4] = FW_PARAM_PFVF(TDDP_END); 1727 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 1728 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 1729 if (rc != 0) { 1730 cxgb_printf(sc->dip, CE_WARN, 1731 "failed to query TOE parameters: %d.\n", rc); 1732 return (rc); 1733 } 1734 sc->tids.ntids = val[0]; 1735 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 1736 sc->tids.stid_base = val[1]; 1737 sc->tids.nstids = val[2] - val[1] + 1; 1738 sc->vres.ddp.start = val[3]; 1739 sc->vres.ddp.size = val[4] - val[3] + 1; 1740 sc->params.ofldq_wr_cred = val[5]; 1741 sc->params.offload = 1; 1742 } 1743 1744 /* These are finalized by FW initialization, load their values now */ 1745 val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 1746 sc->params.tp.tre = G_TIMERRESOLUTION(val[0]); 1747 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]); 1748 t4_read_mtu_tbl(sc, sc->params.mtus, NULL); 1749 1750 return (rc); 1751 } 1752 1753 static int 1754 set_params__post_init(struct adapter *sc) 1755 { 1756 uint32_t param, val; 1757 1758 /* ask for encapsulated CPLs */ 1759 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 1760 val = 1; 1761 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 1762 1763 return (0); 1764 } 1765 1766 /* TODO: verify */ 1767 static void 1768 setup_memwin(struct adapter *sc) 1769 { 1770 pci_regspec_t *data; 1771 int rc; 1772 uint_t n; 1773 uintptr_t bar0; 1774 uintptr_t mem_win0_base, mem_win1_base, mem_win2_base; 1775 uintptr_t mem_win2_aperture; 1776 1777 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip, 1778 DDI_PROP_DONTPASS, "assigned-addresses", (int **)&data, &n); 1779 if (rc != DDI_SUCCESS) { 1780 cxgb_printf(sc->dip, CE_WARN, 1781 "failed to lookup \"assigned-addresses\" property: %d", rc); 1782 return; 1783 } 1784 n /= sizeof (*data); 1785 1786 bar0 = ((uint64_t)data[0].pci_phys_mid << 32) | data[0].pci_phys_low; 1787 ddi_prop_free(data); 1788 1789 if (is_t4(sc->params.chip)) { 1790 mem_win0_base = bar0 + MEMWIN0_BASE; 1791 mem_win1_base = bar0 + MEMWIN1_BASE; 1792 mem_win2_base = bar0 + MEMWIN2_BASE; 1793 mem_win2_aperture = MEMWIN2_APERTURE; 1794 } else { 1795 /* For T5, only relative offset inside the PCIe BAR is passed */ 1796 mem_win0_base = MEMWIN0_BASE; 1797 mem_win1_base = MEMWIN1_BASE; 1798 mem_win2_base = MEMWIN2_BASE_T5; 1799 mem_win2_aperture = MEMWIN2_APERTURE_T5; 1800 } 1801 1802 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0), 1803 mem_win0_base | V_BIR(0) | 1804 V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); 1805 1806 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1), 1807 mem_win1_base | V_BIR(0) | 1808 V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); 1809 1810 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2), 1811 mem_win2_base | V_BIR(0) | 1812 V_WINDOW(ilog2(mem_win2_aperture) - 10)); 1813 1814 /* flush */ 1815 (void)t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 1816 } 1817 1818 /* 1819 * Positions the memory window such that it can be used to access the specified 1820 * address in the chip's address space. The return value is the offset of addr 1821 * from the start of the window. 1822 */ 1823 uint32_t 1824 position_memwin(struct adapter *sc, int n, uint32_t addr) 1825 { 1826 uint32_t start, pf; 1827 uint32_t reg; 1828 1829 if (addr & 3) { 1830 cxgb_printf(sc->dip, CE_WARN, 1831 "addr (0x%x) is not at a 4B boundary.\n", addr); 1832 return (EFAULT); 1833 } 1834 1835 if (is_t4(sc->params.chip)) { 1836 pf = 0; 1837 start = addr & ~0xf; /* start must be 16B aligned */ 1838 } else { 1839 pf = V_PFNUM(sc->pf); 1840 start = addr & ~0x7f; /* start must be 128B aligned */ 1841 } 1842 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n); 1843 1844 t4_write_reg(sc, reg, start | pf); 1845 (void) t4_read_reg(sc, reg); 1846 1847 return (addr - start); 1848 } 1849 1850 1851 /* 1852 * Reads the named property and fills up the "data" array (which has at least 1853 * "count" elements). We first try and lookup the property for our dev_t and 1854 * then retry with DDI_DEV_T_ANY if it's not found. 1855 * 1856 * Returns non-zero if the property was found and "data" has been updated. 1857 */ 1858 static int 1859 prop_lookup_int_array(struct adapter *sc, char *name, int *data, uint_t count) 1860 { 1861 dev_info_t *dip = sc->dip; 1862 dev_t dev = sc->dev; 1863 int rc, *d; 1864 uint_t i, n; 1865 1866 rc = ddi_prop_lookup_int_array(dev, dip, DDI_PROP_DONTPASS, 1867 name, &d, &n); 1868 if (rc == DDI_PROP_SUCCESS) 1869 goto found; 1870 1871 if (rc != DDI_PROP_NOT_FOUND) { 1872 cxgb_printf(dip, CE_WARN, 1873 "failed to lookup property %s for minor %d: %d.", 1874 name, getminor(dev), rc); 1875 return (0); 1876 } 1877 1878 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1879 name, &d, &n); 1880 if (rc == DDI_PROP_SUCCESS) 1881 goto found; 1882 1883 if (rc != DDI_PROP_NOT_FOUND) { 1884 cxgb_printf(dip, CE_WARN, 1885 "failed to lookup property %s: %d.", name, rc); 1886 return (0); 1887 } 1888 1889 return (0); 1890 1891 found: 1892 if (n > count) { 1893 cxgb_printf(dip, CE_NOTE, 1894 "property %s has too many elements (%d), ignoring extras", 1895 name, n); 1896 } 1897 1898 for (i = 0; i < n && i < count; i++) 1899 data[i] = d[i]; 1900 ddi_prop_free(d); 1901 1902 return (1); 1903 } 1904 1905 static int 1906 prop_lookup_int(struct adapter *sc, char *name, int defval) 1907 { 1908 int rc; 1909 1910 rc = ddi_prop_get_int(sc->dev, sc->dip, DDI_PROP_DONTPASS, name, -1); 1911 if (rc != -1) 1912 return (rc); 1913 1914 return (ddi_prop_get_int(DDI_DEV_T_ANY, sc->dip, DDI_PROP_DONTPASS, 1915 name, defval)); 1916 } 1917 1918 static int 1919 init_driver_props(struct adapter *sc, struct driver_properties *p) 1920 { 1921 dev_t dev = sc->dev; 1922 dev_info_t *dip = sc->dip; 1923 int i, *data; 1924 uint_t tmr[SGE_NTIMERS] = {5, 10, 20, 50, 100, 200}; 1925 uint_t cnt[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */ 1926 1927 /* 1928 * Holdoff timer 1929 */ 1930 data = &p->timer_val[0]; 1931 for (i = 0; i < SGE_NTIMERS; i++) 1932 data[i] = tmr[i]; 1933 (void) prop_lookup_int_array(sc, "holdoff-timer-values", data, 1934 SGE_NTIMERS); 1935 for (i = 0; i < SGE_NTIMERS; i++) { 1936 int limit = 200U; 1937 if (data[i] > limit) { 1938 cxgb_printf(dip, CE_WARN, 1939 "holdoff timer %d is too high (%d), lowered to %d.", 1940 i, data[i], limit); 1941 data[i] = limit; 1942 } 1943 } 1944 (void) ddi_prop_update_int_array(dev, dip, "holdoff-timer-values", 1945 data, SGE_NTIMERS); 1946 1947 /* 1948 * Holdoff packet counter 1949 */ 1950 data = &p->counter_val[0]; 1951 for (i = 0; i < SGE_NCOUNTERS; i++) 1952 data[i] = cnt[i]; 1953 (void) prop_lookup_int_array(sc, "holdoff-pkt-counter-values", data, 1954 SGE_NCOUNTERS); 1955 for (i = 0; i < SGE_NCOUNTERS; i++) { 1956 int limit = M_THRESHOLD_0; 1957 if (data[i] > limit) { 1958 cxgb_printf(dip, CE_WARN, 1959 "holdoff pkt-counter %d is too high (%d), " 1960 "lowered to %d.", i, data[i], limit); 1961 data[i] = limit; 1962 } 1963 } 1964 (void) ddi_prop_update_int_array(dev, dip, "holdoff-pkt-counter-values", 1965 data, SGE_NCOUNTERS); 1966 1967 /* 1968 * Maximum # of tx and rx queues to use for each 1969 * 100G, 40G, 25G, 10G and 1G port. 1970 */ 1971 p->max_ntxq_10g = prop_lookup_int(sc, "max-ntxq-10G-port", 8); 1972 (void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port", 1973 p->max_ntxq_10g); 1974 1975 p->max_nrxq_10g = prop_lookup_int(sc, "max-nrxq-10G-port", 8); 1976 (void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port", 1977 p->max_nrxq_10g); 1978 1979 p->max_ntxq_1g = prop_lookup_int(sc, "max-ntxq-1G-port", 2); 1980 (void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port", 1981 p->max_ntxq_1g); 1982 1983 p->max_nrxq_1g = prop_lookup_int(sc, "max-nrxq-1G-port", 2); 1984 (void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port", 1985 p->max_nrxq_1g); 1986 1987 #ifdef TCP_OFFLOAD_ENABLE 1988 p->max_nofldtxq_10g = prop_lookup_int(sc, "max-nofldtxq-10G-port", 8); 1989 (void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port", 1990 p->max_nofldtxq_10g); 1991 1992 p->max_nofldrxq_10g = prop_lookup_int(sc, "max-nofldrxq-10G-port", 2); 1993 (void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port", 1994 p->max_nofldrxq_10g); 1995 1996 p->max_nofldtxq_1g = prop_lookup_int(sc, "max-nofldtxq-1G-port", 2); 1997 (void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port", 1998 p->max_nofldtxq_1g); 1999 2000 p->max_nofldrxq_1g = prop_lookup_int(sc, "max-nofldrxq-1G-port", 1); 2001 (void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port", 2002 p->max_nofldrxq_1g); 2003 #endif 2004 2005 /* 2006 * Holdoff parameters for 10G and 1G ports. 2007 */ 2008 p->tmr_idx_10g = prop_lookup_int(sc, "holdoff-timer-idx-10G", 0); 2009 (void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-10G", 2010 p->tmr_idx_10g); 2011 2012 p->pktc_idx_10g = prop_lookup_int(sc, "holdoff-pktc-idx-10G", 2); 2013 (void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-10G", 2014 p->pktc_idx_10g); 2015 2016 p->tmr_idx_1g = prop_lookup_int(sc, "holdoff-timer-idx-1G", 0); 2017 (void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-1G", 2018 p->tmr_idx_1g); 2019 2020 p->pktc_idx_1g = prop_lookup_int(sc, "holdoff-pktc-idx-1G", 2); 2021 (void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-1G", 2022 p->pktc_idx_1g); 2023 2024 /* 2025 * Size (number of entries) of each tx and rx queue. 2026 */ 2027 i = prop_lookup_int(sc, "qsize-txq", TX_EQ_QSIZE); 2028 p->qsize_txq = max(i, 128); 2029 if (p->qsize_txq != i) { 2030 cxgb_printf(dip, CE_WARN, 2031 "using %d instead of %d as the tx queue size", 2032 p->qsize_txq, i); 2033 } 2034 (void) ddi_prop_update_int(dev, dip, "qsize-txq", p->qsize_txq); 2035 2036 i = prop_lookup_int(sc, "qsize-rxq", RX_IQ_QSIZE); 2037 p->qsize_rxq = max(i, 128); 2038 while (p->qsize_rxq & 7) 2039 p->qsize_rxq--; 2040 if (p->qsize_rxq != i) { 2041 cxgb_printf(dip, CE_WARN, 2042 "using %d instead of %d as the rx queue size", 2043 p->qsize_rxq, i); 2044 } 2045 (void) ddi_prop_update_int(dev, dip, "qsize-rxq", p->qsize_rxq); 2046 2047 /* 2048 * Interrupt types allowed. 2049 * Bits 0, 1, 2 = INTx, MSI, MSI-X respectively. See sys/ddi_intr.h 2050 */ 2051 p->intr_types = prop_lookup_int(sc, "interrupt-types", 2052 DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED); 2053 (void) ddi_prop_update_int(dev, dip, "interrupt-types", p->intr_types); 2054 2055 /* 2056 * Forwarded interrupt queues. Create this property to force the driver 2057 * to use forwarded interrupt queues. 2058 */ 2059 if (ddi_prop_exists(dev, dip, DDI_PROP_DONTPASS, 2060 "interrupt-forwarding") != 0 || 2061 ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 2062 "interrupt-forwarding") != 0) { 2063 UNIMPLEMENTED(); 2064 (void) ddi_prop_create(dev, dip, DDI_PROP_CANSLEEP, 2065 "interrupt-forwarding", NULL, 0); 2066 } 2067 2068 /* 2069 * Write combining 2070 * 0 to disable, 1 to enable 2071 */ 2072 p->wc = prop_lookup_int(sc, "write-combine", 1); 2073 cxgb_printf(dip, CE_WARN, "write-combine: using of %d", p->wc); 2074 if (p->wc != 0 && p->wc != 1) { 2075 cxgb_printf(dip, CE_WARN, 2076 "write-combine: using 1 instead of %d", p->wc); 2077 p->wc = 1; 2078 } 2079 (void) ddi_prop_update_int(dev, dip, "write-combine", p->wc); 2080 2081 p->t4_fw_install = prop_lookup_int(sc, "t4_fw_install", 1); 2082 if (p->t4_fw_install != 0 && p->t4_fw_install != 2) 2083 p->t4_fw_install = 1; 2084 (void) ddi_prop_update_int(dev, dip, "t4_fw_install", p->t4_fw_install); 2085 2086 /* Multiple Rings */ 2087 p->multi_rings = prop_lookup_int(sc, "multi-rings", 1); 2088 if (p->multi_rings != 0 && p->multi_rings != 1) { 2089 cxgb_printf(dip, CE_NOTE, 2090 "multi-rings: using value 1 instead of %d", p->multi_rings); 2091 p->multi_rings = 1; 2092 } 2093 2094 (void) ddi_prop_update_int(dev, dip, "multi-rings", p->multi_rings); 2095 2096 return (0); 2097 } 2098 2099 static int 2100 remove_extra_props(struct adapter *sc, int n10g, int n1g) 2101 { 2102 if (n10g == 0) { 2103 (void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-10G-port"); 2104 (void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-10G-port"); 2105 (void) ddi_prop_remove(sc->dev, sc->dip, 2106 "holdoff-timer-idx-10G"); 2107 (void) ddi_prop_remove(sc->dev, sc->dip, 2108 "holdoff-pktc-idx-10G"); 2109 } 2110 2111 if (n1g == 0) { 2112 (void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-1G-port"); 2113 (void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-1G-port"); 2114 (void) ddi_prop_remove(sc->dev, sc->dip, 2115 "holdoff-timer-idx-1G"); 2116 (void) ddi_prop_remove(sc->dev, sc->dip, "holdoff-pktc-idx-1G"); 2117 } 2118 2119 return (0); 2120 } 2121 2122 static int 2123 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, 2124 struct intrs_and_queues *iaq) 2125 { 2126 struct driver_properties *p = &sc->props; 2127 int rc, itype, itypes, navail, nc, nrxq10g, nrxq1g, n; 2128 int nofldrxq10g = 0, nofldrxq1g = 0; 2129 2130 bzero(iaq, sizeof (*iaq)); 2131 nc = ncpus; /* our snapshot of the number of CPUs */ 2132 iaq->ntxq10g = min(nc, p->max_ntxq_10g); 2133 iaq->ntxq1g = min(nc, p->max_ntxq_1g); 2134 iaq->nrxq10g = nrxq10g = min(nc, p->max_nrxq_10g); 2135 iaq->nrxq1g = nrxq1g = min(nc, p->max_nrxq_1g); 2136 #ifdef TCP_OFFLOAD_ENABLE 2137 iaq->nofldtxq10g = min(nc, p->max_nofldtxq_10g); 2138 iaq->nofldtxq1g = min(nc, p->max_nofldtxq_1g); 2139 iaq->nofldrxq10g = nofldrxq10g = min(nc, p->max_nofldrxq_10g); 2140 iaq->nofldrxq1g = nofldrxq1g = min(nc, p->max_nofldrxq_1g); 2141 #endif 2142 2143 rc = ddi_intr_get_supported_types(sc->dip, &itypes); 2144 if (rc != DDI_SUCCESS) { 2145 cxgb_printf(sc->dip, CE_WARN, 2146 "failed to determine supported interrupt types: %d", rc); 2147 return (rc); 2148 } 2149 2150 for (itype = DDI_INTR_TYPE_MSIX; itype; itype >>= 1) { 2151 ASSERT(itype == DDI_INTR_TYPE_MSIX || 2152 itype == DDI_INTR_TYPE_MSI || 2153 itype == DDI_INTR_TYPE_FIXED); 2154 2155 if ((itype & itypes & p->intr_types) == 0) 2156 continue; /* not supported or not allowed */ 2157 2158 navail = 0; 2159 rc = ddi_intr_get_navail(sc->dip, itype, &navail); 2160 if (rc != DDI_SUCCESS || navail == 0) { 2161 cxgb_printf(sc->dip, CE_WARN, 2162 "failed to get # of interrupts for type %d: %d", 2163 itype, rc); 2164 continue; /* carry on */ 2165 } 2166 2167 iaq->intr_type = itype; 2168 if (navail == 0) 2169 continue; 2170 2171 /* 2172 * Best option: an interrupt vector for errors, one for the 2173 * firmware event queue, and one each for each rxq (NIC as well 2174 * as offload). 2175 */ 2176 iaq->nirq = T4_EXTRA_INTR; 2177 iaq->nirq += n10g * (nrxq10g + nofldrxq10g); 2178 iaq->nirq += n1g * (nrxq1g + nofldrxq1g); 2179 2180 if (iaq->nirq <= navail && 2181 (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) { 2182 iaq->intr_fwd = 0; 2183 goto allocate; 2184 } 2185 2186 /* 2187 * Second best option: an interrupt vector for errors, one for 2188 * the firmware event queue, and one each for either NIC or 2189 * offload rxq's. 2190 */ 2191 iaq->nirq = T4_EXTRA_INTR; 2192 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g); 2193 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g); 2194 if (iaq->nirq <= navail && 2195 (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) { 2196 iaq->intr_fwd = 1; 2197 goto allocate; 2198 } 2199 2200 /* 2201 * Next best option: an interrupt vector for errors, one for the 2202 * firmware event queue, and at least one per port. At this 2203 * point we know we'll have to downsize nrxq or nofldrxq to fit 2204 * what's available to us. 2205 */ 2206 iaq->nirq = T4_EXTRA_INTR; 2207 iaq->nirq += n10g + n1g; 2208 if (iaq->nirq <= navail) { 2209 int leftover = navail - iaq->nirq; 2210 2211 if (n10g > 0) { 2212 int target = max(nrxq10g, nofldrxq10g); 2213 2214 n = 1; 2215 while (n < target && leftover >= n10g) { 2216 leftover -= n10g; 2217 iaq->nirq += n10g; 2218 n++; 2219 } 2220 iaq->nrxq10g = min(n, nrxq10g); 2221 #ifdef TCP_OFFLOAD_ENABLE 2222 iaq->nofldrxq10g = min(n, nofldrxq10g); 2223 #endif 2224 } 2225 2226 if (n1g > 0) { 2227 int target = max(nrxq1g, nofldrxq1g); 2228 2229 n = 1; 2230 while (n < target && leftover >= n1g) { 2231 leftover -= n1g; 2232 iaq->nirq += n1g; 2233 n++; 2234 } 2235 iaq->nrxq1g = min(n, nrxq1g); 2236 #ifdef TCP_OFFLOAD_ENABLE 2237 iaq->nofldrxq1g = min(n, nofldrxq1g); 2238 #endif 2239 } 2240 2241 /* We have arrived at a minimum value required to enable 2242 * per queue irq(either NIC or offload). Thus for non- 2243 * offload case, we will get a vector per queue, while 2244 * offload case, we will get a vector per offload/NIC q. 2245 * Hence enable Interrupt forwarding only for offload 2246 * case. 2247 */ 2248 #ifdef TCP_OFFLOAD_ENABLE 2249 if (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq)) { 2250 iaq->intr_fwd = 1; 2251 #else 2252 if (itype != DDI_INTR_TYPE_MSI) { 2253 #endif 2254 goto allocate; 2255 } 2256 } 2257 2258 /* 2259 * Least desirable option: one interrupt vector for everything. 2260 */ 2261 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1; 2262 #ifdef TCP_OFFLOAD_ENABLE 2263 iaq->nofldrxq10g = iaq->nofldrxq1g = 1; 2264 #endif 2265 iaq->intr_fwd = 1; 2266 2267 allocate: 2268 return (0); 2269 } 2270 2271 cxgb_printf(sc->dip, CE_WARN, 2272 "failed to find a usable interrupt type. supported=%d, allowed=%d", 2273 itypes, p->intr_types); 2274 return (DDI_FAILURE); 2275 } 2276 2277 static int 2278 add_child_node(struct adapter *sc, int idx) 2279 { 2280 int rc; 2281 struct port_info *pi; 2282 2283 if (idx < 0 || idx >= sc->params.nports) 2284 return (EINVAL); 2285 2286 pi = sc->port[idx]; 2287 if (pi == NULL) 2288 return (ENODEV); /* t4_port_init failed earlier */ 2289 2290 PORT_LOCK(pi); 2291 if (pi->dip != NULL) { 2292 rc = 0; /* EEXIST really, but then bus_config fails */ 2293 goto done; 2294 } 2295 2296 rc = ndi_devi_alloc(sc->dip, T4_PORT_NAME, DEVI_SID_NODEID, &pi->dip); 2297 if (rc != DDI_SUCCESS || pi->dip == NULL) { 2298 rc = ENOMEM; 2299 goto done; 2300 } 2301 2302 (void) ddi_set_parent_data(pi->dip, pi); 2303 (void) ndi_devi_bind_driver(pi->dip, 0); 2304 rc = 0; 2305 done: 2306 PORT_UNLOCK(pi); 2307 return (rc); 2308 } 2309 2310 static int 2311 remove_child_node(struct adapter *sc, int idx) 2312 { 2313 int rc; 2314 struct port_info *pi; 2315 2316 if (idx < 0 || idx >= sc->params.nports) 2317 return (EINVAL); 2318 2319 pi = sc->port[idx]; 2320 if (pi == NULL) 2321 return (ENODEV); 2322 2323 PORT_LOCK(pi); 2324 if (pi->dip == NULL) { 2325 rc = ENODEV; 2326 goto done; 2327 } 2328 2329 rc = ndi_devi_free(pi->dip); 2330 if (rc == 0) 2331 pi->dip = NULL; 2332 done: 2333 PORT_UNLOCK(pi); 2334 return (rc); 2335 } 2336 2337 #define KS_UINIT(x) kstat_named_init(&kstatp->x, #x, KSTAT_DATA_ULONG) 2338 #define KS_CINIT(x) kstat_named_init(&kstatp->x, #x, KSTAT_DATA_CHAR) 2339 #define KS_U_SET(x, y) kstatp->x.value.ul = (y) 2340 #define KS_C_SET(x, ...) \ 2341 (void) snprintf(kstatp->x.value.c, 16, __VA_ARGS__) 2342 2343 /* 2344 * t4nex:X:config 2345 */ 2346 struct t4_kstats { 2347 kstat_named_t chip_ver; 2348 kstat_named_t fw_vers; 2349 kstat_named_t tp_vers; 2350 kstat_named_t driver_version; 2351 kstat_named_t serial_number; 2352 kstat_named_t ec_level; 2353 kstat_named_t id; 2354 kstat_named_t bus_type; 2355 kstat_named_t bus_width; 2356 kstat_named_t bus_speed; 2357 kstat_named_t core_clock; 2358 kstat_named_t port_cnt; 2359 kstat_named_t port_type; 2360 kstat_named_t pci_vendor_id; 2361 kstat_named_t pci_device_id; 2362 }; 2363 static kstat_t * 2364 setup_kstats(struct adapter *sc) 2365 { 2366 kstat_t *ksp; 2367 struct t4_kstats *kstatp; 2368 int ndata; 2369 struct pci_params *p = &sc->params.pci; 2370 struct vpd_params *v = &sc->params.vpd; 2371 uint16_t pci_vendor, pci_device; 2372 2373 ndata = sizeof (struct t4_kstats) / sizeof (kstat_named_t); 2374 2375 ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "config", 2376 "nexus", KSTAT_TYPE_NAMED, ndata, 0); 2377 if (ksp == NULL) { 2378 cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats."); 2379 return (NULL); 2380 } 2381 2382 kstatp = (struct t4_kstats *)ksp->ks_data; 2383 2384 KS_UINIT(chip_ver); 2385 KS_CINIT(fw_vers); 2386 KS_CINIT(tp_vers); 2387 KS_CINIT(driver_version); 2388 KS_CINIT(serial_number); 2389 KS_CINIT(ec_level); 2390 KS_CINIT(id); 2391 KS_CINIT(bus_type); 2392 KS_CINIT(bus_width); 2393 KS_CINIT(bus_speed); 2394 KS_UINIT(core_clock); 2395 KS_UINIT(port_cnt); 2396 KS_CINIT(port_type); 2397 KS_CINIT(pci_vendor_id); 2398 KS_CINIT(pci_device_id); 2399 2400 KS_U_SET(chip_ver, sc->params.chip); 2401 KS_C_SET(fw_vers, "%d.%d.%d.%d", 2402 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 2403 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 2404 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 2405 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 2406 KS_C_SET(tp_vers, "%d.%d.%d.%d", 2407 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), 2408 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), 2409 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), 2410 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); 2411 KS_C_SET(driver_version, DRV_VERSION); 2412 KS_C_SET(serial_number, "%s", v->sn); 2413 KS_C_SET(ec_level, "%s", v->ec); 2414 KS_C_SET(id, "%s", v->id); 2415 KS_C_SET(bus_type, "pci-express"); 2416 KS_C_SET(bus_width, "x%d lanes", p->width); 2417 KS_C_SET(bus_speed, "%d", p->speed); 2418 KS_U_SET(core_clock, v->cclk); 2419 KS_U_SET(port_cnt, sc->params.nports); 2420 2421 t4_os_pci_read_cfg2(sc, PCI_CONF_VENID, &pci_vendor); 2422 KS_C_SET(pci_vendor_id, "0x%x", pci_vendor); 2423 2424 t4_os_pci_read_cfg2(sc, PCI_CONF_DEVID, &pci_device); 2425 KS_C_SET(pci_device_id, "0x%x", pci_device); 2426 2427 KS_C_SET(port_type, "%s/%s/%s/%s", 2428 print_port_speed(sc->port[0]), 2429 print_port_speed(sc->port[1]), 2430 print_port_speed(sc->port[2]), 2431 print_port_speed(sc->port[3])); 2432 2433 /* Do NOT set ksp->ks_update. These kstats do not change. */ 2434 2435 /* Install the kstat */ 2436 ksp->ks_private = (void *)sc; 2437 kstat_install(ksp); 2438 2439 return (ksp); 2440 } 2441 2442 /* 2443 * t4nex:X:stat 2444 */ 2445 struct t4_wc_kstats { 2446 kstat_named_t write_coal_success; 2447 kstat_named_t write_coal_failure; 2448 }; 2449 static kstat_t * 2450 setup_wc_kstats(struct adapter *sc) 2451 { 2452 kstat_t *ksp; 2453 struct t4_wc_kstats *kstatp; 2454 int ndata; 2455 2456 ndata = sizeof(struct t4_wc_kstats) / sizeof(kstat_named_t); 2457 ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "stats", 2458 "nexus", KSTAT_TYPE_NAMED, ndata, 0); 2459 if (ksp == NULL) { 2460 cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats."); 2461 return (NULL); 2462 } 2463 2464 kstatp = (struct t4_wc_kstats *)ksp->ks_data; 2465 2466 KS_UINIT(write_coal_success); 2467 KS_UINIT(write_coal_failure); 2468 2469 ksp->ks_update = update_wc_kstats; 2470 /* Install the kstat */ 2471 ksp->ks_private = (void *)sc; 2472 kstat_install(ksp); 2473 2474 return (ksp); 2475 } 2476 2477 static int 2478 update_wc_kstats(kstat_t *ksp, int rw) 2479 { 2480 struct t4_wc_kstats *kstatp = (struct t4_wc_kstats *)ksp->ks_data; 2481 struct adapter *sc = ksp->ks_private; 2482 uint32_t wc_total, wc_success, wc_failure; 2483 2484 if (rw == KSTAT_WRITE) 2485 return (0); 2486 2487 if (is_t5(sc->params.chip)) { 2488 wc_total = t4_read_reg(sc, A_SGE_STAT_TOTAL); 2489 wc_failure = t4_read_reg(sc, A_SGE_STAT_MATCH); 2490 wc_success = wc_total - wc_failure; 2491 } else { 2492 wc_success = 0; 2493 wc_failure = 0; 2494 } 2495 2496 KS_U_SET(write_coal_success, wc_success); 2497 KS_U_SET(write_coal_failure, wc_failure); 2498 2499 return (0); 2500 } 2501 2502 int 2503 adapter_full_init(struct adapter *sc) 2504 { 2505 int i, rc = 0; 2506 2507 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2508 2509 rc = t4_setup_adapter_queues(sc); 2510 if (rc != 0) 2511 goto done; 2512 2513 if (sc->intr_cap & DDI_INTR_FLAG_BLOCK) 2514 (void) ddi_intr_block_enable(sc->intr_handle, sc->intr_count); 2515 else { 2516 for (i = 0; i < sc->intr_count; i++) 2517 (void) ddi_intr_enable(sc->intr_handle[i]); 2518 } 2519 t4_intr_enable(sc); 2520 sc->flags |= FULL_INIT_DONE; 2521 2522 #ifdef TCP_OFFLOAD_ENABLE 2523 /* TODO: wrong place to enable TOE capability */ 2524 if (is_offload(sc) != 0) { 2525 for_each_port(sc, i) { 2526 struct port_info *pi = sc->port[i]; 2527 rc = toe_capability(pi, 1); 2528 if (rc != 0) { 2529 cxgb_printf(pi->dip, CE_WARN, 2530 "Failed to activate toe capability: %d", 2531 rc); 2532 rc = 0; /* not a fatal error */ 2533 } 2534 } 2535 } 2536 #endif 2537 2538 done: 2539 if (rc != 0) 2540 (void) adapter_full_uninit(sc); 2541 2542 return (rc); 2543 } 2544 2545 int 2546 adapter_full_uninit(struct adapter *sc) 2547 { 2548 int i, rc = 0; 2549 2550 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2551 2552 if (sc->intr_cap & DDI_INTR_FLAG_BLOCK) 2553 (void) ddi_intr_block_disable(sc->intr_handle, sc->intr_count); 2554 else { 2555 for (i = 0; i < sc->intr_count; i++) 2556 (void) ddi_intr_disable(sc->intr_handle[i]); 2557 } 2558 2559 rc = t4_teardown_adapter_queues(sc); 2560 if (rc != 0) 2561 return (rc); 2562 2563 sc->flags &= ~FULL_INIT_DONE; 2564 2565 return (0); 2566 } 2567 2568 int 2569 port_full_init(struct port_info *pi) 2570 { 2571 struct adapter *sc = pi->adapter; 2572 uint16_t *rss; 2573 struct sge_rxq *rxq; 2574 int rc, i; 2575 2576 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2577 ASSERT((pi->flags & PORT_INIT_DONE) == 0); 2578 2579 /* 2580 * Allocate tx/rx/fl queues for this port. 2581 */ 2582 rc = t4_setup_port_queues(pi); 2583 if (rc != 0) 2584 goto done; /* error message displayed already */ 2585 2586 /* 2587 * Setup RSS for this port. 2588 */ 2589 rss = kmem_zalloc(pi->nrxq * sizeof (*rss), KM_SLEEP); 2590 for_each_rxq(pi, i, rxq) { 2591 rss[i] = rxq->iq.abs_id; 2592 } 2593 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, 2594 pi->rss_size, rss, pi->nrxq); 2595 kmem_free(rss, pi->nrxq * sizeof (*rss)); 2596 if (rc != 0) { 2597 cxgb_printf(pi->dip, CE_WARN, "rss_config failed: %d", rc); 2598 goto done; 2599 } 2600 2601 pi->flags |= PORT_INIT_DONE; 2602 done: 2603 if (rc != 0) 2604 (void) port_full_uninit(pi); 2605 2606 return (rc); 2607 } 2608 2609 /* 2610 * Idempotent. 2611 */ 2612 int 2613 port_full_uninit(struct port_info *pi) 2614 { 2615 2616 ASSERT(pi->flags & PORT_INIT_DONE); 2617 2618 (void) t4_teardown_port_queues(pi); 2619 pi->flags &= ~PORT_INIT_DONE; 2620 2621 return (0); 2622 } 2623 2624 void 2625 enable_port_queues(struct port_info *pi) 2626 { 2627 struct adapter *sc = pi->adapter; 2628 int i; 2629 struct sge_iq *iq; 2630 struct sge_rxq *rxq; 2631 #ifdef TCP_OFFLOAD_ENABLE 2632 struct sge_ofld_rxq *ofld_rxq; 2633 #endif 2634 2635 ASSERT(pi->flags & PORT_INIT_DONE); 2636 2637 /* 2638 * TODO: whatever was queued up after we set iq->state to IQS_DISABLED 2639 * back in disable_port_queues will be processed now, after an unbounded 2640 * delay. This can't be good. 2641 */ 2642 2643 #ifdef TCP_OFFLOAD_ENABLE 2644 for_each_ofld_rxq(pi, i, ofld_rxq) { 2645 iq = &ofld_rxq->iq; 2646 if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) != 2647 IQS_DISABLED) 2648 panic("%s: iq %p wasn't disabled", __func__, 2649 (void *)iq); 2650 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 2651 V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id)); 2652 } 2653 #endif 2654 2655 for_each_rxq(pi, i, rxq) { 2656 iq = &rxq->iq; 2657 if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) != 2658 IQS_DISABLED) 2659 panic("%s: iq %p wasn't disabled", __func__, 2660 (void *) iq); 2661 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 2662 V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id)); 2663 } 2664 } 2665 2666 void 2667 disable_port_queues(struct port_info *pi) 2668 { 2669 int i; 2670 struct adapter *sc = pi->adapter; 2671 struct sge_rxq *rxq; 2672 #ifdef TCP_OFFLOAD_ENABLE 2673 struct sge_ofld_rxq *ofld_rxq; 2674 #endif 2675 2676 ASSERT(pi->flags & PORT_INIT_DONE); 2677 2678 /* 2679 * TODO: need proper implementation for all tx queues (ctrl, eth, ofld). 2680 */ 2681 2682 #ifdef TCP_OFFLOAD_ENABLE 2683 for_each_ofld_rxq(pi, i, ofld_rxq) { 2684 while (atomic_cas_uint(&ofld_rxq->iq.state, IQS_IDLE, 2685 IQS_DISABLED) != IQS_IDLE) 2686 msleep(1); 2687 } 2688 #endif 2689 2690 for_each_rxq(pi, i, rxq) { 2691 while (atomic_cas_uint(&rxq->iq.state, IQS_IDLE, 2692 IQS_DISABLED) != IQS_IDLE) 2693 msleep(1); 2694 } 2695 2696 mutex_enter(&sc->sfl_lock); 2697 #ifdef TCP_OFFLOAD_ENABLE 2698 for_each_ofld_rxq(pi, i, ofld_rxq) 2699 ofld_rxq->fl.flags |= FL_DOOMED; 2700 #endif 2701 for_each_rxq(pi, i, rxq) 2702 rxq->fl.flags |= FL_DOOMED; 2703 mutex_exit(&sc->sfl_lock); 2704 /* TODO: need to wait for all fl's to be removed from sc->sfl */ 2705 } 2706 2707 void 2708 t4_fatal_err(struct adapter *sc) 2709 { 2710 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 2711 t4_intr_disable(sc); 2712 cxgb_printf(sc->dip, CE_WARN, 2713 "encountered fatal error, adapter stopped."); 2714 } 2715 2716 int 2717 t4_os_find_pci_capability(struct adapter *sc, int cap) 2718 { 2719 uint16_t stat; 2720 uint8_t cap_ptr, cap_id; 2721 2722 t4_os_pci_read_cfg2(sc, PCI_CONF_STAT, &stat); 2723 if ((stat & PCI_STAT_CAP) == 0) 2724 return (0); /* does not implement capabilities */ 2725 2726 t4_os_pci_read_cfg1(sc, PCI_CONF_CAP_PTR, &cap_ptr); 2727 while (cap_ptr) { 2728 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_ID, &cap_id); 2729 if (cap_id == cap) 2730 return (cap_ptr); /* found */ 2731 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_NEXT_PTR, &cap_ptr); 2732 } 2733 2734 return (0); /* not found */ 2735 } 2736 2737 void 2738 t4_os_portmod_changed(const struct adapter *sc, int idx) 2739 { 2740 static const char *mod_str[] = { 2741 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 2742 }; 2743 const struct port_info *pi = sc->port[idx]; 2744 2745 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 2746 cxgb_printf(pi->dip, CE_NOTE, "transceiver unplugged."); 2747 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 2748 cxgb_printf(pi->dip, CE_NOTE, 2749 "unknown transceiver inserted.\n"); 2750 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 2751 cxgb_printf(pi->dip, CE_NOTE, 2752 "unsupported transceiver inserted.\n"); 2753 else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str)) 2754 cxgb_printf(pi->dip, CE_NOTE, "%s transceiver inserted.\n", 2755 mod_str[pi->mod_type]); 2756 else 2757 cxgb_printf(pi->dip, CE_NOTE, "transceiver (type %d) inserted.", 2758 pi->mod_type); 2759 } 2760 2761 /* ARGSUSED */ 2762 static int 2763 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m) 2764 { 2765 if (m != NULL) 2766 freemsg(m); 2767 return (0); 2768 } 2769 2770 int 2771 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h) 2772 { 2773 uint_t *loc, new; 2774 2775 if (opcode >= ARRAY_SIZE(sc->cpl_handler)) 2776 return (EINVAL); 2777 2778 new = (uint_t)(unsigned long) (h ? h : cpl_not_handled); 2779 loc = (uint_t *)&sc->cpl_handler[opcode]; 2780 (void) atomic_swap_uint(loc, new); 2781 2782 return (0); 2783 } 2784 2785 static int 2786 fw_msg_not_handled(struct adapter *sc, const __be64 *data) 2787 { 2788 struct cpl_fw6_msg *cpl; 2789 2790 cpl = __containerof((void *)data, struct cpl_fw6_msg, data); 2791 2792 cxgb_printf(sc->dip, CE_WARN, "%s fw_msg type %d", __func__, cpl->type); 2793 return (0); 2794 } 2795 2796 int 2797 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h) 2798 { 2799 fw_msg_handler_t *loc, new; 2800 2801 if (type >= ARRAY_SIZE(sc->fw_msg_handler)) 2802 return (EINVAL); 2803 2804 /* 2805 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL 2806 * handler dispatch table. Reject any attempt to install a handler for 2807 * this subtype. 2808 */ 2809 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL) 2810 return (EINVAL); 2811 2812 new = h ? h : fw_msg_not_handled; 2813 loc = &sc->fw_msg_handler[type]; 2814 (void)atomic_swap_ptr(loc, (void *)new); 2815 2816 return (0); 2817 } 2818 2819 #ifdef TCP_OFFLOAD_ENABLE 2820 static int 2821 toe_capability(struct port_info *pi, int enable) 2822 { 2823 int rc; 2824 struct adapter *sc = pi->adapter; 2825 2826 if (!is_offload(sc)) 2827 return (ENODEV); 2828 2829 if (enable != 0) { 2830 if (isset(&sc->offload_map, pi->port_id) != 0) 2831 return (0); 2832 2833 if (sc->offload_map == 0) { 2834 rc = activate_uld(sc, ULD_TOM, &sc->tom); 2835 if (rc != 0) 2836 return (rc); 2837 } 2838 2839 setbit(&sc->offload_map, pi->port_id); 2840 } else { 2841 if (!isset(&sc->offload_map, pi->port_id)) 2842 return (0); 2843 2844 clrbit(&sc->offload_map, pi->port_id); 2845 2846 if (sc->offload_map == 0) { 2847 rc = deactivate_uld(&sc->tom); 2848 if (rc != 0) { 2849 setbit(&sc->offload_map, pi->port_id); 2850 return (rc); 2851 } 2852 } 2853 } 2854 2855 return (0); 2856 } 2857 2858 /* 2859 * Add an upper layer driver to the global list. 2860 */ 2861 int 2862 t4_register_uld(struct uld_info *ui) 2863 { 2864 int rc = 0; 2865 struct uld_info *u; 2866 2867 mutex_enter(&t4_uld_list_lock); 2868 SLIST_FOREACH(u, &t4_uld_list, link) { 2869 if (u->uld_id == ui->uld_id) { 2870 rc = EEXIST; 2871 goto done; 2872 } 2873 } 2874 2875 SLIST_INSERT_HEAD(&t4_uld_list, ui, link); 2876 ui->refcount = 0; 2877 done: 2878 mutex_exit(&t4_uld_list_lock); 2879 return (rc); 2880 } 2881 2882 int 2883 t4_unregister_uld(struct uld_info *ui) 2884 { 2885 int rc = EINVAL; 2886 struct uld_info *u; 2887 2888 mutex_enter(&t4_uld_list_lock); 2889 2890 SLIST_FOREACH(u, &t4_uld_list, link) { 2891 if (u == ui) { 2892 if (ui->refcount > 0) { 2893 rc = EBUSY; 2894 goto done; 2895 } 2896 2897 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link); 2898 rc = 0; 2899 goto done; 2900 } 2901 } 2902 done: 2903 mutex_exit(&t4_uld_list_lock); 2904 return (rc); 2905 } 2906 2907 static int 2908 activate_uld(struct adapter *sc, int id, struct uld_softc *usc) 2909 { 2910 int rc = EAGAIN; 2911 struct uld_info *ui; 2912 2913 mutex_enter(&t4_uld_list_lock); 2914 2915 SLIST_FOREACH(ui, &t4_uld_list, link) { 2916 if (ui->uld_id == id) { 2917 rc = ui->attach(sc, &usc->softc); 2918 if (rc == 0) { 2919 ASSERT(usc->softc != NULL); 2920 ui->refcount++; 2921 usc->uld = ui; 2922 } 2923 goto done; 2924 } 2925 } 2926 done: 2927 mutex_exit(&t4_uld_list_lock); 2928 2929 return (rc); 2930 } 2931 2932 static int 2933 deactivate_uld(struct uld_softc *usc) 2934 { 2935 int rc; 2936 2937 mutex_enter(&t4_uld_list_lock); 2938 2939 if (usc->uld == NULL || usc->softc == NULL) { 2940 rc = EINVAL; 2941 goto done; 2942 } 2943 2944 rc = usc->uld->detach(usc->softc); 2945 if (rc == 0) { 2946 ASSERT(usc->uld->refcount > 0); 2947 usc->uld->refcount--; 2948 usc->uld = NULL; 2949 usc->softc = NULL; 2950 } 2951 done: 2952 mutex_exit(&t4_uld_list_lock); 2953 2954 return (rc); 2955 } 2956 2957 void 2958 t4_iterate(void (*func)(int, void *), void *arg) 2959 { 2960 struct adapter *sc; 2961 2962 mutex_enter(&t4_adapter_list_lock); 2963 SLIST_FOREACH(sc, &t4_adapter_list, link) { 2964 /* 2965 * func should not make any assumptions about what state sc is 2966 * in - the only guarantee is that sc->sc_lock is a valid lock. 2967 */ 2968 func(ddi_get_instance(sc->dip), arg); 2969 } 2970 mutex_exit(&t4_adapter_list_lock); 2971 } 2972 2973 #endif 2974 2975 static int 2976 t4_sensor_read(struct adapter *sc, uint32_t diag, uint32_t *valp) 2977 { 2978 int rc; 2979 struct port_info *pi = sc->port[0]; 2980 uint32_t param, val; 2981 2982 rc = begin_synchronized_op(pi, 1, 1); 2983 if (rc != 0) { 2984 return (rc); 2985 } 2986 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 2987 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 2988 V_FW_PARAMS_PARAM_Y(diag); 2989 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 2990 end_synchronized_op(pi, 1); 2991 2992 if (rc != 0) { 2993 return (rc); 2994 } 2995 2996 if (val == 0) { 2997 return (EIO); 2998 } 2999 3000 *valp = val; 3001 return (0); 3002 } 3003 3004 static int 3005 t4_temperature_read(void *arg, sensor_ioctl_scalar_t *scalar) 3006 { 3007 int ret; 3008 struct adapter *sc = arg; 3009 uint32_t val; 3010 3011 ret = t4_sensor_read(sc, FW_PARAM_DEV_DIAG_TMP, &val); 3012 if (ret != 0) { 3013 return (ret); 3014 } 3015 3016 /* 3017 * The device measures temperature in units of 1 degree Celsius. We 3018 * don't know its precision. 3019 */ 3020 scalar->sis_unit = SENSOR_UNIT_CELSIUS; 3021 scalar->sis_gran = 1; 3022 scalar->sis_prec = 0; 3023 scalar->sis_value = val; 3024 3025 return (0); 3026 } 3027 3028 static int 3029 t4_voltage_read(void *arg, sensor_ioctl_scalar_t *scalar) 3030 { 3031 int ret; 3032 struct adapter *sc = arg; 3033 uint32_t val; 3034 3035 ret = t4_sensor_read(sc, FW_PARAM_DEV_DIAG_VDD, &val); 3036 if (ret != 0) { 3037 return (ret); 3038 } 3039 3040 scalar->sis_unit = SENSOR_UNIT_VOLTS; 3041 scalar->sis_gran = 1000; 3042 scalar->sis_prec = 0; 3043 scalar->sis_value = val; 3044 3045 return (0); 3046 } 3047 3048 /* 3049 * While the hardware supports the ability to read and write the flash image, 3050 * this is not currently wired up. 3051 */ 3052 static int 3053 t4_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps) 3054 { 3055 *caps = DDI_UFM_CAP_REPORT; 3056 return (0); 3057 } 3058 3059 static int 3060 t4_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, 3061 ddi_ufm_image_t *imgp) 3062 { 3063 if (imgno != 0) { 3064 return (EINVAL); 3065 } 3066 3067 ddi_ufm_image_set_desc(imgp, "Firmware"); 3068 ddi_ufm_image_set_nslots(imgp, 1); 3069 3070 return (0); 3071 } 3072 3073 static int 3074 t4_ufm_fill_slot_version(nvlist_t *nvl, const char *key, uint32_t vers) 3075 { 3076 char buf[128]; 3077 3078 if (vers == 0) { 3079 return (0); 3080 } 3081 3082 if (snprintf(buf, sizeof (buf), "%u.%u.%u.%u", 3083 G_FW_HDR_FW_VER_MAJOR(vers), G_FW_HDR_FW_VER_MINOR(vers), 3084 G_FW_HDR_FW_VER_MICRO(vers), G_FW_HDR_FW_VER_BUILD(vers)) >= 3085 sizeof (buf)) { 3086 return (EOVERFLOW); 3087 } 3088 3089 return (nvlist_add_string(nvl, key, buf)); 3090 } 3091 3092 static int 3093 t4_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, uint_t slotno, 3094 ddi_ufm_slot_t *slotp) 3095 { 3096 int ret; 3097 struct adapter *sc = arg; 3098 nvlist_t *misc = NULL; 3099 char buf[128]; 3100 3101 if (imgno != 0 || slotno != 0) { 3102 return (EINVAL); 3103 } 3104 3105 if (snprintf(buf, sizeof (buf), "%u.%u.%u.%u", 3106 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 3107 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 3108 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 3109 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)) >= sizeof (buf)) { 3110 return (EOVERFLOW); 3111 } 3112 3113 ddi_ufm_slot_set_version(slotp, buf); 3114 3115 (void) nvlist_alloc(&misc, NV_UNIQUE_NAME, KM_SLEEP); 3116 if ((ret = t4_ufm_fill_slot_version(misc, "TP Microcode", 3117 sc->params.tp_vers)) != 0) { 3118 goto err; 3119 } 3120 3121 if ((ret = t4_ufm_fill_slot_version(misc, "Bootstrap", 3122 sc->params.bs_vers)) != 0) { 3123 goto err; 3124 } 3125 3126 if ((ret = t4_ufm_fill_slot_version(misc, "Expansion ROM", 3127 sc->params.er_vers)) != 0) { 3128 goto err; 3129 } 3130 3131 if ((ret = nvlist_add_uint32(misc, "Serial Configuration", 3132 sc->params.scfg_vers)) != 0) { 3133 goto err; 3134 } 3135 3136 if ((ret = nvlist_add_uint32(misc, "VPD Version", 3137 sc->params.vpd_vers)) != 0) { 3138 goto err; 3139 } 3140 3141 ddi_ufm_slot_set_misc(slotp, misc); 3142 ddi_ufm_slot_set_attrs(slotp, DDI_UFM_ATTR_ACTIVE | 3143 DDI_UFM_ATTR_WRITEABLE | DDI_UFM_ATTR_READABLE); 3144 return (0); 3145 3146 err: 3147 nvlist_free(misc); 3148 return (ret); 3149 3150 } 3151