1 /* 2 * This file and its contents are supplied under the terms of the 3 * Common Development and Distribution License ("CDDL"), version 1.0. 4 * You may only use this file in accordance with the terms of version 5 * 1.0 of the CDDL. 6 * 7 * A full copy of the text of the CDDL should have accompanied this 8 * source. A copy of the CDDL is also available via the Internet at 9 * http://www.illumos.org/license/CDDL. 10 */ 11 12 /* 13 * This file is part of the Chelsio T4 support code. 14 * 15 * Copyright (C) 2010-2013 Chelsio Communications. All rights reserved. 16 * 17 * This program is distributed in the hope that it will be useful, but WITHOUT 18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 19 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this 20 * release for licensing terms and conditions. 21 */ 22 23 /* 24 * Copyright 2024 Oxide Computer Company 25 */ 26 27 #include <sys/ddi.h> 28 #include <sys/sunddi.h> 29 #include <sys/sunndi.h> 30 #include <sys/modctl.h> 31 #include <sys/conf.h> 32 #include <sys/devops.h> 33 #include <sys/pci.h> 34 #include <sys/atomic.h> 35 #include <sys/types.h> 36 #include <sys/file.h> 37 #include <sys/errno.h> 38 #include <sys/open.h> 39 #include <sys/cred.h> 40 #include <sys/stat.h> 41 #include <sys/mkdev.h> 42 #include <sys/queue.h> 43 #include <sys/containerof.h> 44 #include <sys/sensors.h> 45 #include <sys/firmload.h> 46 #include <sys/mac_provider.h> 47 #include <sys/mac_ether.h> 48 #include <sys/vlan.h> 49 50 #include "version.h" 51 #include "common/common.h" 52 #include "common/t4_msg.h" 53 #include "common/t4_regs.h" 54 #include "common/t4_extra_regs.h" 55 #include "t4_l2t.h" 56 57 static int t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp); 58 static int t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp); 59 static int t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp, 60 int *rp); 61 struct cb_ops t4_cb_ops = { 62 .cb_open = t4_cb_open, 63 .cb_close = t4_cb_close, 64 .cb_strategy = nodev, 65 .cb_print = nodev, 66 .cb_dump = nodev, 67 .cb_read = nodev, 68 .cb_write = nodev, 69 .cb_ioctl = t4_cb_ioctl, 70 .cb_devmap = nodev, 71 .cb_mmap = nodev, 72 .cb_segmap = nodev, 73 .cb_chpoll = nochpoll, 74 .cb_prop_op = ddi_prop_op, 75 .cb_flag = D_MP, 76 .cb_rev = CB_REV, 77 .cb_aread = nodev, 78 .cb_awrite = nodev 79 }; 80 81 static int t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, 82 void *arg, void *result); 83 static int t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, 84 void *arg, dev_info_t **cdipp); 85 static int t4_bus_unconfig(dev_info_t *dip, uint_t flags, 86 ddi_bus_config_op_t op, void *arg); 87 struct bus_ops t4_bus_ops = { 88 .busops_rev = BUSO_REV, 89 .bus_ctl = t4_bus_ctl, 90 .bus_prop_op = ddi_bus_prop_op, 91 .bus_config = t4_bus_config, 92 .bus_unconfig = t4_bus_unconfig, 93 }; 94 95 static int t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, 96 void **rp); 97 static int t4_devo_probe(dev_info_t *dip); 98 static int t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 99 static int t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 100 static int t4_devo_quiesce(dev_info_t *dip); 101 static struct dev_ops t4_dev_ops = { 102 .devo_rev = DEVO_REV, 103 .devo_getinfo = t4_devo_getinfo, 104 .devo_identify = nulldev, 105 .devo_probe = t4_devo_probe, 106 .devo_attach = t4_devo_attach, 107 .devo_detach = t4_devo_detach, 108 .devo_reset = nodev, 109 .devo_cb_ops = &t4_cb_ops, 110 .devo_bus_ops = &t4_bus_ops, 111 .devo_quiesce = &t4_devo_quiesce, 112 }; 113 114 static struct modldrv t4nex_modldrv = { 115 .drv_modops = &mod_driverops, 116 .drv_linkinfo = "Chelsio T4-T6 nexus " DRV_VERSION, 117 .drv_dev_ops = &t4_dev_ops 118 }; 119 120 static struct modlinkage t4nex_modlinkage = { 121 .ml_rev = MODREV_1, 122 .ml_linkage = {&t4nex_modldrv, NULL}, 123 }; 124 125 void *t4_list; 126 127 struct intrs_and_queues { 128 int intr_type; /* DDI_INTR_TYPE_* */ 129 int nirq; /* Number of vectors */ 130 int intr_fwd; /* Interrupts forwarded */ 131 int ntxq10g; /* # of NIC txq's for each 10G port */ 132 int nrxq10g; /* # of NIC rxq's for each 10G port */ 133 int ntxq1g; /* # of NIC txq's for each 1G port */ 134 int nrxq1g; /* # of NIC rxq's for each 1G port */ 135 }; 136 137 static int cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, 138 mblk_t *m); 139 static int fw_msg_not_handled(struct adapter *, const __be64 *); 140 int t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h); 141 static unsigned int getpf(struct adapter *sc); 142 static int prep_firmware(struct adapter *sc); 143 static int upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma); 144 static int partition_resources(struct adapter *sc); 145 static int adap__pre_init_tweaks(struct adapter *sc); 146 static int get_params__pre_init(struct adapter *sc); 147 static int get_params__post_init(struct adapter *sc); 148 static int set_params__post_init(struct adapter *); 149 static void setup_memwin(struct adapter *sc); 150 static int validate_mt_off_len(struct adapter *, int, uint32_t, int, 151 uint32_t *); 152 void memwin_info(struct adapter *, int, uint32_t *, uint32_t *); 153 uint32_t position_memwin(struct adapter *, int, uint32_t); 154 static int prop_lookup_int_array(struct adapter *sc, char *name, int *data, 155 uint_t count); 156 static int prop_lookup_int_array(struct adapter *sc, char *name, int *data, 157 uint_t count); 158 static int init_driver_props(struct adapter *sc, struct driver_properties *p); 159 static int remove_extra_props(struct adapter *sc, int n10g, int n1g); 160 static int cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, 161 struct intrs_and_queues *iaq); 162 static int add_child_node(struct adapter *sc, int idx); 163 static int remove_child_node(struct adapter *sc, int idx); 164 static kstat_t *setup_kstats(struct adapter *sc); 165 static kstat_t *setup_wc_kstats(struct adapter *); 166 static int update_wc_kstats(kstat_t *, int); 167 static kmutex_t t4_adapter_list_lock; 168 static SLIST_HEAD(, adapter) t4_adapter_list; 169 170 static int t4_temperature_read(void *, sensor_ioctl_scalar_t *); 171 static int t4_voltage_read(void *, sensor_ioctl_scalar_t *); 172 static const ksensor_ops_t t4_temp_ops = { 173 .kso_kind = ksensor_kind_temperature, 174 .kso_scalar = t4_temperature_read 175 }; 176 177 static const ksensor_ops_t t4_volt_ops = { 178 .kso_kind = ksensor_kind_voltage, 179 .kso_scalar = t4_voltage_read 180 }; 181 182 static int t4_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *); 183 static int t4_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t, 184 ddi_ufm_image_t *); 185 static int t4_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t, 186 ddi_ufm_slot_t *); 187 static ddi_ufm_ops_t t4_ufm_ops = { 188 .ddi_ufm_op_fill_image = t4_ufm_fill_image, 189 .ddi_ufm_op_fill_slot = t4_ufm_fill_slot, 190 .ddi_ufm_op_getcaps = t4_ufm_getcaps 191 }; 192 193 int 194 _init(void) 195 { 196 int rc; 197 198 rc = ddi_soft_state_init(&t4_list, sizeof (struct adapter), 0); 199 if (rc != 0) 200 return (rc); 201 202 rc = mod_install(&t4nex_modlinkage); 203 if (rc != 0) 204 ddi_soft_state_fini(&t4_list); 205 206 mutex_init(&t4_adapter_list_lock, NULL, MUTEX_DRIVER, NULL); 207 SLIST_INIT(&t4_adapter_list); 208 209 return (rc); 210 } 211 212 int 213 _fini(void) 214 { 215 int rc; 216 217 rc = mod_remove(&t4nex_modlinkage); 218 if (rc != 0) 219 return (rc); 220 221 ddi_soft_state_fini(&t4_list); 222 return (0); 223 } 224 225 int 226 _info(struct modinfo *mi) 227 { 228 return (mod_info(&t4nex_modlinkage, mi)); 229 } 230 231 /* ARGSUSED */ 232 static int 233 t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **rp) 234 { 235 struct adapter *sc; 236 minor_t minor; 237 238 minor = getminor((dev_t)arg); /* same as instance# in our case */ 239 240 if (cmd == DDI_INFO_DEVT2DEVINFO) { 241 sc = ddi_get_soft_state(t4_list, minor); 242 if (sc == NULL) 243 return (DDI_FAILURE); 244 245 ASSERT(sc->dev == (dev_t)arg); 246 *rp = (void *)sc->dip; 247 } else if (cmd == DDI_INFO_DEVT2INSTANCE) 248 *rp = (void *) (unsigned long) minor; 249 else 250 ASSERT(0); 251 252 return (DDI_SUCCESS); 253 } 254 255 static int 256 t4_devo_probe(dev_info_t *dip) 257 { 258 int rc, id, *reg; 259 uint_t n, pf; 260 261 id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 262 "device-id", 0xffff); 263 if (id == 0xffff) 264 return (DDI_PROBE_DONTCARE); 265 266 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 267 "reg", ®, &n); 268 if (rc != DDI_SUCCESS) 269 return (DDI_PROBE_DONTCARE); 270 271 pf = PCI_REG_FUNC_G(reg[0]); 272 ddi_prop_free(reg); 273 274 /* Prevent driver attachment on any PF except 0 on the FPGA */ 275 if (id == 0xa000 && pf != 0) 276 return (DDI_PROBE_FAILURE); 277 278 return (DDI_PROBE_DONTCARE); 279 } 280 281 static int 282 t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 283 { 284 struct adapter *sc = NULL; 285 struct sge *s; 286 int i, instance, rc = DDI_SUCCESS, rqidx, tqidx, q; 287 int irq = 0, nxg = 0, n1g = 0; 288 char name[16]; 289 struct driver_properties *prp; 290 struct intrs_and_queues iaq; 291 ddi_device_acc_attr_t da = { 292 .devacc_attr_version = DDI_DEVICE_ATTR_V0, 293 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC, 294 .devacc_attr_dataorder = DDI_STRICTORDER_ACC 295 }; 296 ddi_device_acc_attr_t da1 = { 297 .devacc_attr_version = DDI_DEVICE_ATTR_V0, 298 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC, 299 .devacc_attr_dataorder = DDI_STRICTORDER_ACC 300 }; 301 302 if (cmd != DDI_ATTACH) 303 return (DDI_FAILURE); 304 305 /* 306 * Allocate space for soft state. 307 */ 308 instance = ddi_get_instance(dip); 309 rc = ddi_soft_state_zalloc(t4_list, instance); 310 if (rc != DDI_SUCCESS) { 311 cxgb_printf(dip, CE_WARN, 312 "failed to allocate soft state: %d", rc); 313 return (DDI_FAILURE); 314 } 315 316 sc = ddi_get_soft_state(t4_list, instance); 317 sc->dip = dip; 318 sc->dev = makedevice(ddi_driver_major(dip), instance); 319 mutex_init(&sc->lock, NULL, MUTEX_DRIVER, NULL); 320 cv_init(&sc->cv, NULL, CV_DRIVER, NULL); 321 mutex_init(&sc->sfl_lock, NULL, MUTEX_DRIVER, NULL); 322 TAILQ_INIT(&sc->sfl); 323 mutex_init(&sc->mbox_lock, NULL, MUTEX_DRIVER, NULL); 324 STAILQ_INIT(&sc->mbox_list); 325 326 mutex_enter(&t4_adapter_list_lock); 327 SLIST_INSERT_HEAD(&t4_adapter_list, sc, link); 328 mutex_exit(&t4_adapter_list_lock); 329 330 sc->pf = getpf(sc); 331 if (sc->pf > 8) { 332 rc = EINVAL; 333 cxgb_printf(dip, CE_WARN, 334 "failed to determine PCI PF# of device"); 335 goto done; 336 } 337 sc->mbox = sc->pf; 338 339 /* Initialize the driver properties */ 340 prp = &sc->props; 341 (void) init_driver_props(sc, prp); 342 343 /* 344 * Enable access to the PCI config space. 345 */ 346 rc = pci_config_setup(dip, &sc->pci_regh); 347 if (rc != DDI_SUCCESS) { 348 cxgb_printf(dip, CE_WARN, 349 "failed to enable PCI config space access: %d", rc); 350 goto done; 351 } 352 353 /* TODO: Set max read request to 4K */ 354 355 /* 356 * Enable MMIO access. 357 */ 358 rc = ddi_regs_map_setup(dip, 1, &sc->regp, 0, 0, &da, &sc->regh); 359 if (rc != DDI_SUCCESS) { 360 cxgb_printf(dip, CE_WARN, 361 "failed to map device registers: %d", rc); 362 goto done; 363 } 364 365 (void) memset(sc->chan_map, 0xff, sizeof (sc->chan_map)); 366 367 /* 368 * Initialize cpl handler. 369 */ 370 for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++) { 371 sc->cpl_handler[i] = cpl_not_handled; 372 } 373 374 for (i = 0; i < ARRAY_SIZE(sc->fw_msg_handler); i++) { 375 sc->fw_msg_handler[i] = fw_msg_not_handled; 376 } 377 378 for (i = 0; i < NCHAN; i++) { 379 (void) snprintf(name, sizeof (name), "%s-%d", "reclaim", i); 380 sc->tq[i] = ddi_taskq_create(sc->dip, name, 1, 381 TASKQ_DEFAULTPRI, 0); 382 383 if (sc->tq[i] == NULL) { 384 cxgb_printf(dip, CE_WARN, "failed to create taskqs"); 385 rc = DDI_FAILURE; 386 goto done; 387 } 388 } 389 390 /* 391 * Prepare the adapter for operation. 392 */ 393 rc = -t4_prep_adapter(sc, false); 394 if (rc != 0) { 395 cxgb_printf(dip, CE_WARN, "failed to prepare adapter: %d", rc); 396 goto done; 397 } 398 399 /* 400 * Enable BAR1 access. 401 */ 402 sc->doorbells |= DOORBELL_KDB; 403 rc = ddi_regs_map_setup(dip, 2, &sc->reg1p, 0, 0, &da1, &sc->reg1h); 404 if (rc != DDI_SUCCESS) { 405 cxgb_printf(dip, CE_WARN, 406 "failed to map BAR1 device registers: %d", rc); 407 goto done; 408 } else { 409 if (is_t5(sc->params.chip)) { 410 sc->doorbells |= DOORBELL_UDB; 411 if (prp->wc) { 412 /* 413 * Enable write combining on BAR2. This is the 414 * userspace doorbell BAR and is split into 128B 415 * (UDBS_SEG_SIZE) doorbell regions, each 416 * associated with an egress queue. The first 417 * 64B has the doorbell and the second 64B can 418 * be used to submit a tx work request with an 419 * implicit doorbell. 420 */ 421 sc->doorbells &= ~DOORBELL_UDB; 422 sc->doorbells |= (DOORBELL_WCWR | 423 DOORBELL_UDBWC); 424 t4_write_reg(sc, A_SGE_STAT_CFG, 425 V_STATSOURCE_T5(7) | V_STATMODE(0)); 426 } 427 } 428 } 429 430 /* 431 * Do this really early. Note that minor number = instance. 432 */ 433 (void) snprintf(name, sizeof (name), "%s,%d", T4_NEXUS_NAME, instance); 434 rc = ddi_create_minor_node(dip, name, S_IFCHR, instance, 435 DDI_NT_NEXUS, 0); 436 if (rc != DDI_SUCCESS) { 437 cxgb_printf(dip, CE_WARN, 438 "failed to create device node: %d", rc); 439 rc = DDI_SUCCESS; /* carry on */ 440 } 441 442 /* Do this early. Memory window is required for loading config file. */ 443 setup_memwin(sc); 444 445 /* Prepare the firmware for operation */ 446 rc = prep_firmware(sc); 447 if (rc != 0) 448 goto done; /* error message displayed already */ 449 450 rc = adap__pre_init_tweaks(sc); 451 if (rc != 0) 452 goto done; 453 454 rc = get_params__pre_init(sc); 455 if (rc != 0) 456 goto done; /* error message displayed already */ 457 458 t4_sge_init(sc); 459 460 if (sc->flags & MASTER_PF) { 461 /* get basic stuff going */ 462 rc = -t4_fw_initialize(sc, sc->mbox); 463 if (rc != 0) { 464 cxgb_printf(sc->dip, CE_WARN, 465 "early init failed: %d.\n", rc); 466 goto done; 467 } 468 } 469 470 rc = get_params__post_init(sc); 471 if (rc != 0) 472 goto done; /* error message displayed already */ 473 474 rc = set_params__post_init(sc); 475 if (rc != 0) 476 goto done; /* error message displayed already */ 477 478 /* 479 * TODO: This is the place to call t4_set_filter_mode() 480 */ 481 482 /* tweak some settings */ 483 t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) | 484 V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) | 485 V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9)); 486 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); 487 488 /* 489 * Work-around for bug 2619 490 * Set DisableVlan field in TP_RSS_CONFIG_VRT register so that the 491 * VLAN tag extraction is disabled. 492 */ 493 t4_set_reg_field(sc, A_TP_RSS_CONFIG_VRT, F_DISABLEVLAN, F_DISABLEVLAN); 494 495 /* Store filter mode */ 496 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1, 497 A_TP_VLAN_PRI_MAP); 498 499 /* 500 * First pass over all the ports - allocate VIs and initialize some 501 * basic parameters like mac address, port type, etc. We also figure 502 * out whether a port is 10G or 1G and use that information when 503 * calculating how many interrupts to attempt to allocate. 504 */ 505 for_each_port(sc, i) { 506 struct port_info *pi; 507 508 pi = kmem_zalloc(sizeof (*pi), KM_SLEEP); 509 sc->port[i] = pi; 510 511 /* These must be set before t4_port_init */ 512 pi->adapter = sc; 513 /* LINTED: E_ASSIGN_NARROW_CONV */ 514 pi->port_id = i; 515 } 516 517 /* Allocate the vi and initialize parameters like mac addr */ 518 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0); 519 if (rc) { 520 cxgb_printf(dip, CE_WARN, "unable to initialize port: %d", rc); 521 goto done; 522 } 523 524 for_each_port(sc, i) { 525 struct port_info *pi = sc->port[i]; 526 527 mutex_init(&pi->lock, NULL, MUTEX_DRIVER, NULL); 528 pi->mtu = ETHERMTU; 529 530 if (is_10XG_port(pi)) { 531 nxg++; 532 pi->tmr_idx = prp->tmr_idx_10g; 533 pi->pktc_idx = prp->pktc_idx_10g; 534 } else { 535 n1g++; 536 pi->tmr_idx = prp->tmr_idx_1g; 537 pi->pktc_idx = prp->pktc_idx_1g; 538 } 539 540 pi->xact_addr_filt = -1; 541 t4_mc_init(pi); 542 543 setbit(&sc->registered_device_map, i); 544 } 545 546 (void) remove_extra_props(sc, nxg, n1g); 547 548 if (sc->registered_device_map == 0) { 549 cxgb_printf(dip, CE_WARN, "no usable ports"); 550 rc = DDI_FAILURE; 551 goto done; 552 } 553 554 rc = cfg_itype_and_nqueues(sc, nxg, n1g, &iaq); 555 if (rc != 0) 556 goto done; /* error message displayed already */ 557 558 sc->intr_type = iaq.intr_type; 559 sc->intr_count = iaq.nirq; 560 561 if (sc->props.multi_rings && (sc->intr_type != DDI_INTR_TYPE_MSIX)) { 562 sc->props.multi_rings = 0; 563 cxgb_printf(dip, CE_WARN, 564 "Multiple rings disabled as interrupt type is not MSI-X"); 565 } 566 567 if (sc->props.multi_rings && iaq.intr_fwd) { 568 sc->props.multi_rings = 0; 569 cxgb_printf(dip, CE_WARN, 570 "Multiple rings disabled as interrupts are forwarded"); 571 } 572 573 if (!sc->props.multi_rings) { 574 iaq.ntxq10g = 1; 575 iaq.ntxq1g = 1; 576 } 577 s = &sc->sge; 578 s->nrxq = nxg * iaq.nrxq10g + n1g * iaq.nrxq1g; 579 s->ntxq = nxg * iaq.ntxq10g + n1g * iaq.ntxq1g; 580 s->neq = s->ntxq + s->nrxq; /* the fl in an rxq is an eq */ 581 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */ 582 if (iaq.intr_fwd != 0) 583 sc->flags |= INTR_FWD; 584 s->rxq = kmem_zalloc(s->nrxq * sizeof (struct sge_rxq), KM_SLEEP); 585 s->txq = kmem_zalloc(s->ntxq * sizeof (struct sge_txq), KM_SLEEP); 586 s->iqmap = 587 kmem_zalloc(s->iqmap_sz * sizeof (struct sge_iq *), KM_SLEEP); 588 s->eqmap = 589 kmem_zalloc(s->eqmap_sz * sizeof (struct sge_eq *), KM_SLEEP); 590 591 sc->intr_handle = 592 kmem_zalloc(sc->intr_count * sizeof (ddi_intr_handle_t), KM_SLEEP); 593 594 /* 595 * Second pass over the ports. This time we know the number of rx and 596 * tx queues that each port should get. 597 */ 598 rqidx = tqidx = 0; 599 for_each_port(sc, i) { 600 struct port_info *pi = sc->port[i]; 601 602 if (pi == NULL) 603 continue; 604 605 t4_mc_cb_init(pi); 606 /* LINTED: E_ASSIGN_NARROW_CONV */ 607 pi->first_rxq = rqidx; 608 /* LINTED: E_ASSIGN_NARROW_CONV */ 609 pi->nrxq = (is_10XG_port(pi)) ? iaq.nrxq10g 610 : iaq.nrxq1g; 611 /* LINTED: E_ASSIGN_NARROW_CONV */ 612 pi->first_txq = tqidx; 613 /* LINTED: E_ASSIGN_NARROW_CONV */ 614 pi->ntxq = (is_10XG_port(pi)) ? iaq.ntxq10g 615 : iaq.ntxq1g; 616 617 rqidx += pi->nrxq; 618 tqidx += pi->ntxq; 619 620 /* 621 * Enable hw checksumming and LSO for all ports by default. 622 * They can be disabled using ndd (hw_csum and hw_lso). 623 */ 624 pi->features |= (CXGBE_HW_CSUM | CXGBE_HW_LSO); 625 } 626 627 /* 628 * Setup Interrupts. 629 */ 630 631 i = 0; 632 rc = ddi_intr_alloc(dip, sc->intr_handle, sc->intr_type, 0, 633 sc->intr_count, &i, DDI_INTR_ALLOC_STRICT); 634 if (rc != DDI_SUCCESS) { 635 cxgb_printf(dip, CE_WARN, 636 "failed to allocate %d interrupt(s) of type %d: %d, %d", 637 sc->intr_count, sc->intr_type, rc, i); 638 goto done; 639 } 640 ASSERT(sc->intr_count == i); /* allocation was STRICT */ 641 (void) ddi_intr_get_cap(sc->intr_handle[0], &sc->intr_cap); 642 (void) ddi_intr_get_pri(sc->intr_handle[0], &sc->intr_pri); 643 if (sc->intr_count == 1) { 644 ASSERT(sc->flags & INTR_FWD); 645 (void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_all, sc, 646 &s->fwq); 647 } else { 648 /* Multiple interrupts. The first one is always error intr */ 649 (void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_err, sc, 650 NULL); 651 irq++; 652 653 /* The second one is always the firmware event queue */ 654 (void) ddi_intr_add_handler(sc->intr_handle[1], t4_intr, sc, 655 &s->fwq); 656 irq++; 657 /* 658 * Note that if INTR_FWD is set then either the NIC rx 659 * queues or (exclusive or) the TOE rx queueus will be taking 660 * direct interrupts. 661 * 662 * There is no need to check for is_offload(sc) as nofldrxq 663 * will be 0 if offload is disabled. 664 */ 665 for_each_port(sc, i) { 666 struct port_info *pi = sc->port[i]; 667 struct sge_rxq *rxq; 668 rxq = &s->rxq[pi->first_rxq]; 669 for (q = 0; q < pi->nrxq; q++, rxq++) { 670 (void) ddi_intr_add_handler( 671 sc->intr_handle[irq], t4_intr, sc, 672 &rxq->iq); 673 irq++; 674 } 675 } 676 677 } 678 sc->flags |= INTR_ALLOCATED; 679 680 if ((rc = ksensor_create_scalar_pcidev(dip, SENSOR_KIND_TEMPERATURE, 681 &t4_temp_ops, sc, "temp", &sc->temp_sensor)) != 0) { 682 cxgb_printf(dip, CE_WARN, "failed to create temperature " 683 "sensor: %d", rc); 684 rc = DDI_FAILURE; 685 goto done; 686 } 687 688 if ((rc = ksensor_create_scalar_pcidev(dip, SENSOR_KIND_VOLTAGE, 689 &t4_volt_ops, sc, "vdd", &sc->volt_sensor)) != 0) { 690 cxgb_printf(dip, CE_WARN, "failed to create voltage " 691 "sensor: %d", rc); 692 rc = DDI_FAILURE; 693 goto done; 694 } 695 696 697 if ((rc = ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &t4_ufm_ops, 698 &sc->ufm_hdl, sc)) != 0) { 699 cxgb_printf(dip, CE_WARN, "failed to enable UFM ops: %d", rc); 700 rc = DDI_FAILURE; 701 goto done; 702 } 703 ddi_ufm_update(sc->ufm_hdl); 704 ddi_report_dev(dip); 705 706 /* 707 * Hardware/Firmware/etc. Version/Revision IDs. 708 */ 709 t4_dump_version_info(sc); 710 711 cxgb_printf(dip, CE_NOTE, "(%d rxq, %d txq total) %d %s.", 712 rqidx, tqidx, sc->intr_count, 713 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" : 714 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" : 715 "fixed interrupt"); 716 717 sc->ksp = setup_kstats(sc); 718 sc->ksp_stat = setup_wc_kstats(sc); 719 sc->params.drv_memwin = MEMWIN_NIC; 720 721 done: 722 if (rc != DDI_SUCCESS) { 723 (void) t4_devo_detach(dip, DDI_DETACH); 724 725 /* rc may have errno style errors or DDI errors */ 726 rc = DDI_FAILURE; 727 } 728 729 return (rc); 730 } 731 732 static int 733 t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 734 { 735 int instance, i; 736 struct adapter *sc; 737 struct port_info *pi; 738 struct sge *s; 739 740 if (cmd != DDI_DETACH) 741 return (DDI_FAILURE); 742 743 instance = ddi_get_instance(dip); 744 sc = ddi_get_soft_state(t4_list, instance); 745 if (sc == NULL) 746 return (DDI_SUCCESS); 747 748 if (sc->flags & FULL_INIT_DONE) { 749 t4_intr_disable(sc); 750 for_each_port(sc, i) { 751 pi = sc->port[i]; 752 if (pi && pi->flags & PORT_INIT_DONE) 753 (void) port_full_uninit(pi); 754 } 755 (void) adapter_full_uninit(sc); 756 } 757 758 /* Safe to call no matter what */ 759 if (sc->ufm_hdl != NULL) { 760 ddi_ufm_fini(sc->ufm_hdl); 761 sc->ufm_hdl = NULL; 762 } 763 (void) ksensor_remove(dip, KSENSOR_ALL_IDS); 764 ddi_prop_remove_all(dip); 765 ddi_remove_minor_node(dip, NULL); 766 767 for (i = 0; i < NCHAN; i++) { 768 if (sc->tq[i]) { 769 ddi_taskq_wait(sc->tq[i]); 770 ddi_taskq_destroy(sc->tq[i]); 771 } 772 } 773 774 if (sc->ksp != NULL) 775 kstat_delete(sc->ksp); 776 if (sc->ksp_stat != NULL) 777 kstat_delete(sc->ksp_stat); 778 779 s = &sc->sge; 780 if (s->rxq != NULL) 781 kmem_free(s->rxq, s->nrxq * sizeof (struct sge_rxq)); 782 if (s->txq != NULL) 783 kmem_free(s->txq, s->ntxq * sizeof (struct sge_txq)); 784 if (s->iqmap != NULL) 785 kmem_free(s->iqmap, s->iqmap_sz * sizeof (struct sge_iq *)); 786 if (s->eqmap != NULL) 787 kmem_free(s->eqmap, s->eqmap_sz * sizeof (struct sge_eq *)); 788 789 if (s->rxbuf_cache != NULL) 790 kmem_cache_destroy(s->rxbuf_cache); 791 792 if (sc->flags & INTR_ALLOCATED) { 793 for (i = 0; i < sc->intr_count; i++) { 794 (void) ddi_intr_remove_handler(sc->intr_handle[i]); 795 (void) ddi_intr_free(sc->intr_handle[i]); 796 } 797 sc->flags &= ~INTR_ALLOCATED; 798 } 799 800 if (sc->intr_handle != NULL) { 801 kmem_free(sc->intr_handle, 802 sc->intr_count * sizeof (*sc->intr_handle)); 803 } 804 805 for_each_port(sc, i) { 806 pi = sc->port[i]; 807 if (pi != NULL) { 808 mutex_destroy(&pi->lock); 809 kmem_free(pi, sizeof (*pi)); 810 clrbit(&sc->registered_device_map, i); 811 } 812 } 813 814 if (sc->flags & FW_OK) 815 (void) t4_fw_bye(sc, sc->mbox); 816 817 if (sc->reg1h != NULL) 818 ddi_regs_map_free(&sc->reg1h); 819 820 if (sc->regh != NULL) 821 ddi_regs_map_free(&sc->regh); 822 823 if (sc->pci_regh != NULL) 824 pci_config_teardown(&sc->pci_regh); 825 826 mutex_enter(&t4_adapter_list_lock); 827 SLIST_REMOVE(&t4_adapter_list, sc, adapter, link); 828 mutex_exit(&t4_adapter_list_lock); 829 830 mutex_destroy(&sc->mbox_lock); 831 mutex_destroy(&sc->lock); 832 cv_destroy(&sc->cv); 833 mutex_destroy(&sc->sfl_lock); 834 835 #ifdef DEBUG 836 bzero(sc, sizeof (*sc)); 837 #endif 838 ddi_soft_state_free(t4_list, instance); 839 840 return (DDI_SUCCESS); 841 } 842 843 static int 844 t4_devo_quiesce(dev_info_t *dip) 845 { 846 int instance; 847 struct adapter *sc; 848 849 instance = ddi_get_instance(dip); 850 sc = ddi_get_soft_state(t4_list, instance); 851 if (sc == NULL) 852 return (DDI_SUCCESS); 853 854 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 855 t4_intr_disable(sc); 856 t4_write_reg(sc, A_PL_RST, F_PIORSTMODE | F_PIORST); 857 858 return (DDI_SUCCESS); 859 } 860 861 static int 862 t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, void *arg, 863 void *result) 864 { 865 char s[4]; 866 struct port_info *pi; 867 dev_info_t *child = (dev_info_t *)arg; 868 869 switch (op) { 870 case DDI_CTLOPS_REPORTDEV: 871 pi = ddi_get_parent_data(rdip); 872 pi->instance = ddi_get_instance(dip); 873 pi->child_inst = ddi_get_instance(rdip); 874 cmn_err(CE_CONT, "?%s%d is port %s on %s%d\n", 875 ddi_node_name(rdip), ddi_get_instance(rdip), 876 ddi_get_name_addr(rdip), ddi_driver_name(dip), 877 ddi_get_instance(dip)); 878 return (DDI_SUCCESS); 879 880 case DDI_CTLOPS_INITCHILD: 881 pi = ddi_get_parent_data(child); 882 if (pi == NULL) 883 return (DDI_NOT_WELL_FORMED); 884 (void) snprintf(s, sizeof (s), "%d", pi->port_id); 885 ddi_set_name_addr(child, s); 886 return (DDI_SUCCESS); 887 888 case DDI_CTLOPS_UNINITCHILD: 889 ddi_set_name_addr(child, NULL); 890 return (DDI_SUCCESS); 891 892 case DDI_CTLOPS_ATTACH: 893 case DDI_CTLOPS_DETACH: 894 return (DDI_SUCCESS); 895 896 default: 897 return (ddi_ctlops(dip, rdip, op, arg, result)); 898 } 899 } 900 901 static int 902 t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, void *arg, 903 dev_info_t **cdipp) 904 { 905 int instance, i; 906 struct adapter *sc; 907 908 instance = ddi_get_instance(dip); 909 sc = ddi_get_soft_state(t4_list, instance); 910 911 if (op == BUS_CONFIG_ONE) { 912 char *c; 913 914 /* 915 * arg is something like "cxgb@0" where 0 is the port_id hanging 916 * off this nexus. 917 */ 918 919 c = arg; 920 while (*(c + 1)) 921 c++; 922 923 /* There should be exactly 1 digit after '@' */ 924 if (*(c - 1) != '@') 925 return (NDI_FAILURE); 926 927 i = *c - '0'; 928 929 if (add_child_node(sc, i) != 0) 930 return (NDI_FAILURE); 931 932 flags |= NDI_ONLINE_ATTACH; 933 934 } else if (op == BUS_CONFIG_ALL || op == BUS_CONFIG_DRIVER) { 935 /* Allocate and bind all child device nodes */ 936 for_each_port(sc, i) 937 (void) add_child_node(sc, i); 938 flags |= NDI_ONLINE_ATTACH; 939 } 940 941 return (ndi_busop_bus_config(dip, flags, op, arg, cdipp, 0)); 942 } 943 944 static int 945 t4_bus_unconfig(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, 946 void *arg) 947 { 948 int instance, i, rc; 949 struct adapter *sc; 950 951 instance = ddi_get_instance(dip); 952 sc = ddi_get_soft_state(t4_list, instance); 953 954 if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ALL || 955 op == BUS_UNCONFIG_DRIVER) 956 flags |= NDI_UNCONFIG; 957 958 rc = ndi_busop_bus_unconfig(dip, flags, op, arg); 959 if (rc != 0) 960 return (rc); 961 962 if (op == BUS_UNCONFIG_ONE) { 963 char *c; 964 965 c = arg; 966 while (*(c + 1)) 967 c++; 968 969 if (*(c - 1) != '@') 970 return (NDI_SUCCESS); 971 972 i = *c - '0'; 973 974 rc = remove_child_node(sc, i); 975 976 } else if (op == BUS_UNCONFIG_ALL || op == BUS_UNCONFIG_DRIVER) { 977 978 for_each_port(sc, i) 979 (void) remove_child_node(sc, i); 980 } 981 982 return (rc); 983 } 984 985 /* ARGSUSED */ 986 static int 987 t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp) 988 { 989 struct adapter *sc; 990 991 if (otyp != OTYP_CHR) 992 return (EINVAL); 993 994 sc = ddi_get_soft_state(t4_list, getminor(*devp)); 995 if (sc == NULL) 996 return (ENXIO); 997 998 return (atomic_cas_uint(&sc->open, 0, EBUSY)); 999 } 1000 1001 /* ARGSUSED */ 1002 static int 1003 t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp) 1004 { 1005 struct adapter *sc; 1006 1007 sc = ddi_get_soft_state(t4_list, getminor(dev)); 1008 if (sc == NULL) 1009 return (EINVAL); 1010 1011 (void) atomic_swap_uint(&sc->open, 0); 1012 return (0); 1013 } 1014 1015 /* ARGSUSED */ 1016 static int 1017 t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp, int *rp) 1018 { 1019 int instance; 1020 struct adapter *sc; 1021 void *data = (void *)d; 1022 1023 if (crgetuid(credp) != 0) 1024 return (EPERM); 1025 1026 instance = getminor(dev); 1027 sc = ddi_get_soft_state(t4_list, instance); 1028 if (sc == NULL) 1029 return (EINVAL); 1030 1031 return (t4_ioctl(sc, cmd, data, mode)); 1032 } 1033 1034 static unsigned int 1035 getpf(struct adapter *sc) 1036 { 1037 int rc, *data; 1038 uint_t n, pf; 1039 1040 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip, 1041 DDI_PROP_DONTPASS, "reg", &data, &n); 1042 if (rc != DDI_SUCCESS) { 1043 cxgb_printf(sc->dip, CE_WARN, 1044 "failed to lookup \"reg\" property: %d", rc); 1045 return (0xff); 1046 } 1047 1048 pf = PCI_REG_FUNC_G(data[0]); 1049 ddi_prop_free(data); 1050 1051 return (pf); 1052 } 1053 1054 /* 1055 * Install a compatible firmware (if required), establish contact with it, 1056 * become the master, and reset the device. 1057 */ 1058 static int 1059 prep_firmware(struct adapter *sc) 1060 { 1061 int rc; 1062 size_t fw_size; 1063 int reset = 1; 1064 enum dev_state state; 1065 unsigned char *fw_data; 1066 struct fw_hdr *card_fw, *hdr; 1067 const char *fw_file = NULL; 1068 firmware_handle_t fw_hdl; 1069 struct fw_info fi, *fw_info = &fi; 1070 1071 struct driver_properties *p = &sc->props; 1072 1073 /* Contact firmware, request master */ 1074 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state); 1075 if (rc < 0) { 1076 rc = -rc; 1077 cxgb_printf(sc->dip, CE_WARN, 1078 "failed to connect to the firmware: %d.", rc); 1079 return (rc); 1080 } 1081 1082 if (rc == sc->mbox) 1083 sc->flags |= MASTER_PF; 1084 1085 /* We may need FW version info for later reporting */ 1086 t4_get_version_info(sc); 1087 1088 switch (CHELSIO_CHIP_VERSION(sc->params.chip)) { 1089 case CHELSIO_T4: 1090 fw_file = "t4fw.bin"; 1091 break; 1092 case CHELSIO_T5: 1093 fw_file = "t5fw.bin"; 1094 break; 1095 case CHELSIO_T6: 1096 fw_file = "t6fw.bin"; 1097 break; 1098 default: 1099 cxgb_printf(sc->dip, CE_WARN, "Adapter type not supported\n"); 1100 return (EINVAL); 1101 } 1102 1103 if (firmware_open(T4_PORT_NAME, fw_file, &fw_hdl) != 0) { 1104 cxgb_printf(sc->dip, CE_WARN, "Could not open %s\n", fw_file); 1105 return (EINVAL); 1106 } 1107 1108 fw_size = firmware_get_size(fw_hdl); 1109 1110 if (fw_size < sizeof (struct fw_hdr)) { 1111 cxgb_printf(sc->dip, CE_WARN, "%s is too small (%ld bytes)\n", 1112 fw_file, fw_size); 1113 firmware_close(fw_hdl); 1114 return (EINVAL); 1115 } 1116 1117 if (fw_size > FLASH_FW_MAX_SIZE) { 1118 cxgb_printf(sc->dip, CE_WARN, 1119 "%s is too large (%ld bytes, max allowed is %ld)\n", 1120 fw_file, fw_size, FLASH_FW_MAX_SIZE); 1121 firmware_close(fw_hdl); 1122 return (EFBIG); 1123 } 1124 1125 fw_data = kmem_zalloc(fw_size, KM_SLEEP); 1126 if (firmware_read(fw_hdl, 0, fw_data, fw_size) != 0) { 1127 cxgb_printf(sc->dip, CE_WARN, "Failed to read from %s\n", 1128 fw_file); 1129 firmware_close(fw_hdl); 1130 kmem_free(fw_data, fw_size); 1131 return (EINVAL); 1132 } 1133 firmware_close(fw_hdl); 1134 1135 bzero(fw_info, sizeof (*fw_info)); 1136 fw_info->chip = CHELSIO_CHIP_VERSION(sc->params.chip); 1137 1138 hdr = (struct fw_hdr *)fw_data; 1139 fw_info->fw_hdr.fw_ver = hdr->fw_ver; 1140 fw_info->fw_hdr.chip = hdr->chip; 1141 fw_info->fw_hdr.intfver_nic = hdr->intfver_nic; 1142 fw_info->fw_hdr.intfver_vnic = hdr->intfver_vnic; 1143 fw_info->fw_hdr.intfver_ofld = hdr->intfver_ofld; 1144 fw_info->fw_hdr.intfver_ri = hdr->intfver_ri; 1145 fw_info->fw_hdr.intfver_iscsipdu = hdr->intfver_iscsipdu; 1146 fw_info->fw_hdr.intfver_iscsi = hdr->intfver_iscsi; 1147 fw_info->fw_hdr.intfver_fcoepdu = hdr->intfver_fcoepdu; 1148 fw_info->fw_hdr.intfver_fcoe = hdr->intfver_fcoe; 1149 1150 /* allocate memory to read the header of the firmware on the card */ 1151 card_fw = kmem_zalloc(sizeof (*card_fw), KM_SLEEP); 1152 1153 rc = -t4_prep_fw(sc, fw_info, fw_data, fw_size, card_fw, 1154 p->t4_fw_install, state, &reset); 1155 1156 kmem_free(card_fw, sizeof (*card_fw)); 1157 kmem_free(fw_data, fw_size); 1158 1159 if (rc != 0) { 1160 cxgb_printf(sc->dip, CE_WARN, 1161 "failed to install firmware: %d", rc); 1162 return (rc); 1163 } else { 1164 /* refresh */ 1165 (void) t4_check_fw_version(sc); 1166 } 1167 1168 /* Reset device */ 1169 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST); 1170 if (rc != 0) { 1171 cxgb_printf(sc->dip, CE_WARN, 1172 "firmware reset failed: %d.", rc); 1173 if (rc != ETIMEDOUT && rc != EIO) 1174 (void) t4_fw_bye(sc, sc->mbox); 1175 return (rc); 1176 } 1177 1178 /* Partition adapter resources as specified in the config file. */ 1179 if (sc->flags & MASTER_PF) { 1180 /* Handle default vs special T4 config file */ 1181 1182 rc = partition_resources(sc); 1183 if (rc != 0) 1184 goto err; /* error message displayed already */ 1185 } 1186 1187 sc->flags |= FW_OK; 1188 return (0); 1189 err: 1190 return (rc); 1191 1192 } 1193 1194 static const struct memwin t4_memwin[] = { 1195 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 1196 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 1197 { MEMWIN2_BASE, MEMWIN2_APERTURE } 1198 }; 1199 1200 static const struct memwin t5_memwin[] = { 1201 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 1202 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 1203 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 1204 }; 1205 1206 #define FW_PARAM_DEV(param) \ 1207 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 1208 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 1209 #define FW_PARAM_PFVF(param) \ 1210 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 1211 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 1212 1213 /* 1214 * Verify that the memory range specified by the memtype/offset/len pair is 1215 * valid and lies entirely within the memtype specified. The global address of 1216 * the start of the range is returned in addr. 1217 */ 1218 int 1219 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len, 1220 uint32_t *addr) 1221 { 1222 uint32_t em, addr_len, maddr, mlen; 1223 1224 /* Memory can only be accessed in naturally aligned 4 byte units */ 1225 if (off & 3 || len & 3 || len == 0) 1226 return (EINVAL); 1227 1228 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 1229 switch (mtype) { 1230 case MEM_EDC0: 1231 if (!(em & F_EDRAM0_ENABLE)) 1232 return (EINVAL); 1233 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 1234 maddr = G_EDRAM0_BASE(addr_len) << 20; 1235 mlen = G_EDRAM0_SIZE(addr_len) << 20; 1236 break; 1237 case MEM_EDC1: 1238 if (!(em & F_EDRAM1_ENABLE)) 1239 return (EINVAL); 1240 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 1241 maddr = G_EDRAM1_BASE(addr_len) << 20; 1242 mlen = G_EDRAM1_SIZE(addr_len) << 20; 1243 break; 1244 case MEM_MC: 1245 if (!(em & F_EXT_MEM_ENABLE)) 1246 return (EINVAL); 1247 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 1248 maddr = G_EXT_MEM_BASE(addr_len) << 20; 1249 mlen = G_EXT_MEM_SIZE(addr_len) << 20; 1250 break; 1251 case MEM_MC1: 1252 if (is_t4(sc->params.chip) || !(em & F_EXT_MEM1_ENABLE)) 1253 return (EINVAL); 1254 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 1255 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 1256 mlen = G_EXT_MEM1_SIZE(addr_len) << 20; 1257 break; 1258 default: 1259 return (EINVAL); 1260 } 1261 1262 if (mlen > 0 && off < mlen && off + len <= mlen) { 1263 *addr = maddr + off; /* global address */ 1264 return (0); 1265 } 1266 1267 return (EFAULT); 1268 } 1269 1270 void 1271 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture) 1272 { 1273 const struct memwin *mw; 1274 1275 if (is_t4(sc->params.chip)) { 1276 mw = &t4_memwin[win]; 1277 } else { 1278 mw = &t5_memwin[win]; 1279 } 1280 1281 if (base != NULL) 1282 *base = mw->base; 1283 if (aperture != NULL) 1284 *aperture = mw->aperture; 1285 } 1286 1287 /* 1288 * Upload configuration file to card's memory. 1289 */ 1290 static int 1291 upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma) 1292 { 1293 int rc = 0; 1294 size_t cflen, cfbaselen; 1295 uint_t i, n; 1296 uint32_t param, val, addr, mtype, maddr; 1297 uint32_t off, mw_base, mw_aperture; 1298 uint32_t *cfdata, *cfbase; 1299 firmware_handle_t fw_hdl; 1300 const char *cfg_file = NULL; 1301 1302 /* Figure out where the firmware wants us to upload it. */ 1303 param = FW_PARAM_DEV(CF); 1304 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 1305 if (rc != 0) { 1306 /* Firmwares without config file support will fail this way */ 1307 cxgb_printf(sc->dip, CE_WARN, 1308 "failed to query config file location: %d.\n", rc); 1309 return (rc); 1310 } 1311 *mt = mtype = G_FW_PARAMS_PARAM_Y(val); 1312 *ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16; 1313 1314 switch (CHELSIO_CHIP_VERSION(sc->params.chip)) { 1315 case CHELSIO_T4: 1316 cfg_file = "t4fw_cfg.txt"; 1317 break; 1318 case CHELSIO_T5: 1319 cfg_file = "t5fw_cfg.txt"; 1320 break; 1321 case CHELSIO_T6: 1322 cfg_file = "t6fw_cfg.txt"; 1323 break; 1324 default: 1325 cxgb_printf(sc->dip, CE_WARN, "Invalid Adapter detected\n"); 1326 return (EINVAL); 1327 } 1328 1329 if (firmware_open(T4_PORT_NAME, cfg_file, &fw_hdl) != 0) { 1330 cxgb_printf(sc->dip, CE_WARN, "Could not open %s\n", cfg_file); 1331 return (EINVAL); 1332 } 1333 1334 cflen = firmware_get_size(fw_hdl); 1335 /* 1336 * Truncate the length to a multiple of uint32_ts. The configuration 1337 * text files have trailing comments (and hopefully always will) so 1338 * nothing important is lost. 1339 */ 1340 cflen &= ~3; 1341 1342 if (cflen > FLASH_CFG_MAX_SIZE) { 1343 cxgb_printf(sc->dip, CE_WARN, 1344 "config file too long (%d, max allowed is %d). ", 1345 cflen, FLASH_CFG_MAX_SIZE); 1346 firmware_close(fw_hdl); 1347 return (EFBIG); 1348 } 1349 1350 rc = validate_mt_off_len(sc, mtype, maddr, cflen, &addr); 1351 if (rc != 0) { 1352 cxgb_printf(sc->dip, CE_WARN, 1353 "%s: addr (%d/0x%x) or len %d is not valid: %d. " 1354 "Will try to use the config on the card, if any.\n", 1355 __func__, mtype, maddr, cflen, rc); 1356 firmware_close(fw_hdl); 1357 return (EFAULT); 1358 } 1359 1360 cfbaselen = cflen; 1361 cfbase = cfdata = kmem_zalloc(cflen, KM_SLEEP); 1362 if (firmware_read(fw_hdl, 0, cfdata, cflen) != 0) { 1363 cxgb_printf(sc->dip, CE_WARN, "Failed to read from %s\n", 1364 cfg_file); 1365 firmware_close(fw_hdl); 1366 kmem_free(cfbase, cfbaselen); 1367 return (EINVAL); 1368 } 1369 firmware_close(fw_hdl); 1370 1371 memwin_info(sc, 2, &mw_base, &mw_aperture); 1372 while (cflen) { 1373 off = position_memwin(sc, 2, addr); 1374 n = min(cflen, mw_aperture - off); 1375 for (i = 0; i < n; i += 4) 1376 t4_write_reg(sc, mw_base + off + i, *cfdata++); 1377 cflen -= n; 1378 addr += n; 1379 } 1380 1381 kmem_free(cfbase, cfbaselen); 1382 1383 return (rc); 1384 } 1385 1386 /* 1387 * Partition chip resources for use between various PFs, VFs, etc. This is done 1388 * by uploading the firmware configuration file to the adapter and instructing 1389 * the firmware to process it. 1390 */ 1391 static int 1392 partition_resources(struct adapter *sc) 1393 { 1394 int rc; 1395 struct fw_caps_config_cmd caps; 1396 uint32_t mtype, maddr, finicsum, cfcsum; 1397 1398 rc = upload_config_file(sc, &mtype, &maddr); 1399 if (rc != 0) { 1400 mtype = FW_MEMTYPE_CF_FLASH; 1401 maddr = t4_flash_cfg_addr(sc); 1402 } 1403 1404 bzero(&caps, sizeof (caps)); 1405 caps.op_to_write = BE_32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1406 F_FW_CMD_REQUEST | F_FW_CMD_READ); 1407 caps.cfvalid_to_len16 = BE_32(F_FW_CAPS_CONFIG_CMD_CFVALID | 1408 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 1409 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps)); 1410 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps); 1411 if (rc != 0) { 1412 cxgb_printf(sc->dip, CE_WARN, 1413 "failed to pre-process config file: %d.\n", rc); 1414 return (rc); 1415 } 1416 1417 finicsum = ntohl(caps.finicsum); 1418 cfcsum = ntohl(caps.cfcsum); 1419 if (finicsum != cfcsum) { 1420 cxgb_printf(sc->dip, CE_WARN, 1421 "WARNING: config file checksum mismatch: %08x %08x\n", 1422 finicsum, cfcsum); 1423 } 1424 sc->cfcsum = cfcsum; 1425 1426 /* TODO: Need to configure this correctly */ 1427 caps.toecaps = htons(FW_CAPS_CONFIG_TOE); 1428 caps.iscsicaps = 0; 1429 caps.rdmacaps = 0; 1430 caps.fcoecaps = 0; 1431 /* TODO: Disable VNIC cap for now */ 1432 caps.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); 1433 1434 caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1435 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 1436 caps.cfvalid_to_len16 = htonl(FW_LEN16(caps)); 1437 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), NULL); 1438 if (rc != 0) { 1439 cxgb_printf(sc->dip, CE_WARN, 1440 "failed to process config file: %d.\n", rc); 1441 return (rc); 1442 } 1443 1444 return (0); 1445 } 1446 1447 /* 1448 * Tweak configuration based on module parameters, etc. Most of these have 1449 * defaults assigned to them by Firmware Configuration Files (if we're using 1450 * them) but need to be explicitly set if we're using hard-coded 1451 * initialization. But even in the case of using Firmware Configuration 1452 * Files, we'd like to expose the ability to change these via module 1453 * parameters so these are essentially common tweaks/settings for 1454 * Configuration Files and hard-coded initialization ... 1455 */ 1456 static int 1457 adap__pre_init_tweaks(struct adapter *sc) 1458 { 1459 int rx_dma_offset = 2; /* Offset of RX packets into DMA buffers */ 1460 1461 /* 1462 * Fix up various Host-Dependent Parameters like Page Size, Cache 1463 * Line Size, etc. The firmware default is for a 4KB Page Size and 1464 * 64B Cache Line Size ... 1465 */ 1466 (void) t4_fixup_host_params_compat(sc, PAGE_SIZE, _CACHE_LINE_SIZE, 1467 T5_LAST_REV); 1468 1469 t4_set_reg_field(sc, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT), 1470 V_PKTSHIFT(rx_dma_offset)); 1471 1472 return (0); 1473 } 1474 /* 1475 * Retrieve parameters that are needed (or nice to have) prior to calling 1476 * t4_sge_init and t4_fw_initialize. 1477 */ 1478 static int 1479 get_params__pre_init(struct adapter *sc) 1480 { 1481 int rc; 1482 uint32_t param[2], val[2]; 1483 struct fw_devlog_cmd cmd; 1484 struct devlog_params *dlog = &sc->params.devlog; 1485 1486 /* 1487 * Grab the raw VPD parameters. 1488 */ 1489 rc = -t4_get_raw_vpd_params(sc, &sc->params.vpd); 1490 if (rc != 0) { 1491 cxgb_printf(sc->dip, CE_WARN, 1492 "failed to query VPD parameters (pre_init): %d.\n", rc); 1493 return (rc); 1494 } 1495 1496 param[0] = FW_PARAM_DEV(PORTVEC); 1497 param[1] = FW_PARAM_DEV(CCLK); 1498 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 1499 if (rc != 0) { 1500 cxgb_printf(sc->dip, CE_WARN, 1501 "failed to query parameters (pre_init): %d.\n", rc); 1502 return (rc); 1503 } 1504 1505 sc->params.portvec = val[0]; 1506 sc->params.nports = 0; 1507 while (val[0]) { 1508 sc->params.nports++; 1509 val[0] &= val[0] - 1; 1510 } 1511 1512 sc->params.vpd.cclk = val[1]; 1513 1514 /* Read device log parameters. */ 1515 bzero(&cmd, sizeof (cmd)); 1516 cmd.op_to_write = htonl(V_FW_CMD_OP(FW_DEVLOG_CMD) | 1517 F_FW_CMD_REQUEST | F_FW_CMD_READ); 1518 cmd.retval_len16 = htonl(FW_LEN16(cmd)); 1519 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof (cmd), &cmd); 1520 if (rc != 0) { 1521 cxgb_printf(sc->dip, CE_WARN, 1522 "failed to get devlog parameters: %d.\n", rc); 1523 bzero(dlog, sizeof (*dlog)); 1524 rc = 0; /* devlog isn't critical for device operation */ 1525 } else { 1526 val[0] = ntohl(cmd.memtype_devlog_memaddr16_devlog); 1527 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]); 1528 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4; 1529 dlog->size = ntohl(cmd.memsize_devlog); 1530 } 1531 1532 return (rc); 1533 } 1534 1535 /* 1536 * Retrieve various parameters that are of interest to the driver. The device 1537 * has been initialized by the firmware at this point. 1538 */ 1539 static int 1540 get_params__post_init(struct adapter *sc) 1541 { 1542 int rc; 1543 uint32_t param[7], val[7]; 1544 struct fw_caps_config_cmd caps; 1545 1546 param[0] = FW_PARAM_PFVF(IQFLINT_START); 1547 param[1] = FW_PARAM_PFVF(EQ_START); 1548 param[2] = FW_PARAM_PFVF(FILTER_START); 1549 param[3] = FW_PARAM_PFVF(FILTER_END); 1550 param[4] = FW_PARAM_PFVF(L2T_START); 1551 param[5] = FW_PARAM_PFVF(L2T_END); 1552 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 1553 if (rc != 0) { 1554 cxgb_printf(sc->dip, CE_WARN, 1555 "failed to query parameters (post_init): %d.\n", rc); 1556 return (rc); 1557 } 1558 1559 /* LINTED: E_ASSIGN_NARROW_CONV */ 1560 sc->sge.iq_start = val[0]; 1561 sc->sge.eq_start = val[1]; 1562 sc->tids.ftid_base = val[2]; 1563 sc->tids.nftids = val[3] - val[2] + 1; 1564 sc->vres.l2t.start = val[4]; 1565 sc->vres.l2t.size = val[5] - val[4] + 1; 1566 1567 param[0] = FW_PARAM_PFVF(IQFLINT_END); 1568 param[1] = FW_PARAM_PFVF(EQ_END); 1569 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 1570 if (rc != 0) { 1571 cxgb_printf(sc->dip, CE_WARN, "failed to query eq/iq map " 1572 "size parameters (post_init): %d.\n", rc); 1573 return (rc); 1574 } 1575 1576 sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1; 1577 sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1; 1578 1579 /* get capabilites */ 1580 bzero(&caps, sizeof (caps)); 1581 caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1582 F_FW_CMD_REQUEST | F_FW_CMD_READ); 1583 caps.cfvalid_to_len16 = htonl(FW_LEN16(caps)); 1584 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps); 1585 if (rc != 0) { 1586 cxgb_printf(sc->dip, CE_WARN, 1587 "failed to get card capabilities: %d.\n", rc); 1588 return (rc); 1589 } 1590 1591 if (caps.toecaps != 0) { 1592 /* query offload-related parameters */ 1593 param[0] = FW_PARAM_DEV(NTID); 1594 param[1] = FW_PARAM_PFVF(SERVER_START); 1595 param[2] = FW_PARAM_PFVF(SERVER_END); 1596 param[3] = FW_PARAM_PFVF(TDDP_START); 1597 param[4] = FW_PARAM_PFVF(TDDP_END); 1598 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 1599 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 1600 if (rc != 0) { 1601 cxgb_printf(sc->dip, CE_WARN, 1602 "failed to query TOE parameters: %d.\n", rc); 1603 return (rc); 1604 } 1605 sc->tids.ntids = val[0]; 1606 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 1607 sc->tids.stid_base = val[1]; 1608 sc->tids.nstids = val[2] - val[1] + 1; 1609 sc->vres.ddp.start = val[3]; 1610 sc->vres.ddp.size = val[4] - val[3] + 1; 1611 sc->params.ofldq_wr_cred = val[5]; 1612 sc->params.offload = 1; 1613 } 1614 1615 rc = -t4_get_pfres(sc); 1616 if (rc != 0) { 1617 cxgb_printf(sc->dip, CE_WARN, 1618 "failed to query PF resource params: %d.\n", rc); 1619 return (rc); 1620 } 1621 1622 /* These are finalized by FW initialization, load their values now */ 1623 val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 1624 sc->params.tp.tre = G_TIMERRESOLUTION(val[0]); 1625 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]); 1626 t4_read_mtu_tbl(sc, sc->params.mtus, NULL); 1627 1628 return (rc); 1629 } 1630 1631 static int 1632 set_params__post_init(struct adapter *sc) 1633 { 1634 uint32_t param, val; 1635 1636 /* ask for encapsulated CPLs */ 1637 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 1638 val = 1; 1639 (void) t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 1640 1641 return (0); 1642 } 1643 1644 /* TODO: verify */ 1645 static void 1646 setup_memwin(struct adapter *sc) 1647 { 1648 pci_regspec_t *data; 1649 int rc; 1650 uint_t n; 1651 uintptr_t bar0; 1652 uintptr_t mem_win0_base, mem_win1_base, mem_win2_base; 1653 uintptr_t mem_win2_aperture; 1654 1655 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip, 1656 DDI_PROP_DONTPASS, "assigned-addresses", (int **)&data, &n); 1657 if (rc != DDI_SUCCESS) { 1658 cxgb_printf(sc->dip, CE_WARN, 1659 "failed to lookup \"assigned-addresses\" property: %d", rc); 1660 return; 1661 } 1662 n /= sizeof (*data); 1663 1664 bar0 = ((uint64_t)data[0].pci_phys_mid << 32) | data[0].pci_phys_low; 1665 ddi_prop_free(data); 1666 1667 if (is_t4(sc->params.chip)) { 1668 mem_win0_base = bar0 + MEMWIN0_BASE; 1669 mem_win1_base = bar0 + MEMWIN1_BASE; 1670 mem_win2_base = bar0 + MEMWIN2_BASE; 1671 mem_win2_aperture = MEMWIN2_APERTURE; 1672 } else { 1673 /* For T5, only relative offset inside the PCIe BAR is passed */ 1674 mem_win0_base = MEMWIN0_BASE; 1675 mem_win1_base = MEMWIN1_BASE; 1676 mem_win2_base = MEMWIN2_BASE_T5; 1677 mem_win2_aperture = MEMWIN2_APERTURE_T5; 1678 } 1679 1680 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0), 1681 mem_win0_base | V_BIR(0) | 1682 V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); 1683 1684 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1), 1685 mem_win1_base | V_BIR(0) | 1686 V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); 1687 1688 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2), 1689 mem_win2_base | V_BIR(0) | 1690 V_WINDOW(ilog2(mem_win2_aperture) - 10)); 1691 1692 /* flush */ 1693 (void) t4_read_reg(sc, 1694 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 1695 } 1696 1697 /* 1698 * Positions the memory window such that it can be used to access the specified 1699 * address in the chip's address space. The return value is the offset of addr 1700 * from the start of the window. 1701 */ 1702 uint32_t 1703 position_memwin(struct adapter *sc, int n, uint32_t addr) 1704 { 1705 uint32_t start, pf; 1706 uint32_t reg; 1707 1708 if (addr & 3) { 1709 cxgb_printf(sc->dip, CE_WARN, 1710 "addr (0x%x) is not at a 4B boundary.\n", addr); 1711 return (EFAULT); 1712 } 1713 1714 if (is_t4(sc->params.chip)) { 1715 pf = 0; 1716 start = addr & ~0xf; /* start must be 16B aligned */ 1717 } else { 1718 pf = V_PFNUM(sc->pf); 1719 start = addr & ~0x7f; /* start must be 128B aligned */ 1720 } 1721 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n); 1722 1723 t4_write_reg(sc, reg, start | pf); 1724 (void) t4_read_reg(sc, reg); 1725 1726 return (addr - start); 1727 } 1728 1729 1730 /* 1731 * Reads the named property and fills up the "data" array (which has at least 1732 * "count" elements). We first try and lookup the property for our dev_t and 1733 * then retry with DDI_DEV_T_ANY if it's not found. 1734 * 1735 * Returns non-zero if the property was found and "data" has been updated. 1736 */ 1737 static int 1738 prop_lookup_int_array(struct adapter *sc, char *name, int *data, uint_t count) 1739 { 1740 dev_info_t *dip = sc->dip; 1741 dev_t dev = sc->dev; 1742 int rc, *d; 1743 uint_t i, n; 1744 1745 rc = ddi_prop_lookup_int_array(dev, dip, DDI_PROP_DONTPASS, 1746 name, &d, &n); 1747 if (rc == DDI_PROP_SUCCESS) 1748 goto found; 1749 1750 if (rc != DDI_PROP_NOT_FOUND) { 1751 cxgb_printf(dip, CE_WARN, 1752 "failed to lookup property %s for minor %d: %d.", 1753 name, getminor(dev), rc); 1754 return (0); 1755 } 1756 1757 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1758 name, &d, &n); 1759 if (rc == DDI_PROP_SUCCESS) 1760 goto found; 1761 1762 if (rc != DDI_PROP_NOT_FOUND) { 1763 cxgb_printf(dip, CE_WARN, 1764 "failed to lookup property %s: %d.", name, rc); 1765 return (0); 1766 } 1767 1768 return (0); 1769 1770 found: 1771 if (n > count) { 1772 cxgb_printf(dip, CE_NOTE, 1773 "property %s has too many elements (%d), ignoring extras", 1774 name, n); 1775 } 1776 1777 for (i = 0; i < n && i < count; i++) 1778 data[i] = d[i]; 1779 ddi_prop_free(d); 1780 1781 return (1); 1782 } 1783 1784 static int 1785 prop_lookup_int(struct adapter *sc, char *name, int defval) 1786 { 1787 int rc; 1788 1789 rc = ddi_prop_get_int(sc->dev, sc->dip, DDI_PROP_DONTPASS, name, -1); 1790 if (rc != -1) 1791 return (rc); 1792 1793 return (ddi_prop_get_int(DDI_DEV_T_ANY, sc->dip, DDI_PROP_DONTPASS, 1794 name, defval)); 1795 } 1796 1797 static int 1798 init_driver_props(struct adapter *sc, struct driver_properties *p) 1799 { 1800 dev_t dev = sc->dev; 1801 dev_info_t *dip = sc->dip; 1802 int i, *data; 1803 uint_t tmr[SGE_NTIMERS] = {5, 10, 20, 50, 100, 200}; 1804 uint_t cnt[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */ 1805 1806 /* 1807 * Holdoff timer 1808 */ 1809 data = &p->timer_val[0]; 1810 for (i = 0; i < SGE_NTIMERS; i++) 1811 data[i] = tmr[i]; 1812 (void) prop_lookup_int_array(sc, "holdoff-timer-values", data, 1813 SGE_NTIMERS); 1814 for (i = 0; i < SGE_NTIMERS; i++) { 1815 int limit = 200U; 1816 if (data[i] > limit) { 1817 cxgb_printf(dip, CE_WARN, 1818 "holdoff timer %d is too high (%d), lowered to %d.", 1819 i, data[i], limit); 1820 data[i] = limit; 1821 } 1822 } 1823 (void) ddi_prop_update_int_array(dev, dip, "holdoff-timer-values", 1824 data, SGE_NTIMERS); 1825 1826 /* 1827 * Holdoff packet counter 1828 */ 1829 data = &p->counter_val[0]; 1830 for (i = 0; i < SGE_NCOUNTERS; i++) 1831 data[i] = cnt[i]; 1832 (void) prop_lookup_int_array(sc, "holdoff-pkt-counter-values", data, 1833 SGE_NCOUNTERS); 1834 for (i = 0; i < SGE_NCOUNTERS; i++) { 1835 int limit = M_THRESHOLD_0; 1836 if (data[i] > limit) { 1837 cxgb_printf(dip, CE_WARN, 1838 "holdoff pkt-counter %d is too high (%d), " 1839 "lowered to %d.", i, data[i], limit); 1840 data[i] = limit; 1841 } 1842 } 1843 (void) ddi_prop_update_int_array(dev, dip, "holdoff-pkt-counter-values", 1844 data, SGE_NCOUNTERS); 1845 1846 /* 1847 * Maximum # of tx and rx queues to use for each 1848 * 100G, 40G, 25G, 10G and 1G port. 1849 */ 1850 p->max_ntxq_10g = prop_lookup_int(sc, "max-ntxq-10G-port", 8); 1851 (void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port", 1852 p->max_ntxq_10g); 1853 1854 p->max_nrxq_10g = prop_lookup_int(sc, "max-nrxq-10G-port", 8); 1855 (void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port", 1856 p->max_nrxq_10g); 1857 1858 p->max_ntxq_1g = prop_lookup_int(sc, "max-ntxq-1G-port", 2); 1859 (void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port", 1860 p->max_ntxq_1g); 1861 1862 p->max_nrxq_1g = prop_lookup_int(sc, "max-nrxq-1G-port", 2); 1863 (void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port", 1864 p->max_nrxq_1g); 1865 1866 /* 1867 * Holdoff parameters for 10G and 1G ports. 1868 */ 1869 p->tmr_idx_10g = prop_lookup_int(sc, "holdoff-timer-idx-10G", 0); 1870 (void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-10G", 1871 p->tmr_idx_10g); 1872 1873 p->pktc_idx_10g = prop_lookup_int(sc, "holdoff-pktc-idx-10G", 2); 1874 (void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-10G", 1875 p->pktc_idx_10g); 1876 1877 p->tmr_idx_1g = prop_lookup_int(sc, "holdoff-timer-idx-1G", 0); 1878 (void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-1G", 1879 p->tmr_idx_1g); 1880 1881 p->pktc_idx_1g = prop_lookup_int(sc, "holdoff-pktc-idx-1G", 2); 1882 (void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-1G", 1883 p->pktc_idx_1g); 1884 1885 /* 1886 * Size (number of entries) of each tx and rx queue. 1887 */ 1888 i = prop_lookup_int(sc, "qsize-txq", TX_EQ_QSIZE); 1889 p->qsize_txq = max(i, 128); 1890 if (p->qsize_txq != i) { 1891 cxgb_printf(dip, CE_WARN, 1892 "using %d instead of %d as the tx queue size", 1893 p->qsize_txq, i); 1894 } 1895 (void) ddi_prop_update_int(dev, dip, "qsize-txq", p->qsize_txq); 1896 1897 i = prop_lookup_int(sc, "qsize-rxq", RX_IQ_QSIZE); 1898 p->qsize_rxq = max(i, 128); 1899 while (p->qsize_rxq & 7) 1900 p->qsize_rxq--; 1901 if (p->qsize_rxq != i) { 1902 cxgb_printf(dip, CE_WARN, 1903 "using %d instead of %d as the rx queue size", 1904 p->qsize_rxq, i); 1905 } 1906 (void) ddi_prop_update_int(dev, dip, "qsize-rxq", p->qsize_rxq); 1907 1908 /* 1909 * Interrupt types allowed. 1910 * Bits 0, 1, 2 = INTx, MSI, MSI-X respectively. See sys/ddi_intr.h 1911 */ 1912 p->intr_types = prop_lookup_int(sc, "interrupt-types", 1913 DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED); 1914 (void) ddi_prop_update_int(dev, dip, "interrupt-types", p->intr_types); 1915 1916 /* 1917 * Forwarded interrupt queues. Create this property to force the driver 1918 * to use forwarded interrupt queues. 1919 */ 1920 if (ddi_prop_exists(dev, dip, DDI_PROP_DONTPASS, 1921 "interrupt-forwarding") != 0 || 1922 ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1923 "interrupt-forwarding") != 0) { 1924 cmn_err(CE_WARN, "%s (%s:%d) unimplemented.", 1925 __func__, __FILE__, __LINE__); 1926 (void) ddi_prop_create(dev, dip, DDI_PROP_CANSLEEP, 1927 "interrupt-forwarding", NULL, 0); 1928 } 1929 1930 /* 1931 * Write combining 1932 * 0 to disable, 1 to enable 1933 */ 1934 p->wc = prop_lookup_int(sc, "write-combine", 1); 1935 cxgb_printf(dip, CE_WARN, "write-combine: using of %d", p->wc); 1936 if (p->wc != 0 && p->wc != 1) { 1937 cxgb_printf(dip, CE_WARN, 1938 "write-combine: using 1 instead of %d", p->wc); 1939 p->wc = 1; 1940 } 1941 (void) ddi_prop_update_int(dev, dip, "write-combine", p->wc); 1942 1943 p->t4_fw_install = prop_lookup_int(sc, "t4_fw_install", 1); 1944 if (p->t4_fw_install != 0 && p->t4_fw_install != 2) 1945 p->t4_fw_install = 1; 1946 (void) ddi_prop_update_int(dev, dip, "t4_fw_install", p->t4_fw_install); 1947 1948 /* Multiple Rings */ 1949 p->multi_rings = prop_lookup_int(sc, "multi-rings", 1); 1950 if (p->multi_rings != 0 && p->multi_rings != 1) { 1951 cxgb_printf(dip, CE_NOTE, 1952 "multi-rings: using value 1 instead of %d", p->multi_rings); 1953 p->multi_rings = 1; 1954 } 1955 1956 (void) ddi_prop_update_int(dev, dip, "multi-rings", p->multi_rings); 1957 1958 return (0); 1959 } 1960 1961 static int 1962 remove_extra_props(struct adapter *sc, int n10g, int n1g) 1963 { 1964 if (n10g == 0) { 1965 (void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-10G-port"); 1966 (void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-10G-port"); 1967 (void) ddi_prop_remove(sc->dev, sc->dip, 1968 "holdoff-timer-idx-10G"); 1969 (void) ddi_prop_remove(sc->dev, sc->dip, 1970 "holdoff-pktc-idx-10G"); 1971 } 1972 1973 if (n1g == 0) { 1974 (void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-1G-port"); 1975 (void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-1G-port"); 1976 (void) ddi_prop_remove(sc->dev, sc->dip, 1977 "holdoff-timer-idx-1G"); 1978 (void) ddi_prop_remove(sc->dev, sc->dip, "holdoff-pktc-idx-1G"); 1979 } 1980 1981 return (0); 1982 } 1983 1984 static int 1985 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, 1986 struct intrs_and_queues *iaq) 1987 { 1988 struct driver_properties *p = &sc->props; 1989 int rc, itype, itypes, navail, nc, n; 1990 int pfres_rxq, pfres_txq, pfresq; 1991 1992 bzero(iaq, sizeof (*iaq)); 1993 nc = ncpus; /* our snapshot of the number of CPUs */ 1994 iaq->ntxq10g = min(nc, p->max_ntxq_10g); 1995 iaq->ntxq1g = min(nc, p->max_ntxq_1g); 1996 iaq->nrxq10g = min(nc, p->max_nrxq_10g); 1997 iaq->nrxq1g = min(nc, p->max_nrxq_1g); 1998 1999 pfres_rxq = iaq->nrxq10g * n10g + iaq->nrxq1g * n1g; 2000 pfres_txq = iaq->ntxq10g * n10g + iaq->ntxq1g * n1g; 2001 2002 /* 2003 * If current configuration of max number of Rxqs and Txqs exceed 2004 * the max available for all the ports under this PF, then shrink 2005 * the queues to max available. Reduce them in a way that each 2006 * port under this PF has equally distributed number of queues. 2007 * Must guarantee at least 1 queue for each port for both NIC 2008 * and Offload queues. 2009 * 2010 * neq - fixed max number of Egress queues on Tx path and Free List 2011 * queues that hold Rx payload data on Rx path. Half are reserved 2012 * for Egress queues and the other half for Free List queues. 2013 * Hence, the division by 2. 2014 * 2015 * niqflint - max number of Ingress queues with interrupts on Rx 2016 * path to receive completions that indicate Rx payload has been 2017 * posted in its associated Free List queue. Also handles Tx 2018 * completions for packets successfully transmitted on Tx path. 2019 * 2020 * nethctrl - max number of Egress queues only for Tx path. This 2021 * number is usually half of neq. However, if it became less than 2022 * neq due to lack of resources based on firmware configuration, 2023 * then take the lower value. 2024 */ 2025 const uint_t max_rxq = 2026 MIN(sc->params.pfres.neq / 2, sc->params.pfres.niqflint); 2027 while (pfres_rxq > max_rxq) { 2028 pfresq = pfres_rxq; 2029 2030 if (iaq->nrxq10g > 1) { 2031 iaq->nrxq10g--; 2032 pfres_rxq -= n10g; 2033 } 2034 2035 if (iaq->nrxq1g > 1) { 2036 iaq->nrxq1g--; 2037 pfres_rxq -= n1g; 2038 } 2039 2040 /* Break if nothing changed */ 2041 if (pfresq == pfres_rxq) 2042 break; 2043 } 2044 2045 const uint_t max_txq = 2046 MIN(sc->params.pfres.neq / 2, sc->params.pfres.nethctrl); 2047 while (pfres_txq > max_txq) { 2048 pfresq = pfres_txq; 2049 2050 if (iaq->ntxq10g > 1) { 2051 iaq->ntxq10g--; 2052 pfres_txq -= n10g; 2053 } 2054 2055 if (iaq->ntxq1g > 1) { 2056 iaq->ntxq1g--; 2057 pfres_txq -= n1g; 2058 } 2059 2060 /* Break if nothing changed */ 2061 if (pfresq == pfres_txq) 2062 break; 2063 } 2064 2065 rc = ddi_intr_get_supported_types(sc->dip, &itypes); 2066 if (rc != DDI_SUCCESS) { 2067 cxgb_printf(sc->dip, CE_WARN, 2068 "failed to determine supported interrupt types: %d", rc); 2069 return (rc); 2070 } 2071 2072 for (itype = DDI_INTR_TYPE_MSIX; itype; itype >>= 1) { 2073 ASSERT(itype == DDI_INTR_TYPE_MSIX || 2074 itype == DDI_INTR_TYPE_MSI || 2075 itype == DDI_INTR_TYPE_FIXED); 2076 2077 if ((itype & itypes & p->intr_types) == 0) 2078 continue; /* not supported or not allowed */ 2079 2080 navail = 0; 2081 rc = ddi_intr_get_navail(sc->dip, itype, &navail); 2082 if (rc != DDI_SUCCESS || navail == 0) { 2083 cxgb_printf(sc->dip, CE_WARN, 2084 "failed to get # of interrupts for type %d: %d", 2085 itype, rc); 2086 continue; /* carry on */ 2087 } 2088 2089 iaq->intr_type = itype; 2090 if (navail == 0) 2091 continue; 2092 2093 /* 2094 * Best option: an interrupt vector for errors, one for the 2095 * firmware event queue, and one each for each rxq (NIC as well 2096 * as offload). 2097 */ 2098 iaq->nirq = T4_EXTRA_INTR; 2099 iaq->nirq += n10g * iaq->nrxq10g; 2100 iaq->nirq += n1g * iaq->nrxq1g; 2101 2102 if (iaq->nirq <= navail && 2103 (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) { 2104 iaq->intr_fwd = 0; 2105 goto allocate; 2106 } 2107 2108 /* 2109 * Second best option: an interrupt vector for errors, one for 2110 * the firmware event queue, and one each for either NIC or 2111 * offload rxq's. 2112 */ 2113 iaq->nirq = T4_EXTRA_INTR; 2114 iaq->nirq += n10g * iaq->nrxq10g; 2115 iaq->nirq += n1g * iaq->nrxq1g; 2116 if (iaq->nirq <= navail && 2117 (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) { 2118 iaq->intr_fwd = 1; 2119 goto allocate; 2120 } 2121 2122 /* 2123 * Next best option: an interrupt vector for errors, one for the 2124 * firmware event queue, and at least one per port. At this 2125 * point we know we'll have to downsize nrxq or nofldrxq to fit 2126 * what's available to us. 2127 */ 2128 iaq->nirq = T4_EXTRA_INTR; 2129 iaq->nirq += n10g + n1g; 2130 if (iaq->nirq <= navail) { 2131 int leftover = navail - iaq->nirq; 2132 2133 if (n10g > 0) { 2134 int target = iaq->nrxq10g; 2135 2136 n = 1; 2137 while (n < target && leftover >= n10g) { 2138 leftover -= n10g; 2139 iaq->nirq += n10g; 2140 n++; 2141 } 2142 iaq->nrxq10g = min(n, iaq->nrxq10g); 2143 } 2144 2145 if (n1g > 0) { 2146 int target = iaq->nrxq1g; 2147 2148 n = 1; 2149 while (n < target && leftover >= n1g) { 2150 leftover -= n1g; 2151 iaq->nirq += n1g; 2152 n++; 2153 } 2154 iaq->nrxq1g = min(n, iaq->nrxq1g); 2155 } 2156 2157 /* 2158 * We have arrived at a minimum value required to enable 2159 * per queue irq(either NIC or offload). Thus for non- 2160 * offload case, we will get a vector per queue, while 2161 * offload case, we will get a vector per offload/NIC q. 2162 * Hence enable Interrupt forwarding only for offload 2163 * case. 2164 */ 2165 if (itype != DDI_INTR_TYPE_MSI) { 2166 goto allocate; 2167 } 2168 } 2169 2170 /* 2171 * Least desirable option: one interrupt vector for everything. 2172 */ 2173 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1; 2174 iaq->intr_fwd = 1; 2175 2176 allocate: 2177 return (0); 2178 } 2179 2180 cxgb_printf(sc->dip, CE_WARN, 2181 "failed to find a usable interrupt type. supported=%d, allowed=%d", 2182 itypes, p->intr_types); 2183 return (DDI_FAILURE); 2184 } 2185 2186 static int 2187 add_child_node(struct adapter *sc, int idx) 2188 { 2189 int rc; 2190 struct port_info *pi; 2191 2192 if (idx < 0 || idx >= sc->params.nports) 2193 return (EINVAL); 2194 2195 pi = sc->port[idx]; 2196 if (pi == NULL) 2197 return (ENODEV); /* t4_port_init failed earlier */ 2198 2199 PORT_LOCK(pi); 2200 if (pi->dip != NULL) { 2201 rc = 0; /* EEXIST really, but then bus_config fails */ 2202 goto done; 2203 } 2204 2205 rc = ndi_devi_alloc(sc->dip, T4_PORT_NAME, DEVI_SID_NODEID, &pi->dip); 2206 if (rc != DDI_SUCCESS || pi->dip == NULL) { 2207 rc = ENOMEM; 2208 goto done; 2209 } 2210 2211 (void) ddi_set_parent_data(pi->dip, pi); 2212 (void) ndi_devi_bind_driver(pi->dip, 0); 2213 rc = 0; 2214 done: 2215 PORT_UNLOCK(pi); 2216 return (rc); 2217 } 2218 2219 static int 2220 remove_child_node(struct adapter *sc, int idx) 2221 { 2222 int rc; 2223 struct port_info *pi; 2224 2225 if (idx < 0 || idx >= sc->params.nports) 2226 return (EINVAL); 2227 2228 pi = sc->port[idx]; 2229 if (pi == NULL) 2230 return (ENODEV); 2231 2232 PORT_LOCK(pi); 2233 if (pi->dip == NULL) { 2234 rc = ENODEV; 2235 goto done; 2236 } 2237 2238 rc = ndi_devi_free(pi->dip); 2239 if (rc == 0) 2240 pi->dip = NULL; 2241 done: 2242 PORT_UNLOCK(pi); 2243 return (rc); 2244 } 2245 2246 static char * 2247 print_port_speed(const struct port_info *pi) 2248 { 2249 if (!pi) 2250 return ("-"); 2251 2252 if (is_100G_port(pi)) 2253 return ("100G"); 2254 else if (is_50G_port(pi)) 2255 return ("50G"); 2256 else if (is_40G_port(pi)) 2257 return ("40G"); 2258 else if (is_25G_port(pi)) 2259 return ("25G"); 2260 else if (is_10G_port(pi)) 2261 return ("10G"); 2262 else 2263 return ("1G"); 2264 } 2265 2266 #define KS_UINIT(x) kstat_named_init(&kstatp->x, #x, KSTAT_DATA_ULONG) 2267 #define KS_CINIT(x) kstat_named_init(&kstatp->x, #x, KSTAT_DATA_CHAR) 2268 #define KS_U64INIT(x) kstat_named_init(&kstatp->x, #x, KSTAT_DATA_UINT64) 2269 #define KS_U_SET(x, y) kstatp->x.value.ul = (y) 2270 #define KS_C_SET(x, ...) \ 2271 (void) snprintf(kstatp->x.value.c, 16, __VA_ARGS__) 2272 2273 /* 2274 * t4nex:X:config 2275 */ 2276 struct t4_kstats { 2277 kstat_named_t chip_ver; 2278 kstat_named_t fw_vers; 2279 kstat_named_t tp_vers; 2280 kstat_named_t driver_version; 2281 kstat_named_t serial_number; 2282 kstat_named_t ec_level; 2283 kstat_named_t id; 2284 kstat_named_t bus_type; 2285 kstat_named_t bus_width; 2286 kstat_named_t bus_speed; 2287 kstat_named_t core_clock; 2288 kstat_named_t port_cnt; 2289 kstat_named_t port_type; 2290 kstat_named_t pci_vendor_id; 2291 kstat_named_t pci_device_id; 2292 }; 2293 static kstat_t * 2294 setup_kstats(struct adapter *sc) 2295 { 2296 kstat_t *ksp; 2297 struct t4_kstats *kstatp; 2298 int ndata; 2299 struct pci_params *p = &sc->params.pci; 2300 struct vpd_params *v = &sc->params.vpd; 2301 uint16_t pci_vendor, pci_device; 2302 2303 ndata = sizeof (struct t4_kstats) / sizeof (kstat_named_t); 2304 2305 ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "config", 2306 "nexus", KSTAT_TYPE_NAMED, ndata, 0); 2307 if (ksp == NULL) { 2308 cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats."); 2309 return (NULL); 2310 } 2311 2312 kstatp = (struct t4_kstats *)ksp->ks_data; 2313 2314 KS_UINIT(chip_ver); 2315 KS_CINIT(fw_vers); 2316 KS_CINIT(tp_vers); 2317 KS_CINIT(driver_version); 2318 KS_CINIT(serial_number); 2319 KS_CINIT(ec_level); 2320 KS_CINIT(id); 2321 KS_CINIT(bus_type); 2322 KS_CINIT(bus_width); 2323 KS_CINIT(bus_speed); 2324 KS_UINIT(core_clock); 2325 KS_UINIT(port_cnt); 2326 KS_CINIT(port_type); 2327 KS_CINIT(pci_vendor_id); 2328 KS_CINIT(pci_device_id); 2329 2330 KS_U_SET(chip_ver, sc->params.chip); 2331 KS_C_SET(fw_vers, "%d.%d.%d.%d", 2332 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 2333 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 2334 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 2335 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 2336 KS_C_SET(tp_vers, "%d.%d.%d.%d", 2337 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), 2338 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), 2339 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), 2340 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); 2341 KS_C_SET(driver_version, DRV_VERSION); 2342 KS_C_SET(serial_number, "%s", v->sn); 2343 KS_C_SET(ec_level, "%s", v->ec); 2344 KS_C_SET(id, "%s", v->id); 2345 KS_C_SET(bus_type, "pci-express"); 2346 KS_C_SET(bus_width, "x%d lanes", p->width); 2347 KS_C_SET(bus_speed, "%d", p->speed); 2348 KS_U_SET(core_clock, v->cclk); 2349 KS_U_SET(port_cnt, sc->params.nports); 2350 2351 t4_os_pci_read_cfg2(sc, PCI_CONF_VENID, &pci_vendor); 2352 KS_C_SET(pci_vendor_id, "0x%x", pci_vendor); 2353 2354 t4_os_pci_read_cfg2(sc, PCI_CONF_DEVID, &pci_device); 2355 KS_C_SET(pci_device_id, "0x%x", pci_device); 2356 2357 KS_C_SET(port_type, "%s/%s/%s/%s", 2358 print_port_speed(sc->port[0]), 2359 print_port_speed(sc->port[1]), 2360 print_port_speed(sc->port[2]), 2361 print_port_speed(sc->port[3])); 2362 2363 /* Do NOT set ksp->ks_update. These kstats do not change. */ 2364 2365 /* Install the kstat */ 2366 ksp->ks_private = (void *)sc; 2367 kstat_install(ksp); 2368 2369 return (ksp); 2370 } 2371 2372 /* 2373 * t4nex:X:stat 2374 */ 2375 struct t4_wc_kstats { 2376 kstat_named_t write_coal_success; 2377 kstat_named_t write_coal_failure; 2378 }; 2379 static kstat_t * 2380 setup_wc_kstats(struct adapter *sc) 2381 { 2382 kstat_t *ksp; 2383 struct t4_wc_kstats *kstatp; 2384 2385 const uint_t ndata = 2386 sizeof (struct t4_wc_kstats) / sizeof (kstat_named_t); 2387 ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "stats", 2388 "nexus", KSTAT_TYPE_NAMED, ndata, 0); 2389 if (ksp == NULL) { 2390 cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats."); 2391 return (NULL); 2392 } 2393 2394 kstatp = (struct t4_wc_kstats *)ksp->ks_data; 2395 2396 KS_UINIT(write_coal_success); 2397 KS_UINIT(write_coal_failure); 2398 2399 ksp->ks_update = update_wc_kstats; 2400 /* Install the kstat */ 2401 ksp->ks_private = (void *)sc; 2402 kstat_install(ksp); 2403 2404 return (ksp); 2405 } 2406 2407 static int 2408 update_wc_kstats(kstat_t *ksp, int rw) 2409 { 2410 struct t4_wc_kstats *kstatp = (struct t4_wc_kstats *)ksp->ks_data; 2411 struct adapter *sc = ksp->ks_private; 2412 uint32_t wc_total, wc_success, wc_failure; 2413 2414 if (rw == KSTAT_WRITE) 2415 return (0); 2416 2417 if (is_t5(sc->params.chip)) { 2418 wc_total = t4_read_reg(sc, A_SGE_STAT_TOTAL); 2419 wc_failure = t4_read_reg(sc, A_SGE_STAT_MATCH); 2420 wc_success = wc_total - wc_failure; 2421 } else { 2422 wc_success = 0; 2423 wc_failure = 0; 2424 } 2425 2426 KS_U_SET(write_coal_success, wc_success); 2427 KS_U_SET(write_coal_failure, wc_failure); 2428 2429 return (0); 2430 } 2431 2432 /* 2433 * cxgbe:X:fec 2434 * 2435 * This provides visibility into the errors that have been found by the 2436 * different FEC subsystems. While it's tempting to combine the two different 2437 * FEC types logically, the data that the errors tell us are pretty different 2438 * between the two. Firecode is strictly per-lane, but RS has parts that are 2439 * related to symbol distribution to lanes and also to the overall channel. 2440 */ 2441 struct cxgbe_port_fec_kstats { 2442 kstat_named_t rs_corr; 2443 kstat_named_t rs_uncorr; 2444 kstat_named_t rs_sym0_corr; 2445 kstat_named_t rs_sym1_corr; 2446 kstat_named_t rs_sym2_corr; 2447 kstat_named_t rs_sym3_corr; 2448 kstat_named_t fc_lane0_corr; 2449 kstat_named_t fc_lane0_uncorr; 2450 kstat_named_t fc_lane1_corr; 2451 kstat_named_t fc_lane1_uncorr; 2452 kstat_named_t fc_lane2_corr; 2453 kstat_named_t fc_lane2_uncorr; 2454 kstat_named_t fc_lane3_corr; 2455 kstat_named_t fc_lane3_uncorr; 2456 }; 2457 2458 static uint32_t 2459 read_fec_pair(struct port_info *pi, uint32_t lo_reg, uint32_t high_reg) 2460 { 2461 struct adapter *sc = pi->adapter; 2462 uint8_t port = pi->tx_chan; 2463 uint32_t low, high, ret; 2464 2465 low = t4_read_reg32(sc, T5_PORT_REG(port, lo_reg)); 2466 high = t4_read_reg32(sc, T5_PORT_REG(port, high_reg)); 2467 ret = low & 0xffff; 2468 ret |= (high & 0xffff) << 16; 2469 return (ret); 2470 } 2471 2472 static int 2473 update_port_fec_kstats(kstat_t *ksp, int rw) 2474 { 2475 struct cxgbe_port_fec_kstats *fec = ksp->ks_data; 2476 struct port_info *pi = ksp->ks_private; 2477 2478 if (rw == KSTAT_WRITE) { 2479 return (EACCES); 2480 } 2481 2482 /* 2483 * First go ahead and gather RS related stats. 2484 */ 2485 fec->rs_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_CCW_LO, 2486 T6_RS_FEC_CCW_HI); 2487 fec->rs_uncorr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_NCCW_LO, 2488 T6_RS_FEC_NCCW_HI); 2489 fec->rs_sym0_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR0_LO, 2490 T6_RS_FEC_SYMERR0_HI); 2491 fec->rs_sym1_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR1_LO, 2492 T6_RS_FEC_SYMERR1_HI); 2493 fec->rs_sym2_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR2_LO, 2494 T6_RS_FEC_SYMERR2_HI); 2495 fec->rs_sym3_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR3_LO, 2496 T6_RS_FEC_SYMERR3_HI); 2497 2498 /* 2499 * Now go through and try to grab Firecode/BASE-R stats. 2500 */ 2501 fec->fc_lane0_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L0_CERR_LO, 2502 T6_FC_FEC_L0_CERR_HI); 2503 fec->fc_lane0_uncorr.value.ui64 += read_fec_pair(pi, 2504 T6_FC_FEC_L0_NCERR_LO, T6_FC_FEC_L0_NCERR_HI); 2505 fec->fc_lane1_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L1_CERR_LO, 2506 T6_FC_FEC_L1_CERR_HI); 2507 fec->fc_lane1_uncorr.value.ui64 += read_fec_pair(pi, 2508 T6_FC_FEC_L1_NCERR_LO, T6_FC_FEC_L1_NCERR_HI); 2509 fec->fc_lane2_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L2_CERR_LO, 2510 T6_FC_FEC_L2_CERR_HI); 2511 fec->fc_lane2_uncorr.value.ui64 += read_fec_pair(pi, 2512 T6_FC_FEC_L2_NCERR_LO, T6_FC_FEC_L2_NCERR_HI); 2513 fec->fc_lane3_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L3_CERR_LO, 2514 T6_FC_FEC_L3_CERR_HI); 2515 fec->fc_lane3_uncorr.value.ui64 += read_fec_pair(pi, 2516 T6_FC_FEC_L3_NCERR_LO, T6_FC_FEC_L3_NCERR_HI); 2517 2518 return (0); 2519 } 2520 2521 static kstat_t * 2522 setup_port_fec_kstats(struct port_info *pi) 2523 { 2524 kstat_t *ksp; 2525 struct cxgbe_port_fec_kstats *kstatp; 2526 2527 if (!is_t6(pi->adapter->params.chip)) { 2528 return (NULL); 2529 } 2530 2531 ksp = kstat_create(T4_PORT_NAME, ddi_get_instance(pi->dip), "fec", 2532 "net", KSTAT_TYPE_NAMED, sizeof (struct cxgbe_port_fec_kstats) / 2533 sizeof (kstat_named_t), 0); 2534 if (ksp == NULL) { 2535 cxgb_printf(pi->dip, CE_WARN, "failed to initialize fec " 2536 "kstats."); 2537 return (NULL); 2538 } 2539 2540 kstatp = ksp->ks_data; 2541 KS_U64INIT(rs_corr); 2542 KS_U64INIT(rs_uncorr); 2543 KS_U64INIT(rs_sym0_corr); 2544 KS_U64INIT(rs_sym1_corr); 2545 KS_U64INIT(rs_sym2_corr); 2546 KS_U64INIT(rs_sym3_corr); 2547 KS_U64INIT(fc_lane0_corr); 2548 KS_U64INIT(fc_lane0_uncorr); 2549 KS_U64INIT(fc_lane1_corr); 2550 KS_U64INIT(fc_lane1_uncorr); 2551 KS_U64INIT(fc_lane2_corr); 2552 KS_U64INIT(fc_lane2_uncorr); 2553 KS_U64INIT(fc_lane3_corr); 2554 KS_U64INIT(fc_lane3_uncorr); 2555 2556 ksp->ks_update = update_port_fec_kstats; 2557 ksp->ks_private = pi; 2558 kstat_install(ksp); 2559 2560 return (ksp); 2561 } 2562 2563 int 2564 adapter_full_init(struct adapter *sc) 2565 { 2566 int i, rc = 0; 2567 2568 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2569 2570 rc = t4_setup_adapter_queues(sc); 2571 if (rc != 0) 2572 goto done; 2573 2574 if (sc->intr_cap & DDI_INTR_FLAG_BLOCK) 2575 (void) ddi_intr_block_enable(sc->intr_handle, sc->intr_count); 2576 else { 2577 for (i = 0; i < sc->intr_count; i++) 2578 (void) ddi_intr_enable(sc->intr_handle[i]); 2579 } 2580 t4_intr_enable(sc); 2581 sc->flags |= FULL_INIT_DONE; 2582 2583 done: 2584 if (rc != 0) 2585 (void) adapter_full_uninit(sc); 2586 2587 return (rc); 2588 } 2589 2590 int 2591 adapter_full_uninit(struct adapter *sc) 2592 { 2593 int i, rc = 0; 2594 2595 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2596 2597 if (sc->intr_cap & DDI_INTR_FLAG_BLOCK) 2598 (void) ddi_intr_block_disable(sc->intr_handle, sc->intr_count); 2599 else { 2600 for (i = 0; i < sc->intr_count; i++) 2601 (void) ddi_intr_disable(sc->intr_handle[i]); 2602 } 2603 2604 rc = t4_teardown_adapter_queues(sc); 2605 if (rc != 0) 2606 return (rc); 2607 2608 sc->flags &= ~FULL_INIT_DONE; 2609 2610 return (0); 2611 } 2612 2613 int 2614 port_full_init(struct port_info *pi) 2615 { 2616 struct adapter *sc = pi->adapter; 2617 uint16_t *rss; 2618 struct sge_rxq *rxq; 2619 int rc, i; 2620 2621 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2622 ASSERT((pi->flags & PORT_INIT_DONE) == 0); 2623 2624 /* 2625 * Allocate tx/rx/fl queues for this port. 2626 */ 2627 rc = t4_setup_port_queues(pi); 2628 if (rc != 0) 2629 goto done; /* error message displayed already */ 2630 2631 /* 2632 * Setup RSS for this port. 2633 */ 2634 rss = kmem_zalloc(pi->nrxq * sizeof (*rss), KM_SLEEP); 2635 for_each_rxq(pi, i, rxq) { 2636 rss[i] = rxq->iq.abs_id; 2637 } 2638 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, 2639 pi->rss_size, rss, pi->nrxq); 2640 kmem_free(rss, pi->nrxq * sizeof (*rss)); 2641 if (rc != 0) { 2642 cxgb_printf(pi->dip, CE_WARN, "rss_config failed: %d", rc); 2643 goto done; 2644 } 2645 2646 /* 2647 * Initialize our per-port FEC kstats. 2648 */ 2649 pi->ksp_fec = setup_port_fec_kstats(pi); 2650 2651 pi->flags |= PORT_INIT_DONE; 2652 done: 2653 if (rc != 0) 2654 (void) port_full_uninit(pi); 2655 2656 return (rc); 2657 } 2658 2659 /* 2660 * Idempotent. 2661 */ 2662 int 2663 port_full_uninit(struct port_info *pi) 2664 { 2665 2666 ASSERT(pi->flags & PORT_INIT_DONE); 2667 2668 if (pi->ksp_fec != NULL) { 2669 kstat_delete(pi->ksp_fec); 2670 pi->ksp_fec = NULL; 2671 } 2672 (void) t4_teardown_port_queues(pi); 2673 pi->flags &= ~PORT_INIT_DONE; 2674 2675 return (0); 2676 } 2677 2678 void 2679 enable_port_queues(struct port_info *pi) 2680 { 2681 struct adapter *sc = pi->adapter; 2682 int i; 2683 struct sge_iq *iq; 2684 struct sge_rxq *rxq; 2685 2686 ASSERT(pi->flags & PORT_INIT_DONE); 2687 2688 /* 2689 * TODO: whatever was queued up after we set iq->state to IQS_DISABLED 2690 * back in disable_port_queues will be processed now, after an unbounded 2691 * delay. This can't be good. 2692 */ 2693 2694 for_each_rxq(pi, i, rxq) { 2695 iq = &rxq->iq; 2696 if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) != 2697 IQS_DISABLED) 2698 panic("%s: iq %p wasn't disabled", __func__, 2699 (void *) iq); 2700 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 2701 V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id)); 2702 } 2703 } 2704 2705 void 2706 disable_port_queues(struct port_info *pi) 2707 { 2708 int i; 2709 struct adapter *sc = pi->adapter; 2710 struct sge_rxq *rxq; 2711 2712 ASSERT(pi->flags & PORT_INIT_DONE); 2713 2714 /* 2715 * TODO: need proper implementation for all tx queues (ctrl, eth, ofld). 2716 */ 2717 2718 for_each_rxq(pi, i, rxq) { 2719 while (atomic_cas_uint(&rxq->iq.state, IQS_IDLE, 2720 IQS_DISABLED) != IQS_IDLE) 2721 msleep(1); 2722 } 2723 2724 mutex_enter(&sc->sfl_lock); 2725 for_each_rxq(pi, i, rxq) 2726 rxq->fl.flags |= FL_DOOMED; 2727 mutex_exit(&sc->sfl_lock); 2728 /* TODO: need to wait for all fl's to be removed from sc->sfl */ 2729 } 2730 2731 void 2732 t4_fatal_err(struct adapter *sc) 2733 { 2734 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 2735 t4_intr_disable(sc); 2736 cxgb_printf(sc->dip, CE_WARN, 2737 "encountered fatal error, adapter stopped."); 2738 } 2739 2740 int 2741 t4_os_find_pci_capability(struct adapter *sc, int cap) 2742 { 2743 uint16_t stat; 2744 uint8_t cap_ptr, cap_id; 2745 2746 t4_os_pci_read_cfg2(sc, PCI_CONF_STAT, &stat); 2747 if ((stat & PCI_STAT_CAP) == 0) 2748 return (0); /* does not implement capabilities */ 2749 2750 t4_os_pci_read_cfg1(sc, PCI_CONF_CAP_PTR, &cap_ptr); 2751 while (cap_ptr) { 2752 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_ID, &cap_id); 2753 if (cap_id == cap) 2754 return (cap_ptr); /* found */ 2755 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_NEXT_PTR, &cap_ptr); 2756 } 2757 2758 return (0); /* not found */ 2759 } 2760 2761 void 2762 t4_os_portmod_changed(struct adapter *sc, int idx) 2763 { 2764 static const char *mod_str[] = { 2765 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 2766 }; 2767 struct port_info *pi = sc->port[idx]; 2768 2769 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 2770 cxgb_printf(pi->dip, CE_NOTE, "transceiver unplugged."); 2771 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 2772 cxgb_printf(pi->dip, CE_NOTE, 2773 "unknown transceiver inserted.\n"); 2774 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 2775 cxgb_printf(pi->dip, CE_NOTE, 2776 "unsupported transceiver inserted.\n"); 2777 else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str)) 2778 cxgb_printf(pi->dip, CE_NOTE, "%s transceiver inserted.\n", 2779 mod_str[pi->mod_type]); 2780 else 2781 cxgb_printf(pi->dip, CE_NOTE, "transceiver (type %d) inserted.", 2782 pi->mod_type); 2783 2784 if ((isset(&sc->open_device_map, pi->port_id) != 0) && 2785 pi->link_cfg.new_module) 2786 pi->link_cfg.redo_l1cfg = true; 2787 } 2788 2789 /* ARGSUSED */ 2790 static int 2791 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m) 2792 { 2793 if (m != NULL) 2794 freemsg(m); 2795 return (0); 2796 } 2797 2798 int 2799 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h) 2800 { 2801 uint_t *loc, new; 2802 2803 if (opcode >= ARRAY_SIZE(sc->cpl_handler)) 2804 return (EINVAL); 2805 2806 new = (uint_t)(unsigned long) (h ? h : cpl_not_handled); 2807 loc = (uint_t *)&sc->cpl_handler[opcode]; 2808 (void) atomic_swap_uint(loc, new); 2809 2810 return (0); 2811 } 2812 2813 static int 2814 fw_msg_not_handled(struct adapter *sc, const __be64 *data) 2815 { 2816 struct cpl_fw6_msg *cpl; 2817 2818 cpl = __containerof((void *)data, struct cpl_fw6_msg, data); 2819 2820 cxgb_printf(sc->dip, CE_WARN, "%s fw_msg type %d", __func__, cpl->type); 2821 return (0); 2822 } 2823 2824 int 2825 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h) 2826 { 2827 fw_msg_handler_t *loc, new; 2828 2829 if (type >= ARRAY_SIZE(sc->fw_msg_handler)) 2830 return (EINVAL); 2831 2832 /* 2833 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL 2834 * handler dispatch table. Reject any attempt to install a handler for 2835 * this subtype. 2836 */ 2837 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL) 2838 return (EINVAL); 2839 2840 new = h ? h : fw_msg_not_handled; 2841 loc = &sc->fw_msg_handler[type]; 2842 (void) atomic_swap_ptr(loc, (void *)new); 2843 2844 return (0); 2845 } 2846 2847 static int 2848 t4_sensor_read(struct adapter *sc, uint32_t diag, uint32_t *valp) 2849 { 2850 int rc; 2851 struct port_info *pi = sc->port[0]; 2852 uint32_t param, val; 2853 2854 rc = begin_synchronized_op(pi, 1, 1); 2855 if (rc != 0) { 2856 return (rc); 2857 } 2858 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 2859 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 2860 V_FW_PARAMS_PARAM_Y(diag); 2861 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 2862 end_synchronized_op(pi, 1); 2863 2864 if (rc != 0) { 2865 return (rc); 2866 } 2867 2868 if (val == 0) { 2869 return (EIO); 2870 } 2871 2872 *valp = val; 2873 return (0); 2874 } 2875 2876 static int 2877 t4_temperature_read(void *arg, sensor_ioctl_scalar_t *scalar) 2878 { 2879 int ret; 2880 struct adapter *sc = arg; 2881 uint32_t val; 2882 2883 ret = t4_sensor_read(sc, FW_PARAM_DEV_DIAG_TMP, &val); 2884 if (ret != 0) { 2885 return (ret); 2886 } 2887 2888 /* 2889 * The device measures temperature in units of 1 degree Celsius. We 2890 * don't know its precision. 2891 */ 2892 scalar->sis_unit = SENSOR_UNIT_CELSIUS; 2893 scalar->sis_gran = 1; 2894 scalar->sis_prec = 0; 2895 scalar->sis_value = val; 2896 2897 return (0); 2898 } 2899 2900 static int 2901 t4_voltage_read(void *arg, sensor_ioctl_scalar_t *scalar) 2902 { 2903 int ret; 2904 struct adapter *sc = arg; 2905 uint32_t val; 2906 2907 ret = t4_sensor_read(sc, FW_PARAM_DEV_DIAG_VDD, &val); 2908 if (ret != 0) { 2909 return (ret); 2910 } 2911 2912 scalar->sis_unit = SENSOR_UNIT_VOLTS; 2913 scalar->sis_gran = 1000; 2914 scalar->sis_prec = 0; 2915 scalar->sis_value = val; 2916 2917 return (0); 2918 } 2919 2920 /* 2921 * While the hardware supports the ability to read and write the flash image, 2922 * this is not currently wired up. 2923 */ 2924 static int 2925 t4_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps) 2926 { 2927 *caps = DDI_UFM_CAP_REPORT; 2928 return (0); 2929 } 2930 2931 static int 2932 t4_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, 2933 ddi_ufm_image_t *imgp) 2934 { 2935 if (imgno != 0) { 2936 return (EINVAL); 2937 } 2938 2939 ddi_ufm_image_set_desc(imgp, "Firmware"); 2940 ddi_ufm_image_set_nslots(imgp, 1); 2941 2942 return (0); 2943 } 2944 2945 static int 2946 t4_ufm_fill_slot_version(nvlist_t *nvl, const char *key, uint32_t vers) 2947 { 2948 char buf[128]; 2949 2950 if (vers == 0) { 2951 return (0); 2952 } 2953 2954 if (snprintf(buf, sizeof (buf), "%u.%u.%u.%u", 2955 G_FW_HDR_FW_VER_MAJOR(vers), G_FW_HDR_FW_VER_MINOR(vers), 2956 G_FW_HDR_FW_VER_MICRO(vers), G_FW_HDR_FW_VER_BUILD(vers)) >= 2957 sizeof (buf)) { 2958 return (EOVERFLOW); 2959 } 2960 2961 return (nvlist_add_string(nvl, key, buf)); 2962 } 2963 2964 static int 2965 t4_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, uint_t slotno, 2966 ddi_ufm_slot_t *slotp) 2967 { 2968 int ret; 2969 struct adapter *sc = arg; 2970 nvlist_t *misc = NULL; 2971 char buf[128]; 2972 2973 if (imgno != 0 || slotno != 0) { 2974 return (EINVAL); 2975 } 2976 2977 if (snprintf(buf, sizeof (buf), "%u.%u.%u.%u", 2978 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 2979 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 2980 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 2981 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)) >= sizeof (buf)) { 2982 return (EOVERFLOW); 2983 } 2984 2985 ddi_ufm_slot_set_version(slotp, buf); 2986 2987 (void) nvlist_alloc(&misc, NV_UNIQUE_NAME, KM_SLEEP); 2988 if ((ret = t4_ufm_fill_slot_version(misc, "TP Microcode", 2989 sc->params.tp_vers)) != 0) { 2990 goto err; 2991 } 2992 2993 if ((ret = t4_ufm_fill_slot_version(misc, "Bootstrap", 2994 sc->params.bs_vers)) != 0) { 2995 goto err; 2996 } 2997 2998 if ((ret = t4_ufm_fill_slot_version(misc, "Expansion ROM", 2999 sc->params.er_vers)) != 0) { 3000 goto err; 3001 } 3002 3003 if ((ret = nvlist_add_uint32(misc, "Serial Configuration", 3004 sc->params.scfg_vers)) != 0) { 3005 goto err; 3006 } 3007 3008 if ((ret = nvlist_add_uint32(misc, "VPD Version", 3009 sc->params.vpd_vers)) != 0) { 3010 goto err; 3011 } 3012 3013 ddi_ufm_slot_set_misc(slotp, misc); 3014 ddi_ufm_slot_set_attrs(slotp, DDI_UFM_ATTR_ACTIVE | 3015 DDI_UFM_ATTR_WRITEABLE | DDI_UFM_ATTR_READABLE); 3016 return (0); 3017 3018 err: 3019 nvlist_free(misc); 3020 return (ret); 3021 3022 } 3023 3024 3025 int 3026 t4_cxgbe_attach(struct port_info *pi, dev_info_t *dip) 3027 { 3028 ASSERT(pi != NULL); 3029 3030 mac_register_t *mac = mac_alloc(MAC_VERSION); 3031 if (mac == NULL) { 3032 return (DDI_FAILURE); 3033 } 3034 3035 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 3036 mac->m_driver = pi; 3037 mac->m_dip = dip; 3038 mac->m_src_addr = pi->hw_addr; 3039 mac->m_callbacks = pi->mc; 3040 mac->m_max_sdu = pi->mtu; 3041 mac->m_priv_props = pi->props; 3042 mac->m_margin = VLAN_TAGSZ; 3043 3044 if (!mac->m_callbacks->mc_unicst) { 3045 /* Multiple rings enabled */ 3046 mac->m_v12n = MAC_VIRT_LEVEL1; 3047 } 3048 3049 mac_handle_t mh = NULL; 3050 const int rc = mac_register(mac, &mh); 3051 mac_free(mac); 3052 if (rc != 0) { 3053 return (DDI_FAILURE); 3054 } 3055 3056 pi->mh = mh; 3057 3058 /* 3059 * Link state from this point onwards to the time interface is plumbed, 3060 * should be set to LINK_STATE_UNKNOWN. The mac should be updated about 3061 * the link state as either LINK_STATE_UP or LINK_STATE_DOWN based on 3062 * the actual link state detection after interface plumb. 3063 */ 3064 mac_link_update(mh, LINK_STATE_UNKNOWN); 3065 3066 return (DDI_SUCCESS); 3067 } 3068 3069 int 3070 t4_cxgbe_detach(struct port_info *pi) 3071 { 3072 ASSERT(pi != NULL); 3073 ASSERT(pi->mh != NULL); 3074 3075 if (mac_unregister(pi->mh) == 0) { 3076 pi->mh = NULL; 3077 return (DDI_SUCCESS); 3078 } 3079 3080 return (DDI_FAILURE); 3081 } 3082