/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 NetXen, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/stropts.h>
#include <sys/stream.h>
#include <sys/strlog.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/dlpi.h>
#include <sys/strsun.h>
#include <sys/ethernet.h>
#include <sys/modctl.h>
#include <sys/errno.h>
#include <sys/dditypes.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sysmacros.h>
#include <sys/pci.h>
#include <sys/ddi_intr.h>

#include "unm_nic.h"
#include "unm_nic_hw.h"
#include "unm_brdcfg.h"
#include "nic_cmn.h"
#include "nic_phan_reg.h"
#include "unm_nic_ioctl.h"
#include "nx_hw_pci_regs.h"

char ident[] = "Netxen nic driver v" UNM_NIC_VERSIONID;
char unm_nic_driver_name[] = "ntxn";
int verbmsg = 0;

static char txbcopythreshold_propname[] = "tx_bcopy_threshold";
static char rxbcopythreshold_propname[] = "rx_bcopy_threshold";
static char rxringsize_propname[] = "rx_ring_size";
static char jumborxringsize_propname[] = "jumbo_rx_ring_size";
static char txringsize_propname[] = "tx_ring_size";
static char defaultmtu_propname[] = "default_mtu";
static char dmesg_propname[] = "verbose_driver";

#define	STRUCT_COPY(a, b)	bcopy(&(b), &(a), sizeof (a))

extern int unm_register_mac(unm_adapter *adapter);
extern void unm_fini_kstats(unm_adapter* adapter);
extern void unm_nic_remove(unm_adapter *adapter);
extern int unm_nic_suspend(unm_adapter *);
extern uint_t unm_intr(caddr_t, caddr_t);

/* Data access requirements. */
static struct ddi_device_acc_attr unm_dev_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

static struct ddi_device_acc_attr unm_buf_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

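/*
 * DMA attributes: descriptor rings must be allocated below 4 GB, 4 KB
 * aligned, and bind to a single cookie (sgllen == 1); receive buffers may
 * live anywhere in the low 32 GB, also as a single cookie; the command
 * descriptor attribute, used for the transmit DMA handles, allows up to 16
 * cookies per bind.
 */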
static ddi_dma_attr_t unm_dma_attr_desc = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffull,		/* dma_attr_addr_hi */
	0x000fffffull,		/* dma_attr_count_max */
	4096,			/* dma_attr_align */
	0x000fffffull,		/* dma_attr_burstsizes */
	4,			/* dma_attr_minxfer */
	0x003fffffull,		/* dma_attr_maxxfer */
	0xffffffffull,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

static ddi_dma_attr_t unm_dma_attr_rxbuf = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0x7ffffffffULL,		/* dma_attr_addr_hi */
	0xffffull,		/* dma_attr_count_max */
	4096,			/* dma_attr_align */
	0xfff8ull,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer */
	0xffffull,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

static ddi_dma_attr_t unm_dma_attr_cmddesc = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0x7ffffffffULL,		/* dma_attr_addr_hi */
	0xffffull,		/* dma_attr_count_max */
	1,			/* dma_attr_align */
	0xfff8ull,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xffff0ull,		/* dma_attr_maxxfer */
	0xffffull,		/* dma_attr_seg */
	16,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

static struct nx_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;

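/*
 * CAM RAM offset 0x1fc appears to act as a boot scratchpad: it reads
 * 0x55555555 on the first boot after power-up, and for P2 parts the driver
 * writes UNM_BDINFO_MAGIC there after kicking off the boot loader.
 */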
static int
check_hw_init(struct unm_adapter_s *adapter)
{
	u32 val;
	int ret = 0;

	adapter->unm_nic_hw_read_wx(adapter, UNM_CAM_RAM(0x1fc), &val, 4);
	if (val == 0x55555555) {
		/* This is the first boot after power up */
		adapter->unm_nic_hw_read_wx(adapter, UNM_ROMUSB_GLB_SW_RESET,
		    &val, 4);
		if (val != 0x80000f)
			ret = -1;

		if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
			/* Start P2 boot loader */
			adapter->unm_nic_pci_write_normalize(adapter,
			    UNM_CAM_RAM(0x1fc), UNM_BDINFO_MAGIC);
			adapter->unm_nic_pci_write_normalize(adapter,
			    UNM_ROMUSB_GLB_PEGTUNE_DONE, 1);
		}
	}
	return (ret);
}


static int
unm_get_flash_block(unm_adapter *adapter, int base, int size, uint32_t *buf)
{
	int i, addr;
	uint32_t *ptr32;

	addr = base;
	ptr32 = buf;
	for (i = 0; i < size / sizeof (uint32_t); i++) {
		if (rom_fast_read(adapter, addr, (int *)ptr32) == -1)
			return (-1);
		ptr32++;
		addr += sizeof (uint32_t);
	}
	if ((char *)buf + size > (char *)ptr32) {
		int local;

		if (rom_fast_read(adapter, addr, &local) == -1)
			return (-1);
		(void) memcpy(ptr32, &local,
		    (uintptr_t)((char *)buf + size) - (uintptr_t)(char *)ptr32);
	}

	return (0);
}


static int
get_flash_mac_addr(struct unm_adapter_s *adapter, u64 mac[])
{
	uint32_t *pmac = (uint32_t *)&mac[0];

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		uint32_t temp, crbaddr;
		uint16_t *pmac16 = (uint16_t *)pmac;

		// FOR P3, read from CAM RAM

		int pci_func = adapter->ahw.pci_func;
		pmac16 += (4 * pci_func);
		crbaddr = CRB_MAC_BLOCK_START + (4 * ((pci_func/2) * 3)) +
		    (4 * (pci_func & 1));

		adapter->unm_nic_hw_read_wx(adapter, crbaddr, &temp, 4);
		if (pci_func & 1) {
			*pmac16++ = (temp >> 16);
			adapter->unm_nic_hw_read_wx(adapter, crbaddr+4,
			    &temp, 4);
			*pmac16++ = (temp & 0xffff);
			*pmac16++ = (temp >> 16);
			*pmac16 = 0;
		} else {
			*pmac16++ = (temp & 0xffff);
			*pmac16++ = (temp >> 16);
			adapter->unm_nic_hw_read_wx(adapter, crbaddr+4,
			    &temp, 4);
			*pmac16++ = (temp & 0xffff);
			*pmac16 = 0;
		}
		return (0);
	}


	if (unm_get_flash_block(adapter, USER_START +
	    offsetof(unm_user_info_t, mac_addr), FLASH_NUM_PORTS * sizeof (U64),
	    pmac) == -1)
		return (-1);

	if (*mac == ~0ULL) {
		if (unm_get_flash_block(adapter, USER_START_OLD +
		    offsetof(unm_old_user_info_t, mac_addr),
		    FLASH_NUM_PORTS * sizeof (U64), pmac) == -1)
			return (-1);

		if (*mac == ~0ULL)
			return (-1);
	}

	return (0);
}

static int
is_flash_supported(unm_adapter *adapter)
{
	int locs[] = { 0, 0x4, 0x100, 0x4000, 0x4128 };
	int addr, val01, val02, i, j;

	/* if the flash size is less than 4Mb, make huge war cry and die */
	for (j = 1; j < 4; j++) {
		addr = j * 0x100000;
		for (i = 0; i < (sizeof (locs) / sizeof (locs[0])); i++) {
			if (rom_fast_read(adapter, locs[i], &val01) == 0 &&
			    rom_fast_read(adapter, (addr + locs[i]),
			    &val02) == 0) {
				if (val01 == val02)
					return (-1);
			} else {
				return (-1);
			}
		}
	}

	return (0);
}

static int
unm_initialize_dummy_dma(unm_adapter *adapter)
{
	uint32_t hi, lo, temp;
	ddi_dma_cookie_t cookie;

	if (unm_pci_alloc_consistent(adapter, UNM_HOST_DUMMY_DMA_SIZE,
	    (caddr_t *)&adapter->dummy_dma.addr, &cookie,
	    &adapter->dummy_dma.dma_handle,
	    &adapter->dummy_dma.acc_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: Unable to alloc dummy dma buf\n",
		    adapter->name, adapter->instance);
		return (DDI_ENOMEM);
	}

	adapter->dummy_dma.phys_addr = cookie.dmac_laddress;

	hi = (adapter->dummy_dma.phys_addr >> 32) & 0xffffffff;
	lo = adapter->dummy_dma.phys_addr & 0xffffffff;

	UNM_READ_LOCK(&adapter->adapter_lock);
	adapter->unm_nic_hw_write_wx(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI,
	    &hi, 4);
	adapter->unm_nic_hw_write_wx(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO,
	    &lo, 4);
	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		temp = DUMMY_BUF_INIT;
		adapter->unm_nic_hw_write_wx(adapter, CRB_HOST_DUMMY_BUF,
		    &temp, 4);
	}
	UNM_READ_UNLOCK(&adapter->adapter_lock);

	return (DDI_SUCCESS);
}

void
unm_free_dummy_dma(unm_adapter *adapter)
{
	if (adapter->dummy_dma.addr) {
		unm_pci_free_consistent(&adapter->dummy_dma.dma_handle,
		    &adapter->dummy_dma.acc_handle);
		adapter->dummy_dma.addr = NULL;
	}
}

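/*
 * unm_pci_cfg_init() sanity-checks config space before anything is mapped:
 * the vendor ID must be NetXen's 0x4040, the class code must be
 * network/ethernet, and dubious P3 revisions are rejected.  The PCI function
 * number is recovered from the first integer of the standard "reg" property,
 * whose bits [10:8] carry the function number under the PCI binding.
 */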
static int
unm_pci_cfg_init(unm_adapter *adapter)
{
	hardware_context *hwcontext;
	ddi_acc_handle_t pci_cfg_hdl;
	int *reg_options;
	dev_info_t *dip;
	uint_t noptions;
	int ret;
	uint16_t vendor_id, pci_cmd_word;
	uint8_t base_class, sub_class, prog_class;
	uint32_t pexsizes;
	struct nx_legacy_intr_set *legacy_intrp;

	hwcontext = &adapter->ahw;
	pci_cfg_hdl = adapter->pci_cfg_handle;
	dip = adapter->dip;

	vendor_id = pci_config_get16(pci_cfg_hdl, PCI_CONF_VENID);

	if (vendor_id != 0x4040) {
		cmn_err(CE_WARN, "%s%d: vendor id %x not 0x4040\n",
		    adapter->name, adapter->instance, vendor_id);
		return (DDI_FAILURE);
	}

	ret = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
	    dip, 0, "reg", &reg_options, &noptions);
	if (ret != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: Could not determine reg property\n",
		    adapter->name, adapter->instance);
		return (DDI_FAILURE);
	}

	hwcontext->pci_func = (reg_options[0] >> 8) & 0x7;
	ddi_prop_free(reg_options);

	base_class = pci_config_get8(pci_cfg_hdl, PCI_CONF_BASCLASS);
	sub_class = pci_config_get8(pci_cfg_hdl, PCI_CONF_SUBCLASS);
	prog_class = pci_config_get8(pci_cfg_hdl, PCI_CONF_PROGCLASS);

	/*
	 * Need this check so that the MEZZ card mgmt interface ntxn0 can
	 * fail attach and return, letting the next interfaces ntxn1 and
	 * ntxn2 proceed.
	 */
	if ((base_class != 0x02) || (sub_class != 0) || (prog_class != 0)) {
		cmn_err(CE_WARN, "%s%d: Base/sub/prog class problem %d/%d/%d\n",
		    adapter->name, adapter->instance, base_class, sub_class,
		    prog_class);
		return (DDI_FAILURE);
	}

	hwcontext->revision_id = pci_config_get8(pci_cfg_hdl, PCI_CONF_REVID);

	/*
	 * Refuse to work with dubious P3 cards.
	 */
	if ((hwcontext->revision_id >= NX_P3_A0) &&
	    (hwcontext->revision_id < NX_P3_B1)) {
		cmn_err(CE_WARN, "%s%d: NetXen chip revs between 0x%x-0x%x "
		    "is unsupported\n", adapter->name, adapter->instance,
		    NX_P3_A0, NX_P3_B0);
		return (DDI_FAILURE);
	}

	/*
	 * Save error reporting settings; clear [19:16] error status bits.
	 * Set max read request [14:12] to 0 for 128 bytes.  Set max payload
	 * size [7:5] to 0 for 128 bytes.
	 */
	if (NX_IS_REVISION_P2(hwcontext->revision_id)) {
		pexsizes = pci_config_get32(pci_cfg_hdl, 0xd8);
		pexsizes &= 7;
		pexsizes |= 0xF0000;
		pci_config_put32(pci_cfg_hdl, 0xd8, pexsizes);
	}

	pci_cmd_word = pci_config_get16(pci_cfg_hdl, PCI_CONF_COMM);
	pci_cmd_word |= (PCI_COMM_INTX_DISABLE | PCI_COMM_SERR_ENABLE);
	pci_config_put16(pci_cfg_hdl, PCI_CONF_COMM, pci_cmd_word);

	if (hwcontext->revision_id >= NX_P3_B0)
		legacy_intrp = &legacy_intr[hwcontext->pci_func];
	else
		legacy_intrp = &legacy_intr[0];

	adapter->legacy_intr.int_vec_bit = legacy_intrp->int_vec_bit;
	adapter->legacy_intr.tgt_status_reg = legacy_intrp->tgt_status_reg;
	adapter->legacy_intr.tgt_mask_reg = legacy_intrp->tgt_mask_reg;
	adapter->legacy_intr.pci_int_reg = legacy_intrp->pci_int_reg;

	return (DDI_SUCCESS);
}

void
unm_free_tx_dmahdl(unm_adapter *adapter)
{
	int i;
	unm_dmah_node_t *nodep;

	mutex_enter(&adapter->tx_lock);
	nodep = &adapter->tx_dma_hdls[0];

	for (i = 0; i < adapter->MaxTxDescCount + EXTRA_HANDLES; i++) {
		if (nodep->dmahdl != NULL) {
			ddi_dma_free_handle(&nodep->dmahdl);
			nodep->dmahdl = NULL;
		}
		nodep->next = NULL;
		nodep++;
	}

	adapter->dmahdl_pool = NULL;
	adapter->freehdls = 0;
	mutex_exit(&adapter->tx_lock);
}

static int
unm_alloc_tx_dmahdl(unm_adapter *adapter)
{
	int i;
	unm_dmah_node_t *nodep = &adapter->tx_dma_hdls[0];

	mutex_enter(&adapter->tx_lock);
	for (i = 0; i < adapter->MaxTxDescCount + EXTRA_HANDLES; i++) {
		if (ddi_dma_alloc_handle(adapter->dip, &unm_dma_attr_cmddesc,
		    DDI_DMA_DONTWAIT, NULL, &nodep->dmahdl) != DDI_SUCCESS) {
			mutex_exit(&adapter->tx_lock);
			goto alloc_hdl_fail;
		}

		if (i > 0)
			nodep->next = nodep - 1;
		nodep++;
	}

	adapter->dmahdl_pool = nodep - 1;
	adapter->freehdls = i;
	mutex_exit(&adapter->tx_lock);

	return (DDI_SUCCESS);

alloc_hdl_fail:
	unm_free_tx_dmahdl(adapter);
	cmn_err(CE_WARN, "%s%d: Failed transmit ring dma handle allocation\n",
	    adapter->name, adapter->instance);
	return (DDI_FAILURE);
}

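/*
 * unm_alloc_dma_mem()/unm_free_dma_mem() wrap the usual three-step DDI DMA
 * sequence -- ddi_dma_alloc_handle(), ddi_dma_mem_alloc() and
 * ddi_dma_addr_bind_handle() -- and insist on a single cookie, so each
 * allocation is physically contiguous from the card's point of view.
 */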
static void
unm_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
	}
	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
	if (dma_p->dma_hdl != NULL) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}
}

static int
unm_alloc_dma_mem(unm_adapter *adapter, int size, uint_t dma_flag,
    ddi_dma_attr_t *dma_attr_p, dma_area_t *dma_p)
{
	int ret;
	caddr_t vaddr;
	size_t actual_size;
	ddi_dma_cookie_t cookie;

	ret = ddi_dma_alloc_handle(adapter->dip,
	    dma_attr_p, DDI_DMA_DONTWAIT,
	    NULL, &dma_p->dma_hdl);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: Failed ddi_dma_alloc_handle\n",
		    adapter->name, adapter->instance);
		goto dma_mem_fail;
	}

	ret = ddi_dma_mem_alloc(dma_p->dma_hdl,
	    size, &adapter->gc_attr_desc,
	    dma_flag & (DDI_DMA_STREAMING | DDI_DMA_CONSISTENT),
	    DDI_DMA_DONTWAIT, NULL, &vaddr, &actual_size,
	    &dma_p->acc_hdl);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: ddi_dma_mem_alloc() failed\n",
		    adapter->name, adapter->instance);
		goto dma_mem_fail;
	}

	if (actual_size < size) {
		cmn_err(CE_WARN, "%s%d: ddi_dma_mem_alloc() allocated small\n",
		    adapter->name, adapter->instance);
		goto dma_mem_fail;
	}

	ret = ddi_dma_addr_bind_handle(dma_p->dma_hdl,
	    NULL, vaddr, size, dma_flag, DDI_DMA_DONTWAIT,
	    NULL, &cookie, &dma_p->ncookies);
	if (ret != DDI_DMA_MAPPED || dma_p->ncookies != 1) {
		cmn_err(CE_WARN, "%s%d: ddi_dma_addr_bind_handle() failed, "
		    "%d, %d\n", adapter->name, adapter->instance, ret,
		    dma_p->ncookies);
		goto dma_mem_fail;
	}

	dma_p->dma_addr = cookie.dmac_laddress;
	dma_p->vaddr = vaddr;
	(void) memset(vaddr, 0, size);

	return (DDI_SUCCESS);

dma_mem_fail:
	unm_free_dma_mem(dma_p);
	return (DDI_FAILURE);
}

void
unm_free_tx_buffers(unm_adapter *adapter)
{
	int i;
	dma_area_t *dma_p;
	struct unm_cmd_buffer *cmd_buf;
	unm_dmah_node_t *nodep;

	cmd_buf = &adapter->cmd_buf_arr[0];

	for (i = 0; i < adapter->MaxTxDescCount; i++) {
		dma_p = &cmd_buf->dma_area;
		unm_free_dma_mem(dma_p);
		nodep = cmd_buf->head;
		while (nodep != NULL) {
			(void) ddi_dma_unbind_handle(nodep->dmahdl);
			nodep = nodep->next;
		}
		if (cmd_buf->msg != NULL)
			freemsg(cmd_buf->msg);
		cmd_buf++;
	}
	adapter->freecmds = 0;
}

static int
unm_alloc_tx_buffers(unm_adapter *adapter)
{
	int i, ret, size, allocated = 0;
	dma_area_t *dma_p;
	struct unm_cmd_buffer *cmd_buf;

	cmd_buf = &adapter->cmd_buf_arr[0];
	size = adapter->maxmtu;

	for (i = 0; i < adapter->MaxTxDescCount; i++) {
		dma_p = &cmd_buf->dma_area;
		ret = unm_alloc_dma_mem(adapter, size,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    &unm_dma_attr_rxbuf, dma_p);
		if (ret != DDI_SUCCESS)
			goto alloc_tx_buffer_fail;

		allocated++;
		cmd_buf++;
	}
	adapter->freecmds = adapter->MaxTxDescCount;
	return (DDI_SUCCESS);

alloc_tx_buffer_fail:

	cmd_buf = &adapter->cmd_buf_arr[0];
	for (i = 0; i < allocated; i++) {
		dma_p = &cmd_buf->dma_area;
		unm_free_dma_mem(dma_p);
		cmd_buf++;
	}
	cmn_err(CE_WARN, "%s%d: Failed transmit ring memory allocation\n",
	    adapter->name, adapter->instance);
	return (DDI_FAILURE);
}

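/*
 * Receive buffers are loaned upstream as desballoc'd mblks.  Each buffer
 * carries a free routine (rx_recycle), so when the stack eventually frees
 * the mblk the buffer is re-armed with desballoc() and parked on the ring's
 * recycle list instead of having its DMA memory torn down.
 */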
/*
 * Called by freemsg() to "free" the resource.
 */
static void
unm_rx_buffer_recycle(char *arg)
{
	unm_rx_buffer_t *rx_buffer = (unm_rx_buffer_t *)(uintptr_t)arg;
	unm_adapter *adapter = rx_buffer->adapter;
	unm_rcv_desc_ctx_t *rcv_desc = rx_buffer->rcv_desc;

	rx_buffer->mp = desballoc(rx_buffer->dma_info.vaddr,
	    rcv_desc->dma_size, 0, &rx_buffer->rx_recycle);

	if (rx_buffer->mp == NULL)
		adapter->stats.desballocfailed++;

	mutex_enter(rcv_desc->recycle_lock);
	rx_buffer->next = rcv_desc->recycle_list;
	rcv_desc->recycle_list = rx_buffer;
	rcv_desc->rx_buf_recycle++;
	mutex_exit(rcv_desc->recycle_lock);
}

void
unm_destroy_rx_ring(unm_rcv_desc_ctx_t *rcv_desc)
{
	uint32_t i, total_buf;
	unm_rx_buffer_t *buf_pool;

	total_buf = rcv_desc->rx_buf_total;
	buf_pool = rcv_desc->rx_buf_pool;
	for (i = 0; i < total_buf; i++) {
		if (buf_pool->mp != NULL)
			freemsg(buf_pool->mp);
		unm_free_dma_mem(&buf_pool->dma_info);
		buf_pool++;
	}

	kmem_free(rcv_desc->rx_buf_pool, sizeof (unm_rx_buffer_t) * total_buf);
	rcv_desc->rx_buf_pool = NULL;
	rcv_desc->pool_list = NULL;
	rcv_desc->recycle_list = NULL;
	rcv_desc->rx_buf_free = 0;

	mutex_destroy(rcv_desc->pool_lock);
	mutex_destroy(rcv_desc->recycle_lock);
}

static int
unm_create_rx_ring(unm_adapter *adapter, unm_rcv_desc_ctx_t *rcv_desc)
{
	int i, ret, allocate = 0, sreoff;
	uint32_t total_buf;
	dma_area_t *dma_info;
	unm_rx_buffer_t *rx_buffer;

	sreoff = adapter->ahw.cut_through ? 0 : IP_ALIGNMENT_BYTES;

	/* temporarily set the total rx buffers to two times MaxRxDescCount */
	total_buf = rcv_desc->rx_buf_total = rcv_desc->MaxRxDescCount * 2;

	rcv_desc->rx_buf_pool = kmem_zalloc(sizeof (unm_rx_buffer_t) *
	    total_buf, KM_SLEEP);
	rx_buffer = rcv_desc->rx_buf_pool;
	for (i = 0; i < total_buf; i++) {
		dma_info = &rx_buffer->dma_info;
		ret = unm_alloc_dma_mem(adapter, rcv_desc->buf_size,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &unm_dma_attr_rxbuf, dma_info);
		if (ret != DDI_SUCCESS)
			goto alloc_mem_failed;
		else {
			allocate++;
			dma_info->vaddr = (void *) ((char *)dma_info->vaddr +
			    sreoff);
			dma_info->dma_addr += sreoff;
			rx_buffer->rx_recycle.free_func =
			    unm_rx_buffer_recycle;
			rx_buffer->rx_recycle.free_arg = (caddr_t)rx_buffer;
			rx_buffer->next = NULL;
			rx_buffer->mp = desballoc(dma_info->vaddr,
			    rcv_desc->dma_size, 0, &rx_buffer->rx_recycle);
			if (rx_buffer->mp == NULL)
				adapter->stats.desballocfailed++;
			rx_buffer->rcv_desc = rcv_desc;
			rx_buffer->adapter = adapter;
			rx_buffer++;
		}
	}

	for (i = 0; i < (total_buf - 1); i++) {
		rcv_desc->rx_buf_pool[i].next = &rcv_desc->rx_buf_pool[i + 1];
	}

	rcv_desc->pool_list = rcv_desc->rx_buf_pool;
	rcv_desc->recycle_list = NULL;
	rcv_desc->rx_buf_free = total_buf;

	mutex_init(rcv_desc->pool_lock, NULL,
	    MUTEX_DRIVER, (DDI_INTR_PRI(adapter->intr_pri)));
	mutex_init(rcv_desc->recycle_lock, NULL,
	    MUTEX_DRIVER, (DDI_INTR_PRI(adapter->intr_pri)));

	return (DDI_SUCCESS);

alloc_mem_failed:
	rx_buffer = rcv_desc->rx_buf_pool;
	for (i = 0; i < allocate; i++, rx_buffer++) {
		dma_info = &rx_buffer->dma_info;
		if (rx_buffer->mp != NULL)
			freemsg(rx_buffer->mp);
		unm_free_dma_mem(dma_info);
	}

	kmem_free(rcv_desc->rx_buf_pool, sizeof (unm_rx_buffer_t) * total_buf);
	rcv_desc->rx_buf_pool = NULL;

	cmn_err(CE_WARN, "%s%d: Failed receive ring resource allocation\n",
	    adapter->name, adapter->instance);
	return (DDI_FAILURE);
}

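/*
 * The tunables below come from the driver's .conf file via
 * ddi_prop_get_int().  A sketch of an ntxn.conf entry using these property
 * names (values are only illustrative; out-of-range or non-power-of-two
 * ring sizes fall back to the built-in defaults):
 *
 *	tx_bcopy_threshold=128;
 *	rx_bcopy_threshold=128;
 *	tx_ring_size=1024;
 *	rx_ring_size=8192;
 *	jumbo_rx_ring_size=1024;
 *	default_mtu=1500;
 *	verbose_driver=1;
 */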
static void
unm_check_options(unm_adapter *adapter)
{
	int i, ring, tx_desc, rx_desc, rx_jdesc;
	unm_recv_context_t *recv_ctx;
	unm_rcv_desc_ctx_t *rcv_desc;
	uint8_t revid = adapter->ahw.revision_id;
	dev_info_t *dip = adapter->dip;

	verbmsg = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    dmesg_propname, 0);

	adapter->tx_bcopy_threshold = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS, txbcopythreshold_propname,
	    UNM_TX_BCOPY_THRESHOLD);
	adapter->rx_bcopy_threshold = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS, rxbcopythreshold_propname,
	    UNM_RX_BCOPY_THRESHOLD);

	tx_desc = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    txringsize_propname, MAX_CMD_DESCRIPTORS_HOST);
	if (tx_desc >= 256 && tx_desc <= MAX_CMD_DESCRIPTORS &&
	    !(tx_desc & (tx_desc - 1))) {
		adapter->MaxTxDescCount = tx_desc;
	} else {
		cmn_err(CE_WARN, "%s%d: TxRingSize defaulting to %d, since "
		    ".conf value is not 2 power aligned in range 256 - %d\n",
		    adapter->name, adapter->instance, MAX_CMD_DESCRIPTORS_HOST,
		    MAX_CMD_DESCRIPTORS);
		adapter->MaxTxDescCount = MAX_CMD_DESCRIPTORS_HOST;
	}

	rx_desc = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    rxringsize_propname, MAX_RCV_DESCRIPTORS);
	if (rx_desc >= NX_MIN_DRIVER_RDS_SIZE &&
	    rx_desc <= NX_MAX_SUPPORTED_RDS_SIZE &&
	    !(rx_desc & (rx_desc - 1))) {
		adapter->MaxRxDescCount = rx_desc;
	} else {
		cmn_err(CE_WARN, "%s%d: RxRingSize defaulting to %d, since "
		    ".conf value is not 2 power aligned in range %d - %d\n",
		    adapter->name, adapter->instance, MAX_RCV_DESCRIPTORS,
		    NX_MIN_DRIVER_RDS_SIZE, NX_MAX_SUPPORTED_RDS_SIZE);
		adapter->MaxRxDescCount = MAX_RCV_DESCRIPTORS;
	}

	rx_jdesc = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    jumborxringsize_propname, MAX_JUMBO_RCV_DESCRIPTORS);
	if (rx_jdesc >= NX_MIN_DRIVER_RDS_SIZE &&
	    rx_jdesc <= NX_MAX_SUPPORTED_JUMBO_RDS_SIZE &&
	    !(rx_jdesc & (rx_jdesc - 1))) {
		adapter->MaxJumboRxDescCount = rx_jdesc;
	} else {
		cmn_err(CE_WARN, "%s%d: JumboRingSize defaulting to %d, since "
		    ".conf value is not 2 power aligned in range %d - %d\n",
		    adapter->name, adapter->instance, MAX_JUMBO_RCV_DESCRIPTORS,
		    NX_MIN_DRIVER_RDS_SIZE, NX_MAX_SUPPORTED_JUMBO_RDS_SIZE);
		adapter->MaxJumboRxDescCount = MAX_JUMBO_RCV_DESCRIPTORS;
	}

	adapter->MaxLroRxDescCount = MAX_LRO_RCV_DESCRIPTORS;

	adapter->mtu = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, defaultmtu_propname, MTU_SIZE);

	if (adapter->mtu < MTU_SIZE) {
		cmn_err(CE_WARN, "Raising mtu to %d\n", MTU_SIZE);
		adapter->mtu = MTU_SIZE;
	}
	adapter->maxmtu = NX_IS_REVISION_P2(revid) ? P2_MAX_MTU : P3_MAX_MTU;
	if (adapter->mtu > adapter->maxmtu) {
		cmn_err(CE_WARN, "Lowering mtu to %d\n", adapter->maxmtu);
		adapter->mtu = adapter->maxmtu;
	}

	adapter->maxmtu += NX_MAX_ETHERHDR;

	for (i = 0; i < MAX_RCV_CTX; ++i) {
		recv_ctx = &adapter->recv_ctx[i];

		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rcv_desc = &recv_ctx->rcv_desc[ring];

			switch (RCV_DESC_TYPE(ring)) {
			case RCV_DESC_NORMAL:
				rcv_desc->MaxRxDescCount =
				    adapter->MaxRxDescCount;
				if (adapter->ahw.cut_through) {
					rcv_desc->dma_size =
					    NX_CT_DEFAULT_RX_BUF_LEN;
					rcv_desc->buf_size = rcv_desc->dma_size;
				} else {
					rcv_desc->dma_size =
					    NX_RX_NORMAL_BUF_MAX_LEN;
					rcv_desc->buf_size =
					    rcv_desc->dma_size +
					    IP_ALIGNMENT_BYTES;
				}
				break;

			case RCV_DESC_JUMBO:
				rcv_desc->MaxRxDescCount =
				    adapter->MaxJumboRxDescCount;
				if (adapter->ahw.cut_through) {
					rcv_desc->dma_size =
					    rcv_desc->buf_size =
					    NX_P3_RX_JUMBO_BUF_MAX_LEN;
				} else {
					if (NX_IS_REVISION_P2(revid))
						rcv_desc->dma_size =
						    NX_P2_RX_JUMBO_BUF_MAX_LEN;
					else
						rcv_desc->dma_size =
						    NX_P3_RX_JUMBO_BUF_MAX_LEN;
					rcv_desc->buf_size =
					    rcv_desc->dma_size +
					    IP_ALIGNMENT_BYTES;
				}
				break;

			case RCV_RING_LRO:
				rcv_desc->MaxRxDescCount =
				    adapter->MaxLroRxDescCount;
				rcv_desc->buf_size = MAX_RX_LRO_BUFFER_LENGTH;
				rcv_desc->dma_size = RX_LRO_DMA_MAP_LEN;
				break;
			default:
				break;
			}
		}
	}
}

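/*
 * All register access goes through per-adapter function pointers.  The two
 * vector routines below simply install the 128MB/32MB-window flavor or the
 * 2MB-window flavor of every accessor; unm_pci_map_setup() picks one based
 * on the size of the register space it maps.
 */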
static void
vector128M(unm_adapter *aptr)
{
	aptr->unm_nic_pci_change_crbwindow = &unm_nic_pci_change_crbwindow_128M;
	aptr->unm_crb_writelit_adapter = &unm_crb_writelit_adapter_128M;
	aptr->unm_nic_hw_write_wx = &unm_nic_hw_write_wx_128M;
	aptr->unm_nic_hw_read_wx = &unm_nic_hw_read_wx_128M;
	aptr->unm_nic_hw_write_ioctl = &unm_nic_hw_write_ioctl_128M;
	aptr->unm_nic_hw_read_ioctl = &unm_nic_hw_read_ioctl_128M;
	aptr->unm_nic_pci_mem_write = &unm_nic_pci_mem_write_128M;
	aptr->unm_nic_pci_mem_read = &unm_nic_pci_mem_read_128M;
	aptr->unm_nic_pci_write_immediate = &unm_nic_pci_write_immediate_128M;
	aptr->unm_nic_pci_read_immediate = &unm_nic_pci_read_immediate_128M;
	aptr->unm_nic_pci_write_normalize = &unm_nic_pci_write_normalize_128M;
	aptr->unm_nic_pci_read_normalize = &unm_nic_pci_read_normalize_128M;
	aptr->unm_nic_pci_set_window = &unm_nic_pci_set_window_128M;
	aptr->unm_nic_clear_statistics = &unm_nic_clear_statistics_128M;
	aptr->unm_nic_fill_statistics = &unm_nic_fill_statistics_128M;
}

static void
vector2M(unm_adapter *aptr)
{
	aptr->unm_nic_pci_change_crbwindow = &unm_nic_pci_change_crbwindow_2M;
	aptr->unm_crb_writelit_adapter = &unm_crb_writelit_adapter_2M;
	aptr->unm_nic_hw_write_wx = &unm_nic_hw_write_wx_2M;
	aptr->unm_nic_hw_read_wx = &unm_nic_hw_read_wx_2M;
	aptr->unm_nic_hw_write_ioctl = &unm_nic_hw_write_wx_2M;
	aptr->unm_nic_hw_read_ioctl = &unm_nic_hw_read_wx_2M;
	aptr->unm_nic_pci_mem_write = &unm_nic_pci_mem_write_2M;
	aptr->unm_nic_pci_mem_read = &unm_nic_pci_mem_read_2M;
	aptr->unm_nic_pci_write_immediate = &unm_nic_pci_write_immediate_2M;
	aptr->unm_nic_pci_read_immediate = &unm_nic_pci_read_immediate_2M;
	aptr->unm_nic_pci_write_normalize = &unm_nic_pci_write_normalize_2M;
	aptr->unm_nic_pci_read_normalize = &unm_nic_pci_read_normalize_2M;
	aptr->unm_nic_pci_set_window = &unm_nic_pci_set_window_2M;
	aptr->unm_nic_clear_statistics = &unm_nic_clear_statistics_2M;
	aptr->unm_nic_fill_statistics = &unm_nic_fill_statistics_2M;
}

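/*
 * Map the register space and the doorbell region.  The 128MB and 32MB
 * layouts are carved into three page groups, while the 2MB layout relies on
 * software-managed memory windows (mn_win_crb/ms_win_crb); the size returned
 * by ddi_dev_regsize() determines which accessor set is installed.
 */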
static int
unm_pci_map_setup(unm_adapter *adapter)
{
	int ret;
	caddr_t reg_base, db_base;
	caddr_t mem_ptr0, mem_ptr1 = NULL, mem_ptr2 = NULL;
	unsigned long pci_len0;
	unsigned long first_page_group_start, first_page_group_end;

	off_t regsize, dbsize = UNM_DB_MAPSIZE_BYTES;
	dev_info_t *dip = adapter->dip;

	adapter->ahw.qdr_sn_window = adapter->ahw.ddr_mn_window = -1;

	/* map register space */

	ret = ddi_dev_regsize(dip, 1, &regsize);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: failed to read reg size for bar0\n",
		    adapter->name, adapter->instance);
		return (DDI_FAILURE);
	}

	ret = ddi_regs_map_setup(dip, 1, &reg_base, 0,
	    regsize, &unm_dev_attr, &adapter->regs_handle);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: failed to map registers\n",
		    adapter->name, adapter->instance);
		return (DDI_FAILURE);
	}

	mem_ptr0 = reg_base;

	if (regsize == UNM_PCI_128MB_SIZE) {
		pci_len0 = FIRST_PAGE_GROUP_SIZE;
		mem_ptr1 = mem_ptr0 + SECOND_PAGE_GROUP_START;
		mem_ptr2 = mem_ptr0 + THIRD_PAGE_GROUP_START;
		first_page_group_start = FIRST_PAGE_GROUP_START;
		first_page_group_end = FIRST_PAGE_GROUP_END;
		vector128M(adapter);
	} else if (regsize == UNM_PCI_32MB_SIZE) {
		pci_len0 = 0;
		mem_ptr1 = mem_ptr0;
		mem_ptr2 = mem_ptr0 +
		    (THIRD_PAGE_GROUP_START - SECOND_PAGE_GROUP_START);
		first_page_group_start = 0;
		first_page_group_end = 0;
		vector128M(adapter);
	} else if (regsize == UNM_PCI_2MB_SIZE) {
		pci_len0 = UNM_PCI_2MB_SIZE;
		first_page_group_start = 0;
		first_page_group_end = 0;
		adapter->ahw.ddr_mn_window = adapter->ahw.qdr_sn_window = 0;
		adapter->ahw.mn_win_crb = 0x100000 + PCIX_MN_WINDOW +
		    (adapter->ahw.pci_func * 0x20);
		if (adapter->ahw.pci_func < 4)
			adapter->ahw.ms_win_crb = 0x100000 + PCIX_SN_WINDOW +
			    (adapter->ahw.pci_func * 0x20);
		else
			adapter->ahw.ms_win_crb = 0x100000 + PCIX_SN_WINDOW +
			    0xA0 + ((adapter->ahw.pci_func - 4) * 0x10);
		vector2M(adapter);
	} else {
		cmn_err(CE_WARN, "%s%d: invalid pci regs map size %ld\n",
		    adapter->name, adapter->instance, regsize);
		ddi_regs_map_free(&adapter->regs_handle);
		return (DDI_FAILURE);
	}

	adapter->ahw.pci_base0 = (unsigned long)mem_ptr0;
	adapter->ahw.pci_len0 = pci_len0;
	adapter->ahw.pci_base1 = (unsigned long)mem_ptr1;
	adapter->ahw.pci_len1 = SECOND_PAGE_GROUP_SIZE;
	adapter->ahw.pci_base2 = (unsigned long)mem_ptr2;
	adapter->ahw.pci_len2 = THIRD_PAGE_GROUP_SIZE;
	adapter->ahw.crb_base =
	    PCI_OFFSET_SECOND_RANGE(adapter, UNM_PCI_CRBSPACE);

	adapter->ahw.first_page_group_start = first_page_group_start;
	adapter->ahw.first_page_group_end = first_page_group_end;

	/* map doorbell */

	ret = ddi_regs_map_setup(dip, 2, &db_base, 0,
	    dbsize, &unm_dev_attr, &adapter->db_handle);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: failed to map doorbell\n",
		    adapter->name, adapter->instance);
		ddi_regs_map_free(&adapter->regs_handle);
		return (DDI_FAILURE);
	}

	adapter->ahw.db_base = (unsigned long)db_base;
	adapter->ahw.db_len = dbsize;

	return (DDI_SUCCESS);
}

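/*
 * Interrupt setup: prefer a single MSI vector and fall back to a fixed
 * (INTx) interrupt when MSI is not offered.  Only one vector is ever
 * allocated, and unm_intr() is registered as its handler.
 */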
static int
unm_initialize_intr(unm_adapter *adapter)
{

	int ret;
	int type, count, avail, actual;

	ret = ddi_intr_get_supported_types(adapter->dip, &type);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: ddi_intr_get_supported_types() "
		    "failed\n", adapter->name, adapter->instance);
		return (DDI_FAILURE);
	}

	type = DDI_INTR_TYPE_MSI;
	ret = ddi_intr_get_nintrs(adapter->dip, type, &count);
	if ((ret == DDI_SUCCESS) && (count > 0))
		goto found_msi;

	type = DDI_INTR_TYPE_FIXED;
	ret = ddi_intr_get_nintrs(adapter->dip, type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		cmn_err(CE_WARN,
		    "ddi_intr_get_nintrs() failure ret=%d\n", ret);
		return (DDI_FAILURE);
	}

found_msi:
	adapter->intr_type = type;
	adapter->flags &= ~(UNM_NIC_MSI_ENABLED | UNM_NIC_MSIX_ENABLED);
	if (type == DDI_INTR_TYPE_MSI)
		adapter->flags |= UNM_NIC_MSI_ENABLED;

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(adapter->dip, type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		cmn_err(CE_WARN, "ddi_intr_get_navail() failure, ret=%d\n",
		    ret);
		return (DDI_FAILURE);
	}

	ret = ddi_intr_alloc(adapter->dip, &adapter->intr_handle,
	    type, 0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		cmn_err(CE_WARN, "ddi_intr_alloc() failure: %d\n", ret);
		return (DDI_FAILURE);
	}

	ret = ddi_intr_get_pri(adapter->intr_handle, &adapter->intr_pri);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ddi_intr_get_pri() failure: %d\n", ret);
	}

	/* Call ddi_intr_add_handler() */
	ret = ddi_intr_add_handler(adapter->intr_handle, unm_intr,
	    (caddr_t)adapter, NULL);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: ddi_intr_add_handler() failure\n",
		    adapter->name, adapter->instance);
		(void) ddi_intr_free(adapter->intr_handle);
		return (DDI_FAILURE);
	}

	/* Add softintr if required */

	return (DDI_SUCCESS);

}

void
unm_destroy_intr(unm_adapter *adapter)
{
	/* disable interrupt */
	if (adapter->intr_type == DDI_INTR_TYPE_MSI)
		(void) ddi_intr_block_disable(&adapter->intr_handle, 1);
	else
		(void) ddi_intr_disable(adapter->intr_handle);

	(void) ddi_intr_remove_handler(adapter->intr_handle);
	(void) ddi_intr_free(adapter->intr_handle);

	/* Remove the software intr handler */
}

static void
netxen_set_port_mode(unm_adapter *adapter)
{
	static int wol_port_mode = UNM_PORT_MODE_AUTO_NEG_1G;
	static int port_mode = UNM_PORT_MODE_AUTO_NEG;
	int btype = adapter->ahw.boardcfg.board_type, data = 0;

	if (btype == UNM_BRDTYPE_P3_HMEZ || btype == UNM_BRDTYPE_P3_XG_LOM) {
		data = port_mode;	/* set to port_mode normally */
		if ((port_mode != UNM_PORT_MODE_802_3_AP) &&
		    (port_mode != UNM_PORT_MODE_XG) &&
		    (port_mode != UNM_PORT_MODE_AUTO_NEG_1G) &&
		    (port_mode != UNM_PORT_MODE_AUTO_NEG_XG))
			data = UNM_PORT_MODE_AUTO_NEG;

		adapter->unm_nic_hw_write_wx(adapter, UNM_PORT_MODE_ADDR,
		    &data, 4);

		if ((wol_port_mode != UNM_PORT_MODE_802_3_AP) &&
		    (wol_port_mode != UNM_PORT_MODE_XG) &&
		    (wol_port_mode != UNM_PORT_MODE_AUTO_NEG_1G) &&
		    (wol_port_mode != UNM_PORT_MODE_AUTO_NEG_XG))
			wol_port_mode = UNM_PORT_MODE_AUTO_NEG;

		adapter->unm_nic_hw_write_wx(adapter, UNM_WOL_PORT_MODE,
		    &wol_port_mode, 4);
	}
}

static void
netxen_pcie_strap_init(unm_adapter *adapter)
{
	ddi_acc_handle_t pcihdl = adapter->pci_cfg_handle;
	u32 chicken, control, c8c9value = 0xF1000;

	adapter->unm_nic_hw_read_wx(adapter, UNM_PCIE_REG(PCIE_CHICKEN3),
	    &chicken, 4);

	chicken &= 0xFCFFFFFF;		/* clear chicken3 25:24 */
	control = pci_config_get32(pcihdl, 0xD0);
	if ((control & 0x000F0000) != 0x00020000)	/* is it gen1? */
		chicken |= 0x01000000;
	adapter->unm_nic_hw_write_wx(adapter, UNM_PCIE_REG(PCIE_CHICKEN3),
	    &chicken, 4);
	control = pci_config_get32(pcihdl, 0xC8);
	control = pci_config_get32(pcihdl, 0xC8);
	pci_config_put32(pcihdl, 0xC8, c8c9value);
}

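/*
 * The factory MAC addresses live in flash (or, on P3, in CAM RAM) in the
 * opposite byte order from what the rest of the driver expects; the copy
 * loop below reverses the bytes into canonical order.
 */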
static int
netxen_read_mac_addr(unm_adapter *adapter)
{
	u64 mac_addr[FLASH_NUM_PORTS + 1];
	unsigned char *p;
	int i;

	if (is_flash_supported(adapter) != 0)
		return (-1);

	if (get_flash_mac_addr(adapter, mac_addr) != 0)
		return (-1);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		p = (unsigned char *)&mac_addr[adapter->ahw.pci_func];
	else
		p = (unsigned char *)&mac_addr[adapter->portnum];

	for (i = 0; i < 6; i++)
		adapter->mac_addr[i] = p[5 - i];

	if (unm_nic_macaddr_set(adapter, adapter->mac_addr) != 0)
		return (-1);

	return (0);
}

static int
unmattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	unm_adapter *adapter;
	unm_recv_context_t *recv_ctx = NULL;
	unm_rcv_desc_ctx_t *rcv_desc = NULL;
	int i, first_driver = 0;
	int ret, ring, temp;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
	case DDI_PM_RESUME:
	default:
		return (DDI_FAILURE);
	}

	adapter = kmem_zalloc(sizeof (unm_adapter), KM_SLEEP);
	adapter->dip = dip;
	ddi_set_driver_private(dip, adapter);
	adapter->instance = ddi_get_instance(dip);

	adapter->name = ddi_driver_name(dip);

	ret = pci_config_setup(dip, &adapter->pci_cfg_handle);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: pci_config_setup failed\n",
		    adapter->name, adapter->instance);
		goto attach_setup_err;
	}

	ret = unm_pci_cfg_init(adapter);
	if (ret != DDI_SUCCESS)
		goto attach_err;

	ret = unm_pci_map_setup(adapter);
	if (ret != DDI_SUCCESS)
		goto attach_err;

	if (unm_initialize_intr(adapter) != DDI_SUCCESS)
		goto attach_unmap_regs;

	rw_init(&adapter->adapter_lock, NULL,
	    RW_DRIVER, DDI_INTR_PRI(adapter->intr_pri));
	mutex_init(&adapter->tx_lock, NULL,
	    MUTEX_DRIVER, (DDI_INTR_PRI(adapter->intr_pri)));
	mutex_init(&adapter->lock, NULL,
	    MUTEX_DRIVER, (DDI_INTR_PRI(adapter->intr_pri)));

	adapter->portnum = (int8_t)adapter->ahw.pci_func;

	/*
	 * Set the CRB window to invalid. If any register in window 0 is
	 * accessed it should set window to 0 and then reset it to 1.
	 */
	adapter->curr_window = 255;

	adapter->fw_major = adapter->unm_nic_pci_read_normalize(adapter,
	    UNM_FW_VERSION_MAJOR);

	if (adapter->fw_major < 4)
		adapter->max_rds_rings = 3;
	else
		adapter->max_rds_rings = 2;

	STRUCT_COPY(adapter->gc_dma_attr_desc, unm_dma_attr_desc);
	STRUCT_COPY(adapter->gc_attr_desc, unm_buf_attr);

	ret = unm_nic_get_board_info(adapter);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: error reading board config\n",
		    adapter->name, adapter->instance);
		goto attach_destroy_intr;
	}

	/* Mezz cards have PCI function 0, 2, 3 enabled */
	switch (adapter->ahw.boardcfg.board_type) {
	case UNM_BRDTYPE_P2_SB31_10G_IMEZ:
	case UNM_BRDTYPE_P2_SB31_10G_HMEZ:
		if (adapter->ahw.pci_func >= 2) {
			adapter->portnum = adapter->ahw.pci_func - 2;
		}
	default:
		break;
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		temp = UNM_CRB_READ_VAL_ADAPTER(UNM_MIU_MN_CONTROL, adapter);
		adapter->ahw.cut_through = NX_IS_SYSTEM_CUT_THROUGH(temp);
		if (adapter->ahw.pci_func == 0)
			first_driver = 1;
	} else {
		if (adapter->portnum == 0)
			first_driver = 1;
	}

	unm_check_options(adapter);

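	/*
	 * Global hardware bring-up (ROM init, firmware load from flash and
	 * the handshake with the card) is done only once per board, by the
	 * instance attaching on PCI function 0 (P3) or port 0 (P2); the
	 * other functions skip it.
	 */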
	if (first_driver) {
		int first_boot = adapter->unm_nic_pci_read_normalize(adapter,
		    UNM_CAM_RAM(0x1fc));

		if (check_hw_init(adapter) != 0) {
			cmn_err(CE_WARN, "%s%d: Error in HW init sequence\n",
			    adapter->name, adapter->instance);
			goto attach_destroy_intr;
		}

		if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
			netxen_set_port_mode(adapter);

		if (first_boot != 0x55555555) {
			temp = 0;
			adapter->unm_nic_hw_write_wx(adapter, CRB_CMDPEG_STATE,
			    &temp, 4);
			if (pinit_from_rom(adapter, 0) != 0)
				goto attach_destroy_intr;

			drv_usecwait(500);

			ret = load_from_flash(adapter);
			if (ret != DDI_SUCCESS)
				goto attach_destroy_intr;
		}

		if (ret = unm_initialize_dummy_dma(adapter))
			goto attach_destroy_intr;

		/*
		 * Tell the hardware our version number.
		 */
		i = (_UNM_NIC_MAJOR << 16) |
		    ((_UNM_NIC_MINOR << 8)) | (_UNM_NIC_SUBVERSION);
		adapter->unm_nic_hw_write_wx(adapter, CRB_DRIVER_VERSION,
		    &i, 4);

		/* Unlock the HW, prompting the boot sequence */
		if ((first_boot == 0x55555555) &&
		    (NX_IS_REVISION_P2(adapter->ahw.revision_id)))
			adapter->unm_nic_pci_write_normalize(adapter,
			    UNM_ROMUSB_GLB_PEGTUNE_DONE, 1);

		/* Handshake with the card before we register the devices. */
		if (phantom_init(adapter, 0) != DDI_SUCCESS)
			goto attach_destroy_intr;
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		netxen_pcie_strap_init(adapter);

	/*
	 * See if the firmware gave us a virtual-physical port mapping.
	 */
	adapter->physical_port = adapter->portnum;
	i = adapter->unm_nic_pci_read_normalize(adapter,
	    CRB_V2P(adapter->portnum));
	if (i != 0x55555555)
		adapter->physical_port = (uint16_t)i;

	adapter->cmd_buf_arr = (struct unm_cmd_buffer *)kmem_zalloc(
	    sizeof (struct unm_cmd_buffer) * adapter->MaxTxDescCount,
	    KM_SLEEP);

	for (i = 0; i < MAX_RCV_CTX; ++i) {
		recv_ctx = &adapter->recv_ctx[i];

		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rcv_desc = &recv_ctx->rcv_desc[ring];
			ret = unm_create_rx_ring(adapter, rcv_desc);
			if (ret != DDI_SUCCESS)
				goto attach_free_cmdbufs;
		}
	}

	ret = unm_alloc_tx_dmahdl(adapter);
	if (ret != DDI_SUCCESS)
		goto attach_free_cmdbufs;

	ret = unm_alloc_tx_buffers(adapter);
	if (ret != DDI_SUCCESS)
		goto attach_free_tx_dmahdl;

	adapter->ahw.linkup = 0;

	if (receive_peg_ready(adapter)) {
		ret = -EIO;
		goto attach_free_tx_buffers;
	}

	if (netxen_read_mac_addr(adapter))
		cmn_err(CE_WARN, "%s%d: Failed to read MAC addr\n",
		    adapter->name, adapter->instance);

	unm_nic_flash_print(adapter);

	if (verbmsg != 0) {
		switch (adapter->ahw.board_type) {
		case UNM_NIC_GBE:
			cmn_err(CE_NOTE, "%s: QUAD GbE port %d initialized\n",
			    unm_nic_driver_name, adapter->portnum);
			break;

		case UNM_NIC_XGBE:
			cmn_err(CE_NOTE, "%s: XGbE port %d initialized\n",
			    unm_nic_driver_name, adapter->portnum);
			break;
		}
	}

	ret = unm_register_mac(adapter);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "%s%d: Mac registration error\n",
		    adapter->name, adapter->instance);
		goto attach_free_tx_buffers;
	}

	return (DDI_SUCCESS);

attach_free_tx_buffers:
	unm_free_tx_buffers(adapter);
attach_free_tx_dmahdl:
	unm_free_tx_dmahdl(adapter);
attach_free_cmdbufs:
	kmem_free(adapter->cmd_buf_arr, sizeof (struct unm_cmd_buffer) *
	    adapter->MaxTxDescCount);
	for (i = 0; i < MAX_RCV_CTX; ++i) {
		recv_ctx = &adapter->recv_ctx[i];

		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rcv_desc = &recv_ctx->rcv_desc[ring];
			if (rcv_desc->rx_buf_pool != NULL)
				unm_destroy_rx_ring(rcv_desc);
		}
	}

	if (adapter->portnum == 0)
		unm_free_dummy_dma(adapter);
attach_destroy_intr:
	unm_destroy_intr(adapter);
attach_unmap_regs:
	ddi_regs_map_free(&(adapter->regs_handle));
	ddi_regs_map_free(&(adapter->db_handle));
attach_err:
	pci_config_teardown(&adapter->pci_cfg_handle);
attach_setup_err:
	kmem_free(adapter, sizeof (unm_adapter));
	return (ret);
}

static int
unmdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	unm_adapter *adapter = (unm_adapter *)ddi_get_driver_private(dip);

	if (adapter == NULL)
		return (DDI_FAILURE);

	switch (cmd) {
	case DDI_DETACH:

		unm_fini_kstats(adapter);
		adapter->kstats[0] = NULL;

		if (adapter->pci_cfg_handle != NULL)
			pci_config_teardown(&adapter->pci_cfg_handle);

		unm_nd_cleanup(adapter);
		unm_nic_remove(adapter);
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (unm_nic_suspend(adapter));

	default:
		break;
	}

	return (DDI_FAILURE);
}

#ifdef SOLARIS11
DDI_DEFINE_STREAM_OPS(unm_ops, nulldev, nulldev, unmattach, unmdetach,
    nodev, NULL, D_MP, NULL, NULL);
#else
DDI_DEFINE_STREAM_OPS(unm_ops, nulldev, nulldev, unmattach, unmdetach,
    nodev, NULL, D_MP, NULL);
#endif

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	ident,
	&unm_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(&modldrv),
	NULL
};


int
_init(void)
{
	int ret;

	unm_ops.devo_cb_ops->cb_str = NULL;
	mac_init_ops(&unm_ops, "ntxn");

	ret = mod_install(&modlinkage);
	if (ret != DDI_SUCCESS) {
		mac_fini_ops(&unm_ops);
		cmn_err(CE_WARN, "ntxn: mod_install failed\n");
	}

	return (ret);
}


int
_fini(void)
{
	int ret;

	ret = mod_remove(&modlinkage);
	if (ret == DDI_SUCCESS)
		mac_fini_ops(&unm_ops);
	return (ret);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}