/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 NetXen, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/stropts.h>
#include <sys/stream.h>
#include <sys/strlog.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/dlpi.h>
#include <sys/strsun.h>
#include <sys/ethernet.h>
#include <sys/modctl.h>
#include <sys/errno.h>
#include <sys/dditypes.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sysmacros.h>
#include <sys/pci.h>
#include <sys/ddi_intr.h>

#include "unm_nic.h"
#include "unm_nic_hw.h"
#include "unm_brdcfg.h"
#include "nic_cmn.h"
#include "nic_phan_reg.h"
#include "unm_nic_ioctl.h"
#include "nx_hw_pci_regs.h"

char ident[] = "Netxen nic driver v" UNM_NIC_VERSIONID;
char unm_nic_driver_name[] = "ntxn";
int verbmsg = 0;

static char txbcopythreshold_propname[] = "tx_bcopy_threshold";
static char rxbcopythreshold_propname[] = "rx_bcopy_threshold";
static char rxringsize_propname[] = "rx_ring_size";
static char jumborxringsize_propname[] = "jumbo_rx_ring_size";
static char txringsize_propname[] = "tx_ring_size";
static char defaultmtu_propname[] = "default_mtu";
static char dmesg_propname[] = "verbose_driver";

#define	STRUCT_COPY(a, b)	bcopy(&(b), &(a), sizeof (a))

extern int unm_register_mac(unm_adapter *adapter);
extern void unm_fini_kstats(unm_adapter* adapter);
extern void unm_nic_remove(unm_adapter *adapter);
extern int unm_nic_suspend(unm_adapter *);
extern uint_t unm_intr(caddr_t, caddr_t);

/*
 * Data access requirements.
 */
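/*
 * Device registers are mapped little-endian with strictly ordered access;
 * DMA buffer memory is mapped with no byte swapping.
 */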
static struct ddi_device_acc_attr unm_dev_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

static struct ddi_device_acc_attr unm_buf_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static ddi_dma_attr_t unm_dma_attr_desc = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffull,		/* dma_attr_addr_hi */
	0x000fffffull,		/* dma_attr_count_max */
	4096,			/* dma_attr_align */
	0x000fffffull,		/* dma_attr_burstsizes */
	4,			/* dma_attr_minxfer */
	0x003fffffull,		/* dma_attr_maxxfer */
	0xffffffffull,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

static ddi_dma_attr_t unm_dma_attr_rxbuf = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0x7ffffffffULL,		/* dma_attr_addr_hi */
	0xffffull,		/* dma_attr_count_max */
	4096,			/* dma_attr_align */
	0xfff8ull,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer */
	0xffffull,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

static ddi_dma_attr_t unm_dma_attr_cmddesc = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0x7ffffffffULL,		/* dma_attr_addr_hi */
	0xffffull,		/* dma_attr_count_max */
	1,			/* dma_attr_align */
	0xfff8ull,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xffff0ull,		/* dma_attr_maxxfer */
	0xffffull,		/* dma_attr_seg */
	16,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

static struct nx_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;

static int
check_hw_init(struct unm_adapter_s *adapter)
{
	u32 val;
	int ret = 0;

	adapter->unm_nic_hw_read_wx(adapter, UNM_CAM_RAM(0x1fc), &val, 4);
	if (val == 0x55555555) {
		/* This is the first boot after power up */
		adapter->unm_nic_hw_read_wx(adapter, UNM_ROMUSB_GLB_SW_RESET,
		    &val, 4);
		if (val != 0x80000f)
			ret = -1;

		if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
			/* Start P2 boot loader */
			adapter->unm_nic_pci_write_normalize(adapter,
			    UNM_CAM_RAM(0x1fc), UNM_BDINFO_MAGIC);
			adapter->unm_nic_pci_write_normalize(adapter,
			    UNM_ROMUSB_GLB_PEGTUNE_DONE, 1);
		}
	}
	return (ret);
}

static int
unm_get_flash_block(unm_adapter *adapter, int base, int size, uint32_t *buf)
{
	int i, addr;
	uint32_t *ptr32;

	addr = base;
	ptr32 = buf;
	for (i = 0; i < size / sizeof (uint32_t); i++) {
		if (rom_fast_read(adapter, addr, (int *)ptr32) == -1)
			return (-1);
		ptr32++;
		addr += sizeof (uint32_t);
	}
	if ((char *)buf + size > (char *)ptr32) {
		int local;

		if (rom_fast_read(adapter, addr, &local) == -1)
			return (-1);
		(void) memcpy(ptr32, &local,
		    (uintptr_t)((char *)buf + size) - (uintptr_t)(char *)ptr32);
	}

	return (0);
}

static int
get_flash_mac_addr(struct unm_adapter_s *adapter, u64 mac[])
{
	uint32_t *pmac = (uint32_t *)&mac[0];

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		uint32_t temp, crbaddr;
		uint16_t *pmac16 = (uint16_t *)pmac;

		/* For P3, read from CAM RAM */

		int pci_func = adapter->ahw.pci_func;
		pmac16 += (4 * pci_func);
		crbaddr = CRB_MAC_BLOCK_START + (4 * ((pci_func/2) * 3)) +
		    (4 * (pci_func & 1));

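		/*
		 * Each 32-bit CAM RAM word holds two 16-bit halves of a MAC
		 * address; odd PCI functions start in the upper half of the
		 * first word.
		 */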
		adapter->unm_nic_hw_read_wx(adapter, crbaddr, &temp, 4);
		if (pci_func & 1) {
			*pmac16++ = (temp >> 16);
			adapter->unm_nic_hw_read_wx(adapter, crbaddr+4,
			    &temp, 4);
			*pmac16++ = (temp & 0xffff);
			*pmac16++ = (temp >> 16);
			*pmac16 = 0;
		} else {
			*pmac16++ = (temp & 0xffff);
			*pmac16++ = (temp >> 16);
			adapter->unm_nic_hw_read_wx(adapter, crbaddr+4,
			    &temp, 4);
			*pmac16++ = (temp & 0xffff);
			*pmac16 = 0;
		}
		return (0);
	}

	if (unm_get_flash_block(adapter, USER_START +
	    offsetof(unm_user_info_t, mac_addr),
	    FLASH_NUM_PORTS * sizeof (U64), pmac) == -1)
		return (-1);

	if (*mac == ~0ULL) {
		if (unm_get_flash_block(adapter, USER_START_OLD +
		    offsetof(unm_old_user_info_t, mac_addr),
		    FLASH_NUM_PORTS * sizeof (U64), pmac) == -1)
			return (-1);

		if (*mac == ~0ULL)
			return (-1);
	}

	return (0);
}

static int
is_flash_supported(unm_adapter *adapter)
{
	int locs[] = { 0, 0x4, 0x100, 0x4000, 0x4128 };
	int addr, val01, val02, i, j;

	/* if the flash size is less than 4Mb, make huge war cry and die */
	for (j = 1; j < 4; j++) {
		addr = j * 0x100000;
		for (i = 0; i < (sizeof (locs) / sizeof (locs[0])); i++) {
			if (rom_fast_read(adapter, locs[i], &val01) == 0 &&
			    rom_fast_read(adapter, (addr + locs[i]),
			    &val02) == 0) {
				if (val01 == val02)
					return (-1);
			} else {
				return (-1);
			}
		}
	}

	return (0);
}

static int
unm_initialize_dummy_dma(unm_adapter *adapter)
{
	uint32_t hi, lo, temp;
	ddi_dma_cookie_t cookie;

	if (unm_pci_alloc_consistent(adapter, UNM_HOST_DUMMY_DMA_SIZE,
	    (caddr_t *)&adapter->dummy_dma.addr, &cookie,
	    &adapter->dummy_dma.dma_handle,
	    &adapter->dummy_dma.acc_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: Unable to alloc dummy dma buf\n",
		    adapter->name, adapter->instance);
		return (DDI_ENOMEM);
	}

	adapter->dummy_dma.phys_addr = cookie.dmac_laddress;

	hi = (adapter->dummy_dma.phys_addr >> 32) & 0xffffffff;
	lo = adapter->dummy_dma.phys_addr & 0xffffffff;

	UNM_READ_LOCK(&adapter->adapter_lock);
	adapter->unm_nic_hw_write_wx(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI,
	    &hi, 4);
	adapter->unm_nic_hw_write_wx(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO,
	    &lo, 4);
	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		temp = DUMMY_BUF_INIT;
		adapter->unm_nic_hw_write_wx(adapter, CRB_HOST_DUMMY_BUF,
		    &temp, 4);
	}
	UNM_READ_UNLOCK(&adapter->adapter_lock);

	return (DDI_SUCCESS);
}

void
unm_free_dummy_dma(unm_adapter *adapter)
{
	if (adapter->dummy_dma.addr) {
		unm_pci_free_consistent(&adapter->dummy_dma.dma_handle,
		    &adapter->dummy_dma.acc_handle);
		adapter->dummy_dma.addr = NULL;
	}
}

static int
unm_pci_cfg_init(unm_adapter *adapter)
{
	hardware_context *hwcontext;
	ddi_acc_handle_t pci_cfg_hdl;
	int *reg_options;
	dev_info_t *dip;
	uint_t noptions;
	int ret;
	uint16_t vendor_id, pci_cmd_word;
	uint8_t base_class, sub_class, prog_class;
	uint32_t pexsizes;
	struct nx_legacy_intr_set *legacy_intrp;

	hwcontext = &adapter->ahw;
	pci_cfg_hdl = adapter->pci_cfg_handle;
	dip = adapter->dip;

	vendor_id = pci_config_get16(pci_cfg_hdl, PCI_CONF_VENID);

	if (vendor_id != 0x4040) {
		cmn_err(CE_WARN, "%s%d: vendor id %x not 0x4040\n",
		    adapter->name, adapter->instance, vendor_id);
		return (DDI_FAILURE);
	}

	ret = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
	    dip, 0, "reg", &reg_options, &noptions);
	if (ret != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: Could not determine reg property\n",
		    adapter->name, adapter->instance);
		return (DDI_FAILURE);
	}

	hwcontext->pci_func = (reg_options[0] >> 8) & 0x7;
	ddi_prop_free(reg_options);

	base_class = pci_config_get8(pci_cfg_hdl, PCI_CONF_BASCLASS);
	sub_class = pci_config_get8(pci_cfg_hdl, PCI_CONF_SUBCLASS);
	prog_class = pci_config_get8(pci_cfg_hdl, PCI_CONF_PROGCLASS);

	/*
	 * Need this check so that MEZZ card mgmt interface ntxn0 could fail
	 * attach & return and proceed to next interfaces ntxn1 and ntxn2
	 */
	if ((base_class != 0x02) || (sub_class != 0) || (prog_class != 0)) {
		cmn_err(CE_WARN, "%s%d: Base/sub/prog class problem %d/%d/%d\n",
		    adapter->name, adapter->instance, base_class, sub_class,
		    prog_class);
		return (DDI_FAILURE);
	}

	hwcontext->revision_id = pci_config_get8(pci_cfg_hdl, PCI_CONF_REVID);

	/*
	 * Refuse to work with dubious P3 cards.
	 */
	if ((hwcontext->revision_id >= NX_P3_A0) &&
	    (hwcontext->revision_id < NX_P3_B1)) {
		cmn_err(CE_WARN, "%s%d: NetXen chip revs between 0x%x-0x%x "
		    "is unsupported\n", adapter->name, adapter->instance,
		    NX_P3_A0, NX_P3_B0);
		return (DDI_FAILURE);
	}

	/*
	 * Save error reporting settings; clear [19:16] error status bits.
	 * Set max read request [14:12] to 0 for 128 bytes. Set max payload
	 * size [7:5] to 0 for 128 bytes.
	 */
	if (NX_IS_REVISION_P2(hwcontext->revision_id)) {
		pexsizes = pci_config_get32(pci_cfg_hdl, 0xd8);
		pexsizes &= 7;
		pexsizes |= 0xF0000;
		pci_config_put32(pci_cfg_hdl, 0xd8, pexsizes);
	}

	pci_cmd_word = pci_config_get16(pci_cfg_hdl, PCI_CONF_COMM);
	pci_cmd_word |= (PCI_COMM_INTX_DISABLE | PCI_COMM_SERR_ENABLE);
	pci_config_put16(pci_cfg_hdl, PCI_CONF_COMM, pci_cmd_word);

	if (hwcontext->revision_id >= NX_P3_B0)
		legacy_intrp = &legacy_intr[hwcontext->pci_func];
	else
		legacy_intrp = &legacy_intr[0];

	adapter->legacy_intr.int_vec_bit = legacy_intrp->int_vec_bit;
	adapter->legacy_intr.tgt_status_reg = legacy_intrp->tgt_status_reg;
	adapter->legacy_intr.tgt_mask_reg = legacy_intrp->tgt_mask_reg;
	adapter->legacy_intr.pci_int_reg = legacy_intrp->pci_int_reg;

	return (DDI_SUCCESS);
}

void
unm_free_tx_dmahdl(unm_adapter *adapter)
{
	int i;
	unm_dmah_node_t *nodep;

	mutex_enter(&adapter->tx_lock);
	nodep = &adapter->tx_dma_hdls[0];

	for (i = 0; i < adapter->MaxTxDescCount + EXTRA_HANDLES; i++) {
		if (nodep->dmahdl != NULL) {
			ddi_dma_free_handle(&nodep->dmahdl);
			nodep->dmahdl = NULL;
		}
		nodep->next = NULL;
		nodep++;
	}

	adapter->dmahdl_pool = NULL;
	adapter->freehdls = 0;
	mutex_exit(&adapter->tx_lock);
}

static int
unm_alloc_tx_dmahdl(unm_adapter *adapter)
{
	int i;
	unm_dmah_node_t *nodep = &adapter->tx_dma_hdls[0];

	mutex_enter(&adapter->tx_lock);
	for (i = 0; i < adapter->MaxTxDescCount + EXTRA_HANDLES; i++) {
		if (ddi_dma_alloc_handle(adapter->dip, &unm_dma_attr_cmddesc,
		    DDI_DMA_DONTWAIT, NULL, &nodep->dmahdl) != DDI_SUCCESS) {
			mutex_exit(&adapter->tx_lock);
			goto alloc_hdl_fail;
		}

		if (i > 0)
			nodep->next = nodep - 1;
		nodep++;
	}

	adapter->dmahdl_pool = nodep - 1;
	adapter->freehdls = i;
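	/*
	 * The handles are linked newest-to-oldest above, so the pool head is
	 * the last handle allocated.
	 */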
	mutex_exit(&adapter->tx_lock);

	return (DDI_SUCCESS);

alloc_hdl_fail:
	unm_free_tx_dmahdl(adapter);
	cmn_err(CE_WARN, "%s%d: Failed transmit ring dma handle allocation\n",
	    adapter->name, adapter->instance);
	return (DDI_FAILURE);
}

static void
unm_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
	}
	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
	if (dma_p->dma_hdl != NULL) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}
}

static int
unm_alloc_dma_mem(unm_adapter *adapter, int size, uint_t dma_flag,
    ddi_dma_attr_t *dma_attr_p, dma_area_t *dma_p)
{
	int ret;
	caddr_t vaddr;
	size_t actual_size;
	ddi_dma_cookie_t cookie;

	ret = ddi_dma_alloc_handle(adapter->dip,
	    dma_attr_p, DDI_DMA_DONTWAIT,
	    NULL, &dma_p->dma_hdl);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: Failed ddi_dma_alloc_handle\n",
		    adapter->name, adapter->instance);
		goto dma_mem_fail;
	}

	ret = ddi_dma_mem_alloc(dma_p->dma_hdl,
	    size, &adapter->gc_attr_desc,
	    dma_flag & (DDI_DMA_STREAMING | DDI_DMA_CONSISTENT),
	    DDI_DMA_DONTWAIT, NULL, &vaddr, &actual_size,
	    &dma_p->acc_hdl);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: ddi_dma_mem_alloc() failed\n",
		    adapter->name, adapter->instance);
		goto dma_mem_fail;
	}

	if (actual_size < size) {
		cmn_err(CE_WARN, "%s%d: ddi_dma_mem_alloc() allocated small\n",
		    adapter->name, adapter->instance);
		goto dma_mem_fail;
	}

	ret = ddi_dma_addr_bind_handle(dma_p->dma_hdl,
	    NULL, vaddr, size, dma_flag, DDI_DMA_DONTWAIT,
	    NULL, &cookie, &dma_p->ncookies);
	if (ret != DDI_DMA_MAPPED || dma_p->ncookies != 1) {
		cmn_err(CE_WARN, "%s%d: ddi_dma_addr_bind_handle() failed, "
		    "%d, %d\n", adapter->name, adapter->instance, ret,
		    dma_p->ncookies);
		goto dma_mem_fail;
	}

	dma_p->dma_addr = cookie.dmac_laddress;
	dma_p->vaddr = vaddr;
	(void) memset(vaddr, 0, size);

	return (DDI_SUCCESS);

dma_mem_fail:
	unm_free_dma_mem(dma_p);
	return (DDI_FAILURE);
}

void
unm_free_tx_buffers(unm_adapter *adapter)
{
	int i;
	dma_area_t *dma_p;
	struct unm_cmd_buffer *cmd_buf;
	unm_dmah_node_t *nodep;

	cmd_buf = &adapter->cmd_buf_arr[0];

	for (i = 0; i < adapter->MaxTxDescCount; i++) {
		dma_p = &cmd_buf->dma_area;
		unm_free_dma_mem(dma_p);
		nodep = cmd_buf->head;
		while (nodep != NULL) {
			(void) ddi_dma_unbind_handle(nodep->dmahdl);
			nodep = nodep->next;
		}
		if (cmd_buf->msg != NULL)
			freemsg(cmd_buf->msg);
		cmd_buf++;
	}
	adapter->freecmds = 0;
}

static int
unm_alloc_tx_buffers(unm_adapter *adapter)
{
	int i, ret, size, allocated = 0;
	dma_area_t *dma_p;
	struct unm_cmd_buffer *cmd_buf;

	cmd_buf = &adapter->cmd_buf_arr[0];
	size = adapter->maxmtu;

	for (i = 0; i < adapter->MaxTxDescCount; i++) {
		dma_p = &cmd_buf->dma_area;
		ret = unm_alloc_dma_mem(adapter, size,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    &unm_dma_attr_rxbuf, dma_p);
		if (ret != DDI_SUCCESS)
			goto alloc_tx_buffer_fail;

		allocated++;
		cmd_buf++;
	}
	adapter->freecmds = adapter->MaxTxDescCount;
	return (DDI_SUCCESS);

alloc_tx_buffer_fail:
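	/* Roll back the buffers allocated before the failure. */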
	cmd_buf = &adapter->cmd_buf_arr[0];
	for (i = 0; i < allocated; i++) {
		dma_p = &cmd_buf->dma_area;
		unm_free_dma_mem(dma_p);
		cmd_buf++;
	}
	cmn_err(CE_WARN, "%s%d: Failed transmit ring memory allocation\n",
	    adapter->name, adapter->instance);
	return (DDI_FAILURE);
}

/*
 * Called by freemsg() to "free" the resource.
 */
static void
unm_rx_buffer_recycle(char *arg)
{
	unm_rx_buffer_t *rx_buffer = (unm_rx_buffer_t *)(uintptr_t)arg;
	unm_adapter *adapter = rx_buffer->adapter;
	unm_rcv_desc_ctx_t *rcv_desc = rx_buffer->rcv_desc;

	rx_buffer->mp = desballoc(rx_buffer->dma_info.vaddr,
	    rcv_desc->dma_size, 0, &rx_buffer->rx_recycle);

	if (rx_buffer->mp == NULL)
		adapter->stats.desballocfailed++;

	mutex_enter(rcv_desc->recycle_lock);
	rx_buffer->next = rcv_desc->recycle_list;
	rcv_desc->recycle_list = rx_buffer;
	rcv_desc->rx_buf_recycle++;
	mutex_exit(rcv_desc->recycle_lock);
}

void
unm_destroy_rx_ring(unm_rcv_desc_ctx_t *rcv_desc)
{
	uint32_t i, total_buf;
	unm_rx_buffer_t *buf_pool;

	total_buf = rcv_desc->rx_buf_total;
	buf_pool = rcv_desc->rx_buf_pool;
	for (i = 0; i < total_buf; i++) {
		if (buf_pool->mp != NULL)
			freemsg(buf_pool->mp);
		unm_free_dma_mem(&buf_pool->dma_info);
		buf_pool++;
	}

	kmem_free(rcv_desc->rx_buf_pool, sizeof (unm_rx_buffer_t) * total_buf);
	rcv_desc->rx_buf_pool = NULL;
	rcv_desc->pool_list = NULL;
	rcv_desc->recycle_list = NULL;
	rcv_desc->rx_buf_free = 0;

	mutex_destroy(rcv_desc->pool_lock);
	mutex_destroy(rcv_desc->recycle_lock);
}

static int
unm_create_rx_ring(unm_adapter *adapter, unm_rcv_desc_ctx_t *rcv_desc)
{
	int i, ret, allocate = 0, sreoff;
	uint32_t total_buf;
	dma_area_t *dma_info;
	unm_rx_buffer_t *rx_buffer;

	sreoff = adapter->ahw.cut_through ? 0 : IP_ALIGNMENT_BYTES;
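
	/*
	 * Without cut-through support, each buffer's virtual and DMA
	 * addresses are bumped by IP_ALIGNMENT_BYTES (sreoff) below so that
	 * received IP headers end up on an aligned boundary.
	 */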

	/* temporarily set the total rx buffers to twice MaxRxDescCount */
	total_buf = rcv_desc->rx_buf_total = rcv_desc->MaxRxDescCount * 2;

	rcv_desc->rx_buf_pool = kmem_zalloc(sizeof (unm_rx_buffer_t) *
	    total_buf, KM_SLEEP);
	rx_buffer = rcv_desc->rx_buf_pool;
	for (i = 0; i < total_buf; i++) {
		dma_info = &rx_buffer->dma_info;
		ret = unm_alloc_dma_mem(adapter, rcv_desc->buf_size,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &unm_dma_attr_rxbuf, dma_info);
		if (ret != DDI_SUCCESS)
			goto alloc_mem_failed;
		else {
			allocate++;
			dma_info->vaddr = (void *) ((char *)dma_info->vaddr +
			    sreoff);
			dma_info->dma_addr += sreoff;
			rx_buffer->rx_recycle.free_func =
			    unm_rx_buffer_recycle;
			rx_buffer->rx_recycle.free_arg = (caddr_t)rx_buffer;
			rx_buffer->next = NULL;
			rx_buffer->mp = desballoc(dma_info->vaddr,
			    rcv_desc->dma_size, 0, &rx_buffer->rx_recycle);
			if (rx_buffer->mp == NULL)
				adapter->stats.desballocfailed++;
			rx_buffer->rcv_desc = rcv_desc;
			rx_buffer->adapter = adapter;
			rx_buffer++;
		}
	}

	for (i = 0; i < (total_buf - 1); i++) {
		rcv_desc->rx_buf_pool[i].next = &rcv_desc->rx_buf_pool[i + 1];
	}

	rcv_desc->pool_list = rcv_desc->rx_buf_pool;
	rcv_desc->recycle_list = NULL;
	rcv_desc->rx_buf_free = total_buf;

	mutex_init(rcv_desc->pool_lock, NULL,
	    MUTEX_DRIVER, (DDI_INTR_PRI(adapter->intr_pri)));
	mutex_init(rcv_desc->recycle_lock, NULL,
	    MUTEX_DRIVER, (DDI_INTR_PRI(adapter->intr_pri)));

	return (DDI_SUCCESS);

alloc_mem_failed:
	rx_buffer = rcv_desc->rx_buf_pool;
	for (i = 0; i < allocate; i++, rx_buffer++) {
		dma_info = &rx_buffer->dma_info;
		if (rx_buffer->mp != NULL)
			freemsg(rx_buffer->mp);
		unm_free_dma_mem(dma_info);
	}

	kmem_free(rcv_desc->rx_buf_pool, sizeof (unm_rx_buffer_t) * total_buf);
	rcv_desc->rx_buf_pool = NULL;

	cmn_err(CE_WARN, "%s%d: Failed receive ring resource allocation\n",
	    adapter->name, adapter->instance);
	return (DDI_FAILURE);
}

static void
unm_check_options(unm_adapter *adapter)
{
	int i, ring, tx_desc, rx_desc, rx_jdesc;
	unm_recv_context_t *recv_ctx;
	unm_rcv_desc_ctx_t *rcv_desc;
	uint8_t revid = adapter->ahw.revision_id;
	dev_info_t *dip = adapter->dip;

	verbmsg = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    dmesg_propname, 0);

	adapter->tx_bcopy_threshold = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS, txbcopythreshold_propname,
	    UNM_TX_BCOPY_THRESHOLD);
	adapter->rx_bcopy_threshold = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS, rxbcopythreshold_propname,
	    UNM_RX_BCOPY_THRESHOLD);

	tx_desc = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    txringsize_propname, MAX_CMD_DESCRIPTORS_HOST);
	if (tx_desc >= 256 && tx_desc <= MAX_CMD_DESCRIPTORS &&
	    !(tx_desc & (tx_desc - 1))) {
		adapter->MaxTxDescCount = tx_desc;
	} else {
		cmn_err(CE_WARN, "%s%d: TxRingSize defaulting to %d, since "
		    ".conf value is not 2 power aligned in range 256 - %d\n",
		    adapter->name, adapter->instance, MAX_CMD_DESCRIPTORS_HOST,
		    MAX_CMD_DESCRIPTORS);
		adapter->MaxTxDescCount = MAX_CMD_DESCRIPTORS_HOST;
	}

	rx_desc = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    rxringsize_propname, MAX_RCV_DESCRIPTORS);
	if (rx_desc >= NX_MIN_DRIVER_RDS_SIZE &&
	    rx_desc <= NX_MAX_SUPPORTED_RDS_SIZE &&
	    !(rx_desc & (rx_desc - 1))) {
		adapter->MaxRxDescCount = rx_desc;
	} else {
		cmn_err(CE_WARN, "%s%d: RxRingSize defaulting to %d, since "
		    ".conf value is not 2 power aligned in range %d - %d\n",
		    adapter->name, adapter->instance, MAX_RCV_DESCRIPTORS,
		    NX_MIN_DRIVER_RDS_SIZE, NX_MAX_SUPPORTED_RDS_SIZE);
		adapter->MaxRxDescCount = MAX_RCV_DESCRIPTORS;
	}

	rx_jdesc = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    jumborxringsize_propname, MAX_JUMBO_RCV_DESCRIPTORS);
	if (rx_jdesc >= NX_MIN_DRIVER_RDS_SIZE &&
	    rx_jdesc <= NX_MAX_SUPPORTED_JUMBO_RDS_SIZE &&
	    !(rx_jdesc & (rx_jdesc - 1))) {
		adapter->MaxJumboRxDescCount = rx_jdesc;
	} else {
		cmn_err(CE_WARN, "%s%d: JumboRingSize defaulting to %d, since "
		    ".conf value is not 2 power aligned in range %d - %d\n",
		    adapter->name, adapter->instance, MAX_JUMBO_RCV_DESCRIPTORS,
		    NX_MIN_DRIVER_RDS_SIZE, NX_MAX_SUPPORTED_JUMBO_RDS_SIZE);
		adapter->MaxJumboRxDescCount = MAX_JUMBO_RCV_DESCRIPTORS;
	}

	adapter->MaxLroRxDescCount = MAX_LRO_RCV_DESCRIPTORS;

	adapter->mtu = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, defaultmtu_propname, MTU_SIZE);

	if (adapter->mtu < MTU_SIZE) {
		cmn_err(CE_WARN, "Raising mtu to %d\n", MTU_SIZE);
		adapter->mtu = MTU_SIZE;
	}
	adapter->maxmtu = NX_IS_REVISION_P2(revid) ? P2_MAX_MTU : P3_MAX_MTU;
	if (adapter->mtu > adapter->maxmtu) {
		cmn_err(CE_WARN, "Lowering mtu to %d\n", adapter->maxmtu);
		adapter->mtu = adapter->maxmtu;
	}

	adapter->maxmtu += NX_MAX_ETHERHDR;

	for (i = 0; i < MAX_RCV_CTX; ++i) {
		recv_ctx = &adapter->recv_ctx[i];

		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rcv_desc = &recv_ctx->rcv_desc[ring];

			switch (RCV_DESC_TYPE(ring)) {
			case RCV_DESC_NORMAL:
				rcv_desc->MaxRxDescCount =
				    adapter->MaxRxDescCount;
				if (adapter->ahw.cut_through) {
					rcv_desc->dma_size =
					    NX_CT_DEFAULT_RX_BUF_LEN;
					rcv_desc->buf_size = rcv_desc->dma_size;
				} else {
					rcv_desc->dma_size =
					    NX_RX_NORMAL_BUF_MAX_LEN;
					rcv_desc->buf_size =
					    rcv_desc->dma_size +
					    IP_ALIGNMENT_BYTES;
				}
				break;

			case RCV_DESC_JUMBO:
				rcv_desc->MaxRxDescCount =
				    adapter->MaxJumboRxDescCount;
				if (adapter->ahw.cut_through) {
					rcv_desc->dma_size =
					    rcv_desc->buf_size =
					    NX_P3_RX_JUMBO_BUF_MAX_LEN;
				} else {
					if (NX_IS_REVISION_P2(revid))
						rcv_desc->dma_size =
						    NX_P2_RX_JUMBO_BUF_MAX_LEN;
					else
						rcv_desc->dma_size =
						    NX_P3_RX_JUMBO_BUF_MAX_LEN;
					rcv_desc->buf_size =
					    rcv_desc->dma_size +
					    IP_ALIGNMENT_BYTES;
				}
				break;

			case RCV_RING_LRO:
				rcv_desc->MaxRxDescCount =
				    adapter->MaxLroRxDescCount;
				rcv_desc->buf_size = MAX_RX_LRO_BUFFER_LENGTH;
				rcv_desc->dma_size = RX_LRO_DMA_MAP_LEN;
				break;
			default:
				break;
			}
		}
	}
}
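
/*
 * The CRB/register access entry points differ between the 128MB/32MB BAR
 * layouts and the 2MB layout; vector128M() and vector2M() install the
 * matching set of routines on the adapter (see unm_pci_map_setup()).
 */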
static void
vector128M(unm_adapter *aptr)
{
	aptr->unm_nic_pci_change_crbwindow = &unm_nic_pci_change_crbwindow_128M;
	aptr->unm_crb_writelit_adapter = &unm_crb_writelit_adapter_128M;
	aptr->unm_nic_hw_write_wx = &unm_nic_hw_write_wx_128M;
	aptr->unm_nic_hw_read_wx = &unm_nic_hw_read_wx_128M;
	aptr->unm_nic_hw_write_ioctl = &unm_nic_hw_write_ioctl_128M;
	aptr->unm_nic_hw_read_ioctl = &unm_nic_hw_read_ioctl_128M;
	aptr->unm_nic_pci_mem_write = &unm_nic_pci_mem_write_128M;
	aptr->unm_nic_pci_mem_read = &unm_nic_pci_mem_read_128M;
	aptr->unm_nic_pci_write_immediate = &unm_nic_pci_write_immediate_128M;
	aptr->unm_nic_pci_read_immediate = &unm_nic_pci_read_immediate_128M;
	aptr->unm_nic_pci_write_normalize = &unm_nic_pci_write_normalize_128M;
	aptr->unm_nic_pci_read_normalize = &unm_nic_pci_read_normalize_128M;
	aptr->unm_nic_pci_set_window = &unm_nic_pci_set_window_128M;
	aptr->unm_nic_clear_statistics = &unm_nic_clear_statistics_128M;
	aptr->unm_nic_fill_statistics = &unm_nic_fill_statistics_128M;
}

static void
vector2M(unm_adapter *aptr)
{
	aptr->unm_nic_pci_change_crbwindow = &unm_nic_pci_change_crbwindow_2M;
	aptr->unm_crb_writelit_adapter = &unm_crb_writelit_adapter_2M;
	aptr->unm_nic_hw_write_wx = &unm_nic_hw_write_wx_2M;
	aptr->unm_nic_hw_read_wx = &unm_nic_hw_read_wx_2M;
	aptr->unm_nic_hw_write_ioctl = &unm_nic_hw_write_wx_2M;
	aptr->unm_nic_hw_read_ioctl = &unm_nic_hw_read_wx_2M;
	aptr->unm_nic_pci_mem_write = &unm_nic_pci_mem_write_2M;
	aptr->unm_nic_pci_mem_read = &unm_nic_pci_mem_read_2M;
	aptr->unm_nic_pci_write_immediate = &unm_nic_pci_write_immediate_2M;
	aptr->unm_nic_pci_read_immediate = &unm_nic_pci_read_immediate_2M;
	aptr->unm_nic_pci_write_normalize = &unm_nic_pci_write_normalize_2M;
	aptr->unm_nic_pci_read_normalize = &unm_nic_pci_read_normalize_2M;
	aptr->unm_nic_pci_set_window = &unm_nic_pci_set_window_2M;
	aptr->unm_nic_clear_statistics = &unm_nic_clear_statistics_2M;
	aptr->unm_nic_fill_statistics = &unm_nic_fill_statistics_2M;
}

static int
unm_pci_map_setup(unm_adapter *adapter)
{
	int ret;
	caddr_t reg_base, db_base;
	caddr_t mem_ptr0, mem_ptr1 = NULL, mem_ptr2 = NULL;
	unsigned long pci_len0;
	unsigned long first_page_group_start, first_page_group_end;

	off_t regsize, dbsize = UNM_DB_MAPSIZE_BYTES;
	dev_info_t *dip = adapter->dip;

	adapter->ahw.qdr_sn_window = adapter->ahw.ddr_mn_window = -1;

	/* map register space */

	ret = ddi_dev_regsize(dip, 1, &regsize);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: failed to read reg size for bar0\n",
		    adapter->name, adapter->instance);
		return (DDI_FAILURE);
	}

	ret = ddi_regs_map_setup(dip, 1, &reg_base, 0,
	    regsize, &unm_dev_attr, &adapter->regs_handle);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: failed to map registers\n",
		    adapter->name, adapter->instance);
		return (DDI_FAILURE);
	}

	mem_ptr0 = reg_base;

	if (regsize == UNM_PCI_128MB_SIZE) {
		pci_len0 = FIRST_PAGE_GROUP_SIZE;
		mem_ptr1 = mem_ptr0 + SECOND_PAGE_GROUP_START;
		mem_ptr2 = mem_ptr0 + THIRD_PAGE_GROUP_START;
		first_page_group_start = FIRST_PAGE_GROUP_START;
		first_page_group_end = FIRST_PAGE_GROUP_END;
		vector128M(adapter);
	} else if (regsize == UNM_PCI_32MB_SIZE) {
		pci_len0 = 0;
		mem_ptr1 = mem_ptr0;
		mem_ptr2 = mem_ptr0 +
		    (THIRD_PAGE_GROUP_START - SECOND_PAGE_GROUP_START);
		first_page_group_start = 0;
		first_page_group_end = 0;
		vector128M(adapter);
	} else if (regsize == UNM_PCI_2MB_SIZE) {
		pci_len0 = UNM_PCI_2MB_SIZE;
		first_page_group_start = 0;
		first_page_group_end = 0;
		adapter->ahw.ddr_mn_window = adapter->ahw.qdr_sn_window = 0;
		adapter->ahw.mn_win_crb = 0x100000 + PCIX_MN_WINDOW +
		    (adapter->ahw.pci_func * 0x20);
		if (adapter->ahw.pci_func < 4)
			adapter->ahw.ms_win_crb = 0x100000 + PCIX_SN_WINDOW +
			    (adapter->ahw.pci_func * 0x20);
		else
			adapter->ahw.ms_win_crb = 0x100000 + PCIX_SN_WINDOW +
			    0xA0 + ((adapter->ahw.pci_func - 4) * 0x10);
		vector2M(adapter);
	} else {
		cmn_err(CE_WARN, "%s%d: invalid pci regs map size %ld\n",
		    adapter->name, adapter->instance, regsize);
		ddi_regs_map_free(&adapter->regs_handle);
		return (DDI_FAILURE);
	}

	adapter->ahw.pci_base0 = (unsigned long)mem_ptr0;
	adapter->ahw.pci_len0 = pci_len0;
	adapter->ahw.pci_base1 = (unsigned long)mem_ptr1;
	adapter->ahw.pci_len1 = SECOND_PAGE_GROUP_SIZE;
	adapter->ahw.pci_base2 = (unsigned long)mem_ptr2;
	adapter->ahw.pci_len2 = THIRD_PAGE_GROUP_SIZE;
	adapter->ahw.crb_base =
	    PCI_OFFSET_SECOND_RANGE(adapter, UNM_PCI_CRBSPACE);

	adapter->ahw.first_page_group_start = first_page_group_start;
	adapter->ahw.first_page_group_end = first_page_group_end;

	/* map doorbell */

	ret = ddi_regs_map_setup(dip, 2, &db_base, 0,
	    dbsize, &unm_dev_attr, &adapter->db_handle);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: failed to map doorbell\n",
		    adapter->name, adapter->instance);
		ddi_regs_map_free(&adapter->regs_handle);
		return (DDI_FAILURE);
	}

	adapter->ahw.db_base = (unsigned long)db_base;
	adapter->ahw.db_len = dbsize;

	return (DDI_SUCCESS);
}

static int
unm_initialize_intr(unm_adapter *adapter)
{
	int ret;
	int type, count, avail, actual;

	ret = ddi_intr_get_supported_types(adapter->dip, &type);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: ddi_intr_get_supported_types() "
		    "failed\n", adapter->name, adapter->instance);
		return (DDI_FAILURE);
	}

	type = DDI_INTR_TYPE_MSI;
	ret = ddi_intr_get_nintrs(adapter->dip, type, &count);
	if ((ret == DDI_SUCCESS) && (count > 0))
		goto found_msi;

	type = DDI_INTR_TYPE_FIXED;
	ret = ddi_intr_get_nintrs(adapter->dip, type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		cmn_err(CE_WARN,
		    "ddi_intr_get_nintrs() failure ret=%d\n", ret);
		return (DDI_FAILURE);
	}

found_msi:
	adapter->intr_type = type;
	adapter->flags &= ~(UNM_NIC_MSI_ENABLED | UNM_NIC_MSIX_ENABLED);
	if (type == DDI_INTR_TYPE_MSI)
		adapter->flags |= UNM_NIC_MSI_ENABLED;

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(adapter->dip, type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		cmn_err(CE_WARN, "ddi_intr_get_navail() failure, ret=%d\n",
		    ret);
		return (DDI_FAILURE);
	}

	ret = ddi_intr_alloc(adapter->dip, &adapter->intr_handle,
	    type, 0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		cmn_err(CE_WARN, "ddi_intr_alloc() failure: %d\n", ret);
		return (DDI_FAILURE);
	}

	ret = ddi_intr_get_pri(adapter->intr_handle, &adapter->intr_pri);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ddi_intr_get_pri() failure: %d\n", ret);
	}

	/* Call ddi_intr_add_handler() */
	ret = ddi_intr_add_handler(adapter->intr_handle, unm_intr,
	    (caddr_t)adapter, NULL);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: ddi_intr_add_handler() failure\n",
		    adapter->name, adapter->instance);
		(void) ddi_intr_free(adapter->intr_handle);
		return (DDI_FAILURE);
	}

	/* Add softintr if required */

	return (DDI_SUCCESS);
}

void
unm_destroy_intr(unm_adapter *adapter)
{
	/* disable interrupt */
	if (adapter->intr_type == DDI_INTR_TYPE_MSI)
		(void) ddi_intr_block_disable(&adapter->intr_handle, 1);
	else
		(void) ddi_intr_disable(adapter->intr_handle);

	(void) ddi_intr_remove_handler(adapter->intr_handle);
	(void) ddi_intr_free(adapter->intr_handle);

	/* Remove the software intr handler */
}

static void
netxen_set_port_mode(unm_adapter *adapter)
{
	static int wol_port_mode = UNM_PORT_MODE_AUTO_NEG_1G;
	static int port_mode = UNM_PORT_MODE_AUTO_NEG;
	int btype = adapter->ahw.boardcfg.board_type, data = 0;

	if (btype == UNM_BRDTYPE_P3_HMEZ || btype == UNM_BRDTYPE_P3_XG_LOM) {
		data = port_mode;	/* set to port_mode normally */
		if ((port_mode != UNM_PORT_MODE_802_3_AP) &&
		    (port_mode != UNM_PORT_MODE_XG) &&
		    (port_mode != UNM_PORT_MODE_AUTO_NEG_1G) &&
		    (port_mode != UNM_PORT_MODE_AUTO_NEG_XG))
			data = UNM_PORT_MODE_AUTO_NEG;

		adapter->unm_nic_hw_write_wx(adapter, UNM_PORT_MODE_ADDR,
		    &data, 4);

		if ((wol_port_mode != UNM_PORT_MODE_802_3_AP) &&
		    (wol_port_mode != UNM_PORT_MODE_XG) &&
		    (wol_port_mode != UNM_PORT_MODE_AUTO_NEG_1G) &&
		    (wol_port_mode != UNM_PORT_MODE_AUTO_NEG_XG))
			wol_port_mode = UNM_PORT_MODE_AUTO_NEG;

		adapter->unm_nic_hw_write_wx(adapter, UNM_WOL_PORT_MODE,
		    &wol_port_mode, 4);
	}
}

static void
netxen_pcie_strap_init(unm_adapter *adapter)
{
	ddi_acc_handle_t pcihdl = adapter->pci_cfg_handle;
	u32 chicken, control, c8c9value = 0xF1000;

	adapter->unm_nic_hw_read_wx(adapter, UNM_PCIE_REG(PCIE_CHICKEN3),
	    &chicken, 4);

	chicken &= 0xFCFFFFFF;	/* clear chicken3 25:24 */
	control = pci_config_get32(pcihdl, 0xD0);
	if ((control & 0x000F0000) != 0x00020000)	/* is it gen1? */
		chicken |= 0x01000000;
	adapter->unm_nic_hw_write_wx(adapter, UNM_PCIE_REG(PCIE_CHICKEN3),
	    &chicken, 4);
	control = pci_config_get32(pcihdl, 0xC8);
	control = pci_config_get32(pcihdl, 0xC8);
	pci_config_put32(pcihdl, 0xC8, c8c9value);
}

static int
netxen_read_mac_addr(unm_adapter *adapter)
{
	u64 mac_addr[FLASH_NUM_PORTS + 1];
	unsigned char *p;
	int i;

	if (is_flash_supported(adapter) != 0)
		return (-1);

	if (get_flash_mac_addr(adapter, mac_addr) != 0)
		return (-1);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		p = (unsigned char *)&mac_addr[adapter->ahw.pci_func];
	else
		p = (unsigned char *)&mac_addr[adapter->portnum];

	for (i = 0; i < 6; i++)
		adapter->mac_addr[i] = p[5 - i];

	if (unm_nic_macaddr_set(adapter, adapter->mac_addr) != 0)
		return (-1);

	return (0);
}

static int
unmattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	unm_adapter *adapter;
	unm_recv_context_t *recv_ctx = NULL;
	unm_rcv_desc_ctx_t *rcv_desc = NULL;
	int i, first_driver = 0;
	int ret, ring, temp;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
	case DDI_PM_RESUME:
	default:
		return (DDI_FAILURE);
	}

	adapter = kmem_zalloc(sizeof (unm_adapter), KM_SLEEP);
	adapter->dip = dip;
	ddi_set_driver_private(dip, adapter);
	adapter->instance = ddi_get_instance(dip);

	adapter->name = ddi_driver_name(dip);

	ret = pci_config_setup(dip, &adapter->pci_cfg_handle);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: pci_config_setup failed\n",
		    adapter->name, adapter->instance);
		goto attach_setup_err;
	}

	ret = unm_pci_cfg_init(adapter);
	if (ret != DDI_SUCCESS)
		goto attach_err;

	ret = unm_pci_map_setup(adapter);
	if (ret != DDI_SUCCESS)
		goto attach_err;

	if (unm_initialize_intr(adapter) != DDI_SUCCESS)
		goto attach_unmap_regs;

	rw_init(&adapter->adapter_lock, NULL,
	    RW_DRIVER, DDI_INTR_PRI(adapter->intr_pri));
	mutex_init(&adapter->tx_lock, NULL,
	    MUTEX_DRIVER, (DDI_INTR_PRI(adapter->intr_pri)));
	mutex_init(&adapter->lock, NULL,
	    MUTEX_DRIVER, (DDI_INTR_PRI(adapter->intr_pri)));

	adapter->portnum = (int8_t)adapter->ahw.pci_func;

	/*
	 * Set the CRB window to invalid. If any register in window 0 is
	 * accessed it should set window to 0 and then reset it to 1.
	 */
	adapter->curr_window = 255;

	adapter->fw_major = adapter->unm_nic_pci_read_normalize(adapter,
	    UNM_FW_VERSION_MAJOR);

	if (adapter->fw_major < 4)
		adapter->max_rds_rings = 3;
	else
		adapter->max_rds_rings = 2;

	STRUCT_COPY(adapter->gc_dma_attr_desc, unm_dma_attr_desc);
	STRUCT_COPY(adapter->gc_attr_desc, unm_buf_attr);

	ret = unm_nic_get_board_info(adapter);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: error reading board config\n",
		    adapter->name, adapter->instance);
		goto attach_destroy_intr;
	}

	/* Mezz cards have PCI function 0, 2, 3 enabled */
	switch (adapter->ahw.boardcfg.board_type) {
	case UNM_BRDTYPE_P2_SB31_10G_IMEZ:
	case UNM_BRDTYPE_P2_SB31_10G_HMEZ:
		if (adapter->ahw.pci_func >= 2) {
			adapter->portnum = adapter->ahw.pci_func - 2;
		}
	default:
		break;
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		temp = UNM_CRB_READ_VAL_ADAPTER(UNM_MIU_MN_CONTROL, adapter);
		adapter->ahw.cut_through = NX_IS_SYSTEM_CUT_THROUGH(temp);
		if (adapter->ahw.pci_func == 0)
			first_driver = 1;
	} else {
		if (adapter->portnum == 0)
			first_driver = 1;
	}

	unm_check_options(adapter);

	if (first_driver) {
		int first_boot = adapter->unm_nic_pci_read_normalize(adapter,
		    UNM_CAM_RAM(0x1fc));

		if (check_hw_init(adapter) != 0) {
			cmn_err(CE_WARN, "%s%d: Error in HW init sequence\n",
			    adapter->name, adapter->instance);
			goto attach_destroy_intr;
		}

		if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
			netxen_set_port_mode(adapter);

		if (first_boot != 0x55555555) {
			temp = 0;
			adapter->unm_nic_hw_write_wx(adapter, CRB_CMDPEG_STATE,
			    &temp, 4);
			if (pinit_from_rom(adapter, 0) != 0)
				goto attach_destroy_intr;

			drv_usecwait(500);

			ret = load_from_flash(adapter);
			if (ret != DDI_SUCCESS)
				goto attach_destroy_intr;
		}

		if (ret = unm_initialize_dummy_dma(adapter))
			goto attach_destroy_intr;

		/*
		 * Tell the hardware our version number.
		 */
		i = (_UNM_NIC_MAJOR << 16) |
		    ((_UNM_NIC_MINOR << 8)) | (_UNM_NIC_SUBVERSION);
		adapter->unm_nic_hw_write_wx(adapter, CRB_DRIVER_VERSION,
		    &i, 4);

		/* Unlock the HW, prompting the boot sequence */
		if ((first_boot == 0x55555555) &&
		    (NX_IS_REVISION_P2(adapter->ahw.revision_id)))
			adapter->unm_nic_pci_write_normalize(adapter,
			    UNM_ROMUSB_GLB_PEGTUNE_DONE, 1);

		/* Handshake with the card before we register the devices. */
		if (phantom_init(adapter, 0) != DDI_SUCCESS)
			goto attach_destroy_intr;
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		netxen_pcie_strap_init(adapter);

	/*
	 * See if the firmware gave us a virtual-physical port mapping.
	 */
	adapter->physical_port = adapter->portnum;
	i = adapter->unm_nic_pci_read_normalize(adapter,
	    CRB_V2P(adapter->portnum));
	if (i != 0x55555555)
		adapter->physical_port = (uint16_t)i;

	adapter->cmd_buf_arr = (struct unm_cmd_buffer *)kmem_zalloc(
	    sizeof (struct unm_cmd_buffer) * adapter->MaxTxDescCount,
	    KM_SLEEP);

	for (i = 0; i < MAX_RCV_CTX; ++i) {
		recv_ctx = &adapter->recv_ctx[i];

		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rcv_desc = &recv_ctx->rcv_desc[ring];
			ret = unm_create_rx_ring(adapter, rcv_desc);
			if (ret != DDI_SUCCESS)
				goto attach_free_cmdbufs;
		}
	}

	ret = unm_alloc_tx_dmahdl(adapter);
	if (ret != DDI_SUCCESS)
		goto attach_free_cmdbufs;

	ret = unm_alloc_tx_buffers(adapter);
	if (ret != DDI_SUCCESS)
		goto attach_free_tx_dmahdl;

	adapter->ahw.linkup = 0;

	if (receive_peg_ready(adapter)) {
		ret = -EIO;
		goto attach_free_tx_buffers;
	}

	if (netxen_read_mac_addr(adapter))
		cmn_err(CE_WARN, "%s%d: Failed to read MAC addr\n",
		    adapter->name, adapter->instance);

	unm_nic_flash_print(adapter);

	if (verbmsg != 0) {
		switch (adapter->ahw.board_type) {
		case UNM_NIC_GBE:
			cmn_err(CE_NOTE, "%s: QUAD GbE port %d initialized\n",
			    unm_nic_driver_name, adapter->portnum);
			break;

		case UNM_NIC_XGBE:
			cmn_err(CE_NOTE, "%s: XGbE port %d initialized\n",
			    unm_nic_driver_name, adapter->portnum);
			break;
		}
	}

	ret = unm_register_mac(adapter);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "%s%d: Mac registration error\n",
		    adapter->name, adapter->instance);
		goto attach_free_tx_buffers;
	}

	return (DDI_SUCCESS);

attach_free_tx_buffers:
	unm_free_tx_buffers(adapter);
attach_free_tx_dmahdl:
	unm_free_tx_dmahdl(adapter);
attach_free_cmdbufs:
	kmem_free(adapter->cmd_buf_arr, sizeof (struct unm_cmd_buffer) *
	    adapter->MaxTxDescCount);
	for (i = 0; i < MAX_RCV_CTX; ++i) {
		recv_ctx = &adapter->recv_ctx[i];

		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rcv_desc = &recv_ctx->rcv_desc[ring];
			if (rcv_desc->rx_buf_pool != NULL)
				unm_destroy_rx_ring(rcv_desc);
		}
	}

	if (adapter->portnum == 0)
		unm_free_dummy_dma(adapter);
attach_destroy_intr:
	unm_destroy_intr(adapter);
attach_unmap_regs:
	ddi_regs_map_free(&(adapter->regs_handle));
	ddi_regs_map_free(&(adapter->db_handle));
attach_err:
	pci_config_teardown(&adapter->pci_cfg_handle);
attach_setup_err:
	kmem_free(adapter, sizeof (unm_adapter));
	return (ret);
}

static int
unmdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	unm_adapter *adapter = (unm_adapter *)ddi_get_driver_private(dip);

	if (adapter == NULL)
		return (DDI_FAILURE);

	switch (cmd) {
	case DDI_DETACH:
		unm_fini_kstats(adapter);
		adapter->kstats[0] = NULL;

		if (adapter->pci_cfg_handle != NULL)
			pci_config_teardown(&adapter->pci_cfg_handle);

		unm_nd_cleanup(adapter);
		unm_nic_remove(adapter);
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (unm_nic_suspend(adapter));

	default:
		break;
	}

	return (DDI_FAILURE);
}

#ifdef SOLARIS11
DDI_DEFINE_STREAM_OPS(unm_ops, nulldev, nulldev, unmattach, unmdetach,
    nodev, NULL, D_MP, NULL, NULL);
#else
DDI_DEFINE_STREAM_OPS(unm_ops, nulldev, nulldev, unmattach, unmdetach,
    nodev, NULL, D_MP, NULL);
#endif

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	ident,
	&unm_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(&modldrv),
	NULL
};

int
_init(void)
{
	int ret;

	unm_ops.devo_cb_ops->cb_str = NULL;
	mac_init_ops(&unm_ops, "ntxn");

	ret = mod_install(&modlinkage);
	if (ret != DDI_SUCCESS) {
		mac_fini_ops(&unm_ops);
		cmn_err(CE_WARN, "ntxn: mod_install failed\n");
	}

	return (ret);
}

int
_fini(void)
{
	int ret;

	ret = mod_remove(&modlinkage);
	if (ret == DDI_SUCCESS)
		mac_fini_ops(&unm_ops);
	return (ret);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}