/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include	<sys/nxge/nxge_impl.h>
#include	<sys/pcie.h>

uint32_t	nxge_use_partition = 0;		/* debug partition flag */
uint32_t	nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 *  and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t	nxge_msi_enable = 2;
#else
uint32_t	nxge_msi_enable = 1;
#endif

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t	nxge_rbr_spare_size = 0;
uint32_t	nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t	nxge_no_msg = B_TRUE;		/* control message display */
uint32_t	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t	nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t	nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t	nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t	nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t	nxge_jumbo_enable = B_FALSE;
uint16_t	nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t	nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t	nxge_tx_scheme = NXGE_USE_SERIAL;

/*
 * Debugging flags:
 *		nxge_no_tx_lb	 : transmit load balancing
 *		nxge_tx_lb_policy: 0 - TCP port (default)
 *				   3 - DEST MAC
 */
uint32_t	nxge_no_tx_lb = 0;
uint32_t	nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Add tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
uint32_t	nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_bcopy_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

rtrace_t npi_rtracebuf;

#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
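 * (The hsvc_info_t below is registered with the hypervisor via
 * hsvc_register() in the sun4v block of nxge_attach(), and is
 * unregistered again in nxge_unattach().)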
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef	NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);

/*
 * The next declarations are for the GLDv3 interface.
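 * These implement the mac(9E) entry points that are wired into
 * nxge_m_callbacks below and handed to the MAC layer by
 * nxge_mac_register().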
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_unicst(void *, const uint8_t *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static void nxge_m_resources(void *);
mblk_t *nxge_m_tx(void *arg, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	mac_addr_slot_t slot);
static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
	boolean_t factory);
static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)

static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	nxge_m_unicst,
	nxge_m_tx,
	nxge_m_resources,
	nxge_m_ioctl,
	nxge_m_getcapab
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level = 0;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void			*nxge_list = NULL;

void			*nxge_hw_list = NULL;
nxge_os_mutex_t		nxge_common_lock;

nxge_os_mutex_t		nxge_mii_lock;
static uint32_t		nxge_mii_lock_init = 0;
nxge_os_mutex_t		nxge_mdio_lock;
static uint32_t		nxge_mdio_lock_init = 0;

extern uint64_t		npi_debug_level;

extern nxge_status_t	nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t	nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t	nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t	nxge_intr_ldgv_init(p_nxge_t);
extern void		nxge_fm_init(p_nxge_t, ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *, ddi_dma_attr_t *);
extern void		nxge_fm_fini(p_nxge_t);
extern npi_status_t	npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
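 * Note that descriptors are mapped little-endian while the data
 * buffers (next attribute block) are mapped big-endian; both use
 * strict ordering.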
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * dma chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes[] = {0x2000};
#else
size_t alloc_sizes[] = {0x1000, 0x2000, 0x4000, 0x8000,
	0x10000, 0x20000, 0x40000, 0x80000,
	0x100000, 0x200000, 0x400000, 0x800000, 0x1000000};
#endif

/*
 * nxge_attach - Device driver attach entry point.
 */
static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t	nxgep = NULL;
	int		instance;
	int		status = DDI_SUCCESS;
	uint8_t		portn;
	nxge_mmac_t	*mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
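	 * (The instance number also indexes the nxge_list soft state
	 * that is allocated below with ddi_soft_state_zalloc().)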
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr,
	    &nxge_rx_dma_attr);

	status = nxge_map_regs(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
	nxgep->mac.portnum = portn;
	if ((portn == 0) || (portn == 1))
		nxgep->mac.porttype = PORT_TYPE_XMAC;
	else
		nxgep->mac.porttype = PORT_TYPE_BMAC;
	/*
	 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
	 * internally, the other 2 ports use BMAC (1G "Big" MAC).
	 * The two types of MACs have different characteristics.
	 */
	mmac_info = &nxgep->nxge_mmac_info;
	if (nxgep->function_num < 2) {
		mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
		mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
	} else {
		mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
		mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Read the VPD info from the EEPROM into a local data
	 * structure, and check the VPD info for validity.
	 */
	nxge_vpd_info_get(nxgep);

	status = nxge_xcvr_find(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
		    " Couldn't determine card type"
		    " .... exit "));
		goto nxge_attach_fail5;
	}

	status = nxge_get_config_properties(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed"));
		goto nxge_attach_fail;
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d "
			    "group: 0x%lx major: 0x%lx minor: 0x%lx "
			    "errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			status = DDI_FAILURE;
			goto nxge_attach_fail;
		}

		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}
#endif

	nxge_hw_id_init(nxgep);
	nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}
	status = nxge_add_soft_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed"));
		goto nxge_attach_fail;
	}

	/*
	 * Enable interrupts.
	 */
	nxge_intrs_enable(nxgep);

	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
	    instance));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		status = DDI_SUCCESS;
	int		instance;
	p_nxge_t	nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/* remove soft interrupts */
	nxge_remove_soft_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	nxge_destroy_dev(nxgep);

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if	NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;
	char		buf[MAXPATHLEN + 1];
	char		*devname;
#ifdef	NXGE_DEBUG
	char		*sysname;
#endif
	off_t		regsize;
	nxge_status_t	status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t		pci_offset;
	uint16_t	pcie_devctl;
#endif

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int		*prop_val;
		uint_t		prop_len;
		uint8_t		func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg", &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;
		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d", func_num));
			nxgep->function_num = func_num;
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMPORTANT: workaround for a bit-swapping bug in the
		 * hardware which ends up with no-snoop enabled,
		 * leaving DMA improperly synchronized.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
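		 * Unlike the Neptune case above, there is no PCI config
		 * space to map for N2/NIU, so no register set 0 is used
		 * and mapping starts at entry 1.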
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (2) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
"==> nxge_unmap_regs: device registers")); 1096 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1097 nxgep->dev_regs->nxge_regh = NULL; 1098 } 1099 if (nxgep->dev_regs->nxge_msix_regh) { 1100 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1101 "==> nxge_unmap_regs: device interrupts")); 1102 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); 1103 nxgep->dev_regs->nxge_msix_regh = NULL; 1104 } 1105 if (nxgep->dev_regs->nxge_vir_regh) { 1106 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1107 "==> nxge_unmap_regs: vio region")); 1108 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); 1109 nxgep->dev_regs->nxge_vir_regh = NULL; 1110 } 1111 if (nxgep->dev_regs->nxge_vir2_regh) { 1112 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1113 "==> nxge_unmap_regs: vio2 region")); 1114 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); 1115 nxgep->dev_regs->nxge_vir2_regh = NULL; 1116 } 1117 1118 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); 1119 nxgep->dev_regs = NULL; 1120 } 1121 1122 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); 1123 } 1124 1125 static nxge_status_t 1126 nxge_setup_mutexes(p_nxge_t nxgep) 1127 { 1128 int ddi_status = DDI_SUCCESS; 1129 nxge_status_t status = NXGE_OK; 1130 nxge_classify_t *classify_ptr; 1131 int partition; 1132 1133 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); 1134 1135 /* 1136 * Get the interrupt cookie so the mutexes can be 1137 * Initialized. 1138 */ 1139 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, 1140 &nxgep->interrupt_cookie); 1141 if (ddi_status != DDI_SUCCESS) { 1142 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1143 "<== nxge_setup_mutexes: failed 0x%x", ddi_status)); 1144 goto nxge_setup_mutexes_exit; 1145 } 1146 1147 /* Initialize global mutex */ 1148 1149 if (nxge_mdio_lock_init == 0) { 1150 MUTEX_INIT(&nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 1151 } 1152 atomic_add_32(&nxge_mdio_lock_init, 1); 1153 1154 if (nxge_mii_lock_init == 0) { 1155 MUTEX_INIT(&nxge_mii_lock, NULL, MUTEX_DRIVER, NULL); 1156 } 1157 atomic_add_32(&nxge_mii_lock_init, 1); 1158 1159 nxgep->drv_state |= STATE_MDIO_LOCK_INIT; 1160 nxgep->drv_state |= STATE_MII_LOCK_INIT; 1161 1162 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL); 1163 MUTEX_INIT(&nxgep->poll_lock, NULL, 1164 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1165 1166 /* 1167 * Initialize mutexes for this device. 1168 */ 1169 MUTEX_INIT(nxgep->genlock, NULL, 1170 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1171 MUTEX_INIT(&nxgep->ouraddr_lock, NULL, 1172 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1173 MUTEX_INIT(&nxgep->mif_lock, NULL, 1174 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1175 RW_INIT(&nxgep->filter_lock, NULL, 1176 RW_DRIVER, (void *)nxgep->interrupt_cookie); 1177 1178 classify_ptr = &nxgep->classifier; 1179 /* 1180 * FFLP Mutexes are never used in interrupt context 1181 * as fflp operation can take very long time to 1182 * complete and hence not suitable to invoke from interrupt 1183 * handlers. 
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int		partition;
	nxge_classify_t	*classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}
	if (nxgep->drv_state & STATE_MDIO_LOCK_INIT) {
		if (nxge_mdio_lock_init == 1) {
			MUTEX_DESTROY(&nxge_mdio_lock);
		}
		atomic_add_32(&nxge_mdio_lock_init, -1);
	}
	if (nxgep->drv_state & STATE_MII_LOCK_INIT) {
		if (nxge_mii_lock_init == 1) {
			MUTEX_DESTROY(&nxge_mii_lock);
		}
		atomic_add_32(&nxge_mii_lock_init, -1);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	/*
	 * Initialize and enable TXC registers
	 * (Globally enable the TX controller,
	 * enable the port, configure the dma channel bitmap,
	 * configure the max burst size).
	 */
	status = nxge_txc_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
		goto nxge_init_fail2;
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
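	 * (FCRAM is present on Neptune only; this is also why
	 * nxge_setup_mutexes() guards fcram_lock with
	 * NXGE_IS_VALID_NEPTUNE_TYPE.)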
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep);

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	(void) nxge_txc_uninit(nxgep);
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}

timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) ||
	    (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}

void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	regdata;
	int		i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
#if defined(__i386)
	reg = (size_t)buf[0];
#else
	reg = buf[0];
#endif

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}

nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char		msg_buffer[1048];
	char		prefix_buffer[32];
	int		instance;
	uint64_t	debug_level;
	int		cmn_level = CE_CONT;
	va_list		ap;

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);
	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t		*ap = (uchar_t *)addr;
	int		i;
	static char	etherbuf[1024];
	char		*cp = etherbuf;
	char		digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}

#ifdef	NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t	cfg_handle;
	p_pci_cfg_t		cfg_ptr;
	ddi_acc_handle_t	dev_handle;
	char			*dev_ptr;
	ddi_acc_handle_t	pci_config_handle;
	uint32_t		regval;
	int			i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first 0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}
}
#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

	nxge_intrs_disable(nxgep);
	nxge_destroy_dev(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

	nxgep->suspended = DDI_RESUME;
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);
	(void) nxge_tx_mac_enable(nxgep);
	nxge_intrs_enable(nxgep);
	nxgep->suspended = 0;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_resume status = 0x%x", status));
	return (status);
}

static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
	    nxgep->mac.portnum));

	status = nxge_link_init(nxgep);

	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "port%d Bad register acc handle", nxgep->mac.portnum));
		status = NXGE_ERROR;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_setup_dev status "
		    "(xcvr init 0x%08x)", status));
		goto nxge_setup_dev_exit;
	}

nxge_setup_dev_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_dev port %d status = 0x%08x",
	    nxgep->mac.portnum, status));

	return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	(void) nxge_hw_stop(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}

static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
	int			ddi_status = DDI_SUCCESS;
	uint_t			count;
	ddi_dma_cookie_t	cookie;
	uint_t			iommu_pagesize;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
	if (nxgep->niu_type != N2_NIU) {
		iommu_pagesize = dvma_pagesize(nxgep->dip);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
		    " default_block_size %d iommu_pagesize %d",
		    nxgep->sys_page_sz,
		    ddi_ptob(nxgep->dip, (ulong_t)1),
		    nxgep->rx_default_block_size,
		    iommu_pagesize));

		if (iommu_pagesize != 0) {
			if (nxgep->sys_page_sz == iommu_pagesize) {
				if (iommu_pagesize > 0x4000)
					nxgep->sys_page_sz = 0x4000;
			} else {
				if (nxgep->sys_page_sz > iommu_pagesize)
					nxgep->sys_page_sz = iommu_pagesize;
			}
		}
	}
	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    nxgep->sys_page_sz,
	    ddi_ptob(nxgep->dip, (ulong_t)1),
	    nxgep->rx_default_block_size,
	    nxgep->sys_page_mask));

	switch (nxgep->sys_page_sz) {
	default:
		nxgep->sys_page_sz = 0x1000;
		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		nxgep->rx_default_block_size = 0x2000;
		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	case 0x4000:
		nxgep->rx_default_block_size = 0x4000;
		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
		break;
	case 0x8000:
		nxgep->rx_default_block_size = 0x8000;
		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
		break;
	}

#ifndef USE_RX_BIG_BUF
	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
	nxgep->rx_default_block_size = 0x2000;
	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
	/*
	 * Get the system DMA burst size.
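	 * (Probed below by binding a throwaway spare DMA handle and
	 * querying it with ddi_dma_burstsizes(); the handle is then
	 * unbound and freed again.)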
	 */
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &nxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed "
		    " status 0x%x", ddi_status));
		goto nxge_get_soft_properties_exit;
	}

	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
	    (caddr_t)nxgep->dmasparehandle,
	    sizeof (nxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "Binding spare handle to find system"
		    " burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto nxge_get_soft_properties_fail1;
	}

	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

nxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_system_dma_pages status = 0x%08x", status));
	return (status);
}

static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));

	status = nxge_alloc_rx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		return (NXGE_ERROR);
	}

	status = nxge_alloc_tx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		nxge_free_rx_mem_pool(nxgep);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
	return (NXGE_OK);
}

static void
nxge_free_mem_pool(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));

	nxge_free_rx_mem_pool(nxgep);
	nxge_free_tx_mem_pool(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}

static nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
{
	int			i, j;
	uint32_t		ndmas, st_rdc;
	p_nxge_dma_pt_cfg_t	p_all_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	p_nxge_dma_pool_t	dma_poolp;
	p_nxge_dma_common_t	*dma_buf_p;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_nxge_dma_common_t	*dma_cntl_p;
	size_t			rx_buf_alloc_size;
	size_t			rx_cntl_alloc_size;
	uint32_t		*num_chunks;	/* per dma */
	nxge_status_t		status = NXGE_OK;

	uint32_t		nxge_port_rbr_size;
	uint32_t		nxge_port_rbr_spare_size;
	uint32_t		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	st_rdc = p_cfgp->start_rdc;
	ndmas = p_cfgp->max_rdcs;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    " nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));

	/*
	 * Allocate memory for each receive DMA channel.
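	 * (A buffer pool, a control/descriptor pool, and a per-channel
	 * chunk-count array are allocated first, then filled in for
	 * every RDC; they are wired into nxgep only when all channels
	 * succeed.)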
	 */
	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_cntl_poolp = (p_nxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(
	    sizeof (uint32_t) * ndmas, KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with the
	 * default block size.
	 * The RBR block counts are rounded up to a multiple of the
	 * post batch count (16).
	 */
	nxge_port_rbr_size = p_all_cfgp->rbr_size;
	nxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!nxge_port_rbr_size) {
		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
	}
	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	p_all_cfgp->rbr_size = nxge_port_rbr_size;
	nxge_port_rbr_spare_size = nxge_rbr_spare_size;

	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	/*
	 * N2/NIU limits the descriptor sizes: contiguous memory
	 * allocation for the data buffers is capped at 4M
	 * (contig_mem_alloc), and the control buffers must be
	 * little-endian (and so must use the ddi/dki memory
	 * allocation functions).
	 */
#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		nxge_port_rbr_spare_size = 0;
		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
		    (!ISP2(nxge_port_rbr_size))) {
			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
		}
		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
		    (!ISP2(nxge_port_rcr_size))) {
			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
		}
	}
#endif

	rx_buf_alloc_size = (nxgep->rx_default_block_size *
	    (nxge_port_rbr_size + nxge_port_rbr_spare_size));

	/*
	 * Addresses of receive block ring, receive completion ring and the
	 * mailbox must be all cache-aligned (64 bytes).
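	 * The control area is therefore sized below as: RBR descriptors
	 * (including spares) plus RCR entries plus one mailbox structure.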
1968 */ 1969 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 1970 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 1971 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 1972 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 1973 1974 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 1975 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 1976 "nxge_port_rcr_size = %d " 1977 "rx_cntl_alloc_size = %d", 1978 nxge_port_rbr_size, nxge_port_rbr_spare_size, 1979 nxge_port_rcr_size, 1980 rx_cntl_alloc_size)); 1981 1982 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1983 if (nxgep->niu_type == N2_NIU) { 1984 if (!ISP2(rx_buf_alloc_size)) { 1985 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1986 "==> nxge_alloc_rx_mem_pool: " 1987 " must be power of 2")); 1988 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1989 goto nxge_alloc_rx_mem_pool_exit; 1990 } 1991 1992 if (rx_buf_alloc_size > (1 << 22)) { 1993 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1994 "==> nxge_alloc_rx_mem_pool: " 1995 " limit size to 4M")); 1996 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1997 goto nxge_alloc_rx_mem_pool_exit; 1998 } 1999 2000 if (rx_cntl_alloc_size < 0x2000) { 2001 rx_cntl_alloc_size = 0x2000; 2002 } 2003 } 2004 #endif 2005 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2006 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2007 2008 /* 2009 * Allocate memory for receive buffers and descriptor rings. 2010 * Replace allocation functions with interface functions provided 2011 * by the partition manager when it is available. 2012 */ 2013 /* 2014 * Allocate memory for the receive buffer blocks. 2015 */ 2016 for (i = 0; i < ndmas; i++) { 2017 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2018 " nxge_alloc_rx_mem_pool to alloc mem: " 2019 " dma %d dma_buf_p %llx &dma_buf_p %llx", 2020 i, dma_buf_p[i], &dma_buf_p[i])); 2021 num_chunks[i] = 0; 2022 status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i], 2023 rx_buf_alloc_size, 2024 nxgep->rx_default_block_size, &num_chunks[i]); 2025 if (status != NXGE_OK) { 2026 break; 2027 } 2028 st_rdc++; 2029 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2030 " nxge_alloc_rx_mem_pool DONE alloc mem: " 2031 "dma %d dma_buf_p %llx &dma_buf_p %llx", i, 2032 dma_buf_p[i], &dma_buf_p[i])); 2033 } 2034 if (i < ndmas) { 2035 goto nxge_alloc_rx_mem_fail1; 2036 } 2037 /* 2038 * Allocate memory for descriptor rings and mailbox. 
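 * One physically contiguous control area is allocated per channel and
 * later carved into the RBR, the RCR and the mailbox by the RX DMA
 * setup code. On any failure the partial work is unwound below in
 * reverse order (fail2 frees the control areas, fail1 frees the data
 * buffers, then the bookkeeping arrays themselves are freed).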
2039 */ 2040 st_rdc = p_cfgp->start_rdc; 2041 for (j = 0; j < ndmas; j++) { 2042 status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j], 2043 rx_cntl_alloc_size); 2044 if (status != NXGE_OK) { 2045 break; 2046 } 2047 st_rdc++; 2048 } 2049 if (j < ndmas) { 2050 goto nxge_alloc_rx_mem_fail2; 2051 } 2052 2053 dma_poolp->ndmas = ndmas; 2054 dma_poolp->num_chunks = num_chunks; 2055 dma_poolp->buf_allocated = B_TRUE; 2056 nxgep->rx_buf_pool_p = dma_poolp; 2057 dma_poolp->dma_buf_pool_p = dma_buf_p; 2058 2059 dma_cntl_poolp->ndmas = ndmas; 2060 dma_cntl_poolp->buf_allocated = B_TRUE; 2061 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2062 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2063 2064 goto nxge_alloc_rx_mem_pool_exit; 2065 2066 nxge_alloc_rx_mem_fail2: 2067 /* Free control buffers */ 2068 j--; 2069 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2070 "==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j)); 2071 for (; j >= 0; j--) { 2072 nxge_free_rx_cntl_dma(nxgep, 2073 (p_nxge_dma_common_t)dma_cntl_p[j]); 2074 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2075 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", 2076 j)); 2077 } 2078 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2079 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j)); 2080 2081 nxge_alloc_rx_mem_fail1: 2082 /* Free data buffers */ 2083 i--; 2084 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2085 "==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i)); 2086 for (; i >= 0; i--) { 2087 nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2088 num_chunks[i]); 2089 } 2090 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2091 "==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i)); 2092 2093 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2094 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2095 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2096 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2097 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2098 2099 nxge_alloc_rx_mem_pool_exit: 2100 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2101 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2102 2103 return (status); 2104 } 2105 2106 static void 2107 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2108 { 2109 uint32_t i, ndmas; 2110 p_nxge_dma_pool_t dma_poolp; 2111 p_nxge_dma_common_t *dma_buf_p; 2112 p_nxge_dma_pool_t dma_cntl_poolp; 2113 p_nxge_dma_common_t *dma_cntl_p; 2114 uint32_t *num_chunks; 2115 2116 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2117 2118 dma_poolp = nxgep->rx_buf_pool_p; 2119 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2120 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2121 "<== nxge_free_rx_mem_pool " 2122 "(null rx buf pool or buf not allocated")); 2123 return; 2124 } 2125 2126 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2127 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2128 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2129 "<== nxge_free_rx_mem_pool " 2130 "(null rx cntl buf pool or cntl buf not allocated")); 2131 return; 2132 } 2133 2134 dma_buf_p = dma_poolp->dma_buf_pool_p; 2135 num_chunks = dma_poolp->num_chunks; 2136 2137 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2138 ndmas = dma_cntl_poolp->ndmas; 2139 2140 for (i = 0; i < ndmas; i++) { 2141 nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2142 } 2143 2144 for (i = 0; i < ndmas; i++) { 2145 nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]); 2146 } 2147 2148 for (i = 0; i < ndmas; i++) { 2149 KMEM_FREE(dma_buf_p[i], 2150 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2151 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2152 } 2153 2154 
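	/*
	 * All per-channel DMA memory was released above; what remains
	 * are the chunk-count array, the two pointer arrays and the
	 * pool descriptors themselves, freed in the reverse order of
	 * their allocation in nxge_alloc_rx_mem_pool().
	 */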
KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2155 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2156 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2157 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2158 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2159
2160 nxgep->rx_buf_pool_p = NULL;
2161 nxgep->rx_cntl_pool_p = NULL;
2162
2163 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
2164 }
2165
2166
2167 static nxge_status_t
2168 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2169 p_nxge_dma_common_t *dmap,
2170 size_t alloc_size, size_t block_size, uint32_t *num_chunks)
2171 {
2172 p_nxge_dma_common_t rx_dmap;
2173 nxge_status_t status = NXGE_OK;
2174 size_t total_alloc_size;
2175 size_t allocated = 0;
2176 int i, size_index, array_size;
2177
2178 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));
2179
2180 rx_dmap = (p_nxge_dma_common_t)
2181 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2182 KM_SLEEP);
2183
2184 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2185 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
2186 dma_channel, alloc_size, block_size, dmap));
2187
2188 total_alloc_size = alloc_size;
2189
2190 #if defined(RX_USE_RECLAIM_POST)
2191 total_alloc_size = alloc_size + alloc_size/4;
2192 #endif
2193
2194 i = 0;
2195 size_index = 0;
2196 array_size = sizeof (alloc_sizes)/sizeof (size_t);
2197 while ((size_index < array_size) &&
2198 (alloc_sizes[size_index] < alloc_size))
2199 size_index++;
2200 if (size_index >= array_size) {
2201 size_index = array_size - 1;
2202 }
2203
2204 while ((allocated < total_alloc_size) &&
2205 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2206 rx_dmap[i].dma_chunk_index = i;
2207 rx_dmap[i].block_size = block_size;
2208 rx_dmap[i].alength = alloc_sizes[size_index];
2209 rx_dmap[i].orig_alength = rx_dmap[i].alength;
2210 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2211 rx_dmap[i].dma_channel = dma_channel;
2212 rx_dmap[i].contig_alloc_type = B_FALSE;
2213
2214 /*
2215 * N2/NIU: data buffers must be contiguous as the driver
2216 * needs to call the hypervisor API to set up
2217 * logical pages.
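 * When the whole buffer fits in one chunk (NXGE_DMA_BLOCK == 1) the
 * chunk is flagged below and nxge_dma_mem_alloc() then takes the
 * contig_mem_alloc() path instead of ddi_dma_mem_alloc(); sketch:
 *
 *	rx_dmap[i].contig_alloc_type = B_TRUE;
 *	nxge_dma_mem_alloc() -> contig_mem_alloc(length)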
2218 */ 2219 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2220 rx_dmap[i].contig_alloc_type = B_TRUE; 2221 } 2222 2223 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2224 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2225 "i %d nblocks %d alength %d", 2226 dma_channel, i, &rx_dmap[i], block_size, 2227 i, rx_dmap[i].nblocks, 2228 rx_dmap[i].alength)); 2229 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2230 &nxge_rx_dma_attr, 2231 rx_dmap[i].alength, 2232 &nxge_dev_buf_dma_acc_attr, 2233 DDI_DMA_READ | DDI_DMA_STREAMING, 2234 (p_nxge_dma_common_t)(&rx_dmap[i])); 2235 if (status != NXGE_OK) { 2236 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2237 " nxge_alloc_rx_buf_dma: Alloc Failed ")); 2238 size_index--; 2239 } else { 2240 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2241 " alloc_rx_buf_dma allocated rdc %d " 2242 "chunk %d size %x dvma %x bufp %llx ", 2243 dma_channel, i, rx_dmap[i].alength, 2244 rx_dmap[i].ioaddr_pp, &rx_dmap[i])); 2245 i++; 2246 allocated += alloc_sizes[size_index]; 2247 } 2248 } 2249 2250 2251 if (allocated < total_alloc_size) { 2252 goto nxge_alloc_rx_mem_fail1; 2253 } 2254 2255 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2256 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2257 dma_channel, i)); 2258 *num_chunks = i; 2259 *dmap = rx_dmap; 2260 2261 goto nxge_alloc_rx_mem_exit; 2262 2263 nxge_alloc_rx_mem_fail1: 2264 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2265 2266 nxge_alloc_rx_mem_exit: 2267 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2268 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2269 2270 return (status); 2271 } 2272 2273 /*ARGSUSED*/ 2274 static void 2275 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2276 uint32_t num_chunks) 2277 { 2278 int i; 2279 2280 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2281 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2282 2283 for (i = 0; i < num_chunks; i++) { 2284 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2285 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2286 i, dmap)); 2287 nxge_dma_mem_free(dmap++); 2288 } 2289 2290 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2291 } 2292 2293 /*ARGSUSED*/ 2294 static nxge_status_t 2295 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2296 p_nxge_dma_common_t *dmap, size_t size) 2297 { 2298 p_nxge_dma_common_t rx_dmap; 2299 nxge_status_t status = NXGE_OK; 2300 2301 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2302 2303 rx_dmap = (p_nxge_dma_common_t) 2304 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2305 2306 rx_dmap->contig_alloc_type = B_FALSE; 2307 2308 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2309 &nxge_desc_dma_attr, 2310 size, 2311 &nxge_dev_desc_dma_acc_attr, 2312 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2313 rx_dmap); 2314 if (status != NXGE_OK) { 2315 goto nxge_alloc_rx_cntl_dma_fail1; 2316 } 2317 2318 *dmap = rx_dmap; 2319 goto nxge_alloc_rx_cntl_dma_exit; 2320 2321 nxge_alloc_rx_cntl_dma_fail1: 2322 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2323 2324 nxge_alloc_rx_cntl_dma_exit: 2325 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2326 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2327 2328 return (status); 2329 } 2330 2331 /*ARGSUSED*/ 2332 static void 2333 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2334 { 2335 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2336 2337 nxge_dma_mem_free(dmap); 2338 2339 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2340 } 2341 2342 static nxge_status_t 2343 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2344 { 2345 nxge_status_t 
status = NXGE_OK;
2346 int i, j;
2347 uint32_t ndmas, st_tdc;
2348 p_nxge_dma_pt_cfg_t p_all_cfgp;
2349 p_nxge_hw_pt_cfg_t p_cfgp;
2350 p_nxge_dma_pool_t dma_poolp;
2351 p_nxge_dma_common_t *dma_buf_p;
2352 p_nxge_dma_pool_t dma_cntl_poolp;
2353 p_nxge_dma_common_t *dma_cntl_p;
2354 size_t tx_buf_alloc_size;
2355 size_t tx_cntl_alloc_size;
2356 uint32_t *num_chunks; /* per dma */
2357 uint32_t bcopy_thresh;
2358
2359 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
2360
2361 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2362 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2363 st_tdc = p_cfgp->start_tdc;
2364 ndmas = p_cfgp->max_tdcs;
2365
2366 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: "
2367 "p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d",
2368 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs));
2369 /*
2370 * Allocate memory for each transmit DMA channel.
2371 */
2372 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2373 KM_SLEEP);
2374 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2375 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
2376
2377 dma_cntl_poolp = (p_nxge_dma_pool_t)
2378 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2379 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2380 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
2381
2382 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2383 /*
2384 * N2/NIU limits the descriptor sizes: contiguous memory
2385 * allocation (contig_mem_alloc) caps data buffers at 4M, and
2386 * control buffers, which are little endian, must use the DDI/DKI
2387 * memory allocation functions. The transmit ring is limited
2388 * to 8K (including the mailbox).
2389 */
2390 if (nxgep->niu_type == N2_NIU) {
2391 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
2392 (!ISP2(nxge_tx_ring_size))) {
2393 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
2394 }
2395 }
2396 #endif
2397
2398 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
2399
2400 /*
2401 * Assume that each DMA channel will be configured with the default
2402 * transmit buffer size for copying transmit data.
2403 * (Packets with payloads over this limit are not
2404 * copied.)
2405 */
2406 if (nxgep->niu_type == N2_NIU) {
2407 bcopy_thresh = TX_BCOPY_SIZE;
2408 } else {
2409 bcopy_thresh = nxge_bcopy_thresh;
2410 }
2411 tx_buf_alloc_size = (bcopy_thresh * nxge_tx_ring_size);
2412
2413 /*
2414 * Addresses of transmit descriptor ring and the
2415 * mailbox must all be cache-aligned (64 bytes).
2416 */
2417 tx_cntl_alloc_size = nxge_tx_ring_size;
2418 tx_cntl_alloc_size *= (sizeof (tx_desc_t));
2419 tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
2420
2421 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2422 if (nxgep->niu_type == N2_NIU) {
2423 if (!ISP2(tx_buf_alloc_size)) {
2424 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2425 "==> nxge_alloc_tx_mem_pool: "
2426 " must be power of 2"));
2427 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2428 goto nxge_alloc_tx_mem_pool_exit;
2429 }
2430
2431 if (tx_buf_alloc_size > (1 << 22)) {
2432 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2433 "==> nxge_alloc_tx_mem_pool: "
2434 " limit size to 4M"));
2435 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2436 goto nxge_alloc_tx_mem_pool_exit;
2437 }
2438
2439 if (tx_cntl_alloc_size < 0x2000) {
2440 tx_cntl_alloc_size = 0x2000;
2441 }
2442 }
2443 #endif
2444
2445 num_chunks = (uint32_t *)KMEM_ZALLOC(
2446 sizeof (uint32_t) * ndmas, KM_SLEEP);
2447
2448 /*
2449 * Allocate memory for transmit buffers and descriptor rings.
2450 * Replace allocation functions with interface functions provided
2451 * by the partition manager when it is available.
2452 *
2453 * Allocate memory for the transmit buffer pool.
2454 */
2455 for (i = 0; i < ndmas; i++) {
2456 num_chunks[i] = 0;
2457 status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i],
2458 tx_buf_alloc_size,
2459 bcopy_thresh, &num_chunks[i]);
2460 if (status != NXGE_OK) {
2461 break;
2462 }
2463 st_tdc++;
2464 }
2465 if (i < ndmas) {
2466 goto nxge_alloc_tx_mem_pool_fail1;
2467 }
2468
2469 st_tdc = p_cfgp->start_tdc;
2470 /*
2471 * Allocate memory for descriptor rings and mailbox.
2472 */
2473 for (j = 0; j < ndmas; j++) {
2474 status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j],
2475 tx_cntl_alloc_size);
2476 if (status != NXGE_OK) {
2477 break;
2478 }
2479 st_tdc++;
2480 }
2481 if (j < ndmas) {
2482 goto nxge_alloc_tx_mem_pool_fail2;
2483 }
2484
2485 dma_poolp->ndmas = ndmas;
2486 dma_poolp->num_chunks = num_chunks;
2487 dma_poolp->buf_allocated = B_TRUE;
2488 dma_poolp->dma_buf_pool_p = dma_buf_p;
2489 nxgep->tx_buf_pool_p = dma_poolp;
2490
2491 dma_cntl_poolp->ndmas = ndmas;
2492 dma_cntl_poolp->buf_allocated = B_TRUE;
2493 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2494 nxgep->tx_cntl_pool_p = dma_cntl_poolp;
2495
2496 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2497 "==> nxge_alloc_tx_mem_pool: start_tdc %d "
2498 "ndmas %d poolp->ndmas %d",
2499 st_tdc, ndmas, dma_poolp->ndmas));
2500
2501 goto nxge_alloc_tx_mem_pool_exit;
2502
2503 nxge_alloc_tx_mem_pool_fail2:
2504 /* Free control buffers */
2505 j--;
2506 for (; j >= 0; j--) {
2507 nxge_free_tx_cntl_dma(nxgep,
2508 (p_nxge_dma_common_t)dma_cntl_p[j]);
2509 }
2510
2511 nxge_alloc_tx_mem_pool_fail1:
2512 /* Free data buffers */
2513 i--;
2514 for (; i >= 0; i--) {
2515 nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
2516 num_chunks[i]);
2517 }
2518
2519 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2520 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2521 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2522 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2523 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2524
2525 nxge_alloc_tx_mem_pool_exit:
2526 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2527 "<== nxge_alloc_tx_mem_pool:status 0x%08x", status));
2528
2529 return (status);
2530 }
2531
2532 static nxge_status_t
2533 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2534 p_nxge_dma_common_t *dmap, size_t alloc_size,
2535 size_t block_size, uint32_t *num_chunks)
2536 {
2537 p_nxge_dma_common_t tx_dmap;
2538 nxge_status_t status = NXGE_OK;
2539 size_t total_alloc_size;
2540 size_t allocated = 0;
2541 int i, size_index, array_size;
2542
2543 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));
2544
2545 tx_dmap = (p_nxge_dma_common_t)
2546 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2547 KM_SLEEP);
2548
2549 total_alloc_size = alloc_size;
2550 i = 0;
2551 size_index = 0;
2552 array_size = sizeof (alloc_sizes) / sizeof (size_t);
2553 while ((size_index < array_size) &&
2554 (alloc_sizes[size_index] < alloc_size))
2555 size_index++;
2556 if (size_index >= array_size) {
2557 size_index = array_size - 1;
2558 }
2559
2560 while ((allocated < total_alloc_size) &&
2561 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2562
2563 tx_dmap[i].dma_chunk_index = i;
2564 tx_dmap[i].block_size = block_size;
2565 tx_dmap[i].alength = alloc_sizes[size_index];
2566 tx_dmap[i].orig_alength = tx_dmap[i].alength;
2567 tx_dmap[i].nblocks =
alloc_sizes[size_index] / block_size;
2568 tx_dmap[i].dma_channel = dma_channel;
2569 tx_dmap[i].contig_alloc_type = B_FALSE;
2570
2571 /*
2572 * N2/NIU: data buffers must be contiguous as the driver
2573 * needs to call the hypervisor API to set up
2574 * logical pages.
2575 */
2576 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2577 tx_dmap[i].contig_alloc_type = B_TRUE;
2578 }
2579
2580 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2581 &nxge_tx_dma_attr,
2582 tx_dmap[i].alength,
2583 &nxge_dev_buf_dma_acc_attr,
2584 DDI_DMA_WRITE | DDI_DMA_STREAMING,
2585 (p_nxge_dma_common_t)(&tx_dmap[i]));
2586 if (status != NXGE_OK) {
2587 size_index--;
2588 } else {
2589 i++;
2590 allocated += alloc_sizes[size_index];
2591 }
2592 }
2593
2594 if (allocated < total_alloc_size) {
2595 goto nxge_alloc_tx_mem_fail1;
2596 }
2597
2598 *num_chunks = i;
2599 *dmap = tx_dmap;
2600 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2601 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2602 *dmap, i));
2603 goto nxge_alloc_tx_mem_exit;
2604
2605 nxge_alloc_tx_mem_fail1:
2606 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2607
2608 nxge_alloc_tx_mem_exit:
2609 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2610 "<== nxge_alloc_tx_buf_dma status 0x%08x", status));
2611
2612 return (status);
2613 }
2614
2615 /*ARGSUSED*/
2616 static void
2617 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2618 uint32_t num_chunks)
2619 {
2620 int i;
2621
2622 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));
2623
2624 for (i = 0; i < num_chunks; i++) {
2625 nxge_dma_mem_free(dmap++);
2626 }
2627
2628 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
2629 }
2630
2631 /*ARGSUSED*/
2632 static nxge_status_t
2633 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2634 p_nxge_dma_common_t *dmap, size_t size)
2635 {
2636 p_nxge_dma_common_t tx_dmap;
2637 nxge_status_t status = NXGE_OK;
2638
2639 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
2640 tx_dmap = (p_nxge_dma_common_t)
2641 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2642
2643 tx_dmap->contig_alloc_type = B_FALSE;
2644
2645 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2646 &nxge_desc_dma_attr,
2647 size,
2648 &nxge_dev_desc_dma_acc_attr,
2649 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2650 tx_dmap);
2651 if (status != NXGE_OK) {
2652 goto nxge_alloc_tx_cntl_dma_fail1;
2653 }
2654
2655 *dmap = tx_dmap;
2656 goto nxge_alloc_tx_cntl_dma_exit;
2657
2658 nxge_alloc_tx_cntl_dma_fail1:
2659 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
2660
2661 nxge_alloc_tx_cntl_dma_exit:
2662 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2663 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
2664
2665 return (status);
2666 }
2667
2668 /*ARGSUSED*/
2669 static void
2670 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2671 {
2672 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
2673
2674 nxge_dma_mem_free(dmap);
2675
2676 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
2677 }
2678
2679 static void
2680 nxge_free_tx_mem_pool(p_nxge_t nxgep)
2681 {
2682 uint32_t i, ndmas;
2683 p_nxge_dma_pool_t dma_poolp;
2684 p_nxge_dma_common_t *dma_buf_p;
2685 p_nxge_dma_pool_t dma_cntl_poolp;
2686 p_nxge_dma_common_t *dma_cntl_p;
2687 uint32_t *num_chunks;
2688
2689 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool"));
2690
2691 dma_poolp = nxgep->tx_buf_pool_p;
2692 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2693 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2694 "<== nxge_free_tx_mem_pool "
2695 "(null tx buf pool or buf not allocated)"));
2696 return;
2697 }
2698
2699 dma_cntl_poolp = nxgep->tx_cntl_pool_p;
2700 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2701 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2702 "<== nxge_free_tx_mem_pool "
2703 "(null tx cntl buf pool or cntl buf not allocated)"));
2704 return;
2705 }
2706
2707 dma_buf_p = dma_poolp->dma_buf_pool_p;
2708 num_chunks = dma_poolp->num_chunks;
2709
2710 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2711 ndmas = dma_cntl_poolp->ndmas;
2712
2713 for (i = 0; i < ndmas; i++) {
2714 nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
2715 }
2716
2717 for (i = 0; i < ndmas; i++) {
2718 nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]);
2719 }
2720
2721 for (i = 0; i < ndmas; i++) {
2722 KMEM_FREE(dma_buf_p[i],
2723 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2724 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
2725 }
2726
2727 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2728 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2729 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2730 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2731 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2732
2733 nxgep->tx_buf_pool_p = NULL;
2734 nxgep->tx_cntl_pool_p = NULL;
2735
2736 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool"));
2737 }
2738
2739 /*ARGSUSED*/
2740 static nxge_status_t
2741 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
2742 struct ddi_dma_attr *dma_attrp,
2743 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2744 p_nxge_dma_common_t dma_p)
2745 {
2746 caddr_t kaddrp;
2747 int ddi_status = DDI_SUCCESS;
2748 boolean_t contig_alloc_type;
2749
2750 contig_alloc_type = dma_p->contig_alloc_type;
2751
2752 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
2753 /*
2754 * contig_alloc_type (contiguous memory) is only allowed
2755 * for N2/NIU.
2756 */
2757 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2758 "nxge_dma_mem_alloc: alloc type not allowed (%d)",
2759 dma_p->contig_alloc_type));
2760 return (NXGE_ERROR | NXGE_DDI_FAILED);
2761 }
2762
2763 dma_p->dma_handle = NULL;
2764 dma_p->acc_handle = NULL;
2765 dma_p->kaddrp = dma_p->last_kaddrp = NULL;
2766 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
2767 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
2768 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2769 if (ddi_status != DDI_SUCCESS) {
2770 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2771 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2772 return (NXGE_ERROR | NXGE_DDI_FAILED);
2773 }
2774
2775 switch (contig_alloc_type) {
2776 case B_FALSE:
2777 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length,
2778 acc_attr_p,
2779 xfer_flags,
2780 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2781 &dma_p->acc_handle);
2782 if (ddi_status != DDI_SUCCESS) {
2783 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2784 "nxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2785 ddi_dma_free_handle(&dma_p->dma_handle);
2786 dma_p->dma_handle = NULL;
2787 return (NXGE_ERROR | NXGE_DDI_FAILED);
2788 }
2789 if (dma_p->alength < length) {
2790 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2791 "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
2792 "< length."));
2793 ddi_dma_mem_free(&dma_p->acc_handle);
2794 ddi_dma_free_handle(&dma_p->dma_handle);
2795 dma_p->acc_handle = NULL;
2796 dma_p->dma_handle = NULL;
2797 return (NXGE_ERROR);
2798 }
2799
2800 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2801 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2802 &dma_p->dma_cookie, &dma_p->ncookies);
2803 if (ddi_status != DDI_DMA_MAPPED) {
2804 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2805 "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2806 "(status 0x%x ncookies %d.)", ddi_status,
2807 dma_p->ncookies));
2808 if (dma_p->acc_handle) {
2809 ddi_dma_mem_free(&dma_p->acc_handle);
2810 dma_p->acc_handle = NULL;
2811 }
2812 ddi_dma_free_handle(&dma_p->dma_handle);
2813 dma_p->dma_handle = NULL;
2814 return (NXGE_ERROR | NXGE_DDI_FAILED);
2815 }
2816
2817 if (dma_p->ncookies != 1) {
2818 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2819 "nxge_dma_mem_alloc:ddi_dma_addr_bind "
2820 "> 1 cookie "
2821 "(status 0x%x ncookies %d.)", ddi_status,
2822 dma_p->ncookies));
2823 if (dma_p->acc_handle) {
2824 ddi_dma_mem_free(&dma_p->acc_handle);
2825 dma_p->acc_handle = NULL;
2826 }
2827 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2828 ddi_dma_free_handle(&dma_p->dma_handle);
2829 dma_p->dma_handle = NULL;
2830 return (NXGE_ERROR);
2831 }
2832 break;
2833
2834 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2835 case B_TRUE:
2836 kaddrp = (caddr_t)contig_mem_alloc(length);
2837 if (kaddrp == NULL) {
2838 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2839 "nxge_dma_mem_alloc:contig_mem_alloc failed."));
2840 ddi_dma_free_handle(&dma_p->dma_handle);
2841 return (NXGE_ERROR | NXGE_DDI_FAILED);
2842 }
2843
2844 dma_p->alength = length;
2845 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2846 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2847 &dma_p->dma_cookie, &dma_p->ncookies);
2848 if (ddi_status != DDI_DMA_MAPPED) {
2849 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2850 "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2851 "(status 0x%x ncookies %d.)", ddi_status,
2852 dma_p->ncookies));
2853
2854 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2855 "==> nxge_dma_mem_alloc: (not mapped)"
2856 "length %lu (0x%x) "
2857 "free contig kaddrp $%p "
2858 "va_to_pa $%p",
2859 length, length,
2860
kaddrp,
2861 va_to_pa(kaddrp)));
2862
2863
2864 contig_mem_free((void *)kaddrp, length);
2865 ddi_dma_free_handle(&dma_p->dma_handle);
2866
2867 dma_p->dma_handle = NULL;
2868 dma_p->acc_handle = NULL;
2869 dma_p->alength = 0;
2870 dma_p->kaddrp = NULL;
2871
2872 return (NXGE_ERROR | NXGE_DDI_FAILED);
2873 }
2874
2875 if (dma_p->ncookies != 1 ||
2876 (dma_p->dma_cookie.dmac_laddress == 0)) {
2877 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2878 "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
2879 "cookie or "
2880 "dmac_laddress is NULL $%p size %d "
2881 " (status 0x%x ncookies %d.)",
2882 ddi_status,
2883 dma_p->dma_cookie.dmac_laddress,
2884 dma_p->dma_cookie.dmac_size,
2885 dma_p->ncookies));
2886
2887 contig_mem_free((void *)kaddrp, length);
2888 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2889 ddi_dma_free_handle(&dma_p->dma_handle);
2890
2891 dma_p->alength = 0;
2892 dma_p->dma_handle = NULL;
2893 dma_p->acc_handle = NULL;
2894 dma_p->kaddrp = NULL;
2895
2896 return (NXGE_ERROR | NXGE_DDI_FAILED);
2897 }
2898 break;
2899
2900 #else
2901 case B_TRUE:
2902 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2903 "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
2904 return (NXGE_ERROR | NXGE_DDI_FAILED);
2905 #endif
2906 }
2907
2908 dma_p->kaddrp = kaddrp;
2909 dma_p->last_kaddrp = (unsigned char *)kaddrp +
2910 dma_p->alength - RXBUF_64B_ALIGNED;
2911 #if defined(__i386)
2912 dma_p->ioaddr_pp =
2913 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2914 #else
2915 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
2916 #endif
2917 dma_p->last_ioaddr_pp =
2918 #if defined(__i386)
2919 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
2920 #else
2921 (unsigned char *)dma_p->dma_cookie.dmac_laddress +
2922 #endif
2923 dma_p->alength - RXBUF_64B_ALIGNED;
2924
2925 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2926
2927 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2928 dma_p->orig_ioaddr_pp =
2929 (unsigned char *)dma_p->dma_cookie.dmac_laddress;
2930 dma_p->orig_alength = length;
2931 dma_p->orig_kaddrp = kaddrp;
2932 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
2933 #endif
2934
2935 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
2936 "dma buffer allocated: dma_p $%p "
2937 "return dmac_laddress from cookie $%p cookie dmac_size %d "
2938 "dma_p->ioaddr_p $%p "
2939 "dma_p->orig_ioaddr_p $%p "
2940 "orig_vatopa $%p "
2941 "alength %d (0x%x) "
2942 "kaddrp $%p "
2943 "length %d (0x%x)",
2944 dma_p,
2945 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
2946 dma_p->ioaddr_pp,
2947 dma_p->orig_ioaddr_pp,
2948 dma_p->orig_vatopa,
2949 dma_p->alength, dma_p->alength,
2950 kaddrp,
2951 length, length));
2952
2953 return (NXGE_OK);
2954 }
2955
2956 static void
2957 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
2958 {
2959 if (dma_p->dma_handle != NULL) {
2960 if (dma_p->ncookies) {
2961 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2962 dma_p->ncookies = 0;
2963 }
2964 ddi_dma_free_handle(&dma_p->dma_handle);
2965 dma_p->dma_handle = NULL;
2966 }
2967
2968 if (dma_p->acc_handle != NULL) {
2969 ddi_dma_mem_free(&dma_p->acc_handle);
2970 dma_p->acc_handle = NULL;
2971 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2972 }
2973
2974 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2975 if (dma_p->contig_alloc_type &&
2976 dma_p->orig_kaddrp && dma_p->orig_alength) {
2977 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
2978 "kaddrp $%p (orig_kaddrp $%p) "
2979 "mem type %d "
2980 "orig_alength %d "
2981 "alength 0x%x (%d)",
2982
dma_p->kaddrp,
2983 dma_p->orig_kaddrp,
2984 dma_p->contig_alloc_type,
2985 dma_p->orig_alength,
2986 dma_p->alength, dma_p->alength));
2987
2988 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
2989 dma_p->orig_alength = 0;
2990 dma_p->orig_kaddrp = NULL;
2991 dma_p->contig_alloc_type = B_FALSE;
2992 }
2993 #endif
2994 dma_p->kaddrp = NULL;
2995 dma_p->alength = 0;
2996 }
2997
2998 /*
2999 * nxge_m_start() -- start transmitting and receiving.
3000 *
3001 * This function is called by the MAC layer when the first
3002 * stream is opened, to prepare the hardware for transmitting
3003 * and receiving packets.
3004 */
3005 static int
3006 nxge_m_start(void *arg)
3007 {
3008 p_nxge_t nxgep = (p_nxge_t)arg;
3009
3010 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3011
3012 MUTEX_ENTER(nxgep->genlock);
3013 if (nxge_init(nxgep) != NXGE_OK) {
3014 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3015 "<== nxge_m_start: initialization failed"));
3016 MUTEX_EXIT(nxgep->genlock);
3017 return (EIO);
3018 }
3019
3020 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
3021 goto nxge_m_start_exit;
3022 /*
3023 * Start timer to check the system error and tx hangs
3024 */
3025 nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
3026 NXGE_CHECK_TIMER);
3027
3028 nxgep->link_notify = B_TRUE;
3029
3030 nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3031
3032 nxge_m_start_exit:
3033 MUTEX_EXIT(nxgep->genlock);
3034 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3035 return (0);
3036 }
3037
3038 /*
3039 * nxge_m_stop(): stop transmitting and receiving.
3040 */
3041 static void
3042 nxge_m_stop(void *arg)
3043 {
3044 p_nxge_t nxgep = (p_nxge_t)arg;
3045
3046 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
3047
3048 if (nxgep->nxge_timerid) {
3049 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
3050 nxgep->nxge_timerid = 0;
3051 }
3052
3053 MUTEX_ENTER(nxgep->genlock);
3054 nxge_uninit(nxgep);
3055
3056 nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
3057
3058 MUTEX_EXIT(nxgep->genlock);
3059
3060 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
3061 }
3062
3063 static int
3064 nxge_m_unicst(void *arg, const uint8_t *macaddr)
3065 {
3066 p_nxge_t nxgep = (p_nxge_t)arg;
3067 struct ether_addr addrp;
3068
3069 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));
3070
3071 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
3072 if (nxge_set_mac_addr(nxgep, &addrp)) {
3073 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3074 "<== nxge_m_unicst: set unicast failed"));
3075 return (EINVAL);
3076 }
3077
3078 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));
3079
3080 return (0);
3081 }
3082
3083 static int
3084 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3085 {
3086 p_nxge_t nxgep = (p_nxge_t)arg;
3087 struct ether_addr addrp;
3088
3089 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3090 "==> nxge_m_multicst: add %d", add));
3091
3092 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3093 if (add) {
3094 if (nxge_add_mcast_addr(nxgep, &addrp)) {
3095 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3096 "<== nxge_m_multicst: add multicast failed"));
3097 return (EINVAL);
3098 }
3099 } else {
3100 if (nxge_del_mcast_addr(nxgep, &addrp)) {
3101 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3102 "<== nxge_m_multicst: del multicast failed"));
3103 return (EINVAL);
3104 }
3105 }
3106
3107 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3108
3109 return (0);
3110 }
3111
3112 static int
3113 nxge_m_promisc(void *arg, boolean_t on)
3114 {
3115 p_nxge_t nxgep = (p_nxge_t)arg;
3116
3117 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3118
"==> nxge_m_promisc: on %d", on)); 3119 3120 if (nxge_set_promisc(nxgep, on)) { 3121 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3122 "<== nxge_m_promisc: set promisc failed")); 3123 return (EINVAL); 3124 } 3125 3126 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3127 "<== nxge_m_promisc: on %d", on)); 3128 3129 return (0); 3130 } 3131 3132 static void 3133 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3134 { 3135 p_nxge_t nxgep = (p_nxge_t)arg; 3136 struct iocblk *iocp; 3137 boolean_t need_privilege; 3138 int err; 3139 int cmd; 3140 3141 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3142 3143 iocp = (struct iocblk *)mp->b_rptr; 3144 iocp->ioc_error = 0; 3145 need_privilege = B_TRUE; 3146 cmd = iocp->ioc_cmd; 3147 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3148 switch (cmd) { 3149 default: 3150 miocnak(wq, mp, 0, EINVAL); 3151 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3152 return; 3153 3154 case LB_GET_INFO_SIZE: 3155 case LB_GET_INFO: 3156 case LB_GET_MODE: 3157 need_privilege = B_FALSE; 3158 break; 3159 case LB_SET_MODE: 3160 break; 3161 3162 case ND_GET: 3163 need_privilege = B_FALSE; 3164 break; 3165 case ND_SET: 3166 break; 3167 3168 case NXGE_GET_MII: 3169 case NXGE_PUT_MII: 3170 case NXGE_GET64: 3171 case NXGE_PUT64: 3172 case NXGE_GET_TX_RING_SZ: 3173 case NXGE_GET_TX_DESC: 3174 case NXGE_TX_SIDE_RESET: 3175 case NXGE_RX_SIDE_RESET: 3176 case NXGE_GLOBAL_RESET: 3177 case NXGE_RESET_MAC: 3178 case NXGE_TX_REGS_DUMP: 3179 case NXGE_RX_REGS_DUMP: 3180 case NXGE_INT_REGS_DUMP: 3181 case NXGE_VIR_INT_REGS_DUMP: 3182 case NXGE_PUT_TCAM: 3183 case NXGE_GET_TCAM: 3184 case NXGE_RTRACE: 3185 case NXGE_RDUMP: 3186 3187 need_privilege = B_FALSE; 3188 break; 3189 case NXGE_INJECT_ERR: 3190 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3191 nxge_err_inject(nxgep, wq, mp); 3192 break; 3193 } 3194 3195 if (need_privilege) { 3196 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3197 if (err != 0) { 3198 miocnak(wq, mp, 0, err); 3199 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3200 "<== nxge_m_ioctl: no priv")); 3201 return; 3202 } 3203 } 3204 3205 switch (cmd) { 3206 case ND_GET: 3207 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command")); 3208 case ND_SET: 3209 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command")); 3210 nxge_param_ioctl(nxgep, wq, mp, iocp); 3211 break; 3212 3213 case LB_GET_MODE: 3214 case LB_SET_MODE: 3215 case LB_GET_INFO_SIZE: 3216 case LB_GET_INFO: 3217 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3218 break; 3219 3220 case NXGE_GET_MII: 3221 case NXGE_PUT_MII: 3222 case NXGE_PUT_TCAM: 3223 case NXGE_GET_TCAM: 3224 case NXGE_GET64: 3225 case NXGE_PUT64: 3226 case NXGE_GET_TX_RING_SZ: 3227 case NXGE_GET_TX_DESC: 3228 case NXGE_TX_SIDE_RESET: 3229 case NXGE_RX_SIDE_RESET: 3230 case NXGE_GLOBAL_RESET: 3231 case NXGE_RESET_MAC: 3232 case NXGE_TX_REGS_DUMP: 3233 case NXGE_RX_REGS_DUMP: 3234 case NXGE_INT_REGS_DUMP: 3235 case NXGE_VIR_INT_REGS_DUMP: 3236 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3237 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3238 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3239 break; 3240 } 3241 3242 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3243 } 3244 3245 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3246 3247 static void 3248 nxge_m_resources(void *arg) 3249 { 3250 p_nxge_t nxgep = arg; 3251 mac_rx_fifo_t mrf; 3252 p_rx_rcr_rings_t rcr_rings; 3253 p_rx_rcr_ring_t *rcr_p; 3254 uint32_t i, ndmas; 3255 nxge_status_t status; 3256 3257 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 3258 3259 
MUTEX_ENTER(nxgep->genlock); 3260 3261 /* 3262 * CR 6492541 Check to see if the drv_state has been initialized, 3263 * if not * call nxge_init(). 3264 */ 3265 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3266 status = nxge_init(nxgep); 3267 if (status != NXGE_OK) 3268 goto nxge_m_resources_exit; 3269 } 3270 3271 mrf.mrf_type = MAC_RX_FIFO; 3272 mrf.mrf_blank = nxge_rx_hw_blank; 3273 mrf.mrf_arg = (void *)nxgep; 3274 3275 mrf.mrf_normal_blank_time = 128; 3276 mrf.mrf_normal_pkt_count = 8; 3277 rcr_rings = nxgep->rx_rcr_rings; 3278 rcr_p = rcr_rings->rcr_rings; 3279 ndmas = rcr_rings->ndmas; 3280 3281 /* 3282 * Export our receive resources to the MAC layer. 3283 */ 3284 for (i = 0; i < ndmas; i++) { 3285 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle = 3286 mac_resource_add(nxgep->mach, 3287 (mac_resource_t *)&mrf); 3288 3289 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3290 "==> nxge_m_resources: vdma %d dma %d " 3291 "rcrptr 0x%016llx mac_handle 0x%016llx", 3292 i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc, 3293 rcr_p[i], 3294 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle)); 3295 } 3296 3297 nxge_m_resources_exit: 3298 MUTEX_EXIT(nxgep->genlock); 3299 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources")); 3300 } 3301 3302 static void 3303 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory) 3304 { 3305 p_nxge_mmac_stats_t mmac_stats; 3306 int i; 3307 nxge_mmac_t *mmac_info; 3308 3309 mmac_info = &nxgep->nxge_mmac_info; 3310 3311 mmac_stats = &nxgep->statsp->mmac_stats; 3312 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 3313 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 3314 3315 for (i = 0; i < ETHERADDRL; i++) { 3316 if (factory) { 3317 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3318 = mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i]; 3319 } else { 3320 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3321 = mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i]; 3322 } 3323 } 3324 } 3325 3326 /* 3327 * nxge_altmac_set() -- Set an alternate MAC address 3328 */ 3329 static int 3330 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot) 3331 { 3332 uint8_t addrn; 3333 uint8_t portn; 3334 npi_mac_addr_t altmac; 3335 hostinfo_t mac_rdc; 3336 p_nxge_class_pt_cfg_t clscfgp; 3337 3338 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 3339 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 3340 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 3341 3342 portn = nxgep->mac.portnum; 3343 addrn = (uint8_t)slot - 1; 3344 3345 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn, 3346 addrn, &altmac) != NPI_SUCCESS) 3347 return (EIO); 3348 3349 /* 3350 * Set the rdc table number for the host info entry 3351 * for this mac address slot. 3352 */ 3353 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 3354 mac_rdc.value = 0; 3355 mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl; 3356 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 3357 3358 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 3359 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 3360 return (EIO); 3361 } 3362 3363 /* 3364 * Enable comparison with the alternate MAC address. 3365 * While the first alternate addr is enabled by bit 1 of register 3366 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 3367 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 3368 * accordingly before calling npi_mac_altaddr_entry. 
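 * For example (illustrative): alternate-MAC slot 1 is enabled through
 * addrn 0 on an XMAC port (bit 0 of XMAC_ADDR_CMPEN) but through
 * addrn 1 on a BMAC port (bit 1 of BMAC_ALTAD_CMPEN); the converted
 * addrn feeds npi_mac_altaddr_enable() below.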
3369 */ 3370 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 3371 addrn = (uint8_t)slot - 1; 3372 else 3373 addrn = (uint8_t)slot; 3374 3375 if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn) 3376 != NPI_SUCCESS) 3377 return (EIO); 3378 3379 return (0); 3380 } 3381 3382 /* 3383 * nxeg_m_mmac_add() - find an unused address slot, set the address 3384 * value to the one specified, enable the port to start filtering on 3385 * the new MAC address. Returns 0 on success. 3386 */ 3387 static int 3388 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 3389 { 3390 p_nxge_t nxgep = arg; 3391 mac_addr_slot_t slot; 3392 nxge_mmac_t *mmac_info; 3393 int err; 3394 nxge_status_t status; 3395 3396 mutex_enter(nxgep->genlock); 3397 3398 /* 3399 * Make sure that nxge is initialized, if _start() has 3400 * not been called. 3401 */ 3402 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3403 status = nxge_init(nxgep); 3404 if (status != NXGE_OK) { 3405 mutex_exit(nxgep->genlock); 3406 return (ENXIO); 3407 } 3408 } 3409 3410 mmac_info = &nxgep->nxge_mmac_info; 3411 if (mmac_info->naddrfree == 0) { 3412 mutex_exit(nxgep->genlock); 3413 return (ENOSPC); 3414 } 3415 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3416 maddr->mma_addrlen)) { 3417 mutex_exit(nxgep->genlock); 3418 return (EINVAL); 3419 } 3420 /* 3421 * Search for the first available slot. Because naddrfree 3422 * is not zero, we are guaranteed to find one. 3423 * Slot 0 is for unique (primary) MAC. The first alternate 3424 * MAC slot is slot 1. 3425 * Each of the first two ports of Neptune has 16 alternate 3426 * MAC slots but only the first 7 (or 15) slots have assigned factory 3427 * MAC addresses. We first search among the slots without bundled 3428 * factory MACs. If we fail to find one in that range, then we 3429 * search the slots with bundled factory MACs. A factory MAC 3430 * will be wasted while the slot is used with a user MAC address. 3431 * But the slot could be used by factory MAC again after calling 3432 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 3433 */ 3434 if (mmac_info->num_factory_mmac < mmac_info->num_mmac) { 3435 for (slot = mmac_info->num_factory_mmac + 1; 3436 slot <= mmac_info->num_mmac; slot++) { 3437 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3438 break; 3439 } 3440 if (slot > mmac_info->num_mmac) { 3441 for (slot = 1; slot <= mmac_info->num_factory_mmac; 3442 slot++) { 3443 if (!(mmac_info->mac_pool[slot].flags 3444 & MMAC_SLOT_USED)) 3445 break; 3446 } 3447 } 3448 } else { 3449 for (slot = 1; slot <= mmac_info->num_mmac; slot++) { 3450 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3451 break; 3452 } 3453 } 3454 ASSERT(slot <= mmac_info->num_mmac); 3455 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) { 3456 mutex_exit(nxgep->genlock); 3457 return (err); 3458 } 3459 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 3460 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 3461 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 3462 mmac_info->naddrfree--; 3463 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3464 3465 maddr->mma_slot = slot; 3466 3467 mutex_exit(nxgep->genlock); 3468 return (0); 3469 } 3470 3471 /* 3472 * This function reserves an unused slot and programs the slot and the HW 3473 * with a factory mac address. 
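 * Usage sketch (illustrative): a caller may pass maddr->mma_slot == -1
 * to request the first free factory slot; on success the chosen slot,
 * the factory address and the MMAC_SLOT_USED | MMAC_VENDOR_ADDR flags
 * are passed back through *maddr.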
3474 */ 3475 static int 3476 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 3477 { 3478 p_nxge_t nxgep = arg; 3479 mac_addr_slot_t slot; 3480 nxge_mmac_t *mmac_info; 3481 int err; 3482 nxge_status_t status; 3483 3484 mutex_enter(nxgep->genlock); 3485 3486 /* 3487 * Make sure that nxge is initialized, if _start() has 3488 * not been called. 3489 */ 3490 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3491 status = nxge_init(nxgep); 3492 if (status != NXGE_OK) { 3493 mutex_exit(nxgep->genlock); 3494 return (ENXIO); 3495 } 3496 } 3497 3498 mmac_info = &nxgep->nxge_mmac_info; 3499 if (mmac_info->naddrfree == 0) { 3500 mutex_exit(nxgep->genlock); 3501 return (ENOSPC); 3502 } 3503 3504 slot = maddr->mma_slot; 3505 if (slot == -1) { /* -1: Take the first available slot */ 3506 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 3507 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3508 break; 3509 } 3510 if (slot > mmac_info->num_factory_mmac) { 3511 mutex_exit(nxgep->genlock); 3512 return (ENOSPC); 3513 } 3514 } 3515 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 3516 /* 3517 * Do not support factory MAC at a slot greater than 3518 * num_factory_mmac even when there are available factory 3519 * MAC addresses because the alternate MACs are bundled with 3520 * slot[1] through slot[num_factory_mmac] 3521 */ 3522 mutex_exit(nxgep->genlock); 3523 return (EINVAL); 3524 } 3525 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3526 mutex_exit(nxgep->genlock); 3527 return (EBUSY); 3528 } 3529 /* Verify the address to be reserved */ 3530 if (!mac_unicst_verify(nxgep->mach, 3531 mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 3532 mutex_exit(nxgep->genlock); 3533 return (EINVAL); 3534 } 3535 if (err = nxge_altmac_set(nxgep, 3536 mmac_info->factory_mac_pool[slot], slot)) { 3537 mutex_exit(nxgep->genlock); 3538 return (err); 3539 } 3540 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 3541 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3542 mmac_info->naddrfree--; 3543 3544 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 3545 mutex_exit(nxgep->genlock); 3546 3547 /* Pass info back to the caller */ 3548 maddr->mma_slot = slot; 3549 maddr->mma_addrlen = ETHERADDRL; 3550 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3551 3552 return (0); 3553 } 3554 3555 /* 3556 * Remove the specified mac address and update the HW not to filter 3557 * the mac address anymore. 3558 */ 3559 static int 3560 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 3561 { 3562 p_nxge_t nxgep = arg; 3563 nxge_mmac_t *mmac_info; 3564 uint8_t addrn; 3565 uint8_t portn; 3566 int err = 0; 3567 nxge_status_t status; 3568 3569 mutex_enter(nxgep->genlock); 3570 3571 /* 3572 * Make sure that nxge is initialized, if _start() has 3573 * not been called. 
3574 */
3575 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3576 status = nxge_init(nxgep);
3577 if (status != NXGE_OK) {
3578 mutex_exit(nxgep->genlock);
3579 return (ENXIO);
3580 }
3581 }
3582
3583 mmac_info = &nxgep->nxge_mmac_info;
3584 if (slot < 1 || slot > mmac_info->num_mmac) {
3585 mutex_exit(nxgep->genlock);
3586 return (EINVAL);
3587 }
3588
3589 portn = nxgep->mac.portnum;
3590 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
3591 addrn = (uint8_t)slot - 1;
3592 else
3593 addrn = (uint8_t)slot;
3594
3595 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3596 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
3597 == NPI_SUCCESS) {
3598 mmac_info->naddrfree++;
3599 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
3600 /*
3601 * Regardless of whether the MAC we just stopped
3602 * filtering is a user addr or a factory addr, we
3603 * must set the MMAC_VENDOR_ADDR flag if this slot
3604 * has an associated factory MAC to indicate that
3605 * a factory MAC is available.
3606 */
3607 if (slot <= mmac_info->num_factory_mmac) {
3608 mmac_info->mac_pool[slot].flags
3609 |= MMAC_VENDOR_ADDR;
3610 }
3611 /*
3612 * Clear mac_pool[slot].addr so that kstat shows 0
3613 * alternate MAC address if the slot is not used.
3614 * (But nxge_m_mmac_get returns the factory MAC even
3615 * when the slot is not used!)
3616 */
3617 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
3618 nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3619 } else {
3620 err = EIO;
3621 }
3622 } else {
3623 err = EINVAL;
3624 }
3625
3626 mutex_exit(nxgep->genlock);
3627 return (err);
3628 }
3629
3630
3631 /*
3632 * Modify a mac address added by nxge_m_mmac_add() or nxge_m_mmac_reserve().
3633 */
3634 static int
3635 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
3636 {
3637 p_nxge_t nxgep = arg;
3638 mac_addr_slot_t slot;
3639 nxge_mmac_t *mmac_info;
3640 int err = 0;
3641 nxge_status_t status;
3642
3643 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
3644 maddr->mma_addrlen))
3645 return (EINVAL);
3646
3647 slot = maddr->mma_slot;
3648
3649 mutex_enter(nxgep->genlock);
3650
3651 /*
3652 * Make sure that nxge is initialized, if _start() has
3653 * not been called.
3654 */
3655 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3656 status = nxge_init(nxgep);
3657 if (status != NXGE_OK) {
3658 mutex_exit(nxgep->genlock);
3659 return (ENXIO);
3660 }
3661 }
3662
3663 mmac_info = &nxgep->nxge_mmac_info;
3664 if (slot < 1 || slot > mmac_info->num_mmac) {
3665 mutex_exit(nxgep->genlock);
3666 return (EINVAL);
3667 }
3668 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3669 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
3670 == 0) {
3671 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
3672 ETHERADDRL);
3673 /*
3674 * Assume that the MAC passed down from the caller
3675 * is not a factory MAC address (callers should
3676 * call mmac_remove followed by mmac_reserve if
3677 * they want to use the factory MAC for this slot).
3678 */
3679 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
3680 nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3681 }
3682 } else {
3683 err = EINVAL;
3684 }
3685 mutex_exit(nxgep->genlock);
3686 return (err);
3687 }
3688
3689 /*
3690 * nxge_m_mmac_get() - Get the MAC address and other information
3691 * related to the slot. mma_flags should be set to 0 in the call.
3692 * Note: although kstat shows MAC address as zero when a slot is 3693 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC 3694 * to the caller as long as the slot is not using a user MAC address. 3695 * The following table shows the rules, 3696 * 3697 * USED VENDOR mma_addr 3698 * ------------------------------------------------------------ 3699 * (1) Slot uses a user MAC: yes no user MAC 3700 * (2) Slot uses a factory MAC: yes yes factory MAC 3701 * (3) Slot is not used but is 3702 * factory MAC capable: no yes factory MAC 3703 * (4) Slot is not used and is 3704 * not factory MAC capable: no no 0 3705 * ------------------------------------------------------------ 3706 */ 3707 static int 3708 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr) 3709 { 3710 nxge_t *nxgep = arg; 3711 mac_addr_slot_t slot; 3712 nxge_mmac_t *mmac_info; 3713 nxge_status_t status; 3714 3715 slot = maddr->mma_slot; 3716 3717 mutex_enter(nxgep->genlock); 3718 3719 /* 3720 * Make sure that nxge is initialized, if _start() has 3721 * not been called. 3722 */ 3723 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3724 status = nxge_init(nxgep); 3725 if (status != NXGE_OK) { 3726 mutex_exit(nxgep->genlock); 3727 return (ENXIO); 3728 } 3729 } 3730 3731 mmac_info = &nxgep->nxge_mmac_info; 3732 3733 if (slot < 1 || slot > mmac_info->num_mmac) { 3734 mutex_exit(nxgep->genlock); 3735 return (EINVAL); 3736 } 3737 maddr->mma_flags = 0; 3738 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) 3739 maddr->mma_flags |= MMAC_SLOT_USED; 3740 3741 if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) { 3742 maddr->mma_flags |= MMAC_VENDOR_ADDR; 3743 bcopy(mmac_info->factory_mac_pool[slot], 3744 maddr->mma_addr, ETHERADDRL); 3745 maddr->mma_addrlen = ETHERADDRL; 3746 } else { 3747 if (maddr->mma_flags & MMAC_SLOT_USED) { 3748 bcopy(mmac_info->mac_pool[slot].addr, 3749 maddr->mma_addr, ETHERADDRL); 3750 maddr->mma_addrlen = ETHERADDRL; 3751 } else { 3752 bzero(maddr->mma_addr, ETHERADDRL); 3753 maddr->mma_addrlen = 0; 3754 } 3755 } 3756 mutex_exit(nxgep->genlock); 3757 return (0); 3758 } 3759 3760 3761 static boolean_t 3762 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3763 { 3764 nxge_t *nxgep = arg; 3765 uint32_t *txflags = cap_data; 3766 multiaddress_capab_t *mmacp = cap_data; 3767 3768 switch (cap) { 3769 case MAC_CAPAB_HCKSUM: 3770 *txflags = HCKSUM_INET_PARTIAL; 3771 break; 3772 case MAC_CAPAB_POLL: 3773 /* 3774 * There's nothing for us to fill in, simply returning 3775 * B_TRUE stating that we support polling is sufficient. 3776 */ 3777 break; 3778 3779 case MAC_CAPAB_MULTIADDRESS: 3780 mutex_enter(nxgep->genlock); 3781 3782 mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac; 3783 mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree; 3784 mmacp->maddr_flag = 0; /* 0 is requried by PSARC2006/265 */ 3785 /* 3786 * maddr_handle is driver's private data, passed back to 3787 * entry point functions as arg. 3788 */ 3789 mmacp->maddr_handle = nxgep; 3790 mmacp->maddr_add = nxge_m_mmac_add; 3791 mmacp->maddr_remove = nxge_m_mmac_remove; 3792 mmacp->maddr_modify = nxge_m_mmac_modify; 3793 mmacp->maddr_get = nxge_m_mmac_get; 3794 mmacp->maddr_reserve = nxge_m_mmac_reserve; 3795 3796 mutex_exit(nxgep->genlock); 3797 break; 3798 default: 3799 return (B_FALSE); 3800 } 3801 return (B_TRUE); 3802 } 3803 3804 /* 3805 * Module loading and removing entry points. 
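 * A sketch of the wiring below: nxge_cb_ops and nxge_dev_ops describe
 * the driver to the DDI framework, nxge_modldrv wraps nxge_dev_ops
 * for the module framework, and _init()/_fini()/_info() hand the
 * modlinkage to mod_install(), mod_remove() and mod_info().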
3806 */ 3807 3808 static struct cb_ops nxge_cb_ops = { 3809 nodev, /* cb_open */ 3810 nodev, /* cb_close */ 3811 nodev, /* cb_strategy */ 3812 nodev, /* cb_print */ 3813 nodev, /* cb_dump */ 3814 nodev, /* cb_read */ 3815 nodev, /* cb_write */ 3816 nodev, /* cb_ioctl */ 3817 nodev, /* cb_devmap */ 3818 nodev, /* cb_mmap */ 3819 nodev, /* cb_segmap */ 3820 nochpoll, /* cb_chpoll */ 3821 ddi_prop_op, /* cb_prop_op */ 3822 NULL, 3823 D_MP, /* cb_flag */ 3824 CB_REV, /* rev */ 3825 nodev, /* int (*cb_aread)() */ 3826 nodev /* int (*cb_awrite)() */ 3827 }; 3828 3829 static struct dev_ops nxge_dev_ops = { 3830 DEVO_REV, /* devo_rev */ 3831 0, /* devo_refcnt */ 3832 nulldev, 3833 nulldev, /* devo_identify */ 3834 nulldev, /* devo_probe */ 3835 nxge_attach, /* devo_attach */ 3836 nxge_detach, /* devo_detach */ 3837 nodev, /* devo_reset */ 3838 &nxge_cb_ops, /* devo_cb_ops */ 3839 (struct bus_ops *)NULL, /* devo_bus_ops */ 3840 ddi_power /* devo_power */ 3841 }; 3842 3843 extern struct mod_ops mod_driverops; 3844 3845 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 3846 3847 /* 3848 * Module linkage information for the kernel. 3849 */ 3850 static struct modldrv nxge_modldrv = { 3851 &mod_driverops, 3852 NXGE_DESC_VER, 3853 &nxge_dev_ops 3854 }; 3855 3856 static struct modlinkage modlinkage = { 3857 MODREV_1, (void *) &nxge_modldrv, NULL 3858 }; 3859 3860 int 3861 _init(void) 3862 { 3863 int status; 3864 3865 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 3866 mac_init_ops(&nxge_dev_ops, "nxge"); 3867 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 3868 if (status != 0) { 3869 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 3870 "failed to init device soft state")); 3871 goto _init_exit; 3872 } 3873 3874 status = mod_install(&modlinkage); 3875 if (status != 0) { 3876 ddi_soft_state_fini(&nxge_list); 3877 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 3878 goto _init_exit; 3879 } 3880 3881 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 3882 3883 _init_exit: 3884 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 3885 3886 return (status); 3887 } 3888 3889 int 3890 _fini(void) 3891 { 3892 int status; 3893 3894 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 3895 3896 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 3897 3898 if (nxge_mblks_pending) 3899 return (EBUSY); 3900 3901 status = mod_remove(&modlinkage); 3902 if (status != DDI_SUCCESS) { 3903 NXGE_DEBUG_MSG((NULL, MOD_CTL, 3904 "Module removal failed 0x%08x", 3905 status)); 3906 goto _fini_exit; 3907 } 3908 3909 mac_fini_ops(&nxge_dev_ops); 3910 3911 ddi_soft_state_fini(&nxge_list); 3912 3913 MUTEX_DESTROY(&nxge_common_lock); 3914 _fini_exit: 3915 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 3916 3917 return (status); 3918 } 3919 3920 int 3921 _info(struct modinfo *modinfop) 3922 { 3923 int status; 3924 3925 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 3926 status = mod_info(&modlinkage, modinfop); 3927 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 3928 3929 return (status); 3930 } 3931 3932 /*ARGSUSED*/ 3933 static nxge_status_t 3934 nxge_add_intrs(p_nxge_t nxgep) 3935 { 3936 3937 int intr_types; 3938 int type = 0; 3939 int ddi_status = DDI_SUCCESS; 3940 nxge_status_t status = NXGE_OK; 3941 3942 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 3943 3944 nxgep->nxge_intr_type.intr_registered = B_FALSE; 3945 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 3946 nxgep->nxge_intr_type.msi_intx_cnt = 0; 3947 nxgep->nxge_intr_type.intr_added = 0; 3948 
3948         nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
3949         nxgep->nxge_intr_type.intr_type = 0;
3950
3951         if (nxgep->niu_type == N2_NIU) {
3952                 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
3953         } else if (nxge_msi_enable) {
3954                 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
3955         }
3956
3957         /* Get the supported interrupt types */
3958         if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
3959             != DDI_SUCCESS) {
3960                 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
3961                     "ddi_intr_get_supported_types failed: status 0x%08x",
3962                     ddi_status));
3963                 return (NXGE_ERROR | NXGE_DDI_FAILED);
3964         }
3965         nxgep->nxge_intr_type.intr_types = intr_types;
3966
3967         NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3968             "ddi_intr_get_supported_types: 0x%08x", intr_types));
3969
3970         /*
3971          * Choose the interrupt type to request, based on the
3972          * nxge_msi_enable tunable:
3973          * 1 - MSI, 2 - MSI-X, others - FIXED (INTX emulation).
3974          */
3975         switch (nxge_msi_enable) {
3976         default:
3977                 type = DDI_INTR_TYPE_FIXED;
3978                 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
3979                     "use fixed (intx emulation) type %08x",
3980                     type));
3981                 break;
3982
3983         case 2:
3984                 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
3985                     "ddi_intr_get_supported_types: 0x%08x", intr_types));
3986                 if (intr_types & DDI_INTR_TYPE_MSIX) {
3987                         type = DDI_INTR_TYPE_MSIX;
3988                         NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3989                             "ddi_intr_get_supported_types: MSIX 0x%08x",
3990                             type));
3991                 } else if (intr_types & DDI_INTR_TYPE_MSI) {
3992                         type = DDI_INTR_TYPE_MSI;
3993                         NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3994                             "ddi_intr_get_supported_types: MSI 0x%08x",
3995                             type));
3996                 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
3997                         type = DDI_INTR_TYPE_FIXED;
3998                         NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
3999                             "ddi_intr_get_supported_types: FIXED 0x%08x",
4000                             type));
4001                 }
4002                 break;
4003
4004         case 1:
4005                 if (intr_types & DDI_INTR_TYPE_MSI) {
4006                         type = DDI_INTR_TYPE_MSI;
4007                         NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
4008                             "ddi_intr_get_supported_types: MSI 0x%08x",
4009                             type));
4010                 } else if (intr_types & DDI_INTR_TYPE_MSIX) {
4011                         type = DDI_INTR_TYPE_MSIX;
4012                         NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4013                             "ddi_intr_get_supported_types: MSIX 0x%08x",
4014                             type));
4015                 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
4016                         type = DDI_INTR_TYPE_FIXED;
4017                         NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4018                             "ddi_intr_get_supported_types: FIXED 0x%08x",
4019                             type));
4020                 }
4021         }
4022
4023         nxgep->nxge_intr_type.intr_type = type;
4024         if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
4025             type == DDI_INTR_TYPE_FIXED) &&
4026             nxgep->nxge_intr_type.niu_msi_enable) {
4027                 if ((status = nxge_add_intrs_adv(nxgep)) != NXGE_OK) {
4028                         NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4029                             " nxge_add_intrs: "
4030                             " nxge_add_intrs_adv failed: status 0x%08x",
4031                             status));
4032                         return (status);
4033                 } else {
4034                         NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4035                             "interrupts registered : type %d", type));
4036                         nxgep->nxge_intr_type.intr_registered = B_TRUE;
4037
4038                         NXGE_DEBUG_MSG((nxgep, DDI_CTL,
4039                             "\nAdded advanced nxge add_intr_adv "
4040                             "intr type 0x%x\n", type));
4041
4042                         return (status);
4043                 }
4044         }
4045
4046         if (!nxgep->nxge_intr_type.intr_registered) {
4047                 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
4048                     "failed to register interrupts"));
4049                 return (NXGE_ERROR | NXGE_DDI_FAILED);
4050         }
4051
4052         NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
4053         return (status);
4054 }
4055
4056 /*ARGSUSED*/
4057 static nxge_status_t
4058 nxge_add_soft_intrs(p_nxge_t nxgep)
4059 {
4060
4061         int             ddi_status = DDI_SUCCESS;
4062         nxge_status_t   status = NXGE_OK;
4063
4064         NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));
4065
4066         nxgep->resched_id = NULL;
4067         nxgep->resched_running = B_FALSE;
4068         ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
4069             &nxgep->resched_id,
4070             NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
4071         if (ddi_status != DDI_SUCCESS) {
4072                 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
4073                     "ddi_add_softintr failed: status 0x%08x",
4074                     ddi_status));
4075                 return (NXGE_ERROR | NXGE_DDI_FAILED);
4076         }
4077
4078         NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));
4079
4080         return (status);
4081 }
4082
4083 static nxge_status_t
4084 nxge_add_intrs_adv(p_nxge_t nxgep)
4085 {
4086         int             intr_type;
4087         p_nxge_intr_t   intrp;
4088
4089         NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
4090
4091         intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4092         intr_type = intrp->intr_type;
4093         NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
4094             intr_type));
4095
4096         switch (intr_type) {
4097         case DDI_INTR_TYPE_MSI:         /* 0x2 */
4098         case DDI_INTR_TYPE_MSIX:        /* 0x4 */
4099                 return (nxge_add_intrs_adv_type(nxgep, intr_type));
4100
4101         case DDI_INTR_TYPE_FIXED:       /* 0x1 */
4102                 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
4103
4104         default:
4105                 return (NXGE_ERROR);
4106         }
4107 }
4108
4109
4110 /*ARGSUSED*/
4111 static nxge_status_t
4112 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
4113 {
4114         dev_info_t      *dip = nxgep->dip;
4115         p_nxge_ldg_t    ldgp;
4116         p_nxge_intr_t   intrp;
4117         uint_t          *inthandler;
4118         void            *arg1, *arg2;
4119         int             behavior;
4120         int             nintrs, navail, nrequest;
4121         int             nactual, nrequired;
4122         int             inum = 0;
4123         int             x, y;
4124         int             ddi_status = DDI_SUCCESS;
4125         nxge_status_t   status = NXGE_OK;
4126
4127         NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
4128         intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4129         intrp->start_inum = 0;
4130
4131         ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
4132         if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
4133                 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4134                     "ddi_intr_get_nintrs() failed, status: 0x%x, "
4135                     "nintrs: %d", ddi_status, nintrs));
4136                 return (NXGE_ERROR | NXGE_DDI_FAILED);
4137         }
4138
4139         ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
4140         if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
4141                 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4142                     "ddi_intr_get_navail() failed, status: 0x%x, "
4143                     "navail: %d", ddi_status, navail));
4144                 return (NXGE_ERROR | NXGE_DDI_FAILED);
4145         }
4146
4147         NXGE_DEBUG_MSG((nxgep, INT_CTL,
4148             "ddi_intr_get_navail() returned: nintrs %d, navail %d",
4149             nintrs, navail));
4150
4151         /* PSARC/2007/453 MSI-X interrupt limit override */
4152         if (int_type == DDI_INTR_TYPE_MSIX) {
4153                 nrequest = nxge_create_msi_property(nxgep);
4154                 if (nrequest < navail) {
4155                         navail = nrequest;
4156                         NXGE_DEBUG_MSG((nxgep, INT_CTL,
4157                             "nxge_add_intrs_adv_type: nintrs %d "
4158                             "navail %d (nrequest %d)",
4159                             nintrs, navail, nrequest));
4160                 }
4161         }
4162
4163         if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
4164                 /* MSI count must be a power of 2; round navail down */
4165                 if ((navail & 16) == 16) {
4166                         navail = 16;
4167                 } else if ((navail & 8) == 8) {
4168                         navail = 8;
4169                 } else if ((navail & 4) == 4) {
4170                         navail = 4;
4171                 } else if ((navail & 2) == 2) {
4172                         navail = 2;
4173                 } else {
4174                         navail = 1;
4175                 }
4176                 NXGE_DEBUG_MSG((nxgep, INT_CTL,
4177                     "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
4178                     "navail %d", nintrs, navail));
4179         }
4180
4181         behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4182             DDI_INTR_ALLOC_NORMAL);
4183         intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4184         intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
4185         ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4186             navail, &nactual, behavior);
4187         if (ddi_status != DDI_SUCCESS || nactual == 0) {
4188                 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4189                     " ddi_intr_alloc() failed: %d",
4190                     ddi_status));
4191                 kmem_free(intrp->htable, intrp->intr_size);
4192                 return (NXGE_ERROR | NXGE_DDI_FAILED);
4193         }
4194
4195         if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4196             (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4197                 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4198                     " ddi_intr_get_pri() failed: %d",
4199                     ddi_status));
4200                 /* Free already allocated interrupts */
4201                 for (y = 0; y < nactual; y++) {
4202                         (void) ddi_intr_free(intrp->htable[y]);
4203                 }
4204
4205                 kmem_free(intrp->htable, intrp->intr_size);
4206                 return (NXGE_ERROR | NXGE_DDI_FAILED);
4207         }
4208
4209         nrequired = 0;
4210         switch (nxgep->niu_type) {
4211         default:
4212                 status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
4213                 break;
4214
4215         case N2_NIU:
4216                 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
4217                 break;
4218         }
4219
4220         if (status != NXGE_OK) {
4221                 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4222                     "nxge_add_intrs_adv_type: nxge_ldgv_init "
4223                     "failed: 0x%x", status));
4224                 /* Free already allocated interrupts */
4225                 for (y = 0; y < nactual; y++) {
4226                         (void) ddi_intr_free(intrp->htable[y]);
4227                 }
4228
4229                 kmem_free(intrp->htable, intrp->intr_size);
4230                 return (status);
4231         }
4232
4233         ldgp = nxgep->ldgvp->ldgp;
4234         for (x = 0; x < nrequired; x++, ldgp++) {
4235                 ldgp->vector = (uint8_t)x;
4236                 ldgp->intdata = SID_DATA(ldgp->func, x);
4237                 arg1 = ldgp->ldvp;
4238                 arg2 = nxgep;
4239                 if (ldgp->nldvs == 1) {
4240                         inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4241                         NXGE_DEBUG_MSG((nxgep, INT_CTL,
4242                             "nxge_add_intrs_adv_type: "
4243                             "arg1 $%p arg2 $%p: "
4244                             "1-1 int handler (entry %d intdata 0x%x)\n",
4245                             arg1, arg2,
4246                             x, ldgp->intdata));
4247                 } else if (ldgp->nldvs > 1) {
4248                         inthandler = (uint_t *)ldgp->sys_intr_handler;
4249                         NXGE_DEBUG_MSG((nxgep, INT_CTL,
4250                             "nxge_add_intrs_adv_type: "
4251                             "arg1 $%p arg2 $%p: "
4252                             "nldvs %d int handler "
4253                             "(entry %d intdata 0x%x)\n",
4254                             arg1, arg2,
4255                             ldgp->nldvs, x, ldgp->intdata));
4256                 }
4257
4258                 NXGE_DEBUG_MSG((nxgep, INT_CTL,
4259                     "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
4260                     "htable 0x%llx", x, intrp->htable[x]));
4261
4262                 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4263                     (ddi_intr_handler_t *)inthandler, arg1, arg2))
4264                     != DDI_SUCCESS) {
4265                         NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4266                             "==> nxge_add_intrs_adv_type: failed #%d "
4267                             "status 0x%x", x, ddi_status));
4268                         for (y = 0; y < intrp->intr_added; y++) {
4269                                 (void) ddi_intr_remove_handler(
4270                                     intrp->htable[y]);
4271                         }
4272                         /* Free already allocated interrupts */
4273                         for (y = 0; y < nactual; y++) {
4274                                 (void) ddi_intr_free(intrp->htable[y]);
4275                         }
4276                         kmem_free(intrp->htable, intrp->intr_size);
4277
4278                         (void) nxge_ldgv_uninit(nxgep);
4279
4280                         return (NXGE_ERROR | NXGE_DDI_FAILED);
4281                 }
4282                 intrp->intr_added++;
4283         }
4284
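        /*
         * Record how many vectors were actually allocated (msi_intx_cnt)
         * versus how many handlers were added (intr_added);
         * nxge_remove_intrs() walks both counts during teardown.
         */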
4285         intrp->msi_intx_cnt = nactual;
4286
4287         NXGE_DEBUG_MSG((nxgep, DDI_CTL,
4288             "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
4289             navail, nactual,
4290             intrp->msi_intx_cnt,
4291             intrp->intr_added));
4292
4293         (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4294
4295         (void) nxge_intr_ldgv_init(nxgep);
4296
4297         NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));
4298
4299         return (status);
4300 }
4301
4302 /*ARGSUSED*/
4303 static nxge_status_t
4304 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
4305 {
4306         dev_info_t      *dip = nxgep->dip;
4307         p_nxge_ldg_t    ldgp;
4308         p_nxge_intr_t   intrp;
4309         uint_t          *inthandler;
4310         void            *arg1, *arg2;
4311         int             behavior;
4312         int             nintrs, navail;
4313         int             nactual, nrequired;
4314         int             inum = 0;
4315         int             x, y;
4316         int             ddi_status = DDI_SUCCESS;
4317         nxge_status_t   status = NXGE_OK;
4318
4319         NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
4320         intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4321         intrp->start_inum = 0;
4322
4323         ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
4324         if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
4325                 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4326                     "ddi_intr_get_nintrs() failed, status: 0x%x, "
4327                     "nintrs: %d", ddi_status, nintrs));
4328                 return (NXGE_ERROR | NXGE_DDI_FAILED);
4329         }
4330
4331         ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
4332         if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
4333                 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4334                     "ddi_intr_get_navail() failed, status: 0x%x, "
4335                     "navail: %d", ddi_status, navail));
4336                 return (NXGE_ERROR | NXGE_DDI_FAILED);
4337         }
4338
4339         NXGE_DEBUG_MSG((nxgep, INT_CTL,
4340             "ddi_intr_get_navail() returned: nintrs %d, navail %d",
4341             nintrs, navail));
4342
4343         behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4344             DDI_INTR_ALLOC_NORMAL);
4345         intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4346         intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
4347         ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4348             navail, &nactual, behavior);
4349         if (ddi_status != DDI_SUCCESS || nactual == 0) {
4350                 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4351                     " ddi_intr_alloc() failed: %d",
4352                     ddi_status));
4353                 kmem_free(intrp->htable, intrp->intr_size);
4354                 return (NXGE_ERROR | NXGE_DDI_FAILED);
4355         }
4356
4357         if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4358             (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4359                 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4360                     " ddi_intr_get_pri() failed: %d",
4361                     ddi_status));
4362                 /* Free already allocated interrupts */
4363                 for (y = 0; y < nactual; y++) {
4364                         (void) ddi_intr_free(intrp->htable[y]);
4365                 }
4366
4367                 kmem_free(intrp->htable, intrp->intr_size);
4368                 return (NXGE_ERROR | NXGE_DDI_FAILED);
4369         }
4370
4371         nrequired = 0;
4372         switch (nxgep->niu_type) {
4373         default:
4374                 status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
4375                 break;
4376
4377         case N2_NIU:
4378                 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
4379                 break;
4380         }
4381
4382         if (status != NXGE_OK) {
4383                 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4384                     "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
4385                     "failed: 0x%x", status));
4386                 /* Free already allocated interrupts */
4387                 for (y = 0; y < nactual; y++) {
4388                         (void) ddi_intr_free(intrp->htable[y]);
4389                 }
4390
4391                 kmem_free(intrp->htable, intrp->intr_size);
4392                 return (status);
4393         }
4394
4395         ldgp = nxgep->ldgvp->ldgp;
4396         for (x = 0; x < nrequired; x++, ldgp++) {
4397                 ldgp->vector = (uint8_t)x;
4398                 if (nxgep->niu_type != N2_NIU) {
4399                         ldgp->intdata = SID_DATA(ldgp->func, x);
4400                 }
4401
4402                 arg1 = ldgp->ldvp;
4403                 arg2 = nxgep;
4404                 if (ldgp->nldvs == 1) {
4405                         inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4406                         NXGE_DEBUG_MSG((nxgep, INT_CTL,
4407                             "nxge_add_intrs_adv_type_fix: "
4408                             "1-1 int handler(%d) ldg %d ldv %d "
4409                             "arg1 $%p arg2 $%p\n",
4410                             x, ldgp->ldg, ldgp->ldvp->ldv,
4411                             arg1, arg2));
4412                 } else if (ldgp->nldvs > 1) {
4413                         inthandler = (uint_t *)ldgp->sys_intr_handler;
4414                         NXGE_DEBUG_MSG((nxgep, INT_CTL,
4415                             "nxge_add_intrs_adv_type_fix: "
4416                             "shared int handler(%d) nldvs %d ldg %d ldv %d "
4417                             "arg1 0x%016llx arg2 0x%016llx\n",
4418                             x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4419                             arg1, arg2));
4420                 }
4421
4422                 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4423                     (ddi_intr_handler_t *)inthandler, arg1, arg2))
4424                     != DDI_SUCCESS) {
4425                         NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4426                             "==> nxge_add_intrs_adv_type_fix: failed #%d "
4427                             "status 0x%x", x, ddi_status));
4428                         for (y = 0; y < intrp->intr_added; y++) {
4429                                 (void) ddi_intr_remove_handler(
4430                                     intrp->htable[y]);
4431                         }
4432                         /* Free already allocated interrupts */
4433                         for (y = 0; y < nactual; y++) {
4434                                 (void) ddi_intr_free(intrp->htable[y]);
4435                         }
4436                         kmem_free(intrp->htable, intrp->intr_size);
4437
4438                         (void) nxge_ldgv_uninit(nxgep);
4439
4440                         return (NXGE_ERROR | NXGE_DDI_FAILED);
4441                 }
4442                 intrp->intr_added++;
4443         }
4444
4445         intrp->msi_intx_cnt = nactual;
4446
4447         (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4448
4449         status = nxge_intr_ldgv_init(nxgep);
4450         NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));
4451
4452         return (status);
4453 }
4454
4455 static void
4456 nxge_remove_intrs(p_nxge_t nxgep)
4457 {
4458         int             i, inum;
4459         p_nxge_intr_t   intrp;
4460
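        /*
         * Teardown mirrors nxge_add_intrs_adv_type*(): disable the
         * vectors (block-disable when supported), remove the handlers,
         * free the DDI interrupt handles, and then release the handle
         * table and the logical device group state.
         */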
NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 4462 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4463 if (!intrp->intr_registered) { 4464 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4465 "<== nxge_remove_intrs: interrupts not registered")); 4466 return; 4467 } 4468 4469 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 4470 4471 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4472 (void) ddi_intr_block_disable(intrp->htable, 4473 intrp->intr_added); 4474 } else { 4475 for (i = 0; i < intrp->intr_added; i++) { 4476 (void) ddi_intr_disable(intrp->htable[i]); 4477 } 4478 } 4479 4480 for (inum = 0; inum < intrp->intr_added; inum++) { 4481 if (intrp->htable[inum]) { 4482 (void) ddi_intr_remove_handler(intrp->htable[inum]); 4483 } 4484 } 4485 4486 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 4487 if (intrp->htable[inum]) { 4488 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 4489 "nxge_remove_intrs: ddi_intr_free inum %d " 4490 "msi_intx_cnt %d intr_added %d", 4491 inum, 4492 intrp->msi_intx_cnt, 4493 intrp->intr_added)); 4494 4495 (void) ddi_intr_free(intrp->htable[inum]); 4496 } 4497 } 4498 4499 kmem_free(intrp->htable, intrp->intr_size); 4500 intrp->intr_registered = B_FALSE; 4501 intrp->intr_enabled = B_FALSE; 4502 intrp->msi_intx_cnt = 0; 4503 intrp->intr_added = 0; 4504 4505 (void) nxge_ldgv_uninit(nxgep); 4506 4507 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 4508 "#msix-request"); 4509 4510 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 4511 } 4512 4513 /*ARGSUSED*/ 4514 static void 4515 nxge_remove_soft_intrs(p_nxge_t nxgep) 4516 { 4517 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs")); 4518 if (nxgep->resched_id) { 4519 ddi_remove_softintr(nxgep->resched_id); 4520 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4521 "==> nxge_remove_soft_intrs: removed")); 4522 nxgep->resched_id = NULL; 4523 } 4524 4525 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs")); 4526 } 4527 4528 /*ARGSUSED*/ 4529 static void 4530 nxge_intrs_enable(p_nxge_t nxgep) 4531 { 4532 p_nxge_intr_t intrp; 4533 int i; 4534 int status; 4535 4536 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 4537 4538 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4539 4540 if (!intrp->intr_registered) { 4541 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 4542 "interrupts are not registered")); 4543 return; 4544 } 4545 4546 if (intrp->intr_enabled) { 4547 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4548 "<== nxge_intrs_enable: already enabled")); 4549 return; 4550 } 4551 4552 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4553 status = ddi_intr_block_enable(intrp->htable, 4554 intrp->intr_added); 4555 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 4556 "block enable - status 0x%x total inums #%d\n", 4557 status, intrp->intr_added)); 4558 } else { 4559 for (i = 0; i < intrp->intr_added; i++) { 4560 status = ddi_intr_enable(intrp->htable[i]); 4561 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 4562 "ddi_intr_enable:enable - status 0x%x " 4563 "total inums %d enable inum #%d\n", 4564 status, intrp->intr_added, i)); 4565 if (status == DDI_SUCCESS) { 4566 intrp->intr_enabled = B_TRUE; 4567 } 4568 } 4569 } 4570 4571 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable")); 4572 } 4573 4574 /*ARGSUSED*/ 4575 static void 4576 nxge_intrs_disable(p_nxge_t nxgep) 4577 { 4578 p_nxge_intr_t intrp; 4579 int i; 4580 4581 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable")); 4582 4583 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4584 4585 if (!intrp->intr_registered) { 4586 
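                /* Nothing was registered, so there is nothing to disable. */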
4586                 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
4587                     "interrupts are not registered"));
4588                 return;
4589         }
4590
4591         if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4592                 (void) ddi_intr_block_disable(intrp->htable,
4593                     intrp->intr_added);
4594         } else {
4595                 for (i = 0; i < intrp->intr_added; i++) {
4596                         (void) ddi_intr_disable(intrp->htable[i]);
4597                 }
4598         }
4599
4600         intrp->intr_enabled = B_FALSE;
4601         NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
4602 }
4603
4604 static nxge_status_t
4605 nxge_mac_register(p_nxge_t nxgep)
4606 {
4607         mac_register_t *macp;
4608         int             status;
4609
4610         NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));
4611
4612         if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4613                 return (NXGE_ERROR);
4614
4615         macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4616         macp->m_driver = nxgep;
4617         macp->m_dip = nxgep->dip;
4618         macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
4619         macp->m_callbacks = &nxge_m_callbacks;
4620         macp->m_min_sdu = 0;
4621         macp->m_max_sdu = nxgep->mac.maxframesize -
4622             sizeof (struct ether_header) - ETHERFCSL - 4;
4623
4624         status = mac_register(macp, &nxgep->mach);
4625         mac_free(macp);
4626
4627         if (status != 0) {
4628                 cmn_err(CE_WARN,
4629                     "!nxge_mac_register failed (status %d instance %d)",
4630                     status, nxgep->instance);
4631                 return (NXGE_ERROR);
4632         }
4633
4634         NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
4635             "(instance %d)", nxgep->instance));
4636
4637         return (NXGE_OK);
4638 }
4639
4640 void
4641 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
4642 {
4643         ssize_t         size;
4644         mblk_t          *nmp;
4645         uint8_t         blk_id;
4646         uint8_t         chan;
4647         uint32_t        err_id;
4648         err_inject_t    *eip;
4649
4650         NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));
4651
4652         size = 1024;
4653         nmp = mp->b_cont;
4654         eip = (err_inject_t *)nmp->b_rptr;
4655         blk_id = eip->blk_id;
4656         err_id = eip->err_id;
4657         chan = eip->chan;
4658         cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
4659         cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
4660         cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
4661         switch (blk_id) {
4662         case MAC_BLK_ID:
4663                 break;
4664         case TXMAC_BLK_ID:
4665                 break;
4666         case RXMAC_BLK_ID:
4667                 break;
4668         case MIF_BLK_ID:
4669                 break;
4670         case IPP_BLK_ID:
4671                 nxge_ipp_inject_err(nxgep, err_id);
4672                 break;
4673         case TXC_BLK_ID:
4674                 nxge_txc_inject_err(nxgep, err_id);
4675                 break;
4676         case TXDMA_BLK_ID:
4677                 nxge_txdma_inject_err(nxgep, err_id, chan);
4678                 break;
4679         case RXDMA_BLK_ID:
4680                 nxge_rxdma_inject_err(nxgep, err_id, chan);
4681                 break;
4682         case ZCP_BLK_ID:
4683                 nxge_zcp_inject_err(nxgep, err_id);
4684                 break;
4685         case ESPC_BLK_ID:
4686                 break;
4687         case FFLP_BLK_ID:
4688                 break;
4689         case PHY_BLK_ID:
4690                 break;
4691         case ETHER_SERDES_BLK_ID:
4692                 break;
4693         case PCIE_SERDES_BLK_ID:
4694                 break;
4695         case VIR_BLK_ID:
4696                 break;
4697         }
4698
4699         nmp->b_wptr = nmp->b_rptr + size;
4700         NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));
4701
4702         miocack(wq, mp, (int)size, 0);
4703 }
4704
4705 static int
4706 nxge_init_common_dev(p_nxge_t nxgep)
4707 {
4708         p_nxge_hw_list_t        hw_p;
4709         dev_info_t              *p_dip;
4710
4711         NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));
4712
4713         p_dip = nxgep->p_dip;
4714         MUTEX_ENTER(&nxge_common_lock);
4715         NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4716             "==> nxge_init_common_dev:func # %d",
4717             nxgep->function_num));
4718         /*
4719          * Loop through the existing list of per-Neptune hardware instances.
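         * Each function (port) of a Neptune device attaches separately;
         * all functions that share the same parent dev_info node share a
         * single nxge_hw_list_t entry and its locks, reference-counted
         * via ndevs.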
4720 */ 4721 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 4722 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4723 "==> nxge_init_common_device:func # %d " 4724 "hw_p $%p parent dip $%p", 4725 nxgep->function_num, 4726 hw_p, 4727 p_dip)); 4728 if (hw_p->parent_devp == p_dip) { 4729 nxgep->nxge_hw_p = hw_p; 4730 hw_p->ndevs++; 4731 hw_p->nxge_p[nxgep->function_num] = nxgep; 4732 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4733 "==> nxge_init_common_device:func # %d " 4734 "hw_p $%p parent dip $%p " 4735 "ndevs %d (found)", 4736 nxgep->function_num, 4737 hw_p, 4738 p_dip, 4739 hw_p->ndevs)); 4740 break; 4741 } 4742 } 4743 4744 if (hw_p == NULL) { 4745 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4746 "==> nxge_init_common_device:func # %d " 4747 "parent dip $%p (new)", 4748 nxgep->function_num, 4749 p_dip)); 4750 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 4751 hw_p->parent_devp = p_dip; 4752 hw_p->magic = NXGE_NEPTUNE_MAGIC; 4753 nxgep->nxge_hw_p = hw_p; 4754 hw_p->ndevs++; 4755 hw_p->nxge_p[nxgep->function_num] = nxgep; 4756 hw_p->next = nxge_hw_list; 4757 if (nxgep->niu_type == N2_NIU) { 4758 hw_p->niu_type = N2_NIU; 4759 hw_p->platform_type = P_NEPTUNE_NIU; 4760 } else { 4761 hw_p->niu_type = NIU_TYPE_NONE; 4762 hw_p->platform_type = P_NEPTUNE_NONE; 4763 } 4764 4765 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 4766 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 4767 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 4768 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 4769 MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL); 4770 4771 nxge_hw_list = hw_p; 4772 4773 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list); 4774 } 4775 4776 MUTEX_EXIT(&nxge_common_lock); 4777 4778 nxgep->platform_type = hw_p->platform_type; 4779 if (nxgep->niu_type != N2_NIU) { 4780 nxgep->niu_type = hw_p->niu_type; 4781 } 4782 4783 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4784 "==> nxge_init_common_device (nxge_hw_list) $%p", 4785 nxge_hw_list)); 4786 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device")); 4787 4788 return (NXGE_OK); 4789 } 4790 4791 static void 4792 nxge_uninit_common_dev(p_nxge_t nxgep) 4793 { 4794 p_nxge_hw_list_t hw_p, h_hw_p; 4795 dev_info_t *p_dip; 4796 4797 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device")); 4798 if (nxgep->nxge_hw_p == NULL) { 4799 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4800 "<== nxge_uninit_common_device (no common)")); 4801 return; 4802 } 4803 4804 MUTEX_ENTER(&nxge_common_lock); 4805 h_hw_p = nxge_hw_list; 4806 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 4807 p_dip = hw_p->parent_devp; 4808 if (nxgep->nxge_hw_p == hw_p && 4809 p_dip == nxgep->p_dip && 4810 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC && 4811 hw_p->magic == NXGE_NEPTUNE_MAGIC) { 4812 4813 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4814 "==> nxge_uninit_common_device:func # %d " 4815 "hw_p $%p parent dip $%p " 4816 "ndevs %d (found)", 4817 nxgep->function_num, 4818 hw_p, 4819 p_dip, 4820 hw_p->ndevs)); 4821 4822 nxgep->nxge_hw_p = NULL; 4823 if (hw_p->ndevs) { 4824 hw_p->ndevs--; 4825 } 4826 hw_p->nxge_p[nxgep->function_num] = NULL; 4827 if (!hw_p->ndevs) { 4828 MUTEX_DESTROY(&hw_p->nxge_vlan_lock); 4829 MUTEX_DESTROY(&hw_p->nxge_tcam_lock); 4830 MUTEX_DESTROY(&hw_p->nxge_cfg_lock); 4831 MUTEX_DESTROY(&hw_p->nxge_mdio_lock); 4832 MUTEX_DESTROY(&hw_p->nxge_mii_lock); 4833 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4834 "==> nxge_uninit_common_device: " 4835 "func # %d " 4836 "hw_p $%p parent dip $%p " 4837 "ndevs %d (last)", 4838 nxgep->function_num, 4839 hw_p, 4840 p_dip, 
4841                                     hw_p->ndevs));
4842
4843                                 if (hw_p == nxge_hw_list) {
4844                                         NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4845                                             "==> nxge_uninit_common_device:"
4846                                             "remove head func # %d "
4847                                             "hw_p $%p parent dip $%p "
4848                                             "ndevs %d (head)",
4849                                             nxgep->function_num,
4850                                             hw_p,
4851                                             p_dip,
4852                                             hw_p->ndevs));
4853                                         nxge_hw_list = hw_p->next;
4854                                 } else {
4855                                         NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4856                                             "==> nxge_uninit_common_device:"
4857                                             "remove middle func # %d "
4858                                             "hw_p $%p parent dip $%p "
4859                                             "ndevs %d (middle)",
4860                                             nxgep->function_num,
4861                                             hw_p,
4862                                             p_dip,
4863                                             hw_p->ndevs));
4864                                         h_hw_p->next = hw_p->next;
4865                                 }
4866
4867                                 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
4868                         }
4869                         break;
4870                 } else {
4871                         h_hw_p = hw_p;
4872                 }
4873         }
4874
4875         MUTEX_EXIT(&nxge_common_lock);
4876         NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4877             "==> nxge_uninit_common_device (nxge_hw_list) $%p",
4878             nxge_hw_list));
4879
4880         NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
4881 }
4882
4883 /*
4884  * Determines the number of ports from the niu_type or the platform type.
4885  * Returns the number of ports, or zero on failure.
4886  */
4887
4888 int
4889 nxge_get_nports(p_nxge_t nxgep)
4890 {
4891         int     nports = 0;
4892
4893         switch (nxgep->niu_type) {
4894         case N2_NIU:
4895         case NEPTUNE_2_10GF:
4896                 nports = 2;
4897                 break;
4898         case NEPTUNE_4_1GC:
4899         case NEPTUNE_2_10GF_2_1GC:
4900         case NEPTUNE_1_10GF_3_1GC:
4901         case NEPTUNE_1_1GC_1_10GF_2_1GC:
4902                 nports = 4;
4903                 break;
4904         default:
4905                 switch (nxgep->platform_type) {
4906                 case P_NEPTUNE_NIU:
4907                 case P_NEPTUNE_ATLAS_2PORT:
4908                         nports = 2;
4909                         break;
4910                 case P_NEPTUNE_ATLAS_4PORT:
4911                 case P_NEPTUNE_MARAMBA_P0:
4912                 case P_NEPTUNE_MARAMBA_P1:
4913                 case P_NEPTUNE_ALONSO:
4914                         nports = 4;
4915                         break;
4916                 default:
4917                         break;
4918                 }
4919                 break;
4920         }
4921
4922         return (nports);
4923 }
4924
4925 /*
4926  * The following function supports the
4927  * PSARC/2007/453 MSI-X interrupt limit override.
4928  */
4929 static int
4930 nxge_create_msi_property(p_nxge_t nxgep)
4931 {
4932         int     nmsi;
4933         extern  int ncpus;
4934
4935         NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));
4936
4937         switch (nxgep->mac.portmode) {
4938         case PORT_10G_COPPER:
4939         case PORT_10G_FIBER:
4940                 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
4941                     DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
4942                 /*
4943                  * At most 8 MSI-X vectors are requested. If there are
4944                  * fewer than 8 CPUs, request one vector per CPU.
4945                  */
4946                 if (ncpus >= NXGE_MSIX_REQUEST_10G) {
4947                         nmsi = NXGE_MSIX_REQUEST_10G;
4948                 } else {
4949                         nmsi = ncpus;
4950                 }
4951                 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4952                     "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
4953                     ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
4954                     DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
4955                 break;
4956
4957         default:
4958                 nmsi = NXGE_MSIX_REQUEST_1G;
4959                 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4960                     "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
4961                     ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
4962                     DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
4963                 break;
4964         }
4965
4966         NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
4967         return (nmsi);
4968 }
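
/*
 * Illustrative sketch (not part of the driver build): the inline
 * power-of-2 rounding in nxge_add_intrs_adv_type() above is
 * equivalent, for the vector counts this driver sees, to the helper
 * below. The function name and the NXGE_EXAMPLES guard are
 * hypothetical and are never compiled in.
 */
#ifdef NXGE_EXAMPLES
static int
nxge_round_navail_down(int navail)
{
        int     n;

        /* Return the largest of 16, 8, 4, 2 whose bit is set in navail. */
        for (n = 16; n > 1; n >>= 1) {
                if (navail & n)
                        return (n);
        }
        return (1);     /* navail was 1; fall back to a single vector */
}
#endif  /* NXGE_EXAMPLES */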