/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use OBP-published properties */
uint32_t nxge_use_rdc_intr = 1;		/* debug: assign RDC interrupt */
/*
 * Interrupt type: 1 requests MSI (the default until MSI-X is supported);
 * use 2 for MSI-X, 0 to turn MSI off (debug).
 */
uint32_t nxge_msi_enable = 1;

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t nxge_jumbo_enable = B_FALSE;
uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/*
 * Debugging flags:
 *	nxge_no_tx_lb:		disable transmit load balancing
 *	nxge_tx_lb_policy:	0 - TCP/UDP port (default)
 *				3 - destination MAC address
 */
uint32_t nxge_no_tx_lb = 0;
uint32_t nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Tunable to bound the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 *	nxge_rx_threshold_hi:	copy all buffers.
 *	nxge_rx_bcopy_size_type: receive buffer block size type.
 *	nxge_rx_threshold_lo:	copy only up to the tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

rtrace_t npi_rtracebuf;
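/*
 * Illustration only (hypothetical values, not part of the driver):
 * the tunables above can be set without recompiling, using the
 * standard /etc/system "set module:symbol" syntax, e.g.
 *
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_jumbo_enable = 1
 *	set nxge:nxge_max_rx_pkts = 512
 *
 * or patched on a live system with an mdb(1) write:
 *
 *	# echo 'nxge_msi_enable/W 0' | mdb -kw
 */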
#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
    HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
    NIU_MINOR_VER, "nxge"
};
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
    struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
    p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_unicst(void *, const uint8_t *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static void nxge_m_resources(void *);
mblk_t *nxge_m_tx(void *arg, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
    mac_addr_slot_t slot);
static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
    boolean_t factory);
static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)

static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static mac_callbacks_t nxge_m_callbacks = {
    NXGE_M_CALLBACK_FLAGS,
    nxge_m_stat,
    nxge_m_start,
    nxge_m_stop,
    nxge_m_promisc,
    nxge_m_multicst,
    nxge_m_unicst,
    nxge_m_tx,
    nxge_m_resources,
    nxge_m_ioctl,
    nxge_m_getcapab
};
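/*
 * For illustration only: nxge_mac_register() (defined later in this
 * file) hands the callback table above to the GLDv3 framework using
 * the common registration sequence, roughly:
 *
 *	mac_register_t *macp = mac_alloc(MAC_VERSION);
 *	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
 *	macp->m_driver = nxgep;
 *	macp->m_dip = nxgep->dip;
 *	macp->m_callbacks = &nxge_m_callbacks;
 *	(void) mac_register(macp, &nxgep->mach);
 *	mac_free(macp);
 *
 * This is a sketch of the usual GLDv3 pattern, not a copy of the
 * actual function body.
 */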
void
nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level = 0;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system.  The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

nxge_os_mutex_t nxge_mii_lock;
static uint32_t nxge_mii_lock_init = 0;
nxge_os_mutex_t nxge_mdio_lock;
static uint32_t nxge_mdio_lock_init = 0;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t, ddi_device_acc_attr_t *,
    ddi_device_acc_attr_t *, ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_BE_ACC,
    DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
    DMA_ATTR_V0,		/* version number */
    0,				/* low address */
    0xffffffffffffffff,		/* high address */
    0xffffffffffffffff,		/* address counter max */
#ifndef NIU_PA_WORKAROUND
    0x100000,			/* alignment */
#else
    0x2000,
#endif
    0xfc00fc,			/* dlim_burstsizes */
    0x1,			/* minimum transfer size */
    0xffffffffffffffff,		/* maximum transfer size */
    0xffffffffffffffff,		/* maximum segment size */
    1,				/* scatter/gather list length */
    (unsigned int)1,		/* granularity */
    0				/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
    DMA_ATTR_V0,		/* version number */
    0,				/* low address */
    0xffffffffffffffff,		/* high address */
    0xffffffffffffffff,		/* address counter max */
#if defined(_BIG_ENDIAN)
    0x2000,			/* alignment */
#else
    0x1000,			/* alignment */
#endif
    0xfc00fc,			/* dlim_burstsizes */
    0x1,			/* minimum transfer size */
    0xffffffffffffffff,		/* maximum transfer size */
    0xffffffffffffffff,		/* maximum segment size */
    5,				/* scatter/gather list length */
    (unsigned int)1,		/* granularity */
    0				/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
    DMA_ATTR_V0,		/* version number */
    0,				/* low address */
    0xffffffffffffffff,		/* high address */
    0xffffffffffffffff,		/* address counter max */
    0x2000,			/* alignment */
    0xfc00fc,			/* dlim_burstsizes */
    0x1,			/* minimum transfer size */
    0xffffffffffffffff,		/* maximum transfer size */
    0xffffffffffffffff,		/* maximum segment size */
    1,				/* scatter/gather list length */
    (unsigned int)1,		/* granularity */
    DDI_DMA_RELAXED_ORDERING	/* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
    (uint_t)0,			/* dlim_addr_lo */
    (uint_t)0xffffffff,		/* dlim_addr_hi */
    (uint_t)0xffffffff,		/* dlim_cntr_max */
    (uint_t)0xfc00fc,		/* dlim_burstsizes for 32- and 64-bit xfers */
    0x1,			/* dlim_minxfer */
    1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks have to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes[] = {0x2000};
#else
size_t alloc_sizes[] = {0x1000, 0x2000, 0x4000, 0x8000,
    0x10000, 0x20000, 0x40000, 0x80000,
    0x100000, 0x200000, 0x400000, 0x800000, 0x1000000};
#endif
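/*
 * Worked example (hypothetical numbers): nxge_alloc_rx_buf_dma() below
 * scans alloc_sizes[] for the smallest chunk size that covers the
 * requested allocation, then falls back to smaller chunks as needed.
 * For instance, a 1 MB (0x100000) buffer request is satisfied by a
 * single 0x100000 chunk when possible, rather than 256 separate 4 KB
 * chunks, keeping the number of managed DMA chunks small.
 */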
/*
 * nxge_attach - Device attach entry point.  Handles DDI_ATTACH as
 * well as the DDI_RESUME and DDI_PM_RESUME commands.
 */
static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    p_nxge_t nxgep = NULL;
    int instance;
    int status = DDI_SUCCESS;
    uint8_t portn;
    nxge_mmac_t *mmac_info;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

    /*
     * Get the device instance since we'll need to setup
     * or retrieve a soft state for this instance.
     */
    instance = ddi_get_instance(dip);

    switch (cmd) {
    case DDI_ATTACH:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
	break;

    case DDI_RESUME:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
	nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
	    status = DDI_FAILURE;
	    break;
	}
	if (nxgep->dip != dip) {
	    status = DDI_FAILURE;
	    break;
	}
	if (nxgep->suspended == DDI_PM_SUSPEND) {
	    status = ddi_dev_is_needed(nxgep->dip, 0, 1);
	} else {
	    status = nxge_resume(nxgep);
	}
	goto nxge_attach_exit;

    case DDI_PM_RESUME:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
	nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
	    status = DDI_FAILURE;
	    break;
	}
	if (nxgep->dip != dip) {
	    status = DDI_FAILURE;
	    break;
	}
	status = nxge_resume(nxgep);
	goto nxge_attach_exit;

    default:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
	status = DDI_FAILURE;
	goto nxge_attach_exit;
    }

    if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
	status = DDI_FAILURE;
	goto nxge_attach_exit;
    }

    nxgep = ddi_get_soft_state(nxge_list, instance);
    if (nxgep == NULL) {
	goto nxge_attach_fail;
    }

    nxgep->nxge_magic = NXGE_MAGIC;

    nxgep->drv_state = 0;
    nxgep->dip = dip;
    nxgep->instance = instance;
    nxgep->p_dip = ddi_get_parent(dip);
    nxgep->nxge_debug_level = nxge_debug_level;
    npi_debug_level = nxge_debug_level;

    nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr,
	&nxge_rx_dma_attr);

    status = nxge_map_regs(nxgep);
    if (status != NXGE_OK) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
	goto nxge_attach_fail;
    }

    status = nxge_init_common_dev(nxgep);
    if (status != NXGE_OK) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "nxge_init_common_dev failed"));
	goto nxge_attach_fail;
    }

    portn = NXGE_GET_PORT_NUM(nxgep->function_num);
    nxgep->mac.portnum = portn;
    if ((portn == 0) || (portn == 1))
	nxgep->mac.porttype = PORT_TYPE_XMAC;
    else
	nxgep->mac.porttype = PORT_TYPE_BMAC;
    /*
     * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
     * internally; the other 2 ports use BMAC (1G "Big" MAC).
     * The two types of MACs have different characteristics.
     */
    mmac_info = &nxgep->nxge_mmac_info;
    if (nxgep->function_num < 2) {
	mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
	mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
    } else {
	mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
	mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
    }
    /*
     * Setup the ndd parameters for this instance.
     */
    nxge_init_param(nxgep);

    /*
     * Setup the Register Tracing Buffer.
     */
    npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

    /* init stats ptr */
    nxge_init_statsp(nxgep);

    if (nxgep->niu_type != N2_NIU) {
	/*
	 * Read the VPD info from the EEPROM into a local data
	 * structure and check the VPD info for validity.
	 */
	(void) nxge_vpd_info_get(nxgep);
    }

    status = nxge_get_xcvr_type(nxgep);

    if (status != NXGE_OK) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
	    " Couldn't determine card type"
	    " .... exit "));
exit ")); 511 goto nxge_attach_fail; 512 } 513 514 if ((nxgep->niu_type == NEPTUNE) && 515 (nxgep->mac.portmode == PORT_10G_FIBER)) { 516 nxgep->niu_type = NEPTUNE_2; 517 } 518 519 if ((nxgep->niu_type == NEPTUNE_2) && (nxgep->function_num > 1)) { 520 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Unsupported function %d." 521 "Only functions 0 and 1 are supported by this card", 522 nxgep->function_num)); 523 status = NXGE_ERROR; 524 goto nxge_attach_fail; 525 } 526 527 status = nxge_get_config_properties(nxgep); 528 529 if (status != NXGE_OK) { 530 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed")); 531 goto nxge_attach_fail; 532 } 533 534 /* 535 * Setup the Kstats for the driver. 536 */ 537 nxge_setup_kstats(nxgep); 538 539 nxge_setup_param(nxgep); 540 541 status = nxge_setup_system_dma_pages(nxgep); 542 if (status != NXGE_OK) { 543 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed")); 544 goto nxge_attach_fail; 545 } 546 547 #if defined(sun4v) 548 if (nxgep->niu_type == N2_NIU) { 549 nxgep->niu_hsvc_available = B_FALSE; 550 bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t)); 551 if ((status = 552 hsvc_register(&nxgep->niu_hsvc, 553 &nxgep->niu_min_ver)) != 0) { 554 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 555 "nxge_attach: " 556 "%s: cannot negotiate " 557 "hypervisor services " 558 "revision %d " 559 "group: 0x%lx " 560 "major: 0x%lx minor: 0x%lx " 561 "errno: %d", 562 niu_hsvc.hsvc_modname, 563 niu_hsvc.hsvc_rev, 564 niu_hsvc.hsvc_group, 565 niu_hsvc.hsvc_major, 566 niu_hsvc.hsvc_minor, 567 status)); 568 status = DDI_FAILURE; 569 goto nxge_attach_fail; 570 } 571 572 nxgep->niu_hsvc_available = B_TRUE; 573 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 574 "NIU Hypervisor service enabled")); 575 } 576 #endif 577 578 nxge_hw_id_init(nxgep); 579 nxge_hw_init_niu_common(nxgep); 580 581 status = nxge_setup_mutexes(nxgep); 582 if (status != NXGE_OK) { 583 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed")); 584 goto nxge_attach_fail; 585 } 586 587 status = nxge_setup_dev(nxgep); 588 if (status != DDI_SUCCESS) { 589 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed")); 590 goto nxge_attach_fail; 591 } 592 593 status = nxge_add_intrs(nxgep); 594 if (status != DDI_SUCCESS) { 595 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed")); 596 goto nxge_attach_fail; 597 } 598 status = nxge_add_soft_intrs(nxgep); 599 if (status != DDI_SUCCESS) { 600 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed")); 601 goto nxge_attach_fail; 602 } 603 604 /* 605 * Enable interrupts. 
    /*
     * Enable interrupts.
     */
    nxge_intrs_enable(nxgep);

    if ((status = nxge_mac_register(nxgep)) != DDI_SUCCESS) {
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "unable to register to mac layer (%d)", status));
	goto nxge_attach_fail;
    }

    mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
	instance));

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

    goto nxge_attach_exit;

nxge_attach_fail:
    nxge_unattach(nxgep);
    if (status != NXGE_OK)
	status = (NXGE_ERROR | NXGE_DDI_FAILED);
    nxgep = NULL;

nxge_attach_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	status));

    return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    int status = DDI_SUCCESS;
    int instance;
    p_nxge_t nxgep = NULL;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
    instance = ddi_get_instance(dip);
    nxgep = ddi_get_soft_state(nxge_list, instance);
    if (nxgep == NULL) {
	status = DDI_FAILURE;
	goto nxge_detach_exit;
    }

    switch (cmd) {
    case DDI_DETACH:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
	break;

    case DDI_PM_SUSPEND:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
	nxgep->suspended = DDI_PM_SUSPEND;
	nxge_suspend(nxgep);
	break;

    case DDI_SUSPEND:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
	if (nxgep->suspended != DDI_PM_SUSPEND) {
	    nxgep->suspended = DDI_SUSPEND;
	    nxge_suspend(nxgep);
	}
	break;

    default:
	status = DDI_FAILURE;
    }

    if (cmd != DDI_DETACH)
	goto nxge_detach_exit;

    /*
     * Stop the xcvr polling.
     */
    nxgep->suspended = cmd;

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

    if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_detach status = 0x%08X", status));
	return (DDI_FAILURE);
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	"<== nxge_detach (mac_unregister) status = 0x%08X", status));

    nxge_unattach(nxgep);
    nxgep = NULL;

nxge_detach_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	status));

    return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

    if (nxgep == NULL || nxgep->dev_regs == NULL) {
	return;
    }

    nxgep->nxge_magic = 0;

    if (nxgep->nxge_hw_p) {
	nxge_uninit_common_dev(nxgep);
	nxgep->nxge_hw_p = NULL;
    }

    if (nxgep->nxge_timerid) {
	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
	nxgep->nxge_timerid = 0;
    }

#if defined(sun4v)
    if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
	(void) hsvc_unregister(&nxgep->niu_hsvc);
	nxgep->niu_hsvc_available = B_FALSE;
    }
#endif
    /*
     * Stop any further interrupts.
     */
    nxge_remove_intrs(nxgep);

    /* remove soft interrupts */
    nxge_remove_soft_intrs(nxgep);

    /*
     * Stop the device and free resources.
     */
    nxge_destroy_dev(nxgep);

    /*
     * Tear down the ndd parameters setup.
     */
    nxge_destroy_param(nxgep);

    /*
     * Tear down the kstat setup.
     */
    nxge_destroy_kstats(nxgep);
    /*
     * Destroy all mutexes.
     */
    nxge_destroy_mutexes(nxgep);

    /*
     * Remove the list of ndd parameters which
     * were setup during attach.
     */
    if (nxgep->dip) {
	NXGE_DEBUG_MSG((nxgep, OBP_CTL,
	    " nxge_unattach: remove all properties"));

	(void) ddi_prop_remove_all(nxgep->dip);
    }

#if NXGE_PROPERTY
    nxge_remove_hard_properties(nxgep);
#endif

    /*
     * Unmap the register setup.
     */
    nxge_unmap_regs(nxgep);

    nxge_fm_fini(nxgep);

    ddi_soft_state_free(nxge_list, nxgep->instance);

    NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
    int ddi_status = DDI_SUCCESS;
    p_dev_regs_t dev_regs;
    char buf[MAXPATHLEN + 1];
    char *devname;
#ifdef NXGE_DEBUG
    char *sysname;
#endif
    off_t regsize;
    nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
    off_t pci_offset;
    uint16_t pcie_devctl;
#endif

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
    nxgep->dev_regs = NULL;
    dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
    dev_regs->nxge_regh = NULL;
    dev_regs->nxge_pciregh = NULL;
    dev_regs->nxge_msix_regh = NULL;
    dev_regs->nxge_vir_regh = NULL;
    dev_regs->nxge_vir2_regh = NULL;
    nxgep->niu_type = NEPTUNE;

    devname = ddi_pathname(nxgep->dip, buf);
    ASSERT(strlen(devname) > 0);
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	"nxge_map_regs: pathname devname %s", devname));

    if (strstr(devname, n2_siu_name)) {
	/* N2/NIU */
	nxgep->niu_type = N2_NIU;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: N2/NIU devname %s", devname));
	/* get function number */
	nxgep->function_num =
	    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: N2/NIU function number %d",
	    nxgep->function_num));
    } else {
	int *prop_val;
	uint_t prop_len;
	uint8_t func_num;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
	    0, "reg",
	    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
	    NXGE_DEBUG_MSG((nxgep, VPD_CTL,
		"Reg property not found"));
	    ddi_status = DDI_FAILURE;
	    goto nxge_map_regs_fail0;

	} else {
	    func_num = (prop_val[0] >> 8) & 0x7;
	    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		"Reg property found: fun # %d",
		func_num));
	    nxgep->function_num = func_num;
	    ddi_prop_free(prop_val);
	}
    }

    switch (nxgep->niu_type) {
    case NEPTUNE:
    case NEPTUNE_2:
    default:
	(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pci config size 0x%x", regsize));

	ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
	    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
	    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
	if (ddi_status != DDI_SUCCESS) {
	    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		"ddi_map_regs, nxge bus config regs failed"));
	    goto nxge_map_regs_fail0;
	}
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_reg: PCI config addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_pciregp,
	    dev_regs->nxge_pciregh));
	/*
	 * IMPORTANT: workaround for a hardware bit-swapping bug
	 * which ends up with no-snoop = yes, resulting in DMA
	 * not being synched properly.
	 */
#if !defined(_BIG_ENDIAN)
	/* workarounds for x86 systems */
	pci_offset = 0x80 + PCIE_DEVCTL;
	pcie_devctl = 0x0;
	pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
	pcie_devctl |= PCIE_DEVCTL_RO_EN;
	pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
	    pcie_devctl);
#endif

	(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pio size 0x%x", regsize));
	/* set up the device mapped register */
	ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
	    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
	    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
	if (ddi_status != DDI_SUCCESS) {
	    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		"ddi_map_regs for Neptune global reg failed"));
	    goto nxge_map_regs_fail1;
	}

	/* set up the msi/msi-x mapped register */
	(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: msix size 0x%x", regsize));
	ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
	    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
	    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
	if (ddi_status != DDI_SUCCESS) {
	    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		"ddi_map_regs for msi reg failed"));
	    goto nxge_map_regs_fail2;
	}

	/* set up the vio region mapped register */
	(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: vio size 0x%x", regsize));
	ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
	    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
	    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

	if (ddi_status != DDI_SUCCESS) {
	    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		"ddi_map_regs for nxge vio reg failed"));
	    goto nxge_map_regs_fail3;
	}
	nxgep->dev_regs = dev_regs;

	NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
	NPI_PCI_ADD_HANDLE_SET(nxgep,
	    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
	NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
	NPI_MSI_ADD_HANDLE_SET(nxgep,
	    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

	NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
	NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

	NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
	NPI_REG_ADD_HANDLE_SET(nxgep,
	    (npi_reg_ptr_t)dev_regs->nxge_regp);

	NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
	NPI_VREG_ADD_HANDLE_SET(nxgep,
	    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

	break;

    case N2_NIU:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
	/*
	 * Set up the device mapped register (FWARC 2006/556)
	 * (changed back to 1: reg starts at 1!)
	 */
	(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: dev size 0x%x", regsize));
	ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
	    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
	    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

	if (ddi_status != DDI_SUCCESS) {
	    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		"ddi_map_regs for N2/NIU, global reg failed "));
	    goto nxge_map_regs_fail1;
	}

	/* set up the first vio region mapped register */
	(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: vio (1) size 0x%x", regsize));
	ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
	    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
	    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

	if (ddi_status != DDI_SUCCESS) {
	    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		"ddi_map_regs for nxge vio reg failed"));
	    goto nxge_map_regs_fail2;
	}
	/* set up the second vio region mapped register */
	(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: vio (3) size 0x%x", regsize));
	ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
	    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
	    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

	if (ddi_status != DDI_SUCCESS) {
	    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		"ddi_map_regs for nxge vio2 reg failed"));
	    goto nxge_map_regs_fail3;
	}
	nxgep->dev_regs = dev_regs;

	NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
	NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

	NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
	NPI_REG_ADD_HANDLE_SET(nxgep,
	    (npi_reg_ptr_t)dev_regs->nxge_regp);

	NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
	NPI_VREG_ADD_HANDLE_SET(nxgep,
	    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

	NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
	NPI_V2REG_ADD_HANDLE_SET(nxgep,
	    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

	break;
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	" handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

    goto nxge_map_regs_exit;
nxge_map_regs_fail3:
    if (dev_regs->nxge_msix_regh) {
	ddi_regs_map_free(&dev_regs->nxge_msix_regh);
    }
    if (dev_regs->nxge_vir_regh) {
	/* free the vio handle here, not the global reg handle */
	ddi_regs_map_free(&dev_regs->nxge_vir_regh);
    }
nxge_map_regs_fail2:
    if (dev_regs->nxge_regh) {
	ddi_regs_map_free(&dev_regs->nxge_regh);
    }
nxge_map_regs_fail1:
    if (dev_regs->nxge_pciregh) {
	ddi_regs_map_free(&dev_regs->nxge_pciregh);
    }
nxge_map_regs_fail0:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
    kmem_free(dev_regs, sizeof (dev_regs_t));
nxge_map_regs_exit:
    if (ddi_status != DDI_SUCCESS)
	status |= (NXGE_ERROR | NXGE_DDI_FAILED);
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
    return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
    if (nxgep->dev_regs) {
	if (nxgep->dev_regs->nxge_pciregh) {
	    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		"==> nxge_unmap_regs: bus"));
	    ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
	    nxgep->dev_regs->nxge_pciregh = NULL;
	}
	if (nxgep->dev_regs->nxge_regh) {
	    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		"==> nxge_unmap_regs: device registers"));
	    ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
	    nxgep->dev_regs->nxge_regh = NULL;
	}
	if (nxgep->dev_regs->nxge_msix_regh) {
	    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		"==> nxge_unmap_regs: device interrupts"));
	    ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
	    nxgep->dev_regs->nxge_msix_regh = NULL;
	}
	if (nxgep->dev_regs->nxge_vir_regh) {
	    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		"==> nxge_unmap_regs: vio region"));
	    ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
	    nxgep->dev_regs->nxge_vir_regh = NULL;
	}
	if (nxgep->dev_regs->nxge_vir2_regh) {
	    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		"==> nxge_unmap_regs: vio2 region"));
	    ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
	    nxgep->dev_regs->nxge_vir2_regh = NULL;
	}

	kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
	nxgep->dev_regs = NULL;
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
    int ddi_status = DDI_SUCCESS;
    nxge_status_t status = NXGE_OK;
    nxge_classify_t *classify_ptr;
    int partition;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

    /*
     * Get the interrupt cookie so the mutexes can be
     * initialized.
     */
    ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
	&nxgep->interrupt_cookie);
    if (ddi_status != DDI_SUCCESS) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_setup_mutexes: failed 0x%x", ddi_status));
	goto nxge_setup_mutexes_exit;
    }

    /* Initialize the global mutexes. */

    if (nxge_mdio_lock_init == 0) {
	MUTEX_INIT(&nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
    }
    atomic_add_32(&nxge_mdio_lock_init, 1);

    if (nxge_mii_lock_init == 0) {
	MUTEX_INIT(&nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);
    }
    atomic_add_32(&nxge_mii_lock_init, 1);

    nxgep->drv_state |= STATE_MDIO_LOCK_INIT;
    nxgep->drv_state |= STATE_MII_LOCK_INIT;

    cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
    MUTEX_INIT(&nxgep->poll_lock, NULL,
	MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    /*
     * Initialize mutexes for this device.
     */
    MUTEX_INIT(nxgep->genlock, NULL,
	MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    MUTEX_INIT(&nxgep->mif_lock, NULL,
	MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    RW_INIT(&nxgep->filter_lock, NULL,
	RW_DRIVER, (void *)nxgep->interrupt_cookie);

    classify_ptr = &nxgep->classifier;
    /*
     * The FFLP mutexes are never used in interrupt context
     * because an FFLP operation can take a very long time to
     * complete and hence is not suitable to invoke from an
     * interrupt handler.
     */
    MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    if (nxgep->niu_type == NEPTUNE) {
	MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	for (partition = 0; partition < MAX_PARTITION; partition++) {
	    MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
		NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	}
    }

nxge_setup_mutexes_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	"<== nxge_setup_mutexes status = %x", status));

    if (ddi_status != DDI_SUCCESS)
	status |= (NXGE_ERROR | NXGE_DDI_FAILED);

    return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
    int partition;
    nxge_classify_t *classify_ptr;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
    RW_DESTROY(&nxgep->filter_lock);
    MUTEX_DESTROY(&nxgep->mif_lock);
    MUTEX_DESTROY(&nxgep->ouraddr_lock);
    MUTEX_DESTROY(nxgep->genlock);

    classify_ptr = &nxgep->classifier;
    MUTEX_DESTROY(&classify_ptr->tcam_lock);

    /* Destroy all polling resources. */
    MUTEX_DESTROY(&nxgep->poll_lock);
    cv_destroy(&nxgep->poll_cv);

    /* free data structures, based on HW type */
    if (nxgep->niu_type == NEPTUNE) {
	MUTEX_DESTROY(&classify_ptr->fcram_lock);
	for (partition = 0; partition < MAX_PARTITION; partition++) {
	    MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
	}
    }
    if (nxgep->drv_state & STATE_MDIO_LOCK_INIT) {
	if (nxge_mdio_lock_init == 1) {
	    MUTEX_DESTROY(&nxge_mdio_lock);
	}
	atomic_add_32(&nxge_mdio_lock_init, -1);
    }
    if (nxgep->drv_state & STATE_MII_LOCK_INIT) {
	if (nxge_mii_lock_init == 1) {
	    MUTEX_DESTROY(&nxge_mii_lock);
	}
	atomic_add_32(&nxge_mii_lock_init, -1);
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

    if (nxgep->drv_state & STATE_HW_INITIALIZED) {
	return (status);
    }

    /*
     * Allocate system memory for the receive/transmit buffer blocks
     * and receive/transmit descriptor rings.
     */
    status = nxge_alloc_mem_pool(nxgep);
    if (status != NXGE_OK) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
	goto nxge_init_fail1;
    }

    /*
     * Initialize and enable the TXC registers
     * (globally enable the TX controller, enable the port,
     * configure the dma channel bitmap, and configure the
     * max burst size).
     */
    status = nxge_txc_init(nxgep);
    if (status != NXGE_OK) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
	goto nxge_init_fail2;
    }

    /*
     * Initialize and enable TXDMA channels.
     */
    status = nxge_init_txdma_channels(nxgep);
    if (status != NXGE_OK) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
	goto nxge_init_fail3;
    }

    /*
     * Initialize and enable RXDMA channels.
     */
    status = nxge_init_rxdma_channels(nxgep);
    if (status != NXGE_OK) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
	goto nxge_init_fail4;
    }

    /*
     * Initialize TCAM and FCRAM (Neptune).
     */
    status = nxge_classify_init(nxgep);
    if (status != NXGE_OK) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
	goto nxge_init_fail5;
    }

    /*
     * Initialize ZCP.
     */
    status = nxge_zcp_init(nxgep);
    if (status != NXGE_OK) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
	goto nxge_init_fail5;
    }

    /*
     * Initialize IPP.
     */
    status = nxge_ipp_init(nxgep);
    if (status != NXGE_OK) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
	goto nxge_init_fail5;
    }

    /*
     * Initialize the MAC block.
     */
    status = nxge_mac_init(nxgep);
    if (status != NXGE_OK) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
	goto nxge_init_fail5;
    }

    nxge_intrs_enable(nxgep);

    /*
     * Enable hardware interrupts.
     */
    nxge_intr_hw_enable(nxgep);
    nxgep->drv_state |= STATE_HW_INITIALIZED;

    goto nxge_init_exit;

nxge_init_fail5:
    nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
    nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
    (void) nxge_txc_uninit(nxgep);
nxge_init_fail2:
    nxge_free_mem_pool(nxgep);
nxge_init_fail1:
    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	"<== nxge_init status (failed) = 0x%08x", status));
    return (status);

nxge_init_exit:

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	status));
    return (status);
}


timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
    if ((nxgep->suspended == 0) ||
	(nxgep->suspended == DDI_RESUME)) {
	return (timeout(func, (caddr_t)nxgep,
	    drv_usectohz(1000 * msec)));
    }
    return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
    if (timerid) {
	(void) untimeout(timerid);
    }
}

void
nxge_uninit(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_uninit: not initialized"));
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_uninit"));
	return;
    }

    /* stop timer */
    if (nxgep->nxge_timerid) {
	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
	nxgep->nxge_timerid = 0;
    }

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
    (void) nxge_intr_hw_disable(nxgep);
    /*
     * Reset the receive MAC side.
     */
    (void) nxge_rx_mac_disable(nxgep);

    /* Disable and soft reset the IPP */
    (void) nxge_ipp_disable(nxgep);

    /* Free classification resources */
    (void) nxge_classify_uninit(nxgep);

    /*
     * Reset the transmit/receive DMA side.
     */
    (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
    (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

    nxge_uninit_txdma_channels(nxgep);
    nxge_uninit_rxdma_channels(nxgep);

    /*
     * Reset the transmit MAC side.
     */
    (void) nxge_tx_mac_disable(nxgep);

    nxge_free_mem_pool(nxgep);

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

    nxgep->drv_state &= ~STATE_HW_INITIALIZED;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	"nxge_mblks_pending %d", nxge_mblks_pending));
}

/*
 * Read a 64-bit device register; the register offset is passed in
 * the first 8 bytes of the mblk and the value read is returned there.
 */
void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
    uint64_t reg;
    uint64_t regdata;
    int i, retry;

    bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
    regdata = 0;
    retry = 1;

    for (i = 0; i < retry; i++) {
	NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
    }
    bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

/*
 * Write a 64-bit device register; the mblk carries the register
 * offset in its first 8 bytes and the value to write in the next 8.
 */
void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
    uint64_t reg;
    uint64_t buf[2];

    bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
    reg = buf[0];

    NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}


nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;
/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
    char msg_buffer[1048];
    char prefix_buffer[32];
    int instance;
    uint64_t debug_level;
    int cmn_level = CE_CONT;
    va_list ap;

    debug_level = (nxgep == NULL) ? nxge_debug_level :
	nxgep->nxge_debug_level;

    if ((level & debug_level) ||
	(level == NXGE_NOTE) ||
	(level == NXGE_ERR_CTL)) {
	/* do the msg processing */
	if (nxge_debug_init == 0) {
	    MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
	    nxge_debug_init = 1;
	}

	MUTEX_ENTER(&nxgedebuglock);

	if ((level & NXGE_NOTE)) {
	    cmn_level = CE_NOTE;
	}

	if (level & NXGE_ERR_CTL) {
	    cmn_level = CE_WARN;
	}

	va_start(ap, fmt);
	(void) vsprintf(msg_buffer, fmt, ap);
	va_end(ap);
	if (nxgep == NULL) {
	    instance = -1;
	    (void) sprintf(prefix_buffer, "%s :", "nxge");
	} else {
	    instance = nxgep->instance;
	    (void) sprintf(prefix_buffer,
		"%s%d :", "nxge", instance);
	}

	MUTEX_EXIT(&nxgedebuglock);
	cmn_err(cmn_level, "!%s %s\n",
	    prefix_buffer, msg_buffer);

    }
}

char *
nxge_dump_packet(char *addr, int size)
{
    uchar_t *ap = (uchar_t *)addr;
    int i;
    static char etherbuf[1024];
    char *cp = etherbuf;
    char digits[] = "0123456789abcdef";

    if (!size)
	size = 60;

    if (size > MAX_DUMP_SZ) {
	/* Dump the leading bytes */
	for (i = 0; i < MAX_DUMP_SZ/2; i++) {
	    if (*ap > 0x0f)
		*cp++ = digits[*ap >> 4];
	    *cp++ = digits[*ap++ & 0xf];
	    *cp++ = ':';
	}
	for (i = 0; i < 20; i++)
	    *cp++ = '.';
	/* Dump the last MAX_DUMP_SZ/2 bytes */
	ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
	for (i = 0; i < MAX_DUMP_SZ/2; i++) {
	    if (*ap > 0x0f)
		*cp++ = digits[*ap >> 4];
	    *cp++ = digits[*ap++ & 0xf];
	    *cp++ = ':';
	}
    } else {
	for (i = 0; i < size; i++) {
	    if (*ap > 0x0f)
		*cp++ = digits[*ap >> 4];
	    *cp++ = digits[*ap++ & 0xf];
	    *cp++ = ':';
	}
    }
    *--cp = 0;
    return (etherbuf);
}
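/*
 * Usage illustration (hypothetical call site, not part of the driver):
 * nxge_dump_packet() renders a buffer as colon-separated hex for debug
 * logging, eliding the middle of anything longer than MAX_DUMP_SZ:
 *
 *	NXGE_DEBUG_MSG((nxgep, TX_CTL, "pkt: %s",
 *	    nxge_dump_packet((char *)mp->b_rptr, MBLKL(mp))));
 *
 * MBLKL() is the standard <sys/strsun.h> mblk length macro; TX_CTL is
 * assumed here as an example debug level.
 */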
#ifdef NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
    ddi_acc_handle_t cfg_handle;
    p_pci_cfg_t cfg_ptr;
    ddi_acc_handle_t dev_handle;
    char *dev_ptr;
    ddi_acc_handle_t pci_config_handle;
    uint32_t regval;
    int i;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

    dev_handle = nxgep->dev_regs->nxge_regh;
    dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

    if (nxgep->niu_type == NEPTUNE) {
	cfg_handle = nxgep->dev_regs->nxge_pciregh;
	cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
	    &cfg_ptr->vendorid));
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "\tvendorid 0x%x devid 0x%x",
	    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
	    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
	    "bar1c 0x%x",
	    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
	    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
	    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
	    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
	    "base 28 0x%x bar2c 0x%x\n",
	    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
	    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
	    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
	    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "\nNeptune PCI BAR: base30 0x%x\n",
	    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

	cfg_handle = nxgep->dev_regs->nxge_pciregh;
	cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "first 0x%llx second 0x%llx third 0x%llx "
	    "last 0x%llx ",
	    NXGE_PIO_READ64(dev_handle,
	    (uint64_t *)(dev_ptr + 0), 0),
	    NXGE_PIO_READ64(dev_handle,
	    (uint64_t *)(dev_ptr + 8), 0),
	    NXGE_PIO_READ64(dev_handle,
	    (uint64_t *)(dev_ptr + 16), 0),
	    NXGE_PIO_READ64(cfg_handle,
	    (uint64_t *)(dev_ptr + 24), 0)));
    }
}

#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

    nxge_intrs_disable(nxgep);
    nxge_destroy_dev(nxgep);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

    nxgep->suspended = DDI_RESUME;
    (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
    (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
    (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
    (void) nxge_rx_mac_enable(nxgep);
    (void) nxge_tx_mac_enable(nxgep);
    nxge_intrs_enable(nxgep);
    nxgep->suspended = 0;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	"<== nxge_resume status = 0x%x", status));
    return (status);
}

static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
	nxgep->mac.portnum));

    status = nxge_xcvr_find(nxgep);
    if (status != NXGE_OK) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    " nxge_setup_dev status "
	    " (xcvr find 0x%08x)", status));
	goto nxge_setup_dev_exit;
    }

    status = nxge_link_init(nxgep);

    if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "port%d Bad register acc handle", nxgep->mac.portnum));
	status = NXGE_ERROR;
    }

    if (status != NXGE_OK) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    " nxge_setup_dev status "
	    "(xcvr init 0x%08x)", status));
	goto nxge_setup_dev_exit;
    }

nxge_setup_dev_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	"<== nxge_setup_dev port %d status = 0x%08x",
	nxgep->mac.portnum, status));

    return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

    (void) nxge_hw_stop(nxgep);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}

static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
    int ddi_status = DDI_SUCCESS;
    uint_t count;
    ddi_dma_cookie_t cookie;
    uint_t iommu_pagesize;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
    nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
    if (nxgep->niu_type != N2_NIU) {
	iommu_pagesize = dvma_pagesize(nxgep->dip);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    " default_block_size %d iommu_pagesize %d",
%d iommu_pagesize %d", 1700 nxgep->sys_page_sz, 1701 ddi_ptob(nxgep->dip, (ulong_t)1), 1702 nxgep->rx_default_block_size, 1703 iommu_pagesize)); 1704 1705 if (iommu_pagesize != 0) { 1706 if (nxgep->sys_page_sz == iommu_pagesize) { 1707 if (iommu_pagesize > 0x4000) 1708 nxgep->sys_page_sz = 0x4000; 1709 } else { 1710 if (nxgep->sys_page_sz > iommu_pagesize) 1711 nxgep->sys_page_sz = iommu_pagesize; 1712 } 1713 } 1714 } 1715 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 1716 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1717 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1718 "default_block_size %d page mask %d", 1719 nxgep->sys_page_sz, 1720 ddi_ptob(nxgep->dip, (ulong_t)1), 1721 nxgep->rx_default_block_size, 1722 nxgep->sys_page_mask)); 1723 1724 1725 switch (nxgep->sys_page_sz) { 1726 default: 1727 nxgep->sys_page_sz = 0x1000; 1728 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 1729 nxgep->rx_default_block_size = 0x1000; 1730 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 1731 break; 1732 case 0x1000: 1733 nxgep->rx_default_block_size = 0x1000; 1734 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 1735 break; 1736 case 0x2000: 1737 nxgep->rx_default_block_size = 0x2000; 1738 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 1739 break; 1740 case 0x4000: 1741 nxgep->rx_default_block_size = 0x4000; 1742 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 1743 break; 1744 case 0x8000: 1745 nxgep->rx_default_block_size = 0x8000; 1746 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 1747 break; 1748 } 1749 1750 #ifndef USE_RX_BIG_BUF 1751 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 1752 #else 1753 nxgep->rx_default_block_size = 0x2000; 1754 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 1755 #endif 1756 /* 1757 * Get the system DMA burst size. 1758 */ 1759 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 1760 DDI_DMA_DONTWAIT, 0, 1761 &nxgep->dmasparehandle); 1762 if (ddi_status != DDI_SUCCESS) { 1763 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1764 "ddi_dma_alloc_handle: failed " 1765 " status 0x%x", ddi_status)); 1766 goto nxge_get_soft_properties_exit; 1767 } 1768 1769 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 1770 (caddr_t)nxgep->dmasparehandle, 1771 sizeof (nxgep->dmasparehandle), 1772 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1773 DDI_DMA_DONTWAIT, 0, 1774 &cookie, &count); 1775 if (ddi_status != DDI_DMA_MAPPED) { 1776 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1777 "Binding spare handle to find system" 1778 " burstsize failed.")); 1779 ddi_status = DDI_FAILURE; 1780 goto nxge_get_soft_properties_fail1; 1781 } 1782 1783 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 1784 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 1785 1786 nxge_get_soft_properties_fail1: 1787 ddi_dma_free_handle(&nxgep->dmasparehandle); 1788 1789 nxge_get_soft_properties_exit: 1790 1791 if (ddi_status != DDI_SUCCESS) 1792 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1793 1794 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1795 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 1796 return (status); 1797 } 1798 1799 static nxge_status_t 1800 nxge_alloc_mem_pool(p_nxge_t nxgep) 1801 { 1802 nxge_status_t status = NXGE_OK; 1803 1804 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 1805 1806 status = nxge_alloc_rx_mem_pool(nxgep); 1807 if (status != NXGE_OK) { 1808 return (NXGE_ERROR); 1809 } 1810 1811 status = nxge_alloc_tx_mem_pool(nxgep); 1812 if (status != NXGE_OK) { 1813 nxge_free_rx_mem_pool(nxgep); 1814 return (NXGE_ERROR); 1815 } 1816 1817 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 1818 
#ifndef USE_RX_BIG_BUF
    nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
    nxgep->rx_default_block_size = 0x2000;
    nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
    /*
     * Get the system DMA burst size.
     */
    ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
	DDI_DMA_DONTWAIT, 0,
	&nxgep->dmasparehandle);
    if (ddi_status != DDI_SUCCESS) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "ddi_dma_alloc_handle: failed "
	    " status 0x%x", ddi_status));
	goto nxge_get_soft_properties_exit;
    }

    ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
	(caddr_t)nxgep->dmasparehandle,
	sizeof (nxgep->dmasparehandle),
	DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	DDI_DMA_DONTWAIT, 0,
	&cookie, &count);
    if (ddi_status != DDI_DMA_MAPPED) {
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Binding spare handle to find system"
	    " burstsize failed."));
	ddi_status = DDI_FAILURE;
	goto nxge_get_soft_properties_fail1;
    }

    nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
    (void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

nxge_get_soft_properties_fail1:
    ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:

    if (ddi_status != DDI_SUCCESS)
	status |= (NXGE_ERROR | NXGE_DDI_FAILED);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	"<== nxge_setup_system_dma_pages status = 0x%08x", status));
    return (status);
}

static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));

    status = nxge_alloc_rx_mem_pool(nxgep);
    if (status != NXGE_OK) {
	return (NXGE_ERROR);
    }

    status = nxge_alloc_tx_mem_pool(nxgep);
    if (status != NXGE_OK) {
	nxge_free_rx_mem_pool(nxgep);
	return (NXGE_ERROR);
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
    return (NXGE_OK);
}

static void
nxge_free_mem_pool(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));

    nxge_free_rx_mem_pool(nxgep);
    nxge_free_tx_mem_pool(nxgep);

    NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}

static nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
{
    int i, j;
    uint32_t ndmas, st_rdc;
    p_nxge_dma_pt_cfg_t p_all_cfgp;
    p_nxge_hw_pt_cfg_t p_cfgp;
    p_nxge_dma_pool_t dma_poolp;
    p_nxge_dma_common_t *dma_buf_p;
    p_nxge_dma_pool_t dma_cntl_poolp;
    p_nxge_dma_common_t *dma_cntl_p;
    size_t rx_buf_alloc_size;
    size_t rx_cntl_alloc_size;
    uint32_t *num_chunks;	/* per dma */
    nxge_status_t status = NXGE_OK;

    uint32_t nxge_port_rbr_size;
    uint32_t nxge_port_rbr_spare_size;
    uint32_t nxge_port_rcr_size;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));

    p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
    p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
    st_rdc = p_cfgp->start_rdc;
    ndmas = p_cfgp->max_rdcs;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	" nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));

    /*
     * Allocate memory for each receive DMA channel.
     */
    dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
	KM_SLEEP);
    dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

    dma_cntl_poolp = (p_nxge_dma_pool_t)
	KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
    dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

    num_chunks = (uint32_t *)KMEM_ZALLOC(
	sizeof (uint32_t) * ndmas, KM_SLEEP);

    /*
     * Assume that each DMA channel will be configured with the
     * default block size.  The RBR block count is rounded up to
     * a multiple of the post batch count (16).
     */
    nxge_port_rbr_size = p_all_cfgp->rbr_size;
    nxge_port_rcr_size = p_all_cfgp->rcr_size;

    if (!nxge_port_rbr_size) {
	nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
    }
    if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
	nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
	    (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
    }

    p_all_cfgp->rbr_size = nxge_port_rbr_size;
    nxge_port_rbr_spare_size = nxge_rbr_spare_size;

    if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
	nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
	    (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
    }
    /*
     * The N2/NIU has limitations on the descriptor sizes: contiguous
     * memory allocation (contig_mem_alloc) for data buffers is capped
     * at 4 MB, and control buffers must be little endian (and must be
     * allocated with the DDI/DKI memory allocation functions).
     */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
    if (nxgep->niu_type == N2_NIU) {
	nxge_port_rbr_spare_size = 0;
	if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
	    (!ISP2(nxge_port_rbr_size))) {
	    nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
	}
	if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
	    (!ISP2(nxge_port_rcr_size))) {
	    nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
	}
    }
#endif

    rx_buf_alloc_size = (nxgep->rx_default_block_size *
	(nxge_port_rbr_size + nxge_port_rbr_spare_size));

    /*
     * Addresses of the receive block ring, the receive completion ring
     * and the mailbox must all be cache-aligned (64 bytes).
     */
    rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
    rx_cntl_alloc_size *= (sizeof (rx_desc_t));
    rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
    rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
	"nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
	"nxge_port_rcr_size = %d "
	"rx_cntl_alloc_size = %d",
	nxge_port_rbr_size, nxge_port_rbr_spare_size,
	nxge_port_rcr_size,
	rx_cntl_alloc_size));

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
    if (nxgep->niu_type == N2_NIU) {
	if (!ISP2(rx_buf_alloc_size)) {
	    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		"==> nxge_alloc_rx_mem_pool: "
		" must be power of 2"));
	    status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	    goto nxge_alloc_rx_mem_pool_exit;
	}

	if (rx_buf_alloc_size > (1 << 22)) {
	    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		"==> nxge_alloc_rx_mem_pool: "
		" limit size to 4M"));
	    status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	    goto nxge_alloc_rx_mem_pool_exit;
	}

	if (rx_cntl_alloc_size < 0x2000) {
	    rx_cntl_alloc_size = 0x2000;
	}
    }
#endif
    nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
    nxgep->nxge_port_rcr_size = nxge_port_rcr_size;

    /*
     * Allocate memory for the receive buffers and descriptor rings.
     * Replace these allocation functions with the interface functions
     * provided by the partition manager when it is available.
     */
    /*
     * Allocate memory for the receive buffer blocks.
     */
    for (i = 0; i < ndmas; i++) {
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_alloc_rx_mem_pool to alloc mem: "
	    " dma %d dma_buf_p %llx &dma_buf_p %llx",
	    i, dma_buf_p[i], &dma_buf_p[i]));
	num_chunks[i] = 0;
	status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i],
	    rx_buf_alloc_size,
	    nxgep->rx_default_block_size, &num_chunks[i]);
	if (status != NXGE_OK) {
	    break;
	}
	st_rdc++;
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_alloc_rx_mem_pool DONE alloc mem: "
	    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
	    dma_buf_p[i], &dma_buf_p[i]));
    }
    if (i < ndmas) {
	goto nxge_alloc_rx_mem_fail1;
    }
    /*
     * Allocate memory for the descriptor rings and mailbox.
     */
1999 */ 2000 st_rdc = p_cfgp->start_rdc; 2001 for (j = 0; j < ndmas; j++) { 2002 status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j], 2003 rx_cntl_alloc_size); 2004 if (status != NXGE_OK) { 2005 break; 2006 } 2007 st_rdc++; 2008 } 2009 if (j < ndmas) { 2010 goto nxge_alloc_rx_mem_fail2; 2011 } 2012 2013 dma_poolp->ndmas = ndmas; 2014 dma_poolp->num_chunks = num_chunks; 2015 dma_poolp->buf_allocated = B_TRUE; 2016 nxgep->rx_buf_pool_p = dma_poolp; 2017 dma_poolp->dma_buf_pool_p = dma_buf_p; 2018 2019 dma_cntl_poolp->ndmas = ndmas; 2020 dma_cntl_poolp->buf_allocated = B_TRUE; 2021 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2022 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2023 2024 goto nxge_alloc_rx_mem_pool_exit; 2025 2026 nxge_alloc_rx_mem_fail2: 2027 /* Free control buffers */ 2028 j--; 2029 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2030 "==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j)); 2031 for (; j >= 0; j--) { 2032 nxge_free_rx_cntl_dma(nxgep, 2033 (p_nxge_dma_common_t)dma_cntl_p[j]); 2034 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2035 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", 2036 j)); 2037 } 2038 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2039 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j)); 2040 2041 nxge_alloc_rx_mem_fail1: 2042 /* Free data buffers */ 2043 i--; 2044 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2045 "==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i)); 2046 for (; i >= 0; i--) { 2047 nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2048 num_chunks[i]); 2049 } 2050 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2051 "==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i)); 2052 2053 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2054 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2055 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2056 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2057 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2058 2059 nxge_alloc_rx_mem_pool_exit: 2060 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2061 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2062 2063 return (status); 2064 } 2065 2066 static void 2067 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2068 { 2069 uint32_t i, ndmas; 2070 p_nxge_dma_pool_t dma_poolp; 2071 p_nxge_dma_common_t *dma_buf_p; 2072 p_nxge_dma_pool_t dma_cntl_poolp; 2073 p_nxge_dma_common_t *dma_cntl_p; 2074 uint32_t *num_chunks; 2075 2076 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2077 2078 dma_poolp = nxgep->rx_buf_pool_p; 2079 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2080 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2081 "<== nxge_free_rx_mem_pool " 2082 "(null rx buf pool or buf not allocated")); 2083 return; 2084 } 2085 2086 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2087 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2088 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2089 "<== nxge_free_rx_mem_pool " 2090 "(null rx cntl buf pool or cntl buf not allocated")); 2091 return; 2092 } 2093 2094 dma_buf_p = dma_poolp->dma_buf_pool_p; 2095 num_chunks = dma_poolp->num_chunks; 2096 2097 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2098 ndmas = dma_cntl_poolp->ndmas; 2099 2100 for (i = 0; i < ndmas; i++) { 2101 nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2102 } 2103 2104 for (i = 0; i < ndmas; i++) { 2105 nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]); 2106 } 2107 2108 for (i = 0; i < ndmas; i++) { 2109 KMEM_FREE(dma_buf_p[i], 2110 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2111 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2112 } 2113 2114 
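/*
 * Worked example of the control-area sizing above.  The concrete values
 * are assumptions for illustration, not the driver's actual defaults:
 * with nxge_port_rbr_size = 4096, nxge_port_rbr_spare_size = 0,
 * nxge_port_rcr_size = 8192, sizeof (rx_desc_t) = 8,
 * sizeof (rcr_entry_t) = 8 and sizeof (rxdma_mailbox_t) = 64:
 *
 *	rx_cntl_alloc_size = 4096 * 8		(RBR descriptors)
 *	    + 8192 * 8				(RCR entries)
 *	    + 64				(mailbox)
 *	    = 32768 + 65536 + 64 = 98368 bytes per channel.
 *
 * The batch rounding works the same way: a requested RBR size of 1000
 * is not a multiple of NXGE_RXDMA_POST_BATCH (16), so it is rounded up
 * to 16 * (1000 / 16 + 1) = 16 * 63 = 1008 blocks.
 */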
static void
nxge_free_rx_mem_pool(p_nxge_t nxgep)
{
	uint32_t		i, ndmas;
	p_nxge_dma_pool_t	dma_poolp;
	p_nxge_dma_common_t	*dma_buf_p;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_nxge_dma_common_t	*dma_cntl_p;
	uint32_t		*num_chunks;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));

	dma_poolp = nxgep->rx_buf_pool_p;
	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_rx_mem_pool "
		    "(null rx buf pool or buf not allocated)"));
		return;
	}

	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_rx_mem_pool "
		    "(null rx cntl buf pool or cntl buf not allocated)"));
		return;
	}

	dma_buf_p = dma_poolp->dma_buf_pool_p;
	num_chunks = dma_poolp->num_chunks;

	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
	ndmas = dma_cntl_poolp->ndmas;

	for (i = 0; i < ndmas; i++) {
		nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
	}

	for (i = 0; i < ndmas; i++) {
		nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]);
	}

	for (i = 0; i < ndmas; i++) {
		KMEM_FREE(dma_buf_p[i],
		    sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
	}

	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));

	nxgep->rx_buf_pool_p = NULL;
	nxgep->rx_cntl_pool_p = NULL;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
}


static nxge_status_t
nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap,
    size_t alloc_size, size_t block_size, uint32_t *num_chunks)
{
	p_nxge_dma_common_t	rx_dmap;
	nxge_status_t		status = NXGE_OK;
	size_t			total_alloc_size;
	size_t			allocated = 0;
	int			i, size_index, array_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));

	rx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
	    KM_SLEEP);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
	    dma_channel, alloc_size, block_size, dmap));

	total_alloc_size = alloc_size;

#if defined(RX_USE_RECLAIM_POST)
	total_alloc_size = alloc_size + alloc_size/4;
#endif

	i = 0;
	size_index = 0;
	array_size = sizeof (alloc_sizes)/sizeof (size_t);
	while ((size_index < array_size) &&
	    (alloc_sizes[size_index] < alloc_size))
		size_index++;
	if (size_index >= array_size) {
		size_index = array_size - 1;
	}

	while ((allocated < total_alloc_size) &&
	    (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
		rx_dmap[i].dma_chunk_index = i;
		rx_dmap[i].block_size = block_size;
		rx_dmap[i].alength = alloc_sizes[size_index];
		rx_dmap[i].orig_alength = rx_dmap[i].alength;
		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
		rx_dmap[i].dma_channel = dma_channel;
		rx_dmap[i].contig_alloc_type = B_FALSE;

		/*
		 * N2/NIU: data buffers must be contiguous as the driver
		 * needs to call the Hypervisor API to set up
		 * logical pages.
		 */
		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
			rx_dmap[i].contig_alloc_type = B_TRUE;
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
		    "i %d nblocks %d alength %d",
		    dma_channel, i, &rx_dmap[i], block_size,
		    i, rx_dmap[i].nblocks,
		    rx_dmap[i].alength));
		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
		    &nxge_rx_dma_attr,
		    rx_dmap[i].alength,
		    &nxge_dev_buf_dma_acc_attr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    (p_nxge_dma_common_t)(&rx_dmap[i]));
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_alloc_rx_buf_dma: Alloc Failed "));
			size_index--;
		} else {
			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
			    " alloc_rx_buf_dma allocated rdc %d "
			    "chunk %d size %x dvma %x bufp %llx ",
			    dma_channel, i, rx_dmap[i].alength,
			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
			i++;
			allocated += alloc_sizes[size_index];
		}
	}


	if (allocated < total_alloc_size) {
		goto nxge_alloc_rx_mem_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    " alloc_rx_buf_dma rdc %d allocated %d chunks",
	    dma_channel, i));
	*num_chunks = i;
	*dmap = rx_dmap;

	goto nxge_alloc_rx_mem_exit;

nxge_alloc_rx_mem_fail1:
	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);

nxge_alloc_rx_mem_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rx_buf_dma status 0x%08x", status));

	return (status);
}

/*ARGSUSED*/
static void
nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
    uint32_t num_chunks)
{
	int		i;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));

	for (i = 0; i < num_chunks; i++) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
		    i, dmap));
		nxge_dma_mem_free(dmap++);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma"));
}

/*ARGSUSED*/
static nxge_status_t
nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap, size_t size)
{
	p_nxge_dma_common_t	rx_dmap;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));

	rx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);

	rx_dmap->contig_alloc_type = B_FALSE;

	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
	    &nxge_desc_dma_attr,
	    size,
	    &nxge_dev_desc_dma_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    rx_dmap);
	if (status != NXGE_OK) {
		goto nxge_alloc_rx_cntl_dma_fail1;
	}

	*dmap = rx_dmap;
	goto nxge_alloc_rx_cntl_dma_exit;

nxge_alloc_rx_cntl_dma_fail1:
	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));

nxge_alloc_rx_cntl_dma_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));

	return (status);
}

/*ARGSUSED*/
static void
nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
{
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));

	nxge_dma_mem_free(dmap);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
}
static nxge_status_t
nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
{
	nxge_status_t		status = NXGE_OK;
	int			i, j;
	uint32_t		ndmas, st_tdc;
	p_nxge_dma_pt_cfg_t	p_all_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	p_nxge_dma_pool_t	dma_poolp;
	p_nxge_dma_common_t	*dma_buf_p;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_nxge_dma_common_t	*dma_cntl_p;
	size_t			tx_buf_alloc_size;
	size_t			tx_cntl_alloc_size;
	uint32_t		*num_chunks;	/* per dma */
	uint32_t		bcopy_thresh;

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));

	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	st_tdc = p_cfgp->start_tdc;
	ndmas = p_cfgp->max_tdcs;

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: "
	    "p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d",
	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs));
	/*
	 * Allocate memory for each transmit DMA channel.
	 */
	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_cntl_poolp = (p_nxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	/*
	 * N2/NIU constrains the descriptor allocations: contiguous memory
	 * allocation (contig_mem_alloc) for data buffers is limited to 4M,
	 * and control buffers must be allocated with the ddi/dki memory
	 * allocation functions.  The transmit ring is limited to 8K
	 * (including the mailbox).
	 */
	if (nxgep->niu_type == N2_NIU) {
		if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
		    (!ISP2(nxge_tx_ring_size))) {
			nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
		}
	}
#endif

	nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;

	/*
	 * Assume that each DMA channel will be configured with the default
	 * transmit buffer size for copying transmit data.
	 * (Packets with a payload over this limit are not copied.)
	 */
	if (nxgep->niu_type == N2_NIU) {
		bcopy_thresh = TX_BCOPY_SIZE;
	} else {
		bcopy_thresh = nxge_bcopy_thresh;
	}
	tx_buf_alloc_size = (bcopy_thresh * nxge_tx_ring_size);

	/*
	 * Addresses of the transmit descriptor ring and the
	 * mailbox must both be cache-aligned (64 bytes).
	 */
	tx_cntl_alloc_size = nxge_tx_ring_size;
	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);

#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		if (!ISP2(tx_buf_alloc_size)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_alloc_tx_mem_pool: "
			    " must be power of 2"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			goto nxge_alloc_tx_mem_pool_exit;
		}

		if (tx_buf_alloc_size > (1 << 22)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_alloc_tx_mem_pool: "
			    " limit size to 4M"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			goto nxge_alloc_tx_mem_pool_exit;
		}

		if (tx_cntl_alloc_size < 0x2000) {
			tx_cntl_alloc_size = 0x2000;
		}
	}
#endif

	num_chunks = (uint32_t *)KMEM_ZALLOC(
	    sizeof (uint32_t) * ndmas, KM_SLEEP);

	/*
	 * Allocate memory for transmit buffers and descriptor rings.
	 * Replace these allocation functions with the interface functions
	 * provided by the partition manager when it is available.
	 *
	 * Allocate memory for the transmit buffer pool.
	 */
	for (i = 0; i < ndmas; i++) {
		num_chunks[i] = 0;
		status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i],
		    tx_buf_alloc_size,
		    bcopy_thresh, &num_chunks[i]);
		if (status != NXGE_OK) {
			break;
		}
		st_tdc++;
	}
	if (i < ndmas) {
		goto nxge_alloc_tx_mem_pool_fail1;
	}

	st_tdc = p_cfgp->start_tdc;
	/*
	 * Allocate memory for descriptor rings and mailbox.
	 */
	for (j = 0; j < ndmas; j++) {
		status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j],
		    tx_cntl_alloc_size);
		if (status != NXGE_OK) {
			break;
		}
		st_tdc++;
	}
	if (j < ndmas) {
		goto nxge_alloc_tx_mem_pool_fail2;
	}

	dma_poolp->ndmas = ndmas;
	dma_poolp->num_chunks = num_chunks;
	dma_poolp->buf_allocated = B_TRUE;
	dma_poolp->dma_buf_pool_p = dma_buf_p;
	nxgep->tx_buf_pool_p = dma_poolp;

	dma_cntl_poolp->ndmas = ndmas;
	dma_cntl_poolp->buf_allocated = B_TRUE;
	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
	nxgep->tx_cntl_pool_p = dma_cntl_poolp;

	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
	    "==> nxge_alloc_tx_mem_pool: start_tdc %d "
	    "ndmas %d poolp->ndmas %d",
	    st_tdc, ndmas, dma_poolp->ndmas));

	goto nxge_alloc_tx_mem_pool_exit;

nxge_alloc_tx_mem_pool_fail2:
	/* Free control buffers */
	j--;
	for (; j >= 0; j--) {
		nxge_free_tx_cntl_dma(nxgep,
		    (p_nxge_dma_common_t)dma_cntl_p[j]);
	}

nxge_alloc_tx_mem_pool_fail1:
	/* Free data buffers */
	i--;
	for (; i >= 0; i--) {
		nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
		    num_chunks[i]);
	}

	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);

nxge_alloc_tx_mem_pool_exit:
	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
	    "<== nxge_alloc_tx_mem_pool:status 0x%08x", status));

	return (status);
}
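/*
 * Worked example of the transmit sizing above (the values are
 * assumptions for illustration only): with nxge_tx_ring_size = 1024,
 * bcopy_thresh = 2048 and sizeof (tx_desc_t) = 8:
 *
 *	tx_buf_alloc_size  = 2048 * 1024 = 2M	(pre-mapped bcopy buffers)
 *	tx_cntl_alloc_size = 1024 * 8 + sizeof (txdma_mailbox_t)
 *
 * 2M is a power of 2 and below the 4M N2/NIU contig_mem_alloc limit,
 * so both sanity checks in the NIU_LP_WORKAROUND block above pass.
 */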
static nxge_status_t
nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap, size_t alloc_size,
    size_t block_size, uint32_t *num_chunks)
{
	p_nxge_dma_common_t	tx_dmap;
	nxge_status_t		status = NXGE_OK;
	size_t			total_alloc_size;
	size_t			allocated = 0;
	int			i, size_index, array_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));

	tx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
	    KM_SLEEP);

	total_alloc_size = alloc_size;
	i = 0;
	size_index = 0;
	array_size = sizeof (alloc_sizes) / sizeof (size_t);
	while ((size_index < array_size) &&
	    (alloc_sizes[size_index] < alloc_size))
		size_index++;
	if (size_index >= array_size) {
		size_index = array_size - 1;
	}

	while ((allocated < total_alloc_size) &&
	    (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {

		tx_dmap[i].dma_chunk_index = i;
		tx_dmap[i].block_size = block_size;
		tx_dmap[i].alength = alloc_sizes[size_index];
		tx_dmap[i].orig_alength = tx_dmap[i].alength;
		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
		tx_dmap[i].dma_channel = dma_channel;
		tx_dmap[i].contig_alloc_type = B_FALSE;

		/*
		 * N2/NIU: data buffers must be contiguous as the driver
		 * needs to call the Hypervisor API to set up
		 * logical pages.
		 */
		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
			tx_dmap[i].contig_alloc_type = B_TRUE;
		}

		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
		    &nxge_tx_dma_attr,
		    tx_dmap[i].alength,
		    &nxge_dev_buf_dma_acc_attr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    (p_nxge_dma_common_t)(&tx_dmap[i]));
		if (status != NXGE_OK) {
			size_index--;
		} else {
			i++;
			allocated += alloc_sizes[size_index];
		}
	}

	if (allocated < total_alloc_size) {
		goto nxge_alloc_tx_mem_fail1;
	}

	*num_chunks = i;
	*dmap = tx_dmap;
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
	    *dmap, i));
	goto nxge_alloc_tx_mem_exit;

nxge_alloc_tx_mem_fail1:
	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);

nxge_alloc_tx_mem_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_tx_buf_dma status 0x%08x", status));

	return (status);
}

/*ARGSUSED*/
static void
nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
    uint32_t num_chunks)
{
	int		i;

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));

	for (i = 0; i < num_chunks; i++) {
		nxge_dma_mem_free(dmap++);
	}

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
}

/*ARGSUSED*/
static nxge_status_t
nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap, size_t size)
{
	p_nxge_dma_common_t	tx_dmap;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
	tx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);

	tx_dmap->contig_alloc_type = B_FALSE;

	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
	    &nxge_desc_dma_attr,
	    size,
	    &nxge_dev_desc_dma_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    tx_dmap);
	if (status != NXGE_OK) {
		goto nxge_alloc_tx_cntl_dma_fail1;
	}

	*dmap = tx_dmap;
	goto nxge_alloc_tx_cntl_dma_exit;

nxge_alloc_tx_cntl_dma_fail1:
	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));

nxge_alloc_tx_cntl_dma_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_tx_cntl_dma status 0x%08x", status));

	return (status);
}

/*ARGSUSED*/
static void
nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
{
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));

	nxge_dma_mem_free(dmap);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
}

static void
nxge_free_tx_mem_pool(p_nxge_t nxgep)
{
	uint32_t		i, ndmas;
	p_nxge_dma_pool_t	dma_poolp;
	p_nxge_dma_common_t	*dma_buf_p;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_nxge_dma_common_t	*dma_cntl_p;
	uint32_t		*num_chunks;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool"));

	dma_poolp = nxgep->tx_buf_pool_p;
	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		    "<== nxge_free_tx_mem_pool "
		    "(null tx buf pool or buf not allocated)"));
		return;
	}

	dma_cntl_poolp = nxgep->tx_cntl_pool_p;
	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		    "<== nxge_free_tx_mem_pool "
		    "(null tx cntl buf pool or cntl buf not allocated)"));
		return;
	}

	dma_buf_p = dma_poolp->dma_buf_pool_p;
	num_chunks = dma_poolp->num_chunks;

	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
	ndmas = dma_cntl_poolp->ndmas;

	for (i = 0; i < ndmas; i++) {
		nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
	}

	for (i = 0; i < ndmas; i++) {
		nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]);
	}

	for (i = 0; i < ndmas; i++) {
		KMEM_FREE(dma_buf_p[i],
		    sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
	}

	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));

	nxgep->tx_buf_pool_p = NULL;
	nxgep->tx_cntl_pool_p = NULL;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool"));
}
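/*
 * nxge_dma_mem_alloc() below takes one of two paths, selected by
 * dma_p->contig_alloc_type:
 *
 *   B_FALSE: ddi_dma_mem_alloc() followed by ddi_dma_addr_bind_handle();
 *	the bind must yield exactly one cookie.
 *   B_TRUE (sun4v N2/NIU only): contig_mem_alloc(), so the buffer is
 *	physically contiguous for the Hypervisor logical-page setup.
 *
 * Either way the caller supplies a DMA attribute structure, a device
 * access attribute structure and the transfer flags, as in this sketch
 * (mirroring the descriptor-ring allocations above):
 *
 *	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
 *	    &nxge_desc_dma_attr, size, &nxge_dev_desc_dma_acc_attr,
 *	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, dmap);
 */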
/*ARGSUSED*/
static nxge_status_t
nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
    struct ddi_dma_attr *dma_attrp,
    size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
    p_nxge_dma_common_t dma_p)
{
	caddr_t		kaddrp;
	int		ddi_status = DDI_SUCCESS;
	boolean_t	contig_alloc_type;

	contig_alloc_type = dma_p->contig_alloc_type;

	if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
		/*
		 * contig_alloc_type for contiguous memory is only
		 * allowed for N2/NIU.
		 */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_dma_mem_alloc: alloc type not allowed (%d)",
		    dma_p->contig_alloc_type));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	dma_p->dma_handle = NULL;
	dma_p->acc_handle = NULL;
	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	switch (contig_alloc_type) {
	case B_FALSE:
		ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length,
		    acc_attr_p,
		    xfer_flags,
		    DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
		    &dma_p->acc_handle);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
			ddi_dma_free_handle(&dma_p->dma_handle);
			dma_p->dma_handle = NULL;
			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		if (dma_p->alength < length) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
			    "< length."));
			ddi_dma_mem_free(&dma_p->acc_handle);
			ddi_dma_free_handle(&dma_p->dma_handle);
			dma_p->acc_handle = NULL;
			dma_p->dma_handle = NULL;
			return (NXGE_ERROR);
		}

		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
		    &dma_p->dma_cookie, &dma_p->ncookies);
		if (ddi_status != DDI_DMA_MAPPED) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
			    "(status 0x%x ncookies %d.)", ddi_status,
			    dma_p->ncookies));
			if (dma_p->acc_handle) {
				ddi_dma_mem_free(&dma_p->acc_handle);
				dma_p->acc_handle = NULL;
			}
			ddi_dma_free_handle(&dma_p->dma_handle);
			dma_p->dma_handle = NULL;
			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		if (dma_p->ncookies != 1) {
			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
			    "> 1 cookie "
			    "(status 0x%x ncookies %d.)", ddi_status,
			    dma_p->ncookies));
			if (dma_p->acc_handle) {
				ddi_dma_mem_free(&dma_p->acc_handle);
				dma_p->acc_handle = NULL;
			}
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			ddi_dma_free_handle(&dma_p->dma_handle);
			dma_p->dma_handle = NULL;
			return (NXGE_ERROR);
		}
		break;

#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	case B_TRUE:
		kaddrp = (caddr_t)contig_mem_alloc(length);
		if (kaddrp == NULL) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:contig_mem_alloc failed."));
			ddi_dma_free_handle(&dma_p->dma_handle);
			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		dma_p->alength = length;
		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
		    &dma_p->dma_cookie, &dma_p->ncookies);
		if (ddi_status != DDI_DMA_MAPPED) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
			    "(status 0x%x ncookies %d.)", ddi_status,
			    dma_p->ncookies));

			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
			    "==> nxge_dma_mem_alloc: (not mapped)"
			    "length %lu (0x%x) "
			    "free contig kaddrp $%p "
			    "va_to_pa $%p",
			    length, length,
			    kaddrp,
			    va_to_pa(kaddrp)));

			contig_mem_free((void *)kaddrp, length);
			ddi_dma_free_handle(&dma_p->dma_handle);

			dma_p->dma_handle = NULL;
			dma_p->acc_handle = NULL;
			dma_p->alength = 0;
			dma_p->kaddrp = NULL;

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		if (dma_p->ncookies != 1 ||
		    (dma_p->dma_cookie.dmac_laddress == 0)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
			    "cookie or "
			    "dmac_laddress is NULL $%p size %d "
			    " (status 0x%x ncookies %d.)",
			    dma_p->dma_cookie.dmac_laddress,
			    dma_p->dma_cookie.dmac_size,
			    ddi_status,
			    dma_p->ncookies));

			contig_mem_free((void *)kaddrp, length);
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			ddi_dma_free_handle(&dma_p->dma_handle);

			dma_p->alength = 0;
			dma_p->dma_handle = NULL;
			dma_p->acc_handle = NULL;
			dma_p->kaddrp = NULL;

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		break;

#else
	case B_TRUE:
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
#endif
	}

	dma_p->kaddrp = kaddrp;
	dma_p->last_kaddrp = (unsigned char *)kaddrp +
	    dma_p->alength - RXBUF_64B_ALIGNED;
	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
	dma_p->last_ioaddr_pp =
	    (unsigned char *)dma_p->dma_cookie.dmac_laddress +
	    dma_p->alength - RXBUF_64B_ALIGNED;

	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);

#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	dma_p->orig_ioaddr_pp =
	    (unsigned char *)dma_p->dma_cookie.dmac_laddress;
	dma_p->orig_alength = length;
	dma_p->orig_kaddrp = kaddrp;
	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
#endif

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
	    "dma buffer allocated: dma_p $%p "
	    "return dmac_laddress from cookie $%p cookie dmac_size %d "
	    "dma_p->ioaddr_p $%p "
	    "dma_p->orig_ioaddr_p $%p "
	    "orig_vatopa $%p "
	    "alength %d (0x%x) "
	    "kaddrp $%p "
	    "length %d (0x%x)",
	    dma_p,
	    dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
	    dma_p->ioaddr_pp,
	    dma_p->orig_ioaddr_pp,
	    dma_p->orig_vatopa,
	    dma_p->alength, dma_p->alength,
	    kaddrp,
	    length, length));

	return (NXGE_OK);
}
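/*
 * Minimal usage sketch for nxge_dma_mem_alloc() above and its
 * counterpart nxge_dma_mem_free() below (illustrative only; "my_dmap"
 * and "my_size" are hypothetical names, not driver symbols):
 *
 *	p_nxge_dma_common_t my_dmap;
 *
 *	my_dmap = KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
 *	my_dmap->contig_alloc_type = B_FALSE;
 *	if (nxge_dma_mem_alloc(nxgep, nxge_force_dma, &nxge_desc_dma_attr,
 *	    my_size, &nxge_dev_desc_dma_acc_attr,
 *	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, my_dmap) != NXGE_OK) {
 *		KMEM_FREE(my_dmap, sizeof (nxge_dma_common_t));
 *		return (NXGE_ERROR);
 *	}
 *	... use my_dmap->kaddrp and my_dmap->ioaddr_pp ...
 *	nxge_dma_mem_free(my_dmap);
 *	KMEM_FREE(my_dmap, sizeof (nxge_dma_common_t));
 */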
static void
nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
{
	if (dma_p->dma_handle != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
	}

	if (dma_p->acc_handle != NULL) {
		ddi_dma_mem_free(&dma_p->acc_handle);
		dma_p->acc_handle = NULL;
		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
	}

#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (dma_p->contig_alloc_type &&
	    dma_p->orig_kaddrp && dma_p->orig_alength) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
		    "kaddrp $%p (orig_kaddrp $%p) "
		    "mem type %d "
		    "orig_alength %d "
		    "alength 0x%x (%d)",
		    dma_p->kaddrp,
		    dma_p->orig_kaddrp,
		    dma_p->contig_alloc_type,
		    dma_p->orig_alength,
		    dma_p->alength, dma_p->alength));

		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
		dma_p->orig_alength = 0;
		dma_p->orig_kaddrp = NULL;
		dma_p->contig_alloc_type = B_FALSE;
	}
#endif
	dma_p->kaddrp = NULL;
	dma_p->alength = 0;
}

/*
 * nxge_m_start() -- start transmitting and receiving.
 *
 * This function is called by the MAC layer when the first
 * stream is opened, to prepare the hardware for sending
 * and receiving packets.
 */
static int
nxge_m_start(void *arg)
{
	p_nxge_t	nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));

	MUTEX_ENTER(nxgep->genlock);
	if (nxge_init(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_start: initialization failed"));
		MUTEX_EXIT(nxgep->genlock);
		return (EIO);
	}

	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
		goto nxge_m_start_exit;
	/*
	 * Start the timer that checks for system errors and tx hangs.
	 */
	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
	    NXGE_CHECK_TIMER);

	nxgep->link_notify = B_TRUE;

	nxgep->nxge_mac_state = NXGE_MAC_STARTED;

nxge_m_start_exit:
	MUTEX_EXIT(nxgep->genlock);
	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
	return (0);
}

/*
 * nxge_m_stop(): stop transmitting and receiving.
 */
static void
nxge_m_stop(void *arg)
{
	p_nxge_t	nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	MUTEX_ENTER(nxgep->genlock);
	nxge_uninit(nxgep);

	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;

	MUTEX_EXIT(nxgep->genlock);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
}

static int
nxge_m_unicst(void *arg, const uint8_t *macaddr)
{
	p_nxge_t	nxgep = (p_nxge_t)arg;
	struct ether_addr addrp;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));

	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
	if (nxge_set_mac_addr(nxgep, &addrp)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_unicst: set unicast failed"));
		return (EINVAL);
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));

	return (0);
}

static int
nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	p_nxge_t	nxgep = (p_nxge_t)arg;
	struct ether_addr addrp;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_m_multicst: add %d", add));

	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
	if (add) {
		if (nxge_add_mcast_addr(nxgep, &addrp)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_multicst: add multicast failed"));
			return (EINVAL);
		}
	} else {
		if (nxge_del_mcast_addr(nxgep, &addrp)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_multicst: del multicast failed"));
			return (EINVAL);
		}
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));

	return (0);
}
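/*
 * The nxge_m_*() functions in this section are GLDv3 entry points; each
 * returns 0 on success or an errno value.  They are wired into the MAC
 * layer through a mac_callbacks_t elsewhere in this file; the sketch
 * below shows the shape of that registration (field order per mac(9E);
 * shown for orientation only, not quoted from this file):
 *
 *	static mac_callbacks_t nxge_m_callbacks = {
 *		NXGE_M_CALLBACK_FLAGS,
 *		nxge_m_stat, nxge_m_start, nxge_m_stop,
 *		nxge_m_promisc, nxge_m_multicst, nxge_m_unicst,
 *		nxge_m_tx, nxge_m_resources, nxge_m_ioctl,
 *		nxge_m_getcapab
 *	};
 */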
static int
nxge_m_promisc(void *arg, boolean_t on)
{
	p_nxge_t	nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_m_promisc: on %d", on));

	if (nxge_set_promisc(nxgep, on)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_promisc: set promisc failed"));
		return (EINVAL);
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "<== nxge_m_promisc: on %d", on));

	return (0);
}

static void
nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	p_nxge_t	nxgep = (p_nxge_t)arg;
	struct iocblk	*iocp;
	boolean_t	need_privilege;
	int		err;
	int		cmd;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));

	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
		return;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;
	case LB_SET_MODE:
		break;

	case ND_GET:
		need_privilege = B_FALSE;
		break;
	case ND_SET:
		break;

	case NXGE_GET_MII:
	case NXGE_PUT_MII:
	case NXGE_GET64:
	case NXGE_PUT64:
	case NXGE_GET_TX_RING_SZ:
	case NXGE_GET_TX_DESC:
	case NXGE_TX_SIDE_RESET:
	case NXGE_RX_SIDE_RESET:
	case NXGE_GLOBAL_RESET:
	case NXGE_RESET_MAC:
	case NXGE_TX_REGS_DUMP:
	case NXGE_RX_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
	case NXGE_VIR_INT_REGS_DUMP:
	case NXGE_PUT_TCAM:
	case NXGE_GET_TCAM:
	case NXGE_RTRACE:
	case NXGE_RDUMP:
		need_privilege = B_FALSE;
		break;
	case NXGE_INJECT_ERR:
		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
		nxge_err_inject(nxgep, wq, mp);
		break;
	}

	if (need_privilege) {
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_ioctl: no priv"));
			return;
		}
	}

	switch (cmd) {
	case ND_GET:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command"));
		/* FALLTHROUGH */
	case ND_SET:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command"));
		nxge_param_ioctl(nxgep, wq, mp, iocp);
		break;

	case LB_GET_MODE:
	case LB_SET_MODE:
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
		break;

	case NXGE_GET_MII:
	case NXGE_PUT_MII:
	case NXGE_PUT_TCAM:
	case NXGE_GET_TCAM:
	case NXGE_GET64:
	case NXGE_PUT64:
	case NXGE_GET_TX_RING_SZ:
	case NXGE_GET_TX_DESC:
	case NXGE_TX_SIDE_RESET:
	case NXGE_RX_SIDE_RESET:
	case NXGE_GLOBAL_RESET:
	case NXGE_RESET_MAC:
	case NXGE_TX_REGS_DUMP:
	case NXGE_RX_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
	case NXGE_VIR_INT_REGS_DUMP:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_ioctl: cmd 0x%x", cmd));
		nxge_hw_ioctl(nxgep, wq, mp, iocp);
		break;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
}

extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);

static void
nxge_m_resources(void *arg)
{
	p_nxge_t	nxgep = arg;
	mac_rx_fifo_t	mrf;
	p_rx_rcr_rings_t rcr_rings;
	p_rx_rcr_ring_t	*rcr_p;
	uint32_t	i, ndmas;
	nxge_status_t	status;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources"));

	MUTEX_ENTER(nxgep->genlock);

	/*
	 * CR 6492541: check whether drv_state has been initialized;
	 * if not, call nxge_init().
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK)
			goto nxge_m_resources_exit;
	}

	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = nxge_rx_hw_blank;
	mrf.mrf_arg = (void *)nxgep;

	mrf.mrf_normal_blank_time = 128;
	mrf.mrf_normal_pkt_count = 8;
	rcr_rings = nxgep->rx_rcr_rings;
	rcr_p = rcr_rings->rcr_rings;
	ndmas = rcr_rings->ndmas;

	/*
	 * Export our receive resources to the MAC layer.
	 */
	for (i = 0; i < ndmas; i++) {
		((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle =
		    mac_resource_add(nxgep->mach,
		    (mac_resource_t *)&mrf);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_resources: vdma %d dma %d "
		    "rcrptr 0x%016llx mac_handle 0x%016llx",
		    i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc,
		    rcr_p[i],
		    ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle));
	}

nxge_m_resources_exit:
	MUTEX_EXIT(nxgep->genlock);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources"));
}

static void
nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory)
{
	p_nxge_mmac_stats_t	mmac_stats;
	int			i;
	nxge_mmac_t		*mmac_info;

	mmac_info = &nxgep->nxge_mmac_info;

	mmac_stats = &nxgep->statsp->mmac_stats;
	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;

	for (i = 0; i < ETHERADDRL; i++) {
		if (factory) {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->factory_mac_pool[slot][
			    (ETHERADDRL-1) - i];
		} else {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->mac_pool[slot].addr[
			    (ETHERADDRL - 1) - i];
		}
	}
}

/*
 * nxge_altmac_set() -- Set an alternate MAC address
 */
static int
nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot)
{
	uint8_t			addrn;
	uint8_t			portn;
	npi_mac_addr_t		altmac;
	hostinfo_t		mac_rdc;
	p_nxge_class_pt_cfg_t	clscfgp;

	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);

	portn = nxgep->mac.portnum;
	addrn = (uint8_t)slot - 1;

	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn,
	    addrn, &altmac) != NPI_SUCCESS)
		return (EIO);

	/*
	 * Set the rdc table number for the host info entry
	 * for this mac address slot.
	 */
	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
	mac_rdc.value = 0;
	mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl;
	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;

	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
		return (EIO);
	}

	/*
	 * Enable comparison with the alternate MAC address.
	 * While the first alternate addr is enabled by bit 1 of register
	 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
	 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
	 * accordingly before calling npi_mac_altaddr_enable.
	 */
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
	    != NPI_SUCCESS)
		return (EIO);

	return (0);
}

/*
 * nxge_m_mmac_add() - find an unused address slot, set the address
 * value to the one specified, and enable the port to start filtering on
 * the new MAC address.  Returns 0 on success.
 */
static int
nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
{
	p_nxge_t	nxgep = arg;
	mac_addr_slot_t	slot;
	nxge_mmac_t	*mmac_info;
	int		err;
	nxge_status_t	status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (mmac_info->naddrfree == 0) {
		mutex_exit(nxgep->genlock);
		return (ENOSPC);
	}
	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
	    maddr->mma_addrlen)) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	/*
	 * Search for the first available slot.  Because naddrfree
	 * is not zero, we are guaranteed to find one.
	 * Slot 0 is for the unique (primary) MAC; the first alternate
	 * MAC slot is slot 1.
	 * Each of the first two ports of Neptune has 16 alternate
	 * MAC slots but only the first 7 (or 15) slots have assigned factory
	 * MAC addresses.  We first search among the slots without bundled
	 * factory MACs.  If we fail to find one in that range, then we
	 * search the slots with bundled factory MACs.  A factory MAC
	 * will be wasted while the slot is used with a user MAC address,
	 * but the slot can be used by the factory MAC again after calling
	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
	 */
	if (mmac_info->num_factory_mmac < mmac_info->num_mmac) {
		for (slot = mmac_info->num_factory_mmac + 1;
		    slot <= mmac_info->num_mmac; slot++) {
			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
				break;
		}
		if (slot > mmac_info->num_mmac) {
			for (slot = 1; slot <= mmac_info->num_factory_mmac;
			    slot++) {
				if (!(mmac_info->mac_pool[slot].flags
				    & MMAC_SLOT_USED))
					break;
			}
		}
	} else {
		for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
				break;
		}
	}
	ASSERT(slot <= mmac_info->num_mmac);
	if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) {
		mutex_exit(nxgep->genlock);
		return (err);
	}
	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
	mmac_info->naddrfree--;
	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);

	maddr->mma_slot = slot;

	mutex_exit(nxgep->genlock);
	return (0);
}

/*
 * This function reserves an unused slot and programs the slot and the HW
 * with a factory MAC address.
 */
static int
nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr)
{
	p_nxge_t	nxgep = arg;
	mac_addr_slot_t	slot;
	nxge_mmac_t	*mmac_info;
	int		err;
	nxge_status_t	status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (mmac_info->naddrfree == 0) {
		mutex_exit(nxgep->genlock);
		return (ENOSPC);
	}

	slot = maddr->mma_slot;
	if (slot == -1) {	/* -1: Take the first available slot */
		for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
				break;
		}
		if (slot > mmac_info->num_factory_mmac) {
			mutex_exit(nxgep->genlock);
			return (ENOSPC);
		}
	}
	if (slot < 1 || slot > mmac_info->num_factory_mmac) {
		/*
		 * Do not support a factory MAC at a slot greater than
		 * num_factory_mmac even when there are available factory
		 * MAC addresses, because the alternate MACs are bundled with
		 * slot[1] through slot[num_factory_mmac].
		 */
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		mutex_exit(nxgep->genlock);
		return (EBUSY);
	}
	/* Verify the address to be reserved */
	if (!mac_unicst_verify(nxgep->mach,
	    mmac_info->factory_mac_pool[slot], ETHERADDRL)) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	if (err = nxge_altmac_set(nxgep,
	    mmac_info->factory_mac_pool[slot], slot)) {
		mutex_exit(nxgep->genlock);
		return (err);
	}
	bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL);
	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
	mmac_info->naddrfree--;

	nxge_mmac_kstat_update(nxgep, slot, B_TRUE);
	mutex_exit(nxgep->genlock);

	/* Pass info back to the caller */
	maddr->mma_slot = slot;
	maddr->mma_addrlen = ETHERADDRL;
	maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR;

	return (0);
}

/*
 * Remove the specified MAC address and update the HW not to filter
 * the MAC address anymore.
 */
static int
nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
{
	p_nxge_t	nxgep = arg;
	nxge_mmac_t	*mmac_info;
	uint8_t		addrn;
	uint8_t		portn;
	int		err = 0;
	nxge_status_t	status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}

	portn = nxgep->mac.portnum;
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
		    == NPI_SUCCESS) {
			mmac_info->naddrfree++;
			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
			/*
			 * Regardless of whether the MAC we just stopped
			 * filtering is a user addr or a factory addr, we must
			 * set the MMAC_VENDOR_ADDR flag if this slot has an
			 * associated factory MAC to indicate that a factory
			 * MAC is available.
			 */
			if (slot <= mmac_info->num_factory_mmac) {
				mmac_info->mac_pool[slot].flags
				    |= MMAC_VENDOR_ADDR;
			}
			/*
			 * Clear mac_pool[slot].addr so that kstat shows 0
			 * alternate MAC address if the slot is not used.
			 * (But nxge_m_mmac_get returns the factory MAC even
			 * when the slot is not used!)
			 */
			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
		} else {
			err = EIO;
		}
	} else {
		err = EINVAL;
	}

	mutex_exit(nxgep->genlock);
	return (err);
}


/*
 * Modify a MAC address added by nxge_m_mmac_add() or nxge_m_mmac_reserve().
 */
static int
nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
{
	p_nxge_t	nxgep = arg;
	mac_addr_slot_t	slot;
	nxge_mmac_t	*mmac_info;
	int		err = 0;
	nxge_status_t	status;

	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
	    maddr->mma_addrlen))
		return (EINVAL);

	slot = maddr->mma_slot;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
		    == 0) {
			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
			    ETHERADDRL);
			/*
			 * Assume that the MAC passed down from the caller
			 * is not a factory MAC address (the user should
			 * call mmac_remove followed by mmac_reserve to use
			 * the factory MAC for this slot).
			 */
			mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
		}
	} else {
		err = EINVAL;
	}
	mutex_exit(nxgep->genlock);
	return (err);
}

/*
 * nxge_m_mmac_get() - Get the MAC address and other information
 * related to the slot.  mma_flags should be set to 0 in the call.
 * Note: although kstat shows the MAC address as zero when a slot is
 * not used, Crossbow expects nxge_m_mmac_get to copy the factory MAC
 * to the caller as long as the slot is not using a user MAC address.
 * The following table shows the rules:
 *
 *				USED	VENDOR	mma_addr
 * ------------------------------------------------------------
 * (1) Slot uses a user MAC:	yes	no	user MAC
 * (2) Slot uses a factory MAC:	yes	yes	factory MAC
 * (3) Slot is not used but is
 *     factory MAC capable:	no	yes	factory MAC
 * (4) Slot is not used and is
 *     not factory MAC capable:	no	no	0
 * ------------------------------------------------------------
 */
static int
nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
{
	nxge_t		*nxgep = arg;
	mac_addr_slot_t	slot;
	nxge_mmac_t	*mmac_info;
	nxge_status_t	status;

	slot = maddr->mma_slot;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;

	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	maddr->mma_flags = 0;
	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
		maddr->mma_flags |= MMAC_SLOT_USED;

	if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
		maddr->mma_flags |= MMAC_VENDOR_ADDR;
		bcopy(mmac_info->factory_mac_pool[slot],
		    maddr->mma_addr, ETHERADDRL);
		maddr->mma_addrlen = ETHERADDRL;
	} else {
		if (maddr->mma_flags & MMAC_SLOT_USED) {
			bcopy(mmac_info->mac_pool[slot].addr,
			    maddr->mma_addr, ETHERADDRL);
			maddr->mma_addrlen = ETHERADDRL;
		} else {
			bzero(maddr->mma_addr, ETHERADDRL);
			maddr->mma_addrlen = 0;
		}
	}
	mutex_exit(nxgep->genlock);
	return (0);
}


static boolean_t
nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	nxge_t		*nxgep = arg;
	uint32_t	*txflags = cap_data;
	multiaddress_capab_t *mmacp = cap_data;

	switch (cap) {
	case MAC_CAPAB_HCKSUM:
		*txflags = HCKSUM_INET_PARTIAL;
		break;
	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in; simply returning
		 * B_TRUE stating that we support polling is sufficient.
		 */
		break;

	case MAC_CAPAB_MULTIADDRESS:
		mutex_enter(nxgep->genlock);

		mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
		mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
		mmacp->maddr_flag = 0;	/* 0 is required by PSARC 2006/265 */
		/*
		 * maddr_handle is the driver's private data, passed back to
		 * entry point functions as arg.
		 */
		mmacp->maddr_handle = nxgep;
		mmacp->maddr_add = nxge_m_mmac_add;
		mmacp->maddr_remove = nxge_m_mmac_remove;
		mmacp->maddr_modify = nxge_m_mmac_modify;
		mmacp->maddr_get = nxge_m_mmac_get;
		mmacp->maddr_reserve = nxge_m_mmac_reserve;

		mutex_exit(nxgep->genlock);
		break;
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Module loading and removing entry points.
 */

static struct cb_ops nxge_cb_ops = {
	nodev,			/* cb_open */
	nodev,			/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_str */
	D_MP,			/* cb_flag */
	CB_REV,			/* rev */
	nodev,			/* int (*cb_aread)() */
	nodev			/* int (*cb_awrite)() */
};

static struct dev_ops nxge_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	nulldev,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	nxge_attach,		/* devo_attach */
	nxge_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&nxge_cb_ops,		/* devo_cb_ops */
	(struct bus_ops *)NULL,	/* devo_bus_ops */
	ddi_power		/* devo_power */
};

extern struct mod_ops mod_driverops;

#define	NXGE_DESC_VER	"Sun NIU 10Gb Ethernet %I%"

/*
 * Module linkage information for the kernel.
 */
static struct modldrv nxge_modldrv = {
	&mod_driverops,
	NXGE_DESC_VER,
	&nxge_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *) &nxge_modldrv, NULL
};

int
_init(void)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
	mac_init_ops(&nxge_dev_ops, "nxge");
	status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
	if (status != 0) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "failed to init device soft state"));
		goto _init_exit;
	}

	status = mod_install(&modlinkage);
	if (status != 0) {
		ddi_soft_state_fini(&nxge_list);
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
		goto _init_exit;
	}

	MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);

_init_exit:
	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));

	return (status);
}

int
_fini(void)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));

	if (nxge_mblks_pending)
		return (EBUSY);

	status = mod_remove(&modlinkage);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((NULL, MOD_CTL,
		    "Module removal failed 0x%08x",
		    status));
		goto _fini_exit;
	}

	mac_fini_ops(&nxge_dev_ops);

	ddi_soft_state_fini(&nxge_list);

	MUTEX_DESTROY(&nxge_common_lock);

_fini_exit:
	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));

	return (status);
}

int
_info(struct modinfo *modinfop)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
	status = mod_info(&modlinkage, modinfop);
	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));

	return (status);
}
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs(p_nxge_t nxgep)
{

	int		intr_types;
	int		type = 0;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));

	nxgep->nxge_intr_type.intr_registered = B_FALSE;
	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
	nxgep->nxge_intr_type.msi_intx_cnt = 0;
	nxgep->nxge_intr_type.intr_added = 0;
	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
	nxgep->nxge_intr_type.intr_type = 0;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	} else if (nxge_msi_enable) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip,
	    &intr_types)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}
	nxgep->nxge_intr_type.intr_types = intr_types;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * Solaris MSI-X is not supported yet; use MSI for now.
	 * nxge_msi_enable (1):
	 *	1 - MSI		2 - MSI-X	others - FIXED
	 */
	switch (nxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "use fixed (intx emulation) type %08x",
		    type));
		break;

	case 2:
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
		break;

	case 1:
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
	}

	nxgep->nxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    nxgep->nxge_intr_type.niu_msi_enable) {
		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_add_intrs: "
			    " nxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "interrupts registered : type %d", type));
			nxgep->nxge_intr_type.intr_registered = B_TRUE;

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "\nAdded advanced nxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!nxgep->nxge_intr_type.intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
		    "failed to register interrupts"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
	return (status);
}
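/*
 * nxge_add_soft_intrs() below registers a single low-priority soft
 * interrupt whose handler, nxge_reschedule, restarts transmission after
 * a ring was flow-controlled.  A hedged sketch of how such a handler is
 * triggered (standard DDI soft interrupt usage; the call site and the
 * "resched_needed" flag shown here are illustrative, not quoted from
 * this file):
 *
 *	if (nxgep->resched_needed && !nxgep->resched_running) {
 *		nxgep->resched_running = B_TRUE;
 *		ddi_trigger_softintr(nxgep->resched_id);
 *	}
 */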

/*ARGSUSED*/
static nxge_status_t
nxge_add_soft_intrs(p_nxge_t nxgep)
{
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));

	nxgep->resched_id = NULL;
	nxgep->resched_running = B_FALSE;
	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
	    &nxgep->resched_id,
	    NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
		    "ddi_add_softintr failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));

	return (status);
}

static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)
{
	int		intr_type;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intr_type = intrp->intr_type;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		return (nxge_add_intrs_adv_type(nxgep, intr_type));

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));

	default:
		return (NXGE_ERROR);
	}
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI vector counts must be a power of 2; round down */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}
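
	/*
	 * DDI_INTR_ALLOC_STRICT asks ddi_intr_alloc() for exactly the
	 * requested count (all or nothing), which is what legacy fixed
	 * interrupts need; DDI_INTR_ALLOC_NORMAL lets the framework
	 * hand back fewer MSI/MSI-X vectors than were requested.
	 */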
	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	case NEPTUNE:
	case NEPTUNE_2:
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type:nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "nldevs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual,
	    intrp->msi_intx_cnt,
	    intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	(void) nxge_intr_ldgv_init(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}
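
/*
 * Fixed-interrupt (INTx) registration.  The flow mirrors
 * nxge_add_intrs_adv_type() above: size and allocate the handle table,
 * fetch the priority, bind logical device groups to vectors, and add
 * one handler per group.  One visible difference: SID_DATA() is not
 * programmed for N2_NIU, presumably because the system interrupt data
 * is managed by the sun4v hypervisor rather than by this driver.
 */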

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	case NEPTUNE:
	case NEPTUNE_2:
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		if (nxgep->niu_type != N2_NIU) {
			ldgp->intdata = SID_DATA(ldgp->func, x);
		}

		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "1-1 int handler(%d) ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "shared ldv %d int handler(%d) ldv %d ldg %d "
			    "arg1 0x%016llx arg2 0x%016llx\n",
			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		}

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type_fix: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	status = nxge_intr_ldgv_init(nxgep);
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));

	return (status);
}
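
/*
 * Undo nxge_add_intrs() in reverse order: disable the vectors, remove
 * the handlers, free the interrupts, and only then release the handle
 * table and the logical device group state.
 */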
static void
nxge_remove_intrs(p_nxge_t nxgep)
{
	int		i, inum;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_remove_intrs: interrupts not registered"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum,
			    intrp->msi_intx_cnt,
			    intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) nxge_ldgv_uninit(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
}

/*ARGSUSED*/
static void
nxge_remove_soft_intrs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
	if (nxgep->resched_id) {
		ddi_remove_softintr(nxgep->resched_id);
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_remove_soft_intrs: removed"));
		nxgep->resched_id = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
}
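
/*
 * When ddi_intr_get_cap() reported DDI_INTR_FLAG_BLOCK, the vectors can
 * only be enabled and disabled as a group, so the block variants must
 * be used; otherwise each handle is toggled individually.
 */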

/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;
	int		status;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
		/* Record success, as the per-vector path does below */
		if (status == DDI_SUCCESS) {
			intrp->intr_enabled = B_TRUE;
		}
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}
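
/*
 * Register this instance with the GLDv3 framework.  m_max_sdu is the
 * current maximum frame size less the Ethernet header, the FCS, and 4
 * additional bytes (presumably the VLAN tag allowance).
 */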
static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t	*macp;
	int		status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = nxgep->mac.maxframesize -
	    sizeof (struct ether_header) - ETHERFCSL - 4;

	status = mac_register(macp, &nxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}

void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t		size;
	mblk_t		*nmp;
	uint8_t		blk_id;
	uint8_t		chan;
	uint32_t	err_id;
	err_inject_t	*eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;
	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
	switch (blk_id) {
	case MAC_BLK_ID:
		break;
	case TXMAC_BLK_ID:
		break;
	case RXMAC_BLK_ID:
		break;
	case MIF_BLK_ID:
		break;
	case IPP_BLK_ID:
		nxge_ipp_inject_err(nxgep, err_id);
		break;
	case TXC_BLK_ID:
		nxge_txc_inject_err(nxgep, err_id);
		break;
	case TXDMA_BLK_ID:
		nxge_txdma_inject_err(nxgep, err_id, chan);
		break;
	case RXDMA_BLK_ID:
		nxge_rxdma_inject_err(nxgep, err_id, chan);
		break;
	case ZCP_BLK_ID:
		nxge_zcp_inject_err(nxgep, err_id);
		break;
	case ESPC_BLK_ID:
		break;
	case FFLP_BLK_ID:
		break;
	case PHY_BLK_ID:
		break;
	case ETHER_SERDES_BLK_ID:
		break;
	case PCIE_SERDES_BLK_ID:
		break;
	case VIR_BLK_ID:
		break;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

	miocack(wq, mp, (int)size, 0);
}

static int
nxge_init_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

	p_dip = nxgep->p_dip;
	MUTEX_ENTER(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_dev:func # %d",
	    nxgep->function_num));
	/*
	 * Loop through the existing per-Neptune hardware list.
	 */
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "hw_p $%p parent dip $%p",
		    nxgep->function_num,
		    hw_p,
		    p_dip));
		if (hw_p->parent_devp == p_dip) {
			nxgep->nxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->nxge_p[nxgep->function_num] = nxgep;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_init_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num,
		    p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}
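
/*
 * Drop this instance from the shared per-Neptune hardware list.  The
 * list entry and its locks are freed only when the last function
 * sharing the parent devinfo node detaches.
 */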
static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p, h_hw_p;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));

			nxgep->nxge_hw_p = NULL;
			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				MUTEX_DESTROY(&hw_p->nxge_mii_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num,
				    hw_p,
				    p_dip,
				    hw_p->ndevs));

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}