1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * SunOs MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver. 30 */ 31 #include <sys/nxge/nxge_impl.h> 32 #include <sys/pcie.h> 33 34 uint32_t nxge_use_partition = 0; /* debug partition flag */ 35 uint32_t nxge_dma_obp_props_only = 1; /* use obp published props */ 36 uint32_t nxge_use_rdc_intr = 1; /* debug to assign rdc intr */ 37 /* 38 * until MSIX supported, assume msi, use 2 for msix 39 */ 40 uint32_t nxge_msi_enable = 1; /* debug: turn msi off */ 41 42 /* 43 * Globals: tunable parameters (/etc/system or adb) 44 * 45 */ 46 uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT; 47 uint32_t nxge_rbr_spare_size = 0; 48 uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT; 49 uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT; 50 boolean_t nxge_no_msg = B_TRUE; /* control message display */ 51 uint32_t nxge_no_link_notify = 0; /* control DL_NOTIFY */ 52 uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX; 53 uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN; 54 uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN; 55 uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU; 56 boolean_t nxge_jumbo_enable = B_FALSE; 57 uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT; 58 uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD; 59 nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL; 60 61 /* 62 * Debugging flags: 63 * nxge_no_tx_lb : transmit load balancing 64 * nxge_tx_lb_policy: 0 - TCP port (default) 65 * 3 - DEST MAC 66 */ 67 uint32_t nxge_no_tx_lb = 0; 68 uint32_t nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP; 69 70 /* 71 * Add tunable to reduce the amount of time spent in the 72 * ISR doing Rx Processing. 73 */ 74 uint32_t nxge_max_rx_pkts = 1024; 75 76 /* 77 * Tunables to manage the receive buffer blocks. 78 * 79 * nxge_rx_threshold_hi: copy all buffers. 80 * nxge_rx_bcopy_size_type: receive buffer block size type. 81 * nxge_rx_threshold_lo: copy only up to tunable block size type. 82 */ 83 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6; 84 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0; 85 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3; 86 87 rtrace_t npi_rtracebuf; 88 89 #if defined(sun4v) 90 /* 91 * Hypervisor N2/NIU services information. 92 */ 93 static hsvc_info_t niu_hsvc = { 94 HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER, 95 NIU_MINOR_VER, "nxge" 96 }; 97 #endif 98 99 /* 100 * Function Prototypes 101 */ 102 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t); 103 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t); 104 static void nxge_unattach(p_nxge_t); 105 106 #if NXGE_PROPERTY 107 static void nxge_remove_hard_properties(p_nxge_t); 108 #endif 109 110 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t); 111 112 static nxge_status_t nxge_setup_mutexes(p_nxge_t); 113 static void nxge_destroy_mutexes(p_nxge_t); 114 115 static nxge_status_t nxge_map_regs(p_nxge_t nxgep); 116 static void nxge_unmap_regs(p_nxge_t nxgep); 117 #ifdef NXGE_DEBUG 118 static void nxge_test_map_regs(p_nxge_t nxgep); 119 #endif 120 121 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep); 122 static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep); 123 static void nxge_remove_intrs(p_nxge_t nxgep); 124 static void nxge_remove_soft_intrs(p_nxge_t nxgep); 125 126 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep); 127 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t); 128 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t); 129 static void nxge_intrs_enable(p_nxge_t nxgep); 130 static void nxge_intrs_disable(p_nxge_t nxgep); 131 132 static void nxge_suspend(p_nxge_t); 133 static nxge_status_t nxge_resume(p_nxge_t); 134 135 static nxge_status_t nxge_setup_dev(p_nxge_t); 136 static void nxge_destroy_dev(p_nxge_t); 137 138 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t); 139 static void nxge_free_mem_pool(p_nxge_t); 140 141 static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t); 142 static void nxge_free_rx_mem_pool(p_nxge_t); 143 144 static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t); 145 static void nxge_free_tx_mem_pool(p_nxge_t); 146 147 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t, 148 struct ddi_dma_attr *, 149 size_t, ddi_device_acc_attr_t *, uint_t, 150 p_nxge_dma_common_t); 151 152 static void nxge_dma_mem_free(p_nxge_dma_common_t); 153 154 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t, 155 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 156 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 157 158 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t, 159 p_nxge_dma_common_t *, size_t); 160 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 161 162 static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t, 163 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 164 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 165 166 static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t, 167 p_nxge_dma_common_t *, 168 size_t); 169 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 170 171 static int nxge_init_common_dev(p_nxge_t); 172 static void nxge_uninit_common_dev(p_nxge_t); 173 174 /* 175 * The next declarations are for the GLDv3 interface. 176 */ 177 static int nxge_m_start(void *); 178 static void nxge_m_stop(void *); 179 static int nxge_m_unicst(void *, const uint8_t *); 180 static int nxge_m_multicst(void *, boolean_t, const uint8_t *); 181 static int nxge_m_promisc(void *, boolean_t); 182 static void nxge_m_ioctl(void *, queue_t *, mblk_t *); 183 static void nxge_m_resources(void *); 184 mblk_t *nxge_m_tx(void *arg, mblk_t *); 185 static nxge_status_t nxge_mac_register(p_nxge_t); 186 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr, 187 mac_addr_slot_t slot); 188 static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, 189 boolean_t factory); 190 static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr); 191 static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr); 192 static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot); 193 static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr); 194 static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr); 195 196 #define NXGE_NEPTUNE_MAGIC 0x4E584745UL 197 #define MAX_DUMP_SZ 256 198 199 #define NXGE_M_CALLBACK_FLAGS (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB) 200 201 static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *); 202 static mac_callbacks_t nxge_m_callbacks = { 203 NXGE_M_CALLBACK_FLAGS, 204 nxge_m_stat, 205 nxge_m_start, 206 nxge_m_stop, 207 nxge_m_promisc, 208 nxge_m_multicst, 209 nxge_m_unicst, 210 nxge_m_tx, 211 nxge_m_resources, 212 nxge_m_ioctl, 213 nxge_m_getcapab 214 }; 215 216 void 217 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *); 218 219 /* 220 * These global variables control the message 221 * output. 222 */ 223 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG; 224 uint64_t nxge_debug_level = 0; 225 226 /* 227 * This list contains the instance structures for the Neptune 228 * devices present in the system. The lock exists to guarantee 229 * mutually exclusive access to the list. 230 */ 231 void *nxge_list = NULL; 232 233 void *nxge_hw_list = NULL; 234 nxge_os_mutex_t nxge_common_lock; 235 236 nxge_os_mutex_t nxge_mii_lock; 237 static uint32_t nxge_mii_lock_init = 0; 238 nxge_os_mutex_t nxge_mdio_lock; 239 static uint32_t nxge_mdio_lock_init = 0; 240 241 extern uint64_t npi_debug_level; 242 243 extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *); 244 extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *); 245 extern nxge_status_t nxge_ldgv_uninit(p_nxge_t); 246 extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t); 247 extern void nxge_fm_init(p_nxge_t, 248 ddi_device_acc_attr_t *, 249 ddi_device_acc_attr_t *, 250 ddi_dma_attr_t *); 251 extern void nxge_fm_fini(p_nxge_t); 252 extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t); 253 254 /* 255 * Count used to maintain the number of buffers being used 256 * by Neptune instances and loaned up to the upper layers. 257 */ 258 uint32_t nxge_mblks_pending = 0; 259 260 /* 261 * Device register access attributes for PIO. 262 */ 263 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = { 264 DDI_DEVICE_ATTR_V0, 265 DDI_STRUCTURE_LE_ACC, 266 DDI_STRICTORDER_ACC, 267 }; 268 269 /* 270 * Device descriptor access attributes for DMA. 271 */ 272 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = { 273 DDI_DEVICE_ATTR_V0, 274 DDI_STRUCTURE_LE_ACC, 275 DDI_STRICTORDER_ACC 276 }; 277 278 /* 279 * Device buffer access attributes for DMA. 280 */ 281 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = { 282 DDI_DEVICE_ATTR_V0, 283 DDI_STRUCTURE_BE_ACC, 284 DDI_STRICTORDER_ACC 285 }; 286 287 ddi_dma_attr_t nxge_desc_dma_attr = { 288 DMA_ATTR_V0, /* version number. */ 289 0, /* low address */ 290 0xffffffffffffffff, /* high address */ 291 0xffffffffffffffff, /* address counter max */ 292 #ifndef NIU_PA_WORKAROUND 293 0x100000, /* alignment */ 294 #else 295 0x2000, 296 #endif 297 0xfc00fc, /* dlim_burstsizes */ 298 0x1, /* minimum transfer size */ 299 0xffffffffffffffff, /* maximum transfer size */ 300 0xffffffffffffffff, /* maximum segment size */ 301 1, /* scatter/gather list length */ 302 (unsigned int) 1, /* granularity */ 303 0 /* attribute flags */ 304 }; 305 306 ddi_dma_attr_t nxge_tx_dma_attr = { 307 DMA_ATTR_V0, /* version number. */ 308 0, /* low address */ 309 0xffffffffffffffff, /* high address */ 310 0xffffffffffffffff, /* address counter max */ 311 #if defined(_BIG_ENDIAN) 312 0x2000, /* alignment */ 313 #else 314 0x1000, /* alignment */ 315 #endif 316 0xfc00fc, /* dlim_burstsizes */ 317 0x1, /* minimum transfer size */ 318 0xffffffffffffffff, /* maximum transfer size */ 319 0xffffffffffffffff, /* maximum segment size */ 320 5, /* scatter/gather list length */ 321 (unsigned int) 1, /* granularity */ 322 0 /* attribute flags */ 323 }; 324 325 ddi_dma_attr_t nxge_rx_dma_attr = { 326 DMA_ATTR_V0, /* version number. */ 327 0, /* low address */ 328 0xffffffffffffffff, /* high address */ 329 0xffffffffffffffff, /* address counter max */ 330 0x2000, /* alignment */ 331 0xfc00fc, /* dlim_burstsizes */ 332 0x1, /* minimum transfer size */ 333 0xffffffffffffffff, /* maximum transfer size */ 334 0xffffffffffffffff, /* maximum segment size */ 335 1, /* scatter/gather list length */ 336 (unsigned int) 1, /* granularity */ 337 DDI_DMA_RELAXED_ORDERING /* attribute flags */ 338 }; 339 340 ddi_dma_lim_t nxge_dma_limits = { 341 (uint_t)0, /* dlim_addr_lo */ 342 (uint_t)0xffffffff, /* dlim_addr_hi */ 343 (uint_t)0xffffffff, /* dlim_cntr_max */ 344 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */ 345 0x1, /* dlim_minxfer */ 346 1024 /* dlim_speed */ 347 }; 348 349 dma_method_t nxge_force_dma = DVMA; 350 351 /* 352 * dma chunk sizes. 353 * 354 * Try to allocate the largest possible size 355 * so that fewer number of dma chunks would be managed 356 */ 357 #ifdef NIU_PA_WORKAROUND 358 size_t alloc_sizes [] = {0x2000}; 359 #else 360 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000, 361 0x10000, 0x20000, 0x40000, 0x80000, 362 0x100000, 0x200000, 0x400000, 0x800000, 0x1000000}; 363 #endif 364 365 /* 366 * Translate "dev_t" to a pointer to the associated "dev_info_t". 367 */ 368 369 static int 370 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 371 { 372 p_nxge_t nxgep = NULL; 373 int instance; 374 int status = DDI_SUCCESS; 375 uint8_t portn; 376 nxge_mmac_t *mmac_info; 377 378 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach")); 379 380 /* 381 * Get the device instance since we'll need to setup 382 * or retrieve a soft state for this instance. 383 */ 384 instance = ddi_get_instance(dip); 385 386 switch (cmd) { 387 case DDI_ATTACH: 388 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH")); 389 break; 390 391 case DDI_RESUME: 392 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME")); 393 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 394 if (nxgep == NULL) { 395 status = DDI_FAILURE; 396 break; 397 } 398 if (nxgep->dip != dip) { 399 status = DDI_FAILURE; 400 break; 401 } 402 if (nxgep->suspended == DDI_PM_SUSPEND) { 403 status = ddi_dev_is_needed(nxgep->dip, 0, 1); 404 } else { 405 status = nxge_resume(nxgep); 406 } 407 goto nxge_attach_exit; 408 409 case DDI_PM_RESUME: 410 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME")); 411 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 412 if (nxgep == NULL) { 413 status = DDI_FAILURE; 414 break; 415 } 416 if (nxgep->dip != dip) { 417 status = DDI_FAILURE; 418 break; 419 } 420 status = nxge_resume(nxgep); 421 goto nxge_attach_exit; 422 423 default: 424 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown")); 425 status = DDI_FAILURE; 426 goto nxge_attach_exit; 427 } 428 429 430 if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) { 431 status = DDI_FAILURE; 432 goto nxge_attach_exit; 433 } 434 435 nxgep = ddi_get_soft_state(nxge_list, instance); 436 if (nxgep == NULL) { 437 goto nxge_attach_fail; 438 } 439 440 nxgep->drv_state = 0; 441 nxgep->dip = dip; 442 nxgep->instance = instance; 443 nxgep->p_dip = ddi_get_parent(dip); 444 nxgep->nxge_debug_level = nxge_debug_level; 445 npi_debug_level = nxge_debug_level; 446 447 nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr, 448 &nxge_rx_dma_attr); 449 450 status = nxge_map_regs(nxgep); 451 if (status != NXGE_OK) { 452 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed")); 453 goto nxge_attach_fail; 454 } 455 456 status = nxge_init_common_dev(nxgep); 457 if (status != NXGE_OK) { 458 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 459 "nxge_init_common_dev failed")); 460 goto nxge_attach_fail; 461 } 462 463 portn = NXGE_GET_PORT_NUM(nxgep->function_num); 464 nxgep->mac.portnum = portn; 465 if ((portn == 0) || (portn == 1)) 466 nxgep->mac.porttype = PORT_TYPE_XMAC; 467 else 468 nxgep->mac.porttype = PORT_TYPE_BMAC; 469 /* 470 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC) 471 * internally, the rest 2 ports use BMAC (1G "Big" MAC). 472 * The two types of MACs have different characterizations. 473 */ 474 mmac_info = &nxgep->nxge_mmac_info; 475 if (nxgep->function_num < 2) { 476 mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY; 477 mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY; 478 } else { 479 mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY; 480 mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY; 481 } 482 /* 483 * Setup the Ndd parameters for the this instance. 484 */ 485 nxge_init_param(nxgep); 486 487 /* 488 * Setup Register Tracing Buffer. 489 */ 490 npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf); 491 492 /* init stats ptr */ 493 nxge_init_statsp(nxgep); 494 495 if (nxgep->niu_type != N2_NIU) { 496 /* 497 * read the vpd info from the eeprom into local data 498 * structure and check for the VPD info validity 499 */ 500 (void) nxge_vpd_info_get(nxgep); 501 } 502 503 status = nxge_get_xcvr_type(nxgep); 504 505 if (status != NXGE_OK) { 506 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: " 507 " Couldn't determine card type" 508 " .... exit ")); 509 goto nxge_attach_fail; 510 } 511 512 if ((nxgep->niu_type == NEPTUNE) && 513 (nxgep->mac.portmode == PORT_10G_FIBER)) { 514 nxgep->niu_type = NEPTUNE_2; 515 } 516 517 if ((nxgep->niu_type == NEPTUNE_2) && (nxgep->function_num > 1)) { 518 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Unsupported function %d." 519 "Only functions 0 and 1 are supported by this card", 520 nxgep->function_num)); 521 status = NXGE_ERROR; 522 goto nxge_attach_fail; 523 } 524 525 status = nxge_get_config_properties(nxgep); 526 527 if (status != NXGE_OK) { 528 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed")); 529 goto nxge_attach_fail; 530 } 531 532 /* 533 * Setup the Kstats for the driver. 534 */ 535 nxge_setup_kstats(nxgep); 536 537 nxge_setup_param(nxgep); 538 539 status = nxge_setup_system_dma_pages(nxgep); 540 if (status != NXGE_OK) { 541 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed")); 542 goto nxge_attach_fail; 543 } 544 545 #if defined(sun4v) 546 if (nxgep->niu_type == N2_NIU) { 547 nxgep->niu_hsvc_available = B_FALSE; 548 bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t)); 549 if ((status = 550 hsvc_register(&nxgep->niu_hsvc, 551 &nxgep->niu_min_ver)) != 0) { 552 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 553 "nxge_attach: " 554 "%s: cannot negotiate " 555 "hypervisor services " 556 "revision %d " 557 "group: 0x%lx " 558 "major: 0x%lx minor: 0x%lx " 559 "errno: %d", 560 niu_hsvc.hsvc_modname, 561 niu_hsvc.hsvc_rev, 562 niu_hsvc.hsvc_group, 563 niu_hsvc.hsvc_major, 564 niu_hsvc.hsvc_minor, 565 status)); 566 status = DDI_FAILURE; 567 goto nxge_attach_fail; 568 } 569 570 nxgep->niu_hsvc_available = B_TRUE; 571 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 572 "NIU Hypervisor service enabled")); 573 } 574 #endif 575 576 nxge_hw_id_init(nxgep); 577 nxge_hw_init_niu_common(nxgep); 578 579 status = nxge_setup_mutexes(nxgep); 580 if (status != NXGE_OK) { 581 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed")); 582 goto nxge_attach_fail; 583 } 584 585 status = nxge_setup_dev(nxgep); 586 if (status != DDI_SUCCESS) { 587 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed")); 588 goto nxge_attach_fail; 589 } 590 591 status = nxge_add_intrs(nxgep); 592 if (status != DDI_SUCCESS) { 593 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed")); 594 goto nxge_attach_fail; 595 } 596 status = nxge_add_soft_intrs(nxgep); 597 if (status != DDI_SUCCESS) { 598 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed")); 599 goto nxge_attach_fail; 600 } 601 602 /* 603 * Enable interrupts. 604 */ 605 nxge_intrs_enable(nxgep); 606 607 if ((status = nxge_mac_register(nxgep)) != DDI_SUCCESS) { 608 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 609 "unable to register to mac layer (%d)", status)); 610 goto nxge_attach_fail; 611 } 612 613 mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN); 614 615 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)", 616 instance)); 617 618 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 619 620 goto nxge_attach_exit; 621 622 nxge_attach_fail: 623 nxge_unattach(nxgep); 624 if (status != NXGE_OK) 625 status = (NXGE_ERROR | NXGE_DDI_FAILED); 626 nxgep = NULL; 627 628 nxge_attach_exit: 629 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x", 630 status)); 631 632 return (status); 633 } 634 635 static int 636 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 637 { 638 int status = DDI_SUCCESS; 639 int instance; 640 p_nxge_t nxgep = NULL; 641 642 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach")); 643 instance = ddi_get_instance(dip); 644 nxgep = ddi_get_soft_state(nxge_list, instance); 645 if (nxgep == NULL) { 646 status = DDI_FAILURE; 647 goto nxge_detach_exit; 648 } 649 650 switch (cmd) { 651 case DDI_DETACH: 652 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH")); 653 break; 654 655 case DDI_PM_SUSPEND: 656 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND")); 657 nxgep->suspended = DDI_PM_SUSPEND; 658 nxge_suspend(nxgep); 659 break; 660 661 case DDI_SUSPEND: 662 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND")); 663 if (nxgep->suspended != DDI_PM_SUSPEND) { 664 nxgep->suspended = DDI_SUSPEND; 665 nxge_suspend(nxgep); 666 } 667 break; 668 669 default: 670 status = DDI_FAILURE; 671 } 672 673 if (cmd != DDI_DETACH) 674 goto nxge_detach_exit; 675 676 /* 677 * Stop the xcvr polling. 678 */ 679 nxgep->suspended = cmd; 680 681 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 682 683 if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) { 684 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 685 "<== nxge_detach status = 0x%08X", status)); 686 return (DDI_FAILURE); 687 } 688 689 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 690 "<== nxge_detach (mac_unregister) status = 0x%08X", status)); 691 692 nxge_unattach(nxgep); 693 nxgep = NULL; 694 695 nxge_detach_exit: 696 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X", 697 status)); 698 699 return (status); 700 } 701 702 static void 703 nxge_unattach(p_nxge_t nxgep) 704 { 705 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach")); 706 707 if (nxgep == NULL || nxgep->dev_regs == NULL) { 708 return; 709 } 710 711 if (nxgep->nxge_hw_p) { 712 nxge_uninit_common_dev(nxgep); 713 nxgep->nxge_hw_p = NULL; 714 } 715 716 if (nxgep->nxge_timerid) { 717 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 718 nxgep->nxge_timerid = 0; 719 } 720 721 #if defined(sun4v) 722 if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) { 723 (void) hsvc_unregister(&nxgep->niu_hsvc); 724 nxgep->niu_hsvc_available = B_FALSE; 725 } 726 #endif 727 /* 728 * Stop any further interrupts. 729 */ 730 nxge_remove_intrs(nxgep); 731 732 /* remove soft interrups */ 733 nxge_remove_soft_intrs(nxgep); 734 735 /* 736 * Stop the device and free resources. 737 */ 738 nxge_destroy_dev(nxgep); 739 740 /* 741 * Tear down the ndd parameters setup. 742 */ 743 nxge_destroy_param(nxgep); 744 745 /* 746 * Tear down the kstat setup. 747 */ 748 nxge_destroy_kstats(nxgep); 749 750 /* 751 * Destroy all mutexes. 752 */ 753 nxge_destroy_mutexes(nxgep); 754 755 /* 756 * Remove the list of ndd parameters which 757 * were setup during attach. 758 */ 759 if (nxgep->dip) { 760 NXGE_DEBUG_MSG((nxgep, OBP_CTL, 761 " nxge_unattach: remove all properties")); 762 763 (void) ddi_prop_remove_all(nxgep->dip); 764 } 765 766 #if NXGE_PROPERTY 767 nxge_remove_hard_properties(nxgep); 768 #endif 769 770 /* 771 * Unmap the register setup. 772 */ 773 nxge_unmap_regs(nxgep); 774 775 nxge_fm_fini(nxgep); 776 777 ddi_soft_state_free(nxge_list, nxgep->instance); 778 779 NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach")); 780 } 781 782 static char n2_siu_name[] = "niu"; 783 784 static nxge_status_t 785 nxge_map_regs(p_nxge_t nxgep) 786 { 787 int ddi_status = DDI_SUCCESS; 788 p_dev_regs_t dev_regs; 789 char buf[MAXPATHLEN + 1]; 790 char *devname; 791 #ifdef NXGE_DEBUG 792 char *sysname; 793 #endif 794 off_t regsize; 795 nxge_status_t status = NXGE_OK; 796 #if !defined(_BIG_ENDIAN) 797 off_t pci_offset; 798 uint16_t pcie_devctl; 799 #endif 800 801 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs")); 802 nxgep->dev_regs = NULL; 803 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP); 804 dev_regs->nxge_regh = NULL; 805 dev_regs->nxge_pciregh = NULL; 806 dev_regs->nxge_msix_regh = NULL; 807 dev_regs->nxge_vir_regh = NULL; 808 dev_regs->nxge_vir2_regh = NULL; 809 nxgep->niu_type = NEPTUNE; 810 811 devname = ddi_pathname(nxgep->dip, buf); 812 ASSERT(strlen(devname) > 0); 813 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 814 "nxge_map_regs: pathname devname %s", devname)); 815 816 if (strstr(devname, n2_siu_name)) { 817 /* N2/NIU */ 818 nxgep->niu_type = N2_NIU; 819 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 820 "nxge_map_regs: N2/NIU devname %s", devname)); 821 /* get function number */ 822 nxgep->function_num = 823 (devname[strlen(devname) -1] == '1' ? 1 : 0); 824 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 825 "nxge_map_regs: N2/NIU function number %d", 826 nxgep->function_num)); 827 } else { 828 int *prop_val; 829 uint_t prop_len; 830 uint8_t func_num; 831 832 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 833 0, "reg", 834 &prop_val, &prop_len) != DDI_PROP_SUCCESS) { 835 NXGE_DEBUG_MSG((nxgep, VPD_CTL, 836 "Reg property not found")); 837 ddi_status = DDI_FAILURE; 838 goto nxge_map_regs_fail0; 839 840 } else { 841 func_num = (prop_val[0] >> 8) & 0x7; 842 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 843 "Reg property found: fun # %d", 844 func_num)); 845 nxgep->function_num = func_num; 846 ddi_prop_free(prop_val); 847 } 848 } 849 850 switch (nxgep->niu_type) { 851 case NEPTUNE: 852 case NEPTUNE_2: 853 default: 854 (void) ddi_dev_regsize(nxgep->dip, 0, ®size); 855 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 856 "nxge_map_regs: pci config size 0x%x", regsize)); 857 858 ddi_status = ddi_regs_map_setup(nxgep->dip, 0, 859 (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0, 860 &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh); 861 if (ddi_status != DDI_SUCCESS) { 862 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 863 "ddi_map_regs, nxge bus config regs failed")); 864 goto nxge_map_regs_fail0; 865 } 866 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 867 "nxge_map_reg: PCI config addr 0x%0llx " 868 " handle 0x%0llx", dev_regs->nxge_pciregp, 869 dev_regs->nxge_pciregh)); 870 /* 871 * IMP IMP 872 * workaround for bit swapping bug in HW 873 * which ends up in no-snoop = yes 874 * resulting, in DMA not synched properly 875 */ 876 #if !defined(_BIG_ENDIAN) 877 /* workarounds for x86 systems */ 878 pci_offset = 0x80 + PCIE_DEVCTL; 879 pcie_devctl = 0x0; 880 pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP; 881 pcie_devctl |= PCIE_DEVCTL_RO_EN; 882 pci_config_put16(dev_regs->nxge_pciregh, pci_offset, 883 pcie_devctl); 884 #endif 885 886 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 887 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 888 "nxge_map_regs: pio size 0x%x", regsize)); 889 /* set up the device mapped register */ 890 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 891 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 892 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 893 if (ddi_status != DDI_SUCCESS) { 894 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 895 "ddi_map_regs for Neptune global reg failed")); 896 goto nxge_map_regs_fail1; 897 } 898 899 /* set up the msi/msi-x mapped register */ 900 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 901 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 902 "nxge_map_regs: msix size 0x%x", regsize)); 903 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 904 (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0, 905 &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh); 906 if (ddi_status != DDI_SUCCESS) { 907 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 908 "ddi_map_regs for msi reg failed")); 909 goto nxge_map_regs_fail2; 910 } 911 912 /* set up the vio region mapped register */ 913 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 914 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 915 "nxge_map_regs: vio size 0x%x", regsize)); 916 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 917 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 918 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 919 920 if (ddi_status != DDI_SUCCESS) { 921 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 922 "ddi_map_regs for nxge vio reg failed")); 923 goto nxge_map_regs_fail3; 924 } 925 nxgep->dev_regs = dev_regs; 926 927 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh); 928 NPI_PCI_ADD_HANDLE_SET(nxgep, 929 (npi_reg_ptr_t)dev_regs->nxge_pciregp); 930 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh); 931 NPI_MSI_ADD_HANDLE_SET(nxgep, 932 (npi_reg_ptr_t)dev_regs->nxge_msix_regp); 933 934 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 935 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 936 937 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 938 NPI_REG_ADD_HANDLE_SET(nxgep, 939 (npi_reg_ptr_t)dev_regs->nxge_regp); 940 941 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 942 NPI_VREG_ADD_HANDLE_SET(nxgep, 943 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 944 945 break; 946 947 case N2_NIU: 948 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU")); 949 /* 950 * Set up the device mapped register (FWARC 2006/556) 951 * (changed back to 1: reg starts at 1!) 952 */ 953 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 954 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 955 "nxge_map_regs: dev size 0x%x", regsize)); 956 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 957 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 958 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 959 960 if (ddi_status != DDI_SUCCESS) { 961 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 962 "ddi_map_regs for N2/NIU, global reg failed ")); 963 goto nxge_map_regs_fail1; 964 } 965 966 /* set up the vio region mapped register */ 967 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 968 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 969 "nxge_map_regs: vio (1) size 0x%x", regsize)); 970 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 971 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 972 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 973 974 if (ddi_status != DDI_SUCCESS) { 975 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 976 "ddi_map_regs for nxge vio reg failed")); 977 goto nxge_map_regs_fail2; 978 } 979 /* set up the vio region mapped register */ 980 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 981 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 982 "nxge_map_regs: vio (3) size 0x%x", regsize)); 983 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 984 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0, 985 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh); 986 987 if (ddi_status != DDI_SUCCESS) { 988 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 989 "ddi_map_regs for nxge vio2 reg failed")); 990 goto nxge_map_regs_fail3; 991 } 992 nxgep->dev_regs = dev_regs; 993 994 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 995 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 996 997 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 998 NPI_REG_ADD_HANDLE_SET(nxgep, 999 (npi_reg_ptr_t)dev_regs->nxge_regp); 1000 1001 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1002 NPI_VREG_ADD_HANDLE_SET(nxgep, 1003 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1004 1005 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh); 1006 NPI_V2REG_ADD_HANDLE_SET(nxgep, 1007 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp); 1008 1009 break; 1010 } 1011 1012 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx " 1013 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh)); 1014 1015 goto nxge_map_regs_exit; 1016 nxge_map_regs_fail3: 1017 if (dev_regs->nxge_msix_regh) { 1018 ddi_regs_map_free(&dev_regs->nxge_msix_regh); 1019 } 1020 if (dev_regs->nxge_vir_regh) { 1021 ddi_regs_map_free(&dev_regs->nxge_regh); 1022 } 1023 nxge_map_regs_fail2: 1024 if (dev_regs->nxge_regh) { 1025 ddi_regs_map_free(&dev_regs->nxge_regh); 1026 } 1027 nxge_map_regs_fail1: 1028 if (dev_regs->nxge_pciregh) { 1029 ddi_regs_map_free(&dev_regs->nxge_pciregh); 1030 } 1031 nxge_map_regs_fail0: 1032 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory")); 1033 kmem_free(dev_regs, sizeof (dev_regs_t)); 1034 1035 nxge_map_regs_exit: 1036 if (ddi_status != DDI_SUCCESS) 1037 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1038 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs")); 1039 return (status); 1040 } 1041 1042 static void 1043 nxge_unmap_regs(p_nxge_t nxgep) 1044 { 1045 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs")); 1046 if (nxgep->dev_regs) { 1047 if (nxgep->dev_regs->nxge_pciregh) { 1048 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1049 "==> nxge_unmap_regs: bus")); 1050 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh); 1051 nxgep->dev_regs->nxge_pciregh = NULL; 1052 } 1053 if (nxgep->dev_regs->nxge_regh) { 1054 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1055 "==> nxge_unmap_regs: device registers")); 1056 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1057 nxgep->dev_regs->nxge_regh = NULL; 1058 } 1059 if (nxgep->dev_regs->nxge_msix_regh) { 1060 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1061 "==> nxge_unmap_regs: device interrupts")); 1062 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); 1063 nxgep->dev_regs->nxge_msix_regh = NULL; 1064 } 1065 if (nxgep->dev_regs->nxge_vir_regh) { 1066 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1067 "==> nxge_unmap_regs: vio region")); 1068 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); 1069 nxgep->dev_regs->nxge_vir_regh = NULL; 1070 } 1071 if (nxgep->dev_regs->nxge_vir2_regh) { 1072 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1073 "==> nxge_unmap_regs: vio2 region")); 1074 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); 1075 nxgep->dev_regs->nxge_vir2_regh = NULL; 1076 } 1077 1078 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); 1079 nxgep->dev_regs = NULL; 1080 } 1081 1082 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); 1083 } 1084 1085 static nxge_status_t 1086 nxge_setup_mutexes(p_nxge_t nxgep) 1087 { 1088 int ddi_status = DDI_SUCCESS; 1089 nxge_status_t status = NXGE_OK; 1090 nxge_classify_t *classify_ptr; 1091 int partition; 1092 1093 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); 1094 1095 /* 1096 * Get the interrupt cookie so the mutexes can be 1097 * Initialized. 1098 */ 1099 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, 1100 &nxgep->interrupt_cookie); 1101 if (ddi_status != DDI_SUCCESS) { 1102 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1103 "<== nxge_setup_mutexes: failed 0x%x", ddi_status)); 1104 goto nxge_setup_mutexes_exit; 1105 } 1106 1107 /* Initialize global mutex */ 1108 1109 if (nxge_mdio_lock_init == 0) { 1110 MUTEX_INIT(&nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 1111 } 1112 atomic_add_32(&nxge_mdio_lock_init, 1); 1113 1114 if (nxge_mii_lock_init == 0) { 1115 MUTEX_INIT(&nxge_mii_lock, NULL, MUTEX_DRIVER, NULL); 1116 } 1117 atomic_add_32(&nxge_mii_lock_init, 1); 1118 1119 nxgep->drv_state |= STATE_MDIO_LOCK_INIT; 1120 nxgep->drv_state |= STATE_MII_LOCK_INIT; 1121 1122 /* 1123 * Initialize mutex's for this device. 1124 */ 1125 MUTEX_INIT(nxgep->genlock, NULL, 1126 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1127 MUTEX_INIT(&nxgep->ouraddr_lock, NULL, 1128 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1129 MUTEX_INIT(&nxgep->mif_lock, NULL, 1130 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1131 RW_INIT(&nxgep->filter_lock, NULL, 1132 RW_DRIVER, (void *)nxgep->interrupt_cookie); 1133 1134 classify_ptr = &nxgep->classifier; 1135 /* 1136 * FFLP Mutexes are never used in interrupt context 1137 * as fflp operation can take very long time to 1138 * complete and hence not suitable to invoke from interrupt 1139 * handlers. 1140 */ 1141 MUTEX_INIT(&classify_ptr->tcam_lock, NULL, 1142 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1143 if (nxgep->niu_type == NEPTUNE) { 1144 MUTEX_INIT(&classify_ptr->fcram_lock, NULL, 1145 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1146 for (partition = 0; partition < MAX_PARTITION; partition++) { 1147 MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL, 1148 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1149 } 1150 } 1151 1152 nxge_setup_mutexes_exit: 1153 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1154 "<== nxge_setup_mutexes status = %x", status)); 1155 1156 if (ddi_status != DDI_SUCCESS) 1157 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1158 1159 return (status); 1160 } 1161 1162 static void 1163 nxge_destroy_mutexes(p_nxge_t nxgep) 1164 { 1165 int partition; 1166 nxge_classify_t *classify_ptr; 1167 1168 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes")); 1169 RW_DESTROY(&nxgep->filter_lock); 1170 MUTEX_DESTROY(&nxgep->mif_lock); 1171 MUTEX_DESTROY(&nxgep->ouraddr_lock); 1172 MUTEX_DESTROY(nxgep->genlock); 1173 1174 classify_ptr = &nxgep->classifier; 1175 MUTEX_DESTROY(&classify_ptr->tcam_lock); 1176 1177 /* free data structures, based on HW type */ 1178 if (nxgep->niu_type == NEPTUNE) { 1179 MUTEX_DESTROY(&classify_ptr->fcram_lock); 1180 for (partition = 0; partition < MAX_PARTITION; partition++) { 1181 MUTEX_DESTROY(&classify_ptr->hash_lock[partition]); 1182 } 1183 } 1184 if (nxgep->drv_state & STATE_MDIO_LOCK_INIT) { 1185 if (nxge_mdio_lock_init == 1) { 1186 MUTEX_DESTROY(&nxge_mdio_lock); 1187 } 1188 atomic_add_32(&nxge_mdio_lock_init, -1); 1189 } 1190 if (nxgep->drv_state & STATE_MII_LOCK_INIT) { 1191 if (nxge_mii_lock_init == 1) { 1192 MUTEX_DESTROY(&nxge_mii_lock); 1193 } 1194 atomic_add_32(&nxge_mii_lock_init, -1); 1195 } 1196 1197 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes")); 1198 } 1199 1200 nxge_status_t 1201 nxge_init(p_nxge_t nxgep) 1202 { 1203 nxge_status_t status = NXGE_OK; 1204 1205 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init")); 1206 1207 if (nxgep->drv_state & STATE_HW_INITIALIZED) { 1208 return (status); 1209 } 1210 1211 /* 1212 * Allocate system memory for the receive/transmit buffer blocks 1213 * and receive/transmit descriptor rings. 1214 */ 1215 status = nxge_alloc_mem_pool(nxgep); 1216 if (status != NXGE_OK) { 1217 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n")); 1218 goto nxge_init_fail1; 1219 } 1220 1221 /* 1222 * Initialize and enable TXC registers 1223 * (Globally enable TX controller, 1224 * enable a port, configure dma channel bitmap, 1225 * configure the max burst size). 1226 */ 1227 status = nxge_txc_init(nxgep); 1228 if (status != NXGE_OK) { 1229 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n")); 1230 goto nxge_init_fail2; 1231 } 1232 1233 /* 1234 * Initialize and enable TXDMA channels. 1235 */ 1236 status = nxge_init_txdma_channels(nxgep); 1237 if (status != NXGE_OK) { 1238 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n")); 1239 goto nxge_init_fail3; 1240 } 1241 1242 /* 1243 * Initialize and enable RXDMA channels. 1244 */ 1245 status = nxge_init_rxdma_channels(nxgep); 1246 if (status != NXGE_OK) { 1247 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n")); 1248 goto nxge_init_fail4; 1249 } 1250 1251 /* 1252 * Initialize TCAM and FCRAM (Neptune). 1253 */ 1254 status = nxge_classify_init(nxgep); 1255 if (status != NXGE_OK) { 1256 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n")); 1257 goto nxge_init_fail5; 1258 } 1259 1260 /* 1261 * Initialize ZCP 1262 */ 1263 status = nxge_zcp_init(nxgep); 1264 if (status != NXGE_OK) { 1265 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n")); 1266 goto nxge_init_fail5; 1267 } 1268 1269 /* 1270 * Initialize IPP. 1271 */ 1272 status = nxge_ipp_init(nxgep); 1273 if (status != NXGE_OK) { 1274 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n")); 1275 goto nxge_init_fail5; 1276 } 1277 1278 /* 1279 * Initialize the MAC block. 1280 */ 1281 status = nxge_mac_init(nxgep); 1282 if (status != NXGE_OK) { 1283 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n")); 1284 goto nxge_init_fail5; 1285 } 1286 1287 nxge_intrs_enable(nxgep); 1288 1289 /* 1290 * Enable hardware interrupts. 1291 */ 1292 nxge_intr_hw_enable(nxgep); 1293 nxgep->drv_state |= STATE_HW_INITIALIZED; 1294 1295 goto nxge_init_exit; 1296 1297 nxge_init_fail5: 1298 nxge_uninit_rxdma_channels(nxgep); 1299 nxge_init_fail4: 1300 nxge_uninit_txdma_channels(nxgep); 1301 nxge_init_fail3: 1302 (void) nxge_txc_uninit(nxgep); 1303 nxge_init_fail2: 1304 nxge_free_mem_pool(nxgep); 1305 nxge_init_fail1: 1306 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1307 "<== nxge_init status (failed) = 0x%08x", status)); 1308 return (status); 1309 1310 nxge_init_exit: 1311 1312 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x", 1313 status)); 1314 return (status); 1315 } 1316 1317 1318 timeout_id_t 1319 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec) 1320 { 1321 if ((nxgep->suspended == 0) || 1322 (nxgep->suspended == DDI_RESUME)) { 1323 return (timeout(func, (caddr_t)nxgep, 1324 drv_usectohz(1000 * msec))); 1325 } 1326 return (NULL); 1327 } 1328 1329 /*ARGSUSED*/ 1330 void 1331 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid) 1332 { 1333 if (timerid) { 1334 (void) untimeout(timerid); 1335 } 1336 } 1337 1338 void 1339 nxge_uninit(p_nxge_t nxgep) 1340 { 1341 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit")); 1342 1343 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1344 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1345 "==> nxge_uninit: not initialized")); 1346 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1347 "<== nxge_uninit")); 1348 return; 1349 } 1350 1351 /* stop timer */ 1352 if (nxgep->nxge_timerid) { 1353 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 1354 nxgep->nxge_timerid = 0; 1355 } 1356 1357 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1358 (void) nxge_intr_hw_disable(nxgep); 1359 1360 /* 1361 * Reset the receive MAC side. 1362 */ 1363 (void) nxge_rx_mac_disable(nxgep); 1364 1365 /* Disable and soft reset the IPP */ 1366 (void) nxge_ipp_disable(nxgep); 1367 1368 /* Free classification resources */ 1369 (void) nxge_classify_uninit(nxgep); 1370 1371 /* 1372 * Reset the transmit/receive DMA side. 1373 */ 1374 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 1375 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1376 1377 nxge_uninit_txdma_channels(nxgep); 1378 nxge_uninit_rxdma_channels(nxgep); 1379 1380 /* 1381 * Reset the transmit MAC side. 1382 */ 1383 (void) nxge_tx_mac_disable(nxgep); 1384 1385 nxge_free_mem_pool(nxgep); 1386 1387 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1388 1389 nxgep->drv_state &= ~STATE_HW_INITIALIZED; 1390 1391 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: " 1392 "nxge_mblks_pending %d", nxge_mblks_pending)); 1393 } 1394 1395 void 1396 nxge_get64(p_nxge_t nxgep, p_mblk_t mp) 1397 { 1398 uint64_t reg; 1399 uint64_t regdata; 1400 int i, retry; 1401 1402 bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t)); 1403 regdata = 0; 1404 retry = 1; 1405 1406 for (i = 0; i < retry; i++) { 1407 NXGE_REG_RD64(nxgep->npi_handle, reg, ®data); 1408 } 1409 bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t)); 1410 } 1411 1412 void 1413 nxge_put64(p_nxge_t nxgep, p_mblk_t mp) 1414 { 1415 uint64_t reg; 1416 uint64_t buf[2]; 1417 1418 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t)); 1419 reg = buf[0]; 1420 1421 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]); 1422 } 1423 1424 1425 nxge_os_mutex_t nxgedebuglock; 1426 int nxge_debug_init = 0; 1427 1428 /*ARGSUSED*/ 1429 /*VARARGS*/ 1430 void 1431 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...) 1432 { 1433 char msg_buffer[1048]; 1434 char prefix_buffer[32]; 1435 int instance; 1436 uint64_t debug_level; 1437 int cmn_level = CE_CONT; 1438 va_list ap; 1439 1440 debug_level = (nxgep == NULL) ? nxge_debug_level : 1441 nxgep->nxge_debug_level; 1442 1443 if ((level & debug_level) || 1444 (level == NXGE_NOTE) || 1445 (level == NXGE_ERR_CTL)) { 1446 /* do the msg processing */ 1447 if (nxge_debug_init == 0) { 1448 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 1449 nxge_debug_init = 1; 1450 } 1451 1452 MUTEX_ENTER(&nxgedebuglock); 1453 1454 if ((level & NXGE_NOTE)) { 1455 cmn_level = CE_NOTE; 1456 } 1457 1458 if (level & NXGE_ERR_CTL) { 1459 cmn_level = CE_WARN; 1460 } 1461 1462 va_start(ap, fmt); 1463 (void) vsprintf(msg_buffer, fmt, ap); 1464 va_end(ap); 1465 if (nxgep == NULL) { 1466 instance = -1; 1467 (void) sprintf(prefix_buffer, "%s :", "nxge"); 1468 } else { 1469 instance = nxgep->instance; 1470 (void) sprintf(prefix_buffer, 1471 "%s%d :", "nxge", instance); 1472 } 1473 1474 MUTEX_EXIT(&nxgedebuglock); 1475 cmn_err(cmn_level, "!%s %s\n", 1476 prefix_buffer, msg_buffer); 1477 1478 } 1479 } 1480 1481 char * 1482 nxge_dump_packet(char *addr, int size) 1483 { 1484 uchar_t *ap = (uchar_t *)addr; 1485 int i; 1486 static char etherbuf[1024]; 1487 char *cp = etherbuf; 1488 char digits[] = "0123456789abcdef"; 1489 1490 if (!size) 1491 size = 60; 1492 1493 if (size > MAX_DUMP_SZ) { 1494 /* Dump the leading bytes */ 1495 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1496 if (*ap > 0x0f) 1497 *cp++ = digits[*ap >> 4]; 1498 *cp++ = digits[*ap++ & 0xf]; 1499 *cp++ = ':'; 1500 } 1501 for (i = 0; i < 20; i++) 1502 *cp++ = '.'; 1503 /* Dump the last MAX_DUMP_SZ/2 bytes */ 1504 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2)); 1505 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1506 if (*ap > 0x0f) 1507 *cp++ = digits[*ap >> 4]; 1508 *cp++ = digits[*ap++ & 0xf]; 1509 *cp++ = ':'; 1510 } 1511 } else { 1512 for (i = 0; i < size; i++) { 1513 if (*ap > 0x0f) 1514 *cp++ = digits[*ap >> 4]; 1515 *cp++ = digits[*ap++ & 0xf]; 1516 *cp++ = ':'; 1517 } 1518 } 1519 *--cp = 0; 1520 return (etherbuf); 1521 } 1522 1523 #ifdef NXGE_DEBUG 1524 static void 1525 nxge_test_map_regs(p_nxge_t nxgep) 1526 { 1527 ddi_acc_handle_t cfg_handle; 1528 p_pci_cfg_t cfg_ptr; 1529 ddi_acc_handle_t dev_handle; 1530 char *dev_ptr; 1531 ddi_acc_handle_t pci_config_handle; 1532 uint32_t regval; 1533 int i; 1534 1535 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs")); 1536 1537 dev_handle = nxgep->dev_regs->nxge_regh; 1538 dev_ptr = (char *)nxgep->dev_regs->nxge_regp; 1539 1540 if (nxgep->niu_type == NEPTUNE) { 1541 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1542 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1543 1544 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1545 "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr)); 1546 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1547 "Neptune PCI cfg_ptr vendor id ptr 0x%llx", 1548 &cfg_ptr->vendorid)); 1549 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1550 "\tvendorid 0x%x devid 0x%x", 1551 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0), 1552 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0))); 1553 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1554 "PCI BAR: base 0x%x base14 0x%x base 18 0x%x " 1555 "bar1c 0x%x", 1556 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0), 1557 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0), 1558 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0), 1559 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0))); 1560 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1561 "\nNeptune PCI BAR: base20 0x%x base24 0x%x " 1562 "base 28 0x%x bar2c 0x%x\n", 1563 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0), 1564 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0), 1565 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0), 1566 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0))); 1567 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1568 "\nNeptune PCI BAR: base30 0x%x\n", 1569 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); 1570 1571 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1572 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1573 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1574 "first 0x%llx second 0x%llx third 0x%llx " 1575 "last 0x%llx ", 1576 NXGE_PIO_READ64(dev_handle, 1577 (uint64_t *)(dev_ptr + 0), 0), 1578 NXGE_PIO_READ64(dev_handle, 1579 (uint64_t *)(dev_ptr + 8), 0), 1580 NXGE_PIO_READ64(dev_handle, 1581 (uint64_t *)(dev_ptr + 16), 0), 1582 NXGE_PIO_READ64(cfg_handle, 1583 (uint64_t *)(dev_ptr + 24), 0))); 1584 } 1585 } 1586 1587 #endif 1588 1589 static void 1590 nxge_suspend(p_nxge_t nxgep) 1591 { 1592 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); 1593 1594 nxge_intrs_disable(nxgep); 1595 nxge_destroy_dev(nxgep); 1596 1597 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); 1598 } 1599 1600 static nxge_status_t 1601 nxge_resume(p_nxge_t nxgep) 1602 { 1603 nxge_status_t status = NXGE_OK; 1604 1605 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); 1606 nxgep->suspended = DDI_RESUME; 1607 1608 nxge_global_reset(nxgep); 1609 nxgep->suspended = 0; 1610 1611 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1612 "<== nxge_resume status = 0x%x", status)); 1613 return (status); 1614 } 1615 1616 static nxge_status_t 1617 nxge_setup_dev(p_nxge_t nxgep) 1618 { 1619 nxge_status_t status = NXGE_OK; 1620 1621 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 1622 nxgep->mac.portnum)); 1623 1624 status = nxge_xcvr_find(nxgep); 1625 if (status != NXGE_OK) { 1626 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1627 " nxge_setup_dev status " 1628 " (xcvr find 0x%08x)", status)); 1629 goto nxge_setup_dev_exit; 1630 } 1631 1632 status = nxge_link_init(nxgep); 1633 1634 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 1635 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1636 "port%d Bad register acc handle", nxgep->mac.portnum)); 1637 status = NXGE_ERROR; 1638 } 1639 1640 if (status != NXGE_OK) { 1641 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1642 " nxge_setup_dev status " 1643 "(xcvr init 0x%08x)", status)); 1644 goto nxge_setup_dev_exit; 1645 } 1646 1647 nxge_setup_dev_exit: 1648 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1649 "<== nxge_setup_dev port %d status = 0x%08x", 1650 nxgep->mac.portnum, status)); 1651 1652 return (status); 1653 } 1654 1655 static void 1656 nxge_destroy_dev(p_nxge_t nxgep) 1657 { 1658 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 1659 1660 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1661 1662 (void) nxge_hw_stop(nxgep); 1663 1664 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 1665 } 1666 1667 static nxge_status_t 1668 nxge_setup_system_dma_pages(p_nxge_t nxgep) 1669 { 1670 int ddi_status = DDI_SUCCESS; 1671 uint_t count; 1672 ddi_dma_cookie_t cookie; 1673 uint_t iommu_pagesize; 1674 nxge_status_t status = NXGE_OK; 1675 1676 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 1677 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 1678 if (nxgep->niu_type != N2_NIU) { 1679 iommu_pagesize = dvma_pagesize(nxgep->dip); 1680 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1681 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1682 " default_block_size %d iommu_pagesize %d", 1683 nxgep->sys_page_sz, 1684 ddi_ptob(nxgep->dip, (ulong_t)1), 1685 nxgep->rx_default_block_size, 1686 iommu_pagesize)); 1687 1688 if (iommu_pagesize != 0) { 1689 if (nxgep->sys_page_sz == iommu_pagesize) { 1690 if (iommu_pagesize > 0x4000) 1691 nxgep->sys_page_sz = 0x4000; 1692 } else { 1693 if (nxgep->sys_page_sz > iommu_pagesize) 1694 nxgep->sys_page_sz = iommu_pagesize; 1695 } 1696 } 1697 } 1698 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 1699 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1700 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1701 "default_block_size %d page mask %d", 1702 nxgep->sys_page_sz, 1703 ddi_ptob(nxgep->dip, (ulong_t)1), 1704 nxgep->rx_default_block_size, 1705 nxgep->sys_page_mask)); 1706 1707 1708 switch (nxgep->sys_page_sz) { 1709 default: 1710 nxgep->sys_page_sz = 0x1000; 1711 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 1712 nxgep->rx_default_block_size = 0x1000; 1713 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 1714 break; 1715 case 0x1000: 1716 nxgep->rx_default_block_size = 0x1000; 1717 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 1718 break; 1719 case 0x2000: 1720 nxgep->rx_default_block_size = 0x2000; 1721 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 1722 break; 1723 case 0x4000: 1724 nxgep->rx_default_block_size = 0x4000; 1725 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 1726 break; 1727 case 0x8000: 1728 nxgep->rx_default_block_size = 0x8000; 1729 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 1730 break; 1731 } 1732 1733 #ifndef USE_RX_BIG_BUF 1734 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 1735 #else 1736 nxgep->rx_default_block_size = 0x2000; 1737 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 1738 #endif 1739 /* 1740 * Get the system DMA burst size. 1741 */ 1742 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 1743 DDI_DMA_DONTWAIT, 0, 1744 &nxgep->dmasparehandle); 1745 if (ddi_status != DDI_SUCCESS) { 1746 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1747 "ddi_dma_alloc_handle: failed " 1748 " status 0x%x", ddi_status)); 1749 goto nxge_get_soft_properties_exit; 1750 } 1751 1752 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 1753 (caddr_t)nxgep->dmasparehandle, 1754 sizeof (nxgep->dmasparehandle), 1755 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1756 DDI_DMA_DONTWAIT, 0, 1757 &cookie, &count); 1758 if (ddi_status != DDI_DMA_MAPPED) { 1759 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1760 "Binding spare handle to find system" 1761 " burstsize failed.")); 1762 ddi_status = DDI_FAILURE; 1763 goto nxge_get_soft_properties_fail1; 1764 } 1765 1766 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 1767 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 1768 1769 nxge_get_soft_properties_fail1: 1770 ddi_dma_free_handle(&nxgep->dmasparehandle); 1771 1772 nxge_get_soft_properties_exit: 1773 1774 if (ddi_status != DDI_SUCCESS) 1775 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1776 1777 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1778 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 1779 return (status); 1780 } 1781 1782 static nxge_status_t 1783 nxge_alloc_mem_pool(p_nxge_t nxgep) 1784 { 1785 nxge_status_t status = NXGE_OK; 1786 1787 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 1788 1789 status = nxge_alloc_rx_mem_pool(nxgep); 1790 if (status != NXGE_OK) { 1791 return (NXGE_ERROR); 1792 } 1793 1794 status = nxge_alloc_tx_mem_pool(nxgep); 1795 if (status != NXGE_OK) { 1796 nxge_free_rx_mem_pool(nxgep); 1797 return (NXGE_ERROR); 1798 } 1799 1800 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 1801 return (NXGE_OK); 1802 } 1803 1804 static void 1805 nxge_free_mem_pool(p_nxge_t nxgep) 1806 { 1807 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 1808 1809 nxge_free_rx_mem_pool(nxgep); 1810 nxge_free_tx_mem_pool(nxgep); 1811 1812 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 1813 } 1814 1815 static nxge_status_t 1816 nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 1817 { 1818 int i, j; 1819 uint32_t ndmas, st_rdc; 1820 p_nxge_dma_pt_cfg_t p_all_cfgp; 1821 p_nxge_hw_pt_cfg_t p_cfgp; 1822 p_nxge_dma_pool_t dma_poolp; 1823 p_nxge_dma_common_t *dma_buf_p; 1824 p_nxge_dma_pool_t dma_cntl_poolp; 1825 p_nxge_dma_common_t *dma_cntl_p; 1826 size_t rx_buf_alloc_size; 1827 size_t rx_cntl_alloc_size; 1828 uint32_t *num_chunks; /* per dma */ 1829 nxge_status_t status = NXGE_OK; 1830 1831 uint32_t nxge_port_rbr_size; 1832 uint32_t nxge_port_rbr_spare_size; 1833 uint32_t nxge_port_rcr_size; 1834 1835 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 1836 1837 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 1838 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 1839 st_rdc = p_cfgp->start_rdc; 1840 ndmas = p_cfgp->max_rdcs; 1841 1842 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1843 " nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas)); 1844 1845 /* 1846 * Allocate memory for each receive DMA channel. 1847 */ 1848 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 1849 KM_SLEEP); 1850 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 1851 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 1852 1853 dma_cntl_poolp = (p_nxge_dma_pool_t) 1854 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 1855 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 1856 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 1857 1858 num_chunks = (uint32_t *)KMEM_ZALLOC( 1859 sizeof (uint32_t) * ndmas, KM_SLEEP); 1860 1861 /* 1862 * Assume that each DMA channel will be configured with default 1863 * block size. 1864 * rbr block counts are mod of batch count (16). 1865 */ 1866 nxge_port_rbr_size = p_all_cfgp->rbr_size; 1867 nxge_port_rcr_size = p_all_cfgp->rcr_size; 1868 1869 if (!nxge_port_rbr_size) { 1870 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 1871 } 1872 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 1873 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 1874 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 1875 } 1876 1877 p_all_cfgp->rbr_size = nxge_port_rbr_size; 1878 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 1879 1880 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 1881 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 1882 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 1883 } 1884 1885 /* 1886 * N2/NIU has limitation on the descriptor sizes (contiguous 1887 * memory allocation on data buffers to 4M (contig_mem_alloc) 1888 * and little endian for control buffers (must use the ddi/dki mem alloc 1889 * function). 1890 */ 1891 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1892 if (nxgep->niu_type == N2_NIU) { 1893 nxge_port_rbr_spare_size = 0; 1894 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 1895 (!ISP2(nxge_port_rbr_size))) { 1896 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 1897 } 1898 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 1899 (!ISP2(nxge_port_rcr_size))) { 1900 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 1901 } 1902 } 1903 #endif 1904 1905 rx_buf_alloc_size = (nxgep->rx_default_block_size * 1906 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 1907 1908 /* 1909 * Addresses of receive block ring, receive completion ring and the 1910 * mailbox must be all cache-aligned (64 bytes). 1911 */ 1912 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 1913 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 1914 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 1915 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 1916 1917 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 1918 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 1919 "nxge_port_rcr_size = %d " 1920 "rx_cntl_alloc_size = %d", 1921 nxge_port_rbr_size, nxge_port_rbr_spare_size, 1922 nxge_port_rcr_size, 1923 rx_cntl_alloc_size)); 1924 1925 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1926 if (nxgep->niu_type == N2_NIU) { 1927 if (!ISP2(rx_buf_alloc_size)) { 1928 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1929 "==> nxge_alloc_rx_mem_pool: " 1930 " must be power of 2")); 1931 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1932 goto nxge_alloc_rx_mem_pool_exit; 1933 } 1934 1935 if (rx_buf_alloc_size > (1 << 22)) { 1936 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1937 "==> nxge_alloc_rx_mem_pool: " 1938 " limit size to 4M")); 1939 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1940 goto nxge_alloc_rx_mem_pool_exit; 1941 } 1942 1943 if (rx_cntl_alloc_size < 0x2000) { 1944 rx_cntl_alloc_size = 0x2000; 1945 } 1946 } 1947 #endif 1948 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 1949 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 1950 1951 /* 1952 * Allocate memory for receive buffers and descriptor rings. 1953 * Replace allocation functions with interface functions provided 1954 * by the partition manager when it is available. 1955 */ 1956 /* 1957 * Allocate memory for the receive buffer blocks. 1958 */ 1959 for (i = 0; i < ndmas; i++) { 1960 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1961 " nxge_alloc_rx_mem_pool to alloc mem: " 1962 " dma %d dma_buf_p %llx &dma_buf_p %llx", 1963 i, dma_buf_p[i], &dma_buf_p[i])); 1964 num_chunks[i] = 0; 1965 status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i], 1966 rx_buf_alloc_size, 1967 nxgep->rx_default_block_size, &num_chunks[i]); 1968 if (status != NXGE_OK) { 1969 break; 1970 } 1971 st_rdc++; 1972 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1973 " nxge_alloc_rx_mem_pool DONE alloc mem: " 1974 "dma %d dma_buf_p %llx &dma_buf_p %llx", i, 1975 dma_buf_p[i], &dma_buf_p[i])); 1976 } 1977 if (i < ndmas) { 1978 goto nxge_alloc_rx_mem_fail1; 1979 } 1980 /* 1981 * Allocate memory for descriptor rings and mailbox. 1982 */ 1983 st_rdc = p_cfgp->start_rdc; 1984 for (j = 0; j < ndmas; j++) { 1985 status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j], 1986 rx_cntl_alloc_size); 1987 if (status != NXGE_OK) { 1988 break; 1989 } 1990 st_rdc++; 1991 } 1992 if (j < ndmas) { 1993 goto nxge_alloc_rx_mem_fail2; 1994 } 1995 1996 dma_poolp->ndmas = ndmas; 1997 dma_poolp->num_chunks = num_chunks; 1998 dma_poolp->buf_allocated = B_TRUE; 1999 nxgep->rx_buf_pool_p = dma_poolp; 2000 dma_poolp->dma_buf_pool_p = dma_buf_p; 2001 2002 dma_cntl_poolp->ndmas = ndmas; 2003 dma_cntl_poolp->buf_allocated = B_TRUE; 2004 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2005 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2006 2007 goto nxge_alloc_rx_mem_pool_exit; 2008 2009 nxge_alloc_rx_mem_fail2: 2010 /* Free control buffers */ 2011 j--; 2012 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2013 "==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j)); 2014 for (; j >= 0; j--) { 2015 nxge_free_rx_cntl_dma(nxgep, 2016 (p_nxge_dma_common_t)dma_cntl_p[j]); 2017 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2018 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", 2019 j)); 2020 } 2021 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2022 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j)); 2023 2024 nxge_alloc_rx_mem_fail1: 2025 /* Free data buffers */ 2026 i--; 2027 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2028 "==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i)); 2029 for (; i >= 0; i--) { 2030 nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2031 num_chunks[i]); 2032 } 2033 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2034 "==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i)); 2035 2036 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2037 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2038 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2039 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2040 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2041 2042 nxge_alloc_rx_mem_pool_exit: 2043 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2044 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2045 2046 return (status); 2047 } 2048 2049 static void 2050 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2051 { 2052 uint32_t i, ndmas; 2053 p_nxge_dma_pool_t dma_poolp; 2054 p_nxge_dma_common_t *dma_buf_p; 2055 p_nxge_dma_pool_t dma_cntl_poolp; 2056 p_nxge_dma_common_t *dma_cntl_p; 2057 uint32_t *num_chunks; 2058 2059 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2060 2061 dma_poolp = nxgep->rx_buf_pool_p; 2062 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2063 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2064 "<== nxge_free_rx_mem_pool " 2065 "(null rx buf pool or buf not allocated")); 2066 return; 2067 } 2068 2069 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2070 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2071 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2072 "<== nxge_free_rx_mem_pool " 2073 "(null rx cntl buf pool or cntl buf not allocated")); 2074 return; 2075 } 2076 2077 dma_buf_p = dma_poolp->dma_buf_pool_p; 2078 num_chunks = dma_poolp->num_chunks; 2079 2080 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2081 ndmas = dma_cntl_poolp->ndmas; 2082 2083 for (i = 0; i < ndmas; i++) { 2084 nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2085 } 2086 2087 for (i = 0; i < ndmas; i++) { 2088 nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]); 2089 } 2090 2091 for (i = 0; i < ndmas; i++) { 2092 KMEM_FREE(dma_buf_p[i], 2093 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2094 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2095 } 2096 2097 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2098 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2099 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2100 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2101 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2102 2103 nxgep->rx_buf_pool_p = NULL; 2104 nxgep->rx_cntl_pool_p = NULL; 2105 2106 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2107 } 2108 2109 2110 static nxge_status_t 2111 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2112 p_nxge_dma_common_t *dmap, 2113 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2114 { 2115 p_nxge_dma_common_t rx_dmap; 2116 nxge_status_t status = NXGE_OK; 2117 size_t total_alloc_size; 2118 size_t allocated = 0; 2119 int i, size_index, array_size; 2120 2121 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2122 2123 rx_dmap = (p_nxge_dma_common_t) 2124 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2125 KM_SLEEP); 2126 2127 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2128 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2129 dma_channel, alloc_size, block_size, dmap)); 2130 2131 total_alloc_size = alloc_size; 2132 2133 #if defined(RX_USE_RECLAIM_POST) 2134 total_alloc_size = alloc_size + alloc_size/4; 2135 #endif 2136 2137 i = 0; 2138 size_index = 0; 2139 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2140 while ((alloc_sizes[size_index] < alloc_size) && 2141 (size_index < array_size)) 2142 size_index++; 2143 if (size_index >= array_size) { 2144 size_index = array_size - 1; 2145 } 2146 2147 while ((allocated < total_alloc_size) && 2148 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2149 rx_dmap[i].dma_chunk_index = i; 2150 rx_dmap[i].block_size = block_size; 2151 rx_dmap[i].alength = alloc_sizes[size_index]; 2152 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2153 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2154 rx_dmap[i].dma_channel = dma_channel; 2155 rx_dmap[i].contig_alloc_type = B_FALSE; 2156 2157 /* 2158 * N2/NIU: data buffers must be contiguous as the driver 2159 * needs to call Hypervisor api to set up 2160 * logical pages. 2161 */ 2162 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2163 rx_dmap[i].contig_alloc_type = B_TRUE; 2164 } 2165 2166 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2167 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2168 "i %d nblocks %d alength %d", 2169 dma_channel, i, &rx_dmap[i], block_size, 2170 i, rx_dmap[i].nblocks, 2171 rx_dmap[i].alength)); 2172 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2173 &nxge_rx_dma_attr, 2174 rx_dmap[i].alength, 2175 &nxge_dev_buf_dma_acc_attr, 2176 DDI_DMA_READ | DDI_DMA_STREAMING, 2177 (p_nxge_dma_common_t)(&rx_dmap[i])); 2178 if (status != NXGE_OK) { 2179 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2180 " nxge_alloc_rx_buf_dma: Alloc Failed ")); 2181 size_index--; 2182 } else { 2183 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2184 " alloc_rx_buf_dma allocated rdc %d " 2185 "chunk %d size %x dvma %x bufp %llx ", 2186 dma_channel, i, rx_dmap[i].alength, 2187 rx_dmap[i].ioaddr_pp, &rx_dmap[i])); 2188 i++; 2189 allocated += alloc_sizes[size_index]; 2190 } 2191 } 2192 2193 2194 if (allocated < total_alloc_size) { 2195 goto nxge_alloc_rx_mem_fail1; 2196 } 2197 2198 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2199 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2200 dma_channel, i)); 2201 *num_chunks = i; 2202 *dmap = rx_dmap; 2203 2204 goto nxge_alloc_rx_mem_exit; 2205 2206 nxge_alloc_rx_mem_fail1: 2207 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2208 2209 nxge_alloc_rx_mem_exit: 2210 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2211 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2212 2213 return (status); 2214 } 2215 2216 /*ARGSUSED*/ 2217 static void 2218 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2219 uint32_t num_chunks) 2220 { 2221 int i; 2222 2223 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2224 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2225 2226 for (i = 0; i < num_chunks; i++) { 2227 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2228 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2229 i, dmap)); 2230 nxge_dma_mem_free(dmap++); 2231 } 2232 2233 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2234 } 2235 2236 /*ARGSUSED*/ 2237 static nxge_status_t 2238 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2239 p_nxge_dma_common_t *dmap, size_t size) 2240 { 2241 p_nxge_dma_common_t rx_dmap; 2242 nxge_status_t status = NXGE_OK; 2243 2244 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2245 2246 rx_dmap = (p_nxge_dma_common_t) 2247 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2248 2249 rx_dmap->contig_alloc_type = B_FALSE; 2250 2251 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2252 &nxge_desc_dma_attr, 2253 size, 2254 &nxge_dev_desc_dma_acc_attr, 2255 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2256 rx_dmap); 2257 if (status != NXGE_OK) { 2258 goto nxge_alloc_rx_cntl_dma_fail1; 2259 } 2260 2261 *dmap = rx_dmap; 2262 goto nxge_alloc_rx_cntl_dma_exit; 2263 2264 nxge_alloc_rx_cntl_dma_fail1: 2265 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2266 2267 nxge_alloc_rx_cntl_dma_exit: 2268 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2269 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2270 2271 return (status); 2272 } 2273 2274 /*ARGSUSED*/ 2275 static void 2276 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2277 { 2278 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2279 2280 nxge_dma_mem_free(dmap); 2281 2282 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2283 } 2284 2285 static nxge_status_t 2286 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2287 { 2288 nxge_status_t status = NXGE_OK; 2289 int i, j; 2290 uint32_t ndmas, st_tdc; 2291 p_nxge_dma_pt_cfg_t p_all_cfgp; 2292 p_nxge_hw_pt_cfg_t p_cfgp; 2293 p_nxge_dma_pool_t dma_poolp; 2294 p_nxge_dma_common_t *dma_buf_p; 2295 p_nxge_dma_pool_t dma_cntl_poolp; 2296 p_nxge_dma_common_t *dma_cntl_p; 2297 size_t tx_buf_alloc_size; 2298 size_t tx_cntl_alloc_size; 2299 uint32_t *num_chunks; /* per dma */ 2300 uint32_t bcopy_thresh; 2301 2302 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2303 2304 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2305 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2306 st_tdc = p_cfgp->start_tdc; 2307 ndmas = p_cfgp->max_tdcs; 2308 2309 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: " 2310 "p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d", 2311 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs)); 2312 /* 2313 * Allocate memory for each transmit DMA channel. 2314 */ 2315 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2316 KM_SLEEP); 2317 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2318 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2319 2320 dma_cntl_poolp = (p_nxge_dma_pool_t) 2321 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2322 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2323 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2324 2325 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2326 /* 2327 * N2/NIU has limitation on the descriptor sizes (contiguous 2328 * memory allocation on data buffers to 4M (contig_mem_alloc) 2329 * and little endian for control buffers (must use the ddi/dki mem alloc 2330 * function). The transmit ring is limited to 8K (includes the 2331 * mailbox). 2332 */ 2333 if (nxgep->niu_type == N2_NIU) { 2334 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2335 (!ISP2(nxge_tx_ring_size))) { 2336 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2337 } 2338 } 2339 #endif 2340 2341 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2342 2343 /* 2344 * Assume that each DMA channel will be configured with default 2345 * transmit bufer size for copying transmit data. 2346 * (For packet payload over this limit, packets will not be 2347 * copied.) 2348 */ 2349 if (nxgep->niu_type == N2_NIU) { 2350 bcopy_thresh = TX_BCOPY_SIZE; 2351 } else { 2352 bcopy_thresh = nxge_bcopy_thresh; 2353 } 2354 tx_buf_alloc_size = (bcopy_thresh * nxge_tx_ring_size); 2355 2356 /* 2357 * Addresses of transmit descriptor ring and the 2358 * mailbox must be all cache-aligned (64 bytes). 2359 */ 2360 tx_cntl_alloc_size = nxge_tx_ring_size; 2361 tx_cntl_alloc_size *= (sizeof (tx_desc_t)); 2362 tx_cntl_alloc_size += sizeof (txdma_mailbox_t); 2363 2364 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2365 if (nxgep->niu_type == N2_NIU) { 2366 if (!ISP2(tx_buf_alloc_size)) { 2367 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2368 "==> nxge_alloc_tx_mem_pool: " 2369 " must be power of 2")); 2370 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2371 goto nxge_alloc_tx_mem_pool_exit; 2372 } 2373 2374 if (tx_buf_alloc_size > (1 << 22)) { 2375 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2376 "==> nxge_alloc_tx_mem_pool: " 2377 " limit size to 4M")); 2378 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2379 goto nxge_alloc_tx_mem_pool_exit; 2380 } 2381 2382 if (tx_cntl_alloc_size < 0x2000) { 2383 tx_cntl_alloc_size = 0x2000; 2384 } 2385 } 2386 #endif 2387 2388 num_chunks = (uint32_t *)KMEM_ZALLOC( 2389 sizeof (uint32_t) * ndmas, KM_SLEEP); 2390 2391 /* 2392 * Allocate memory for transmit buffers and descriptor rings. 2393 * Replace allocation functions with interface functions provided 2394 * by the partition manager when it is available. 2395 * 2396 * Allocate memory for the transmit buffer pool. 2397 */ 2398 for (i = 0; i < ndmas; i++) { 2399 num_chunks[i] = 0; 2400 status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i], 2401 tx_buf_alloc_size, 2402 bcopy_thresh, &num_chunks[i]); 2403 if (status != NXGE_OK) { 2404 break; 2405 } 2406 st_tdc++; 2407 } 2408 if (i < ndmas) { 2409 goto nxge_alloc_tx_mem_pool_fail1; 2410 } 2411 2412 st_tdc = p_cfgp->start_tdc; 2413 /* 2414 * Allocate memory for descriptor rings and mailbox. 2415 */ 2416 for (j = 0; j < ndmas; j++) { 2417 status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j], 2418 tx_cntl_alloc_size); 2419 if (status != NXGE_OK) { 2420 break; 2421 } 2422 st_tdc++; 2423 } 2424 if (j < ndmas) { 2425 goto nxge_alloc_tx_mem_pool_fail2; 2426 } 2427 2428 dma_poolp->ndmas = ndmas; 2429 dma_poolp->num_chunks = num_chunks; 2430 dma_poolp->buf_allocated = B_TRUE; 2431 dma_poolp->dma_buf_pool_p = dma_buf_p; 2432 nxgep->tx_buf_pool_p = dma_poolp; 2433 2434 dma_cntl_poolp->ndmas = ndmas; 2435 dma_cntl_poolp->buf_allocated = B_TRUE; 2436 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2437 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2438 2439 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2440 "==> nxge_alloc_tx_mem_pool: start_tdc %d " 2441 "ndmas %d poolp->ndmas %d", 2442 st_tdc, ndmas, dma_poolp->ndmas)); 2443 2444 goto nxge_alloc_tx_mem_pool_exit; 2445 2446 nxge_alloc_tx_mem_pool_fail2: 2447 /* Free control buffers */ 2448 j--; 2449 for (; j >= 0; j--) { 2450 nxge_free_tx_cntl_dma(nxgep, 2451 (p_nxge_dma_common_t)dma_cntl_p[j]); 2452 } 2453 2454 nxge_alloc_tx_mem_pool_fail1: 2455 /* Free data buffers */ 2456 i--; 2457 for (; i >= 0; i--) { 2458 nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2459 num_chunks[i]); 2460 } 2461 2462 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2463 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2464 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2465 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2466 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2467 2468 nxge_alloc_tx_mem_pool_exit: 2469 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2470 "<== nxge_alloc_tx_mem_pool:status 0x%08x", status)); 2471 2472 return (status); 2473 } 2474 2475 static nxge_status_t 2476 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2477 p_nxge_dma_common_t *dmap, size_t alloc_size, 2478 size_t block_size, uint32_t *num_chunks) 2479 { 2480 p_nxge_dma_common_t tx_dmap; 2481 nxge_status_t status = NXGE_OK; 2482 size_t total_alloc_size; 2483 size_t allocated = 0; 2484 int i, size_index, array_size; 2485 2486 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 2487 2488 tx_dmap = (p_nxge_dma_common_t) 2489 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2490 KM_SLEEP); 2491 2492 total_alloc_size = alloc_size; 2493 i = 0; 2494 size_index = 0; 2495 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2496 while ((alloc_sizes[size_index] < alloc_size) && 2497 (size_index < array_size)) 2498 size_index++; 2499 if (size_index >= array_size) { 2500 size_index = array_size - 1; 2501 } 2502 2503 while ((allocated < total_alloc_size) && 2504 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2505 2506 tx_dmap[i].dma_chunk_index = i; 2507 tx_dmap[i].block_size = block_size; 2508 tx_dmap[i].alength = alloc_sizes[size_index]; 2509 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2510 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2511 tx_dmap[i].dma_channel = dma_channel; 2512 tx_dmap[i].contig_alloc_type = B_FALSE; 2513 2514 /* 2515 * N2/NIU: data buffers must be contiguous as the driver 2516 * needs to call Hypervisor api to set up 2517 * logical pages. 2518 */ 2519 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2520 tx_dmap[i].contig_alloc_type = B_TRUE; 2521 } 2522 2523 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2524 &nxge_tx_dma_attr, 2525 tx_dmap[i].alength, 2526 &nxge_dev_buf_dma_acc_attr, 2527 DDI_DMA_WRITE | DDI_DMA_STREAMING, 2528 (p_nxge_dma_common_t)(&tx_dmap[i])); 2529 if (status != NXGE_OK) { 2530 size_index--; 2531 } else { 2532 i++; 2533 allocated += alloc_sizes[size_index]; 2534 } 2535 } 2536 2537 if (allocated < total_alloc_size) { 2538 goto nxge_alloc_tx_mem_fail1; 2539 } 2540 2541 *num_chunks = i; 2542 *dmap = tx_dmap; 2543 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2544 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2545 *dmap, i)); 2546 goto nxge_alloc_tx_mem_exit; 2547 2548 nxge_alloc_tx_mem_fail1: 2549 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2550 2551 nxge_alloc_tx_mem_exit: 2552 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2553 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 2554 2555 return (status); 2556 } 2557 2558 /*ARGSUSED*/ 2559 static void 2560 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2561 uint32_t num_chunks) 2562 { 2563 int i; 2564 2565 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 2566 2567 for (i = 0; i < num_chunks; i++) { 2568 nxge_dma_mem_free(dmap++); 2569 } 2570 2571 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 2572 } 2573 2574 /*ARGSUSED*/ 2575 static nxge_status_t 2576 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2577 p_nxge_dma_common_t *dmap, size_t size) 2578 { 2579 p_nxge_dma_common_t tx_dmap; 2580 nxge_status_t status = NXGE_OK; 2581 2582 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 2583 tx_dmap = (p_nxge_dma_common_t) 2584 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2585 2586 tx_dmap->contig_alloc_type = B_FALSE; 2587 2588 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2589 &nxge_desc_dma_attr, 2590 size, 2591 &nxge_dev_desc_dma_acc_attr, 2592 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2593 tx_dmap); 2594 if (status != NXGE_OK) { 2595 goto nxge_alloc_tx_cntl_dma_fail1; 2596 } 2597 2598 *dmap = tx_dmap; 2599 goto nxge_alloc_tx_cntl_dma_exit; 2600 2601 nxge_alloc_tx_cntl_dma_fail1: 2602 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 2603 2604 nxge_alloc_tx_cntl_dma_exit: 2605 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2606 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 2607 2608 return (status); 2609 } 2610 2611 /*ARGSUSED*/ 2612 static void 2613 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2614 { 2615 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 2616 2617 nxge_dma_mem_free(dmap); 2618 2619 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 2620 } 2621 2622 static void 2623 nxge_free_tx_mem_pool(p_nxge_t nxgep) 2624 { 2625 uint32_t i, ndmas; 2626 p_nxge_dma_pool_t dma_poolp; 2627 p_nxge_dma_common_t *dma_buf_p; 2628 p_nxge_dma_pool_t dma_cntl_poolp; 2629 p_nxge_dma_common_t *dma_cntl_p; 2630 uint32_t *num_chunks; 2631 2632 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool")); 2633 2634 dma_poolp = nxgep->tx_buf_pool_p; 2635 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2636 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2637 "<== nxge_free_tx_mem_pool " 2638 "(null rx buf pool or buf not allocated")); 2639 return; 2640 } 2641 2642 dma_cntl_poolp = nxgep->tx_cntl_pool_p; 2643 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2644 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2645 "<== nxge_free_tx_mem_pool " 2646 "(null tx cntl buf pool or cntl buf not allocated")); 2647 return; 2648 } 2649 2650 dma_buf_p = dma_poolp->dma_buf_pool_p; 2651 num_chunks = dma_poolp->num_chunks; 2652 2653 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2654 ndmas = dma_cntl_poolp->ndmas; 2655 2656 for (i = 0; i < ndmas; i++) { 2657 nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2658 } 2659 2660 for (i = 0; i < ndmas; i++) { 2661 nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]); 2662 } 2663 2664 for (i = 0; i < ndmas; i++) { 2665 KMEM_FREE(dma_buf_p[i], 2666 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2667 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2668 } 2669 2670 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2671 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2672 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2673 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2674 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2675 2676 nxgep->tx_buf_pool_p = NULL; 2677 nxgep->tx_cntl_pool_p = NULL; 2678 2679 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool")); 2680 } 2681 2682 /*ARGSUSED*/ 2683 static nxge_status_t 2684 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 2685 struct ddi_dma_attr *dma_attrp, 2686 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 2687 p_nxge_dma_common_t dma_p) 2688 { 2689 caddr_t kaddrp; 2690 int ddi_status = DDI_SUCCESS; 2691 boolean_t contig_alloc_type; 2692 2693 contig_alloc_type = dma_p->contig_alloc_type; 2694 2695 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 2696 /* 2697 * contig_alloc_type for contiguous memory only allowed 2698 * for N2/NIU. 2699 */ 2700 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2701 "nxge_dma_mem_alloc: alloc type not allows (%d)", 2702 dma_p->contig_alloc_type)); 2703 return (NXGE_ERROR | NXGE_DDI_FAILED); 2704 } 2705 2706 dma_p->dma_handle = NULL; 2707 dma_p->acc_handle = NULL; 2708 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 2709 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 2710 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 2711 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 2712 if (ddi_status != DDI_SUCCESS) { 2713 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2714 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 2715 return (NXGE_ERROR | NXGE_DDI_FAILED); 2716 } 2717 2718 switch (contig_alloc_type) { 2719 case B_FALSE: 2720 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, 2721 acc_attr_p, 2722 xfer_flags, 2723 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 2724 &dma_p->acc_handle); 2725 if (ddi_status != DDI_SUCCESS) { 2726 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2727 "nxge_dma_mem_alloc:ddi_dma_mem_alloc failed")); 2728 ddi_dma_free_handle(&dma_p->dma_handle); 2729 dma_p->dma_handle = NULL; 2730 return (NXGE_ERROR | NXGE_DDI_FAILED); 2731 } 2732 if (dma_p->alength < length) { 2733 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2734 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 2735 "< length.")); 2736 ddi_dma_mem_free(&dma_p->acc_handle); 2737 ddi_dma_free_handle(&dma_p->dma_handle); 2738 dma_p->acc_handle = NULL; 2739 dma_p->dma_handle = NULL; 2740 return (NXGE_ERROR); 2741 } 2742 2743 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2744 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2745 &dma_p->dma_cookie, &dma_p->ncookies); 2746 if (ddi_status != DDI_DMA_MAPPED) { 2747 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2748 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 2749 "(staus 0x%x ncookies %d.)", ddi_status, 2750 dma_p->ncookies)); 2751 if (dma_p->acc_handle) { 2752 ddi_dma_mem_free(&dma_p->acc_handle); 2753 dma_p->acc_handle = NULL; 2754 } 2755 ddi_dma_free_handle(&dma_p->dma_handle); 2756 dma_p->dma_handle = NULL; 2757 return (NXGE_ERROR | NXGE_DDI_FAILED); 2758 } 2759 2760 if (dma_p->ncookies != 1) { 2761 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2762 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 2763 "> 1 cookie" 2764 "(staus 0x%x ncookies %d.)", ddi_status, 2765 dma_p->ncookies)); 2766 if (dma_p->acc_handle) { 2767 ddi_dma_mem_free(&dma_p->acc_handle); 2768 dma_p->acc_handle = NULL; 2769 } 2770 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2771 ddi_dma_free_handle(&dma_p->dma_handle); 2772 dma_p->dma_handle = NULL; 2773 return (NXGE_ERROR); 2774 } 2775 break; 2776 2777 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2778 case B_TRUE: 2779 kaddrp = (caddr_t)contig_mem_alloc(length); 2780 if (kaddrp == NULL) { 2781 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2782 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 2783 ddi_dma_free_handle(&dma_p->dma_handle); 2784 return (NXGE_ERROR | NXGE_DDI_FAILED); 2785 } 2786 2787 dma_p->alength = length; 2788 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2789 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2790 &dma_p->dma_cookie, &dma_p->ncookies); 2791 if (ddi_status != DDI_DMA_MAPPED) { 2792 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2793 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 2794 "(status 0x%x ncookies %d.)", ddi_status, 2795 dma_p->ncookies)); 2796 2797 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2798 "==> nxge_dma_mem_alloc: (not mapped)" 2799 "length %lu (0x%x) " 2800 "free contig kaddrp $%p " 2801 "va_to_pa $%p", 2802 length, length, 2803 kaddrp, 2804 va_to_pa(kaddrp))); 2805 2806 2807 contig_mem_free((void *)kaddrp, length); 2808 ddi_dma_free_handle(&dma_p->dma_handle); 2809 2810 dma_p->dma_handle = NULL; 2811 dma_p->acc_handle = NULL; 2812 dma_p->alength = NULL; 2813 dma_p->kaddrp = NULL; 2814 2815 return (NXGE_ERROR | NXGE_DDI_FAILED); 2816 } 2817 2818 if (dma_p->ncookies != 1 || 2819 (dma_p->dma_cookie.dmac_laddress == NULL)) { 2820 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2821 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 2822 "cookie or " 2823 "dmac_laddress is NULL $%p size %d " 2824 " (status 0x%x ncookies %d.)", 2825 ddi_status, 2826 dma_p->dma_cookie.dmac_laddress, 2827 dma_p->dma_cookie.dmac_size, 2828 dma_p->ncookies)); 2829 2830 contig_mem_free((void *)kaddrp, length); 2831 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2832 ddi_dma_free_handle(&dma_p->dma_handle); 2833 2834 dma_p->alength = 0; 2835 dma_p->dma_handle = NULL; 2836 dma_p->acc_handle = NULL; 2837 dma_p->kaddrp = NULL; 2838 2839 return (NXGE_ERROR | NXGE_DDI_FAILED); 2840 } 2841 break; 2842 2843 #else 2844 case B_TRUE: 2845 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2846 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 2847 return (NXGE_ERROR | NXGE_DDI_FAILED); 2848 #endif 2849 } 2850 2851 dma_p->kaddrp = kaddrp; 2852 dma_p->last_kaddrp = (unsigned char *)kaddrp + 2853 dma_p->alength - RXBUF_64B_ALIGNED; 2854 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2855 dma_p->last_ioaddr_pp = 2856 (unsigned char *)dma_p->dma_cookie.dmac_laddress + 2857 dma_p->alength - RXBUF_64B_ALIGNED; 2858 2859 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 2860 2861 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2862 dma_p->orig_ioaddr_pp = 2863 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2864 dma_p->orig_alength = length; 2865 dma_p->orig_kaddrp = kaddrp; 2866 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 2867 #endif 2868 2869 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 2870 "dma buffer allocated: dma_p $%p " 2871 "return dmac_ladress from cookie $%p cookie dmac_size %d " 2872 "dma_p->ioaddr_p $%p " 2873 "dma_p->orig_ioaddr_p $%p " 2874 "orig_vatopa $%p " 2875 "alength %d (0x%x) " 2876 "kaddrp $%p " 2877 "length %d (0x%x)", 2878 dma_p, 2879 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 2880 dma_p->ioaddr_pp, 2881 dma_p->orig_ioaddr_pp, 2882 dma_p->orig_vatopa, 2883 dma_p->alength, dma_p->alength, 2884 kaddrp, 2885 length, length)); 2886 2887 return (NXGE_OK); 2888 } 2889 2890 static void 2891 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 2892 { 2893 if (dma_p->dma_handle != NULL) { 2894 if (dma_p->ncookies) { 2895 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2896 dma_p->ncookies = 0; 2897 } 2898 ddi_dma_free_handle(&dma_p->dma_handle); 2899 dma_p->dma_handle = NULL; 2900 } 2901 2902 if (dma_p->acc_handle != NULL) { 2903 ddi_dma_mem_free(&dma_p->acc_handle); 2904 dma_p->acc_handle = NULL; 2905 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 2906 } 2907 2908 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2909 if (dma_p->contig_alloc_type && 2910 dma_p->orig_kaddrp && dma_p->orig_alength) { 2911 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 2912 "kaddrp $%p (orig_kaddrp $%p)" 2913 "mem type %d ", 2914 "orig_alength %d " 2915 "alength 0x%x (%d)", 2916 dma_p->kaddrp, 2917 dma_p->orig_kaddrp, 2918 dma_p->contig_alloc_type, 2919 dma_p->orig_alength, 2920 dma_p->alength, dma_p->alength)); 2921 2922 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 2923 dma_p->orig_alength = NULL; 2924 dma_p->orig_kaddrp = NULL; 2925 dma_p->contig_alloc_type = B_FALSE; 2926 } 2927 #endif 2928 dma_p->kaddrp = NULL; 2929 dma_p->alength = NULL; 2930 } 2931 2932 /* 2933 * nxge_m_start() -- start transmitting and receiving. 2934 * 2935 * This function is called by the MAC layer when the first 2936 * stream is open to prepare the hardware ready for sending 2937 * and transmitting packets. 2938 */ 2939 static int 2940 nxge_m_start(void *arg) 2941 { 2942 p_nxge_t nxgep = (p_nxge_t)arg; 2943 2944 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 2945 2946 MUTEX_ENTER(nxgep->genlock); 2947 if (nxge_init(nxgep) != NXGE_OK) { 2948 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2949 "<== nxge_m_start: initialization failed")); 2950 MUTEX_EXIT(nxgep->genlock); 2951 return (EIO); 2952 } 2953 2954 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) 2955 goto nxge_m_start_exit; 2956 /* 2957 * Start timer to check the system error and tx hangs 2958 */ 2959 nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state, 2960 NXGE_CHECK_TIMER); 2961 2962 nxgep->link_notify = B_TRUE; 2963 2964 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 2965 2966 nxge_m_start_exit: 2967 MUTEX_EXIT(nxgep->genlock); 2968 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 2969 return (0); 2970 } 2971 2972 /* 2973 * nxge_m_stop(): stop transmitting and receiving. 2974 */ 2975 static void 2976 nxge_m_stop(void *arg) 2977 { 2978 p_nxge_t nxgep = (p_nxge_t)arg; 2979 2980 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 2981 2982 if (nxgep->nxge_timerid) { 2983 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 2984 nxgep->nxge_timerid = 0; 2985 } 2986 2987 MUTEX_ENTER(nxgep->genlock); 2988 nxge_uninit(nxgep); 2989 2990 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 2991 2992 MUTEX_EXIT(nxgep->genlock); 2993 2994 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 2995 } 2996 2997 static int 2998 nxge_m_unicst(void *arg, const uint8_t *macaddr) 2999 { 3000 p_nxge_t nxgep = (p_nxge_t)arg; 3001 struct ether_addr addrp; 3002 3003 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst")); 3004 3005 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 3006 if (nxge_set_mac_addr(nxgep, &addrp)) { 3007 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3008 "<== nxge_m_unicst: set unitcast failed")); 3009 return (EINVAL); 3010 } 3011 3012 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst")); 3013 3014 return (0); 3015 } 3016 3017 static int 3018 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3019 { 3020 p_nxge_t nxgep = (p_nxge_t)arg; 3021 struct ether_addr addrp; 3022 3023 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3024 "==> nxge_m_multicst: add %d", add)); 3025 3026 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3027 if (add) { 3028 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3029 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3030 "<== nxge_m_multicst: add multicast failed")); 3031 return (EINVAL); 3032 } 3033 } else { 3034 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3035 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3036 "<== nxge_m_multicst: del multicast failed")); 3037 return (EINVAL); 3038 } 3039 } 3040 3041 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3042 3043 return (0); 3044 } 3045 3046 static int 3047 nxge_m_promisc(void *arg, boolean_t on) 3048 { 3049 p_nxge_t nxgep = (p_nxge_t)arg; 3050 3051 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3052 "==> nxge_m_promisc: on %d", on)); 3053 3054 if (nxge_set_promisc(nxgep, on)) { 3055 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3056 "<== nxge_m_promisc: set promisc failed")); 3057 return (EINVAL); 3058 } 3059 3060 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3061 "<== nxge_m_promisc: on %d", on)); 3062 3063 return (0); 3064 } 3065 3066 static void 3067 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3068 { 3069 p_nxge_t nxgep = (p_nxge_t)arg; 3070 struct iocblk *iocp; 3071 boolean_t need_privilege; 3072 int err; 3073 int cmd; 3074 3075 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3076 3077 iocp = (struct iocblk *)mp->b_rptr; 3078 iocp->ioc_error = 0; 3079 need_privilege = B_TRUE; 3080 cmd = iocp->ioc_cmd; 3081 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3082 switch (cmd) { 3083 default: 3084 miocnak(wq, mp, 0, EINVAL); 3085 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3086 return; 3087 3088 case LB_GET_INFO_SIZE: 3089 case LB_GET_INFO: 3090 case LB_GET_MODE: 3091 need_privilege = B_FALSE; 3092 break; 3093 case LB_SET_MODE: 3094 break; 3095 3096 case ND_GET: 3097 need_privilege = B_FALSE; 3098 break; 3099 case ND_SET: 3100 break; 3101 3102 case NXGE_GET_MII: 3103 case NXGE_PUT_MII: 3104 case NXGE_GET64: 3105 case NXGE_PUT64: 3106 case NXGE_GET_TX_RING_SZ: 3107 case NXGE_GET_TX_DESC: 3108 case NXGE_TX_SIDE_RESET: 3109 case NXGE_RX_SIDE_RESET: 3110 case NXGE_GLOBAL_RESET: 3111 case NXGE_RESET_MAC: 3112 case NXGE_TX_REGS_DUMP: 3113 case NXGE_RX_REGS_DUMP: 3114 case NXGE_INT_REGS_DUMP: 3115 case NXGE_VIR_INT_REGS_DUMP: 3116 case NXGE_PUT_TCAM: 3117 case NXGE_GET_TCAM: 3118 case NXGE_RTRACE: 3119 case NXGE_RDUMP: 3120 3121 need_privilege = B_FALSE; 3122 break; 3123 case NXGE_INJECT_ERR: 3124 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3125 nxge_err_inject(nxgep, wq, mp); 3126 break; 3127 } 3128 3129 if (need_privilege) { 3130 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3131 if (err != 0) { 3132 miocnak(wq, mp, 0, err); 3133 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3134 "<== nxge_m_ioctl: no priv")); 3135 return; 3136 } 3137 } 3138 3139 switch (cmd) { 3140 case ND_GET: 3141 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command")); 3142 case ND_SET: 3143 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command")); 3144 nxge_param_ioctl(nxgep, wq, mp, iocp); 3145 break; 3146 3147 case LB_GET_MODE: 3148 case LB_SET_MODE: 3149 case LB_GET_INFO_SIZE: 3150 case LB_GET_INFO: 3151 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3152 break; 3153 3154 case NXGE_GET_MII: 3155 case NXGE_PUT_MII: 3156 case NXGE_PUT_TCAM: 3157 case NXGE_GET_TCAM: 3158 case NXGE_GET64: 3159 case NXGE_PUT64: 3160 case NXGE_GET_TX_RING_SZ: 3161 case NXGE_GET_TX_DESC: 3162 case NXGE_TX_SIDE_RESET: 3163 case NXGE_RX_SIDE_RESET: 3164 case NXGE_GLOBAL_RESET: 3165 case NXGE_RESET_MAC: 3166 case NXGE_TX_REGS_DUMP: 3167 case NXGE_RX_REGS_DUMP: 3168 case NXGE_INT_REGS_DUMP: 3169 case NXGE_VIR_INT_REGS_DUMP: 3170 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3171 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3172 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3173 break; 3174 } 3175 3176 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3177 } 3178 3179 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3180 3181 static void 3182 nxge_m_resources(void *arg) 3183 { 3184 p_nxge_t nxgep = arg; 3185 mac_rx_fifo_t mrf; 3186 p_rx_rcr_rings_t rcr_rings; 3187 p_rx_rcr_ring_t *rcr_p; 3188 uint32_t i, ndmas; 3189 nxge_status_t status; 3190 3191 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 3192 3193 MUTEX_ENTER(nxgep->genlock); 3194 3195 /* 3196 * CR 6492541 Check to see if the drv_state has been initialized, 3197 * if not * call nxge_init(). 3198 */ 3199 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3200 status = nxge_init(nxgep); 3201 if (status != NXGE_OK) 3202 goto nxge_m_resources_exit; 3203 } 3204 3205 mrf.mrf_type = MAC_RX_FIFO; 3206 mrf.mrf_blank = nxge_rx_hw_blank; 3207 mrf.mrf_arg = (void *)nxgep; 3208 3209 mrf.mrf_normal_blank_time = 128; 3210 mrf.mrf_normal_pkt_count = 8; 3211 rcr_rings = nxgep->rx_rcr_rings; 3212 rcr_p = rcr_rings->rcr_rings; 3213 ndmas = rcr_rings->ndmas; 3214 3215 /* 3216 * Export our receive resources to the MAC layer. 3217 */ 3218 for (i = 0; i < ndmas; i++) { 3219 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle = 3220 mac_resource_add(nxgep->mach, 3221 (mac_resource_t *)&mrf); 3222 3223 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3224 "==> nxge_m_resources: vdma %d dma %d " 3225 "rcrptr 0x%016llx mac_handle 0x%016llx", 3226 i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc, 3227 rcr_p[i], 3228 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle)); 3229 } 3230 3231 nxge_m_resources_exit: 3232 MUTEX_EXIT(nxgep->genlock); 3233 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources")); 3234 } 3235 3236 static void 3237 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory) 3238 { 3239 p_nxge_mmac_stats_t mmac_stats; 3240 int i; 3241 nxge_mmac_t *mmac_info; 3242 3243 mmac_info = &nxgep->nxge_mmac_info; 3244 3245 mmac_stats = &nxgep->statsp->mmac_stats; 3246 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 3247 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 3248 3249 for (i = 0; i < ETHERADDRL; i++) { 3250 if (factory) { 3251 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3252 = mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i]; 3253 } else { 3254 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3255 = mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i]; 3256 } 3257 } 3258 } 3259 3260 /* 3261 * nxge_altmac_set() -- Set an alternate MAC address 3262 */ 3263 static int 3264 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot) 3265 { 3266 uint8_t addrn; 3267 uint8_t portn; 3268 npi_mac_addr_t altmac; 3269 hostinfo_t mac_rdc; 3270 p_nxge_class_pt_cfg_t clscfgp; 3271 3272 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 3273 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 3274 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 3275 3276 portn = nxgep->mac.portnum; 3277 addrn = (uint8_t)slot - 1; 3278 3279 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn, 3280 addrn, &altmac) != NPI_SUCCESS) 3281 return (EIO); 3282 3283 /* 3284 * Set the rdc table number for the host info entry 3285 * for this mac address slot. 3286 */ 3287 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 3288 mac_rdc.value = 0; 3289 mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl; 3290 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 3291 3292 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 3293 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 3294 return (EIO); 3295 } 3296 3297 /* 3298 * Enable comparison with the alternate MAC address. 3299 * While the first alternate addr is enabled by bit 1 of register 3300 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 3301 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 3302 * accordingly before calling npi_mac_altaddr_entry. 3303 */ 3304 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 3305 addrn = (uint8_t)slot - 1; 3306 else 3307 addrn = (uint8_t)slot; 3308 3309 if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn) 3310 != NPI_SUCCESS) 3311 return (EIO); 3312 3313 return (0); 3314 } 3315 3316 /* 3317 * nxeg_m_mmac_add() - find an unused address slot, set the address 3318 * value to the one specified, enable the port to start filtering on 3319 * the new MAC address. Returns 0 on success. 3320 */ 3321 static int 3322 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 3323 { 3324 p_nxge_t nxgep = arg; 3325 mac_addr_slot_t slot; 3326 nxge_mmac_t *mmac_info; 3327 int err; 3328 nxge_status_t status; 3329 3330 mutex_enter(nxgep->genlock); 3331 3332 /* 3333 * Make sure that nxge is initialized, if _start() has 3334 * not been called. 3335 */ 3336 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3337 status = nxge_init(nxgep); 3338 if (status != NXGE_OK) { 3339 mutex_exit(nxgep->genlock); 3340 return (ENXIO); 3341 } 3342 } 3343 3344 mmac_info = &nxgep->nxge_mmac_info; 3345 if (mmac_info->naddrfree == 0) { 3346 mutex_exit(nxgep->genlock); 3347 return (ENOSPC); 3348 } 3349 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3350 maddr->mma_addrlen)) { 3351 mutex_exit(nxgep->genlock); 3352 return (EINVAL); 3353 } 3354 /* 3355 * Search for the first available slot. Because naddrfree 3356 * is not zero, we are guaranteed to find one. 3357 * Slot 0 is for unique (primary) MAC. The first alternate 3358 * MAC slot is slot 1. 3359 * Each of the first two ports of Neptune has 16 alternate 3360 * MAC slots but only the first 7 (or 15) slots have assigned factory 3361 * MAC addresses. We first search among the slots without bundled 3362 * factory MACs. If we fail to find one in that range, then we 3363 * search the slots with bundled factory MACs. A factory MAC 3364 * will be wasted while the slot is used with a user MAC address. 3365 * But the slot could be used by factory MAC again after calling 3366 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 3367 */ 3368 if (mmac_info->num_factory_mmac < mmac_info->num_mmac) { 3369 for (slot = mmac_info->num_factory_mmac + 1; 3370 slot <= mmac_info->num_mmac; slot++) { 3371 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3372 break; 3373 } 3374 if (slot > mmac_info->num_mmac) { 3375 for (slot = 1; slot <= mmac_info->num_factory_mmac; 3376 slot++) { 3377 if (!(mmac_info->mac_pool[slot].flags 3378 & MMAC_SLOT_USED)) 3379 break; 3380 } 3381 } 3382 } else { 3383 for (slot = 1; slot <= mmac_info->num_mmac; slot++) { 3384 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3385 break; 3386 } 3387 } 3388 ASSERT(slot <= mmac_info->num_mmac); 3389 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) { 3390 mutex_exit(nxgep->genlock); 3391 return (err); 3392 } 3393 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 3394 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 3395 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 3396 mmac_info->naddrfree--; 3397 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3398 3399 maddr->mma_slot = slot; 3400 3401 mutex_exit(nxgep->genlock); 3402 return (0); 3403 } 3404 3405 /* 3406 * This function reserves an unused slot and programs the slot and the HW 3407 * with a factory mac address. 3408 */ 3409 static int 3410 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 3411 { 3412 p_nxge_t nxgep = arg; 3413 mac_addr_slot_t slot; 3414 nxge_mmac_t *mmac_info; 3415 int err; 3416 nxge_status_t status; 3417 3418 mutex_enter(nxgep->genlock); 3419 3420 /* 3421 * Make sure that nxge is initialized, if _start() has 3422 * not been called. 3423 */ 3424 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3425 status = nxge_init(nxgep); 3426 if (status != NXGE_OK) { 3427 mutex_exit(nxgep->genlock); 3428 return (ENXIO); 3429 } 3430 } 3431 3432 mmac_info = &nxgep->nxge_mmac_info; 3433 if (mmac_info->naddrfree == 0) { 3434 mutex_exit(nxgep->genlock); 3435 return (ENOSPC); 3436 } 3437 3438 slot = maddr->mma_slot; 3439 if (slot == -1) { /* -1: Take the first available slot */ 3440 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 3441 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3442 break; 3443 } 3444 if (slot > mmac_info->num_factory_mmac) { 3445 mutex_exit(nxgep->genlock); 3446 return (ENOSPC); 3447 } 3448 } 3449 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 3450 /* 3451 * Do not support factory MAC at a slot greater than 3452 * num_factory_mmac even when there are available factory 3453 * MAC addresses because the alternate MACs are bundled with 3454 * slot[1] through slot[num_factory_mmac] 3455 */ 3456 mutex_exit(nxgep->genlock); 3457 return (EINVAL); 3458 } 3459 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3460 mutex_exit(nxgep->genlock); 3461 return (EBUSY); 3462 } 3463 /* Verify the address to be reserved */ 3464 if (!mac_unicst_verify(nxgep->mach, 3465 mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 3466 mutex_exit(nxgep->genlock); 3467 return (EINVAL); 3468 } 3469 if (err = nxge_altmac_set(nxgep, 3470 mmac_info->factory_mac_pool[slot], slot)) { 3471 mutex_exit(nxgep->genlock); 3472 return (err); 3473 } 3474 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 3475 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3476 mmac_info->naddrfree--; 3477 3478 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 3479 mutex_exit(nxgep->genlock); 3480 3481 /* Pass info back to the caller */ 3482 maddr->mma_slot = slot; 3483 maddr->mma_addrlen = ETHERADDRL; 3484 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3485 3486 return (0); 3487 } 3488 3489 /* 3490 * Remove the specified mac address and update the HW not to filter 3491 * the mac address anymore. 3492 */ 3493 static int 3494 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 3495 { 3496 p_nxge_t nxgep = arg; 3497 nxge_mmac_t *mmac_info; 3498 uint8_t addrn; 3499 uint8_t portn; 3500 int err = 0; 3501 nxge_status_t status; 3502 3503 mutex_enter(nxgep->genlock); 3504 3505 /* 3506 * Make sure that nxge is initialized, if _start() has 3507 * not been called. 3508 */ 3509 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3510 status = nxge_init(nxgep); 3511 if (status != NXGE_OK) { 3512 mutex_exit(nxgep->genlock); 3513 return (ENXIO); 3514 } 3515 } 3516 3517 mmac_info = &nxgep->nxge_mmac_info; 3518 if (slot < 1 || slot > mmac_info->num_mmac) { 3519 mutex_exit(nxgep->genlock); 3520 return (EINVAL); 3521 } 3522 3523 portn = nxgep->mac.portnum; 3524 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 3525 addrn = (uint8_t)slot - 1; 3526 else 3527 addrn = (uint8_t)slot; 3528 3529 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3530 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 3531 == NPI_SUCCESS) { 3532 mmac_info->naddrfree++; 3533 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 3534 /* 3535 * Regardless if the MAC we just stopped filtering 3536 * is a user addr or a facory addr, we must set 3537 * the MMAC_VENDOR_ADDR flag if this slot has an 3538 * associated factory MAC to indicate that a factory 3539 * MAC is available. 3540 */ 3541 if (slot <= mmac_info->num_factory_mmac) { 3542 mmac_info->mac_pool[slot].flags 3543 |= MMAC_VENDOR_ADDR; 3544 } 3545 /* 3546 * Clear mac_pool[slot].addr so that kstat shows 0 3547 * alternate MAC address if the slot is not used. 3548 * (But nxge_m_mmac_get returns the factory MAC even 3549 * when the slot is not used!) 3550 */ 3551 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 3552 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3553 } else { 3554 err = EIO; 3555 } 3556 } else { 3557 err = EINVAL; 3558 } 3559 3560 mutex_exit(nxgep->genlock); 3561 return (err); 3562 } 3563 3564 3565 /* 3566 * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve(). 3567 */ 3568 static int 3569 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr) 3570 { 3571 p_nxge_t nxgep = arg; 3572 mac_addr_slot_t slot; 3573 nxge_mmac_t *mmac_info; 3574 int err = 0; 3575 nxge_status_t status; 3576 3577 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3578 maddr->mma_addrlen)) 3579 return (EINVAL); 3580 3581 slot = maddr->mma_slot; 3582 3583 mutex_enter(nxgep->genlock); 3584 3585 /* 3586 * Make sure that nxge is initialized, if _start() has 3587 * not been called. 3588 */ 3589 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3590 status = nxge_init(nxgep); 3591 if (status != NXGE_OK) { 3592 mutex_exit(nxgep->genlock); 3593 return (ENXIO); 3594 } 3595 } 3596 3597 mmac_info = &nxgep->nxge_mmac_info; 3598 if (slot < 1 || slot > mmac_info->num_mmac) { 3599 mutex_exit(nxgep->genlock); 3600 return (EINVAL); 3601 } 3602 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3603 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) 3604 != 0) { 3605 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, 3606 ETHERADDRL); 3607 /* 3608 * Assume that the MAC passed down from the caller 3609 * is not a factory MAC address (The user should 3610 * call mmac_remove followed by mmac_reserve if 3611 * he wants to use the factory MAC for this slot). 3612 */ 3613 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 3614 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3615 } 3616 } else { 3617 err = EINVAL; 3618 } 3619 mutex_exit(nxgep->genlock); 3620 return (err); 3621 } 3622 3623 /* 3624 * nxge_m_mmac_get() - Get the MAC address and other information 3625 * related to the slot. mma_flags should be set to 0 in the call. 3626 * Note: although kstat shows MAC address as zero when a slot is 3627 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC 3628 * to the caller as long as the slot is not using a user MAC address. 3629 * The following table shows the rules, 3630 * 3631 * USED VENDOR mma_addr 3632 * ------------------------------------------------------------ 3633 * (1) Slot uses a user MAC: yes no user MAC 3634 * (2) Slot uses a factory MAC: yes yes factory MAC 3635 * (3) Slot is not used but is 3636 * factory MAC capable: no yes factory MAC 3637 * (4) Slot is not used and is 3638 * not factory MAC capable: no no 0 3639 * ------------------------------------------------------------ 3640 */ 3641 static int 3642 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr) 3643 { 3644 nxge_t *nxgep = arg; 3645 mac_addr_slot_t slot; 3646 nxge_mmac_t *mmac_info; 3647 nxge_status_t status; 3648 3649 slot = maddr->mma_slot; 3650 3651 mutex_enter(nxgep->genlock); 3652 3653 /* 3654 * Make sure that nxge is initialized, if _start() has 3655 * not been called. 3656 */ 3657 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3658 status = nxge_init(nxgep); 3659 if (status != NXGE_OK) { 3660 mutex_exit(nxgep->genlock); 3661 return (ENXIO); 3662 } 3663 } 3664 3665 mmac_info = &nxgep->nxge_mmac_info; 3666 3667 if (slot < 1 || slot > mmac_info->num_mmac) { 3668 mutex_exit(nxgep->genlock); 3669 return (EINVAL); 3670 } 3671 maddr->mma_flags = 0; 3672 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) 3673 maddr->mma_flags |= MMAC_SLOT_USED; 3674 3675 if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) { 3676 maddr->mma_flags |= MMAC_VENDOR_ADDR; 3677 bcopy(mmac_info->factory_mac_pool[slot], 3678 maddr->mma_addr, ETHERADDRL); 3679 maddr->mma_addrlen = ETHERADDRL; 3680 } else { 3681 if (maddr->mma_flags & MMAC_SLOT_USED) { 3682 bcopy(mmac_info->mac_pool[slot].addr, 3683 maddr->mma_addr, ETHERADDRL); 3684 maddr->mma_addrlen = ETHERADDRL; 3685 } else { 3686 bzero(maddr->mma_addr, ETHERADDRL); 3687 maddr->mma_addrlen = 0; 3688 } 3689 } 3690 mutex_exit(nxgep->genlock); 3691 return (0); 3692 } 3693 3694 3695 static boolean_t 3696 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3697 { 3698 nxge_t *nxgep = arg; 3699 uint32_t *txflags = cap_data; 3700 multiaddress_capab_t *mmacp = cap_data; 3701 3702 switch (cap) { 3703 case MAC_CAPAB_HCKSUM: 3704 *txflags = HCKSUM_INET_PARTIAL; 3705 break; 3706 case MAC_CAPAB_POLL: 3707 /* 3708 * There's nothing for us to fill in, simply returning 3709 * B_TRUE stating that we support polling is sufficient. 3710 */ 3711 break; 3712 3713 case MAC_CAPAB_MULTIADDRESS: 3714 mutex_enter(nxgep->genlock); 3715 3716 mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac; 3717 mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree; 3718 mmacp->maddr_flag = 0; /* 0 is requried by PSARC2006/265 */ 3719 /* 3720 * maddr_handle is driver's private data, passed back to 3721 * entry point functions as arg. 3722 */ 3723 mmacp->maddr_handle = nxgep; 3724 mmacp->maddr_add = nxge_m_mmac_add; 3725 mmacp->maddr_remove = nxge_m_mmac_remove; 3726 mmacp->maddr_modify = nxge_m_mmac_modify; 3727 mmacp->maddr_get = nxge_m_mmac_get; 3728 mmacp->maddr_reserve = nxge_m_mmac_reserve; 3729 3730 mutex_exit(nxgep->genlock); 3731 break; 3732 default: 3733 return (B_FALSE); 3734 } 3735 return (B_TRUE); 3736 } 3737 3738 /* 3739 * Module loading and removing entry points. 3740 */ 3741 3742 static struct cb_ops nxge_cb_ops = { 3743 nodev, /* cb_open */ 3744 nodev, /* cb_close */ 3745 nodev, /* cb_strategy */ 3746 nodev, /* cb_print */ 3747 nodev, /* cb_dump */ 3748 nodev, /* cb_read */ 3749 nodev, /* cb_write */ 3750 nodev, /* cb_ioctl */ 3751 nodev, /* cb_devmap */ 3752 nodev, /* cb_mmap */ 3753 nodev, /* cb_segmap */ 3754 nochpoll, /* cb_chpoll */ 3755 ddi_prop_op, /* cb_prop_op */ 3756 NULL, 3757 D_MP, /* cb_flag */ 3758 CB_REV, /* rev */ 3759 nodev, /* int (*cb_aread)() */ 3760 nodev /* int (*cb_awrite)() */ 3761 }; 3762 3763 static struct dev_ops nxge_dev_ops = { 3764 DEVO_REV, /* devo_rev */ 3765 0, /* devo_refcnt */ 3766 nulldev, 3767 nulldev, /* devo_identify */ 3768 nulldev, /* devo_probe */ 3769 nxge_attach, /* devo_attach */ 3770 nxge_detach, /* devo_detach */ 3771 nodev, /* devo_reset */ 3772 &nxge_cb_ops, /* devo_cb_ops */ 3773 (struct bus_ops *)NULL, /* devo_bus_ops */ 3774 ddi_power /* devo_power */ 3775 }; 3776 3777 extern struct mod_ops mod_driverops; 3778 3779 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet %I%" 3780 3781 /* 3782 * Module linkage information for the kernel. 3783 */ 3784 static struct modldrv nxge_modldrv = { 3785 &mod_driverops, 3786 NXGE_DESC_VER, 3787 &nxge_dev_ops 3788 }; 3789 3790 static struct modlinkage modlinkage = { 3791 MODREV_1, (void *) &nxge_modldrv, NULL 3792 }; 3793 3794 int 3795 _init(void) 3796 { 3797 int status; 3798 3799 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 3800 mac_init_ops(&nxge_dev_ops, "nxge"); 3801 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 3802 if (status != 0) { 3803 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 3804 "failed to init device soft state")); 3805 goto _init_exit; 3806 } 3807 3808 status = mod_install(&modlinkage); 3809 if (status != 0) { 3810 ddi_soft_state_fini(&nxge_list); 3811 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 3812 goto _init_exit; 3813 } 3814 3815 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 3816 3817 _init_exit: 3818 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 3819 3820 return (status); 3821 } 3822 3823 int 3824 _fini(void) 3825 { 3826 int status; 3827 3828 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 3829 3830 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 3831 3832 if (nxge_mblks_pending) 3833 return (EBUSY); 3834 3835 status = mod_remove(&modlinkage); 3836 if (status != DDI_SUCCESS) { 3837 NXGE_DEBUG_MSG((NULL, MOD_CTL, 3838 "Module removal failed 0x%08x", 3839 status)); 3840 goto _fini_exit; 3841 } 3842 3843 mac_fini_ops(&nxge_dev_ops); 3844 3845 ddi_soft_state_fini(&nxge_list); 3846 3847 MUTEX_DESTROY(&nxge_common_lock); 3848 _fini_exit: 3849 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 3850 3851 return (status); 3852 } 3853 3854 int 3855 _info(struct modinfo *modinfop) 3856 { 3857 int status; 3858 3859 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 3860 status = mod_info(&modlinkage, modinfop); 3861 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 3862 3863 return (status); 3864 } 3865 3866 /*ARGSUSED*/ 3867 static nxge_status_t 3868 nxge_add_intrs(p_nxge_t nxgep) 3869 { 3870 3871 int intr_types; 3872 int type = 0; 3873 int ddi_status = DDI_SUCCESS; 3874 nxge_status_t status = NXGE_OK; 3875 3876 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 3877 3878 nxgep->nxge_intr_type.intr_registered = B_FALSE; 3879 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 3880 nxgep->nxge_intr_type.msi_intx_cnt = 0; 3881 nxgep->nxge_intr_type.intr_added = 0; 3882 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 3883 nxgep->nxge_intr_type.intr_type = 0; 3884 3885 if (nxgep->niu_type == N2_NIU) { 3886 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 3887 } else if (nxge_msi_enable) { 3888 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 3889 } 3890 3891 /* Get the supported interrupt types */ 3892 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 3893 != DDI_SUCCESS) { 3894 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 3895 "ddi_intr_get_supported_types failed: status 0x%08x", 3896 ddi_status)); 3897 return (NXGE_ERROR | NXGE_DDI_FAILED); 3898 } 3899 nxgep->nxge_intr_type.intr_types = intr_types; 3900 3901 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3902 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3903 3904 /* 3905 * Solaris MSIX is not supported yet. use MSI for now. 3906 * nxge_msi_enable (1): 3907 * 1 - MSI 2 - MSI-X others - FIXED 3908 */ 3909 switch (nxge_msi_enable) { 3910 default: 3911 type = DDI_INTR_TYPE_FIXED; 3912 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3913 "use fixed (intx emulation) type %08x", 3914 type)); 3915 break; 3916 3917 case 2: 3918 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3919 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3920 if (intr_types & DDI_INTR_TYPE_MSIX) { 3921 type = DDI_INTR_TYPE_MSIX; 3922 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3923 "ddi_intr_get_supported_types: MSIX 0x%08x", 3924 type)); 3925 } else if (intr_types & DDI_INTR_TYPE_MSI) { 3926 type = DDI_INTR_TYPE_MSI; 3927 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3928 "ddi_intr_get_supported_types: MSI 0x%08x", 3929 type)); 3930 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3931 type = DDI_INTR_TYPE_FIXED; 3932 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3933 "ddi_intr_get_supported_types: MSXED0x%08x", 3934 type)); 3935 } 3936 break; 3937 3938 case 1: 3939 if (intr_types & DDI_INTR_TYPE_MSI) { 3940 type = DDI_INTR_TYPE_MSI; 3941 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3942 "ddi_intr_get_supported_types: MSI 0x%08x", 3943 type)); 3944 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 3945 type = DDI_INTR_TYPE_MSIX; 3946 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3947 "ddi_intr_get_supported_types: MSIX 0x%08x", 3948 type)); 3949 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3950 type = DDI_INTR_TYPE_FIXED; 3951 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3952 "ddi_intr_get_supported_types: MSXED0x%08x", 3953 type)); 3954 } 3955 } 3956 3957 nxgep->nxge_intr_type.intr_type = type; 3958 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 3959 type == DDI_INTR_TYPE_FIXED) && 3960 nxgep->nxge_intr_type.niu_msi_enable) { 3961 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 3962 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3963 " nxge_add_intrs: " 3964 " nxge_add_intrs_adv failed: status 0x%08x", 3965 status)); 3966 return (status); 3967 } else { 3968 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3969 "interrupts registered : type %d", type)); 3970 nxgep->nxge_intr_type.intr_registered = B_TRUE; 3971 3972 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 3973 "\nAdded advanced nxge add_intr_adv " 3974 "intr type 0x%x\n", type)); 3975 3976 return (status); 3977 } 3978 } 3979 3980 if (!nxgep->nxge_intr_type.intr_registered) { 3981 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 3982 "failed to register interrupts")); 3983 return (NXGE_ERROR | NXGE_DDI_FAILED); 3984 } 3985 3986 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 3987 return (status); 3988 } 3989 3990 /*ARGSUSED*/ 3991 static nxge_status_t 3992 nxge_add_soft_intrs(p_nxge_t nxgep) 3993 { 3994 3995 int ddi_status = DDI_SUCCESS; 3996 nxge_status_t status = NXGE_OK; 3997 3998 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs")); 3999 4000 nxgep->resched_id = NULL; 4001 nxgep->resched_running = B_FALSE; 4002 ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW, 4003 &nxgep->resched_id, 4004 NULL, NULL, nxge_reschedule, (caddr_t)nxgep); 4005 if (ddi_status != DDI_SUCCESS) { 4006 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: " 4007 "ddi_add_softintrs failed: status 0x%08x", 4008 ddi_status)); 4009 return (NXGE_ERROR | NXGE_DDI_FAILED); 4010 } 4011 4012 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_ddi_add_soft_intrs")); 4013 4014 return (status); 4015 } 4016 4017 static nxge_status_t 4018 nxge_add_intrs_adv(p_nxge_t nxgep) 4019 { 4020 int intr_type; 4021 p_nxge_intr_t intrp; 4022 4023 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 4024 4025 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4026 intr_type = intrp->intr_type; 4027 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 4028 intr_type)); 4029 4030 switch (intr_type) { 4031 case DDI_INTR_TYPE_MSI: /* 0x2 */ 4032 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 4033 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 4034 4035 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 4036 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 4037 4038 default: 4039 return (NXGE_ERROR); 4040 } 4041 } 4042 4043 4044 /*ARGSUSED*/ 4045 static nxge_status_t 4046 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 4047 { 4048 dev_info_t *dip = nxgep->dip; 4049 p_nxge_ldg_t ldgp; 4050 p_nxge_intr_t intrp; 4051 uint_t *inthandler; 4052 void *arg1, *arg2; 4053 int behavior; 4054 int nintrs, navail; 4055 int nactual, nrequired; 4056 int inum = 0; 4057 int x, y; 4058 int ddi_status = DDI_SUCCESS; 4059 nxge_status_t status = NXGE_OK; 4060 4061 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 4062 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4063 intrp->start_inum = 0; 4064 4065 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 4066 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 4067 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4068 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 4069 "nintrs: %d", ddi_status, nintrs)); 4070 return (NXGE_ERROR | NXGE_DDI_FAILED); 4071 } 4072 4073 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 4074 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 4075 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4076 "ddi_intr_get_navail() failed, status: 0x%x%, " 4077 "nintrs: %d", ddi_status, navail)); 4078 return (NXGE_ERROR | NXGE_DDI_FAILED); 4079 } 4080 4081 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4082 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 4083 nintrs, navail)); 4084 4085 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 4086 /* MSI must be power of 2 */ 4087 if ((navail & 16) == 16) { 4088 navail = 16; 4089 } else if ((navail & 8) == 8) { 4090 navail = 8; 4091 } else if ((navail & 4) == 4) { 4092 navail = 4; 4093 } else if ((navail & 2) == 2) { 4094 navail = 2; 4095 } else { 4096 navail = 1; 4097 } 4098 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4099 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 4100 "navail %d", nintrs, navail)); 4101 } 4102 4103 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 4104 DDI_INTR_ALLOC_NORMAL); 4105 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 4106 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 4107 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 4108 navail, &nactual, behavior); 4109 if (ddi_status != DDI_SUCCESS || nactual == 0) { 4110 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4111 " ddi_intr_alloc() failed: %d", 4112 ddi_status)); 4113 kmem_free(intrp->htable, intrp->intr_size); 4114 return (NXGE_ERROR | NXGE_DDI_FAILED); 4115 } 4116 4117 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 4118 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 4119 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4120 " ddi_intr_get_pri() failed: %d", 4121 ddi_status)); 4122 /* Free already allocated interrupts */ 4123 for (y = 0; y < nactual; y++) { 4124 (void) ddi_intr_free(intrp->htable[y]); 4125 } 4126 4127 kmem_free(intrp->htable, intrp->intr_size); 4128 return (NXGE_ERROR | NXGE_DDI_FAILED); 4129 } 4130 4131 nrequired = 0; 4132 switch (nxgep->niu_type) { 4133 case NEPTUNE: 4134 case NEPTUNE_2: 4135 default: 4136 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 4137 break; 4138 4139 case N2_NIU: 4140 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 4141 break; 4142 } 4143 4144 if (status != NXGE_OK) { 4145 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4146 "nxge_add_intrs_adv_typ:nxge_ldgv_init " 4147 "failed: 0x%x", status)); 4148 /* Free already allocated interrupts */ 4149 for (y = 0; y < nactual; y++) { 4150 (void) ddi_intr_free(intrp->htable[y]); 4151 } 4152 4153 kmem_free(intrp->htable, intrp->intr_size); 4154 return (status); 4155 } 4156 4157 ldgp = nxgep->ldgvp->ldgp; 4158 for (x = 0; x < nrequired; x++, ldgp++) { 4159 ldgp->vector = (uint8_t)x; 4160 ldgp->intdata = SID_DATA(ldgp->func, x); 4161 arg1 = ldgp->ldvp; 4162 arg2 = nxgep; 4163 if (ldgp->nldvs == 1) { 4164 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 4165 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4166 "nxge_add_intrs_adv_type: " 4167 "arg1 0x%x arg2 0x%x: " 4168 "1-1 int handler (entry %d intdata 0x%x)\n", 4169 arg1, arg2, 4170 x, ldgp->intdata)); 4171 } else if (ldgp->nldvs > 1) { 4172 inthandler = (uint_t *)ldgp->sys_intr_handler; 4173 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4174 "nxge_add_intrs_adv_type: " 4175 "arg1 0x%x arg2 0x%x: " 4176 "nldevs %d int handler " 4177 "(entry %d intdata 0x%x)\n", 4178 arg1, arg2, 4179 ldgp->nldvs, x, ldgp->intdata)); 4180 } 4181 4182 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4183 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 4184 "htable 0x%llx", x, intrp->htable[x])); 4185 4186 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 4187 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 4188 != DDI_SUCCESS) { 4189 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4190 "==> nxge_add_intrs_adv_type: failed #%d " 4191 "status 0x%x", x, ddi_status)); 4192 for (y = 0; y < intrp->intr_added; y++) { 4193 (void) ddi_intr_remove_handler( 4194 intrp->htable[y]); 4195 } 4196 /* Free already allocated intr */ 4197 for (y = 0; y < nactual; y++) { 4198 (void) ddi_intr_free(intrp->htable[y]); 4199 } 4200 kmem_free(intrp->htable, intrp->intr_size); 4201 4202 (void) nxge_ldgv_uninit(nxgep); 4203 4204 return (NXGE_ERROR | NXGE_DDI_FAILED); 4205 } 4206 intrp->intr_added++; 4207 } 4208 4209 intrp->msi_intx_cnt = nactual; 4210 4211 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 4212 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 4213 navail, nactual, 4214 intrp->msi_intx_cnt, 4215 intrp->intr_added)); 4216 4217 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 4218 4219 (void) nxge_intr_ldgv_init(nxgep); 4220 4221 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type")); 4222 4223 return (status); 4224 } 4225 4226 /*ARGSUSED*/ 4227 static nxge_status_t 4228 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type) 4229 { 4230 dev_info_t *dip = nxgep->dip; 4231 p_nxge_ldg_t ldgp; 4232 p_nxge_intr_t intrp; 4233 uint_t *inthandler; 4234 void *arg1, *arg2; 4235 int behavior; 4236 int nintrs, navail; 4237 int nactual, nrequired; 4238 int inum = 0; 4239 int x, y; 4240 int ddi_status = DDI_SUCCESS; 4241 nxge_status_t status = NXGE_OK; 4242 4243 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix")); 4244 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4245 intrp->start_inum = 0; 4246 4247 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 4248 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 4249 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4250 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 4251 "nintrs: %d", status, nintrs)); 4252 return (NXGE_ERROR | NXGE_DDI_FAILED); 4253 } 4254 4255 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 4256 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 4257 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4258 "ddi_intr_get_navail() failed, status: 0x%x%, " 4259 "nintrs: %d", ddi_status, navail)); 4260 return (NXGE_ERROR | NXGE_DDI_FAILED); 4261 } 4262 4263 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4264 "ddi_intr_get_navail() returned: nintrs %d, naavail %d", 4265 nintrs, navail)); 4266 4267 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 4268 DDI_INTR_ALLOC_NORMAL); 4269 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 4270 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 4271 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 4272 navail, &nactual, behavior); 4273 if (ddi_status != DDI_SUCCESS || nactual == 0) { 4274 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4275 " ddi_intr_alloc() failed: %d", 4276 ddi_status)); 4277 kmem_free(intrp->htable, intrp->intr_size); 4278 return (NXGE_ERROR | NXGE_DDI_FAILED); 4279 } 4280 4281 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 4282 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 4283 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4284 " ddi_intr_get_pri() failed: %d", 4285 ddi_status)); 4286 /* Free already allocated interrupts */ 4287 for (y = 0; y < nactual; y++) { 4288 (void) ddi_intr_free(intrp->htable[y]); 4289 } 4290 4291 kmem_free(intrp->htable, intrp->intr_size); 4292 return (NXGE_ERROR | NXGE_DDI_FAILED); 4293 } 4294 4295 nrequired = 0; 4296 switch (nxgep->niu_type) { 4297 case NEPTUNE: 4298 case NEPTUNE_2: 4299 default: 4300 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 4301 break; 4302 4303 case N2_NIU: 4304 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 4305 break; 4306 } 4307 4308 if (status != NXGE_OK) { 4309 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4310 "nxge_add_intrs_adv_type_fix:nxge_ldgv_init " 4311 "failed: 0x%x", status)); 4312 /* Free already allocated interrupts */ 4313 for (y = 0; y < nactual; y++) { 4314 (void) ddi_intr_free(intrp->htable[y]); 4315 } 4316 4317 kmem_free(intrp->htable, intrp->intr_size); 4318 return (status); 4319 } 4320 4321 ldgp = nxgep->ldgvp->ldgp; 4322 for (x = 0; x < nrequired; x++, ldgp++) { 4323 ldgp->vector = (uint8_t)x; 4324 if (nxgep->niu_type != N2_NIU) { 4325 ldgp->intdata = SID_DATA(ldgp->func, x); 4326 } 4327 4328 arg1 = ldgp->ldvp; 4329 arg2 = nxgep; 4330 if (ldgp->nldvs == 1) { 4331 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 4332 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4333 "nxge_add_intrs_adv_type_fix: " 4334 "1-1 int handler(%d) ldg %d ldv %d " 4335 "arg1 $%p arg2 $%p\n", 4336 x, ldgp->ldg, ldgp->ldvp->ldv, 4337 arg1, arg2)); 4338 } else if (ldgp->nldvs > 1) { 4339 inthandler = (uint_t *)ldgp->sys_intr_handler; 4340 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4341 "nxge_add_intrs_adv_type_fix: " 4342 "shared ldv %d int handler(%d) ldv %d ldg %d" 4343 "arg1 0x%016llx arg2 0x%016llx\n", 4344 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 4345 arg1, arg2)); 4346 } 4347 4348 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 4349 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 4350 != DDI_SUCCESS) { 4351 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4352 "==> nxge_add_intrs_adv_type_fix: failed #%d " 4353 "status 0x%x", x, ddi_status)); 4354 for (y = 0; y < intrp->intr_added; y++) { 4355 (void) ddi_intr_remove_handler( 4356 intrp->htable[y]); 4357 } 4358 for (y = 0; y < nactual; y++) { 4359 (void) ddi_intr_free(intrp->htable[y]); 4360 } 4361 /* Free already allocated intr */ 4362 kmem_free(intrp->htable, intrp->intr_size); 4363 4364 (void) nxge_ldgv_uninit(nxgep); 4365 4366 return (NXGE_ERROR | NXGE_DDI_FAILED); 4367 } 4368 intrp->intr_added++; 4369 } 4370 4371 intrp->msi_intx_cnt = nactual; 4372 4373 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 4374 4375 status = nxge_intr_ldgv_init(nxgep); 4376 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 4377 4378 return (status); 4379 } 4380 4381 static void 4382 nxge_remove_intrs(p_nxge_t nxgep) 4383 { 4384 int i, inum; 4385 p_nxge_intr_t intrp; 4386 4387 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 4388 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4389 if (!intrp->intr_registered) { 4390 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4391 "<== nxge_remove_intrs: interrupts not registered")); 4392 return; 4393 } 4394 4395 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 4396 4397 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4398 (void) ddi_intr_block_disable(intrp->htable, 4399 intrp->intr_added); 4400 } else { 4401 for (i = 0; i < intrp->intr_added; i++) { 4402 (void) ddi_intr_disable(intrp->htable[i]); 4403 } 4404 } 4405 4406 for (inum = 0; inum < intrp->intr_added; inum++) { 4407 if (intrp->htable[inum]) { 4408 (void) ddi_intr_remove_handler(intrp->htable[inum]); 4409 } 4410 } 4411 4412 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 4413 if (intrp->htable[inum]) { 4414 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 4415 "nxge_remove_intrs: ddi_intr_free inum %d " 4416 "msi_intx_cnt %d intr_added %d", 4417 inum, 4418 intrp->msi_intx_cnt, 4419 intrp->intr_added)); 4420 4421 (void) ddi_intr_free(intrp->htable[inum]); 4422 } 4423 } 4424 4425 kmem_free(intrp->htable, intrp->intr_size); 4426 intrp->intr_registered = B_FALSE; 4427 intrp->intr_enabled = B_FALSE; 4428 intrp->msi_intx_cnt = 0; 4429 intrp->intr_added = 0; 4430 4431 (void) nxge_ldgv_uninit(nxgep); 4432 4433 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 4434 } 4435 4436 /*ARGSUSED*/ 4437 static void 4438 nxge_remove_soft_intrs(p_nxge_t nxgep) 4439 { 4440 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs")); 4441 if (nxgep->resched_id) { 4442 ddi_remove_softintr(nxgep->resched_id); 4443 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4444 "==> nxge_remove_soft_intrs: removed")); 4445 nxgep->resched_id = NULL; 4446 } 4447 4448 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs")); 4449 } 4450 4451 /*ARGSUSED*/ 4452 static void 4453 nxge_intrs_enable(p_nxge_t nxgep) 4454 { 4455 p_nxge_intr_t intrp; 4456 int i; 4457 int status; 4458 4459 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 4460 4461 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4462 4463 if (!intrp->intr_registered) { 4464 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 4465 "interrupts are not registered")); 4466 return; 4467 } 4468 4469 if (intrp->intr_enabled) { 4470 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4471 "<== nxge_intrs_enable: already enabled")); 4472 return; 4473 } 4474 4475 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4476 status = ddi_intr_block_enable(intrp->htable, 4477 intrp->intr_added); 4478 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 4479 "block enable - status 0x%x total inums #%d\n", 4480 status, intrp->intr_added)); 4481 } else { 4482 for (i = 0; i < intrp->intr_added; i++) { 4483 status = ddi_intr_enable(intrp->htable[i]); 4484 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 4485 "ddi_intr_enable:enable - status 0x%x " 4486 "total inums %d enable inum #%d\n", 4487 status, intrp->intr_added, i)); 4488 if (status == DDI_SUCCESS) { 4489 intrp->intr_enabled = B_TRUE; 4490 } 4491 } 4492 } 4493 4494 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable")); 4495 } 4496 4497 /*ARGSUSED*/ 4498 static void 4499 nxge_intrs_disable(p_nxge_t nxgep) 4500 { 4501 p_nxge_intr_t intrp; 4502 int i; 4503 4504 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable")); 4505 4506 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4507 4508 if (!intrp->intr_registered) { 4509 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: " 4510 "interrupts are not registered")); 4511 return; 4512 } 4513 4514 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4515 (void) ddi_intr_block_disable(intrp->htable, 4516 intrp->intr_added); 4517 } else { 4518 for (i = 0; i < intrp->intr_added; i++) { 4519 (void) ddi_intr_disable(intrp->htable[i]); 4520 } 4521 } 4522 4523 intrp->intr_enabled = B_FALSE; 4524 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable")); 4525 } 4526 4527 static nxge_status_t 4528 nxge_mac_register(p_nxge_t nxgep) 4529 { 4530 mac_register_t *macp; 4531 int status; 4532 4533 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register")); 4534 4535 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 4536 return (NXGE_ERROR); 4537 4538 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 4539 macp->m_driver = nxgep; 4540 macp->m_dip = nxgep->dip; 4541 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; 4542 macp->m_callbacks = &nxge_m_callbacks; 4543 macp->m_min_sdu = 0; 4544 macp->m_max_sdu = nxgep->mac.maxframesize - 4545 sizeof (struct ether_header) - ETHERFCSL - 4; 4546 4547 status = mac_register(macp, &nxgep->mach); 4548 mac_free(macp); 4549 4550 if (status != 0) { 4551 cmn_err(CE_WARN, 4552 "!nxge_mac_register failed (status %d instance %d)", 4553 status, nxgep->instance); 4554 return (NXGE_ERROR); 4555 } 4556 4557 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success " 4558 "(instance %d)", nxgep->instance)); 4559 4560 return (NXGE_OK); 4561 } 4562 4563 void 4564 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp) 4565 { 4566 ssize_t size; 4567 mblk_t *nmp; 4568 uint8_t blk_id; 4569 uint8_t chan; 4570 uint32_t err_id; 4571 err_inject_t *eip; 4572 4573 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject")); 4574 4575 size = 1024; 4576 nmp = mp->b_cont; 4577 eip = (err_inject_t *)nmp->b_rptr; 4578 blk_id = eip->blk_id; 4579 err_id = eip->err_id; 4580 chan = eip->chan; 4581 cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id); 4582 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id); 4583 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 4584 switch (blk_id) { 4585 case MAC_BLK_ID: 4586 break; 4587 case TXMAC_BLK_ID: 4588 break; 4589 case RXMAC_BLK_ID: 4590 break; 4591 case MIF_BLK_ID: 4592 break; 4593 case IPP_BLK_ID: 4594 nxge_ipp_inject_err(nxgep, err_id); 4595 break; 4596 case TXC_BLK_ID: 4597 nxge_txc_inject_err(nxgep, err_id); 4598 break; 4599 case TXDMA_BLK_ID: 4600 nxge_txdma_inject_err(nxgep, err_id, chan); 4601 break; 4602 case RXDMA_BLK_ID: 4603 nxge_rxdma_inject_err(nxgep, err_id, chan); 4604 break; 4605 case ZCP_BLK_ID: 4606 nxge_zcp_inject_err(nxgep, err_id); 4607 break; 4608 case ESPC_BLK_ID: 4609 break; 4610 case FFLP_BLK_ID: 4611 break; 4612 case PHY_BLK_ID: 4613 break; 4614 case ETHER_SERDES_BLK_ID: 4615 break; 4616 case PCIE_SERDES_BLK_ID: 4617 break; 4618 case VIR_BLK_ID: 4619 break; 4620 } 4621 4622 nmp->b_wptr = nmp->b_rptr + size; 4623 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 4624 4625 miocack(wq, mp, (int)size, 0); 4626 } 4627 4628 static int 4629 nxge_init_common_dev(p_nxge_t nxgep) 4630 { 4631 p_nxge_hw_list_t hw_p; 4632 dev_info_t *p_dip; 4633 4634 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 4635 4636 p_dip = nxgep->p_dip; 4637 MUTEX_ENTER(&nxge_common_lock); 4638 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4639 "==> nxge_init_common_dev:func # %d", 4640 nxgep->function_num)); 4641 /* 4642 * Loop through existing per neptune hardware list. 4643 */ 4644 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 4645 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4646 "==> nxge_init_common_device:func # %d " 4647 "hw_p $%p parent dip $%p", 4648 nxgep->function_num, 4649 hw_p, 4650 p_dip)); 4651 if (hw_p->parent_devp == p_dip) { 4652 nxgep->nxge_hw_p = hw_p; 4653 hw_p->ndevs++; 4654 hw_p->nxge_p[nxgep->function_num] = nxgep; 4655 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4656 "==> nxge_init_common_device:func # %d " 4657 "hw_p $%p parent dip $%p " 4658 "ndevs %d (found)", 4659 nxgep->function_num, 4660 hw_p, 4661 p_dip, 4662 hw_p->ndevs)); 4663 break; 4664 } 4665 } 4666 4667 if (hw_p == NULL) { 4668 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4669 "==> nxge_init_common_device:func # %d " 4670 "parent dip $%p (new)", 4671 nxgep->function_num, 4672 p_dip)); 4673 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 4674 hw_p->parent_devp = p_dip; 4675 hw_p->magic = NXGE_NEPTUNE_MAGIC; 4676 nxgep->nxge_hw_p = hw_p; 4677 hw_p->ndevs++; 4678 hw_p->nxge_p[nxgep->function_num] = nxgep; 4679 hw_p->next = nxge_hw_list; 4680 4681 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 4682 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 4683 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 4684 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 4685 MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL); 4686 4687 nxge_hw_list = hw_p; 4688 } 4689 4690 MUTEX_EXIT(&nxge_common_lock); 4691 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4692 "==> nxge_init_common_device (nxge_hw_list) $%p", 4693 nxge_hw_list)); 4694 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device")); 4695 4696 return (NXGE_OK); 4697 } 4698 4699 static void 4700 nxge_uninit_common_dev(p_nxge_t nxgep) 4701 { 4702 p_nxge_hw_list_t hw_p, h_hw_p; 4703 dev_info_t *p_dip; 4704 4705 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device")); 4706 if (nxgep->nxge_hw_p == NULL) { 4707 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4708 "<== nxge_uninit_common_device (no common)")); 4709 return; 4710 } 4711 4712 MUTEX_ENTER(&nxge_common_lock); 4713 h_hw_p = nxge_hw_list; 4714 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 4715 p_dip = hw_p->parent_devp; 4716 if (nxgep->nxge_hw_p == hw_p && 4717 p_dip == nxgep->p_dip && 4718 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC && 4719 hw_p->magic == NXGE_NEPTUNE_MAGIC) { 4720 4721 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4722 "==> nxge_uninit_common_device:func # %d " 4723 "hw_p $%p parent dip $%p " 4724 "ndevs %d (found)", 4725 nxgep->function_num, 4726 hw_p, 4727 p_dip, 4728 hw_p->ndevs)); 4729 4730 nxgep->nxge_hw_p = NULL; 4731 if (hw_p->ndevs) { 4732 hw_p->ndevs--; 4733 } 4734 hw_p->nxge_p[nxgep->function_num] = NULL; 4735 if (!hw_p->ndevs) { 4736 MUTEX_DESTROY(&hw_p->nxge_vlan_lock); 4737 MUTEX_DESTROY(&hw_p->nxge_tcam_lock); 4738 MUTEX_DESTROY(&hw_p->nxge_cfg_lock); 4739 MUTEX_DESTROY(&hw_p->nxge_mdio_lock); 4740 MUTEX_DESTROY(&hw_p->nxge_mii_lock); 4741 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4742 "==> nxge_uninit_common_device: " 4743 "func # %d " 4744 "hw_p $%p parent dip $%p " 4745 "ndevs %d (last)", 4746 nxgep->function_num, 4747 hw_p, 4748 p_dip, 4749 hw_p->ndevs)); 4750 4751 if (hw_p == nxge_hw_list) { 4752 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4753 "==> nxge_uninit_common_device:" 4754 "remove head func # %d " 4755 "hw_p $%p parent dip $%p " 4756 "ndevs %d (head)", 4757 nxgep->function_num, 4758 hw_p, 4759 p_dip, 4760 hw_p->ndevs)); 4761 nxge_hw_list = hw_p->next; 4762 } else { 4763 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4764 "==> nxge_uninit_common_device:" 4765 "remove middle func # %d " 4766 "hw_p $%p parent dip $%p " 4767 "ndevs %d (middle)", 4768 nxgep->function_num, 4769 hw_p, 4770 p_dip, 4771 hw_p->ndevs)); 4772 h_hw_p->next = hw_p->next; 4773 } 4774 4775 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t)); 4776 } 4777 break; 4778 } else { 4779 h_hw_p = hw_p; 4780 } 4781 } 4782 4783 MUTEX_EXIT(&nxge_common_lock); 4784 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4785 "==> nxge_uninit_common_device (nxge_hw_list) $%p", 4786 nxge_hw_list)); 4787 4788 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<= nxge_uninit_common_device")); 4789 } 4790