/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
/*
 * Until MSI-X is supported, assume MSI; use 2 for MSI-X.
 */
uint32_t nxge_msi_enable = 1;		/* debug: turn msi off */

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t nxge_jumbo_enable = B_FALSE;
uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/*
 * Debugging flags:
 *	nxge_no_tx_lb:		transmit load balancing
 *	nxge_tx_lb_policy:	0 - TCP port (default)
 *				3 - DEST MAC
 */
uint32_t nxge_no_tx_lb = 0;
uint32_t nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

rtrace_t npi_rtracebuf;
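/*
 * Illustrative examples only (the values are examples, not
 * recommendations): the tunables above can be overridden at boot
 * time from /etc/system, e.g.
 *
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_jumbo_enable = 1
 *
 * or patched on a running system with adb/mdb by writing the
 * corresponding kernel variable.
 */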
#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_unicst(void *, const uint8_t *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static void nxge_m_resources(void *);
mblk_t *nxge_m_tx(void *arg, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	mac_addr_slot_t slot);
static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
	boolean_t factory);
static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)

static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	nxge_m_unicst,
	nxge_m_tx,
	nxge_m_resources,
	nxge_m_ioctl,
	nxge_m_getcapab
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level = 0;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

nxge_os_mutex_t nxge_mii_lock;
static uint32_t nxge_mii_lock_init = 0;
nxge_os_mutex_t nxge_mdio_lock;
static uint32_t nxge_mdio_lock_init = 0;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t, ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *, ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
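/*
 * Note (illustrative): the register and descriptor attributes above
 * are little-endian because the hardware defines those structures in
 * LE format, while the buffer attributes below are big-endian
 * (network byte order). The DDI framework applies any byte swapping
 * the host requires at access time.
 */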
/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes[] = {0x2000};
#else
size_t alloc_sizes[] = {0x1000, 0x2000, 0x4000, 0x8000,
		0x10000, 0x20000, 0x40000, 0x80000,
		0x100000, 0x200000, 0x400000, 0x800000, 0x1000000};
#endif
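/*
 * Worked example (illustrative values): with a 4 KB receive block
 * size and nxge_rbr_size = 1024, one channel needs 4 MB of buffer
 * memory. The buffer allocator walks alloc_sizes[] and would
 * typically satisfy that with a single 0x400000 chunk, falling back
 * to several smaller chunks when large contiguous allocations fail.
 */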
/*
 * nxge_attach - Device driver attach(9E) entry point; also handles
 * the DDI_RESUME and DDI_PM_RESUME commands.
 */
static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		goto nxge_attach_fail;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr,
	    &nxge_rx_dma_attr);

	status = nxge_map_regs(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail;
	}

	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail;
		}
	}

	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
	nxgep->mac.portnum = portn;
	if ((portn == 0) || (portn == 1))
		nxgep->mac.porttype = PORT_TYPE_XMAC;
	else
		nxgep->mac.porttype = PORT_TYPE_BMAC;
	/*
	 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
	 * internally, and the other 2 ports use BMAC (1G "Big" MAC).
	 * The two types of MACs have different characteristics.
	 */
	mmac_info = &nxgep->nxge_mmac_info;
	if (nxgep->function_num < 2) {
		mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
		mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
	} else {
		mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
		mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
	}
	/*
	 * Set up the ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);
	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	if (nxgep->nxge_hw_p->platform_type == P_NEPTUNE_ATLAS) {
		/*
		 * read the vpd info from the eeprom into local data
		 * structure and check for the VPD info validity
		 */
		(void) nxge_vpd_info_get(nxgep);
	}

	status = nxge_setup_xcvr_table(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
		    " Couldn't determine card type"
		    " .... exit "));
		goto nxge_attach_fail;
	}

	status = nxge_get_config_properties(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed"));
		goto nxge_attach_fail;
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: "
			    "%s: cannot negotiate "
			    "hypervisor services "
			    "revision %d group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx "
			    "errno: %d",
			    niu_hsvc.hsvc_modname,
			    niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group,
			    niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor,
			    status));
			status = DDI_FAILURE;
			goto nxge_attach_fail;
		}

		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}
#endif

	nxge_hw_id_init(nxgep);
	nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}
	status = nxge_add_soft_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed"));
		goto nxge_attach_fail;
	}
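	/*
	 * Note (illustrative): interrupts are registered and enabled
	 * before the driver registers with the GLDv3 framework below,
	 * so link-state changes can be reported through
	 * mac_link_update() as soon as registration completes.
	 */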
	/*
	 * Enable interrupts.
	 */
	nxge_intrs_enable(nxgep);

	if ((status = nxge_mac_register(nxgep)) != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
	    instance));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int status = DDI_SUCCESS;
	int instance;
	p_nxge_t nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/* remove soft interrupts */
	nxge_remove_soft_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	nxge_destroy_dev(nxgep);

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);
	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were set up during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	p_dev_regs_t dev_regs;
	char buf[MAXPATHLEN + 1];
	char *devname;
#ifdef NXGE_DEBUG
	char *sysname;
#endif
	off_t regsize;
	nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int *prop_val;
		uint_t prop_len;
		uint8_t func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMP IMP
		 * Workaround for a bit-swapping bug in the hardware
		 * which ends up with no-snoop = yes, resulting in DMA
		 * not being synched properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));
nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
	 */
	ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
	    &nxgep->interrupt_cookie);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_setup_mutexes: failed 0x%x", ddi_status));
		goto nxge_setup_mutexes_exit;
	}

	/* Initialize the global mutexes */

	if (nxge_mdio_lock_init == 0) {
		MUTEX_INIT(&nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
	}
	atomic_add_32(&nxge_mdio_lock_init, 1);

	if (nxge_mii_lock_init == 0) {
		MUTEX_INIT(&nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);
	}
	atomic_add_32(&nxge_mii_lock_init, 1);

	nxgep->drv_state |= STATE_MDIO_LOCK_INIT;
	nxgep->drv_state |= STATE_MII_LOCK_INIT;

	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
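	/*
	 * Note (illustrative): passing the interrupt block cookie to
	 * MUTEX_INIT gives these per-instance locks an interrupt
	 * priority, which is what makes it safe to acquire them from
	 * the interrupt handlers as well as from base-level code.
	 */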
	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * The FFLP mutexes are never used in interrupt context
	 * because an FFLP operation can take a very long time to
	 * complete and hence is not suitable to invoke from an
	 * interrupt handler.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}
	if (nxgep->drv_state & STATE_MDIO_LOCK_INIT) {
		if (nxge_mdio_lock_init == 1) {
			MUTEX_DESTROY(&nxge_mdio_lock);
		}
		atomic_add_32(&nxge_mdio_lock_init, -1);
	}
	if (nxgep->drv_state & STATE_MII_LOCK_INIT) {
		if (nxge_mii_lock_init == 1) {
			MUTEX_DESTROY(&nxge_mii_lock);
		}
		atomic_add_32(&nxge_mii_lock_init, -1);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}
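/*
 * Note (illustrative): nxge_mdio_lock and nxge_mii_lock are shared by
 * all instances; the *_lock_init counters above reference-count them
 * so that the first attaching instance creates each mutex and the
 * last detaching instance destroys it.
 */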
nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	/*
	 * Initialize and enable TXC registers
	 * (Globally enable TX controller,
	 * enable a port, configure dma channel bitmap,
	 * configure the max burst size).
	 */
	status = nxge_txc_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
		goto nxge_init_fail2;
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep);

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	(void) nxge_txc_uninit(nxgep);
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}


timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) ||
	    (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}

void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);
	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t regdata;
	int i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
	reg = buf[0];

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}


nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;
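/*
 * Example (illustrative): messages below are gated by the per-instance
 * debug mask, so a call such as
 *
 *	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> my_func"));
 *
 * (my_func is a hypothetical caller) is printed only when DDI_CTL is
 * set in nxge_debug_level; NXGE_NOTE and NXGE_ERR_CTL messages are
 * always printed.
 */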
/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);

	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
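/*
 * Illustrative usage (hypothetical call site): dump the start of a
 * received frame while debugging, e.g.
 *
 *	NXGE_DEBUG_MSG((nxgep, RX_CTL, "pkt: %s",
 *	    nxge_dump_packet((char *)mp->b_rptr, 60)));
 *
 * Note that the returned buffer is a single static array, so the
 * function is not reentrant.
 */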
#ifdef NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char *dev_ptr;
	ddi_acc_handle_t pci_config_handle;
	uint32_t regval;
	int i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first 0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}
}

#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

	nxge_intrs_disable(nxgep);
	nxge_destroy_dev(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

	nxgep->suspended = DDI_RESUME;
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);
	(void) nxge_tx_mac_enable(nxgep);
	nxge_intrs_enable(nxgep);
	nxgep->suspended = 0;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_resume status = 0x%x", status));
	return (status);
}

static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
	    nxgep->mac.portnum));

	status = nxge_link_init(nxgep);

	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "port%d Bad register acc handle", nxgep->mac.portnum));
		status = NXGE_ERROR;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_setup_dev status "
		    "(xcvr init 0x%08x)", status));
		goto nxge_setup_dev_exit;
	}

nxge_setup_dev_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_dev port %d status = 0x%08x",
	    nxgep->mac.portnum, status));

	return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	(void) nxge_hw_stop(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}

static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	uint_t count;
	ddi_dma_cookie_t cookie;
	uint_t iommu_pagesize;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
	if (nxgep->niu_type != N2_NIU) {
		iommu_pagesize = dvma_pagesize(nxgep->dip);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
		    " default_block_size %d iommu_pagesize %d",
		    nxgep->sys_page_sz,
		    ddi_ptob(nxgep->dip, (ulong_t)1),
		    nxgep->rx_default_block_size,
		    iommu_pagesize));

		if (iommu_pagesize != 0) {
			if (nxgep->sys_page_sz == iommu_pagesize) {
				if (iommu_pagesize > 0x4000)
					nxgep->sys_page_sz = 0x4000;
			} else {
				if (nxgep->sys_page_sz > iommu_pagesize)
					nxgep->sys_page_sz = iommu_pagesize;
			}
		}
	}
	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    nxgep->sys_page_sz,
	    ddi_ptob(nxgep->dip, (ulong_t)1),
	    nxgep->rx_default_block_size,
	    nxgep->sys_page_mask));


	switch (nxgep->sys_page_sz) {
	default:
		nxgep->sys_page_sz = 0x1000;
		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		nxgep->rx_default_block_size = 0x2000;
		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	case 0x4000:
		nxgep->rx_default_block_size = 0x4000;
		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
		break;
	case 0x8000:
		nxgep->rx_default_block_size = 0x8000;
		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
		break;
	}

#ifndef USE_RX_BIG_BUF
	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
	nxgep->rx_default_block_size = 0x2000;
	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
	/*
	 * Get the system DMA burst size.
	 */
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &nxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed "
		    " status 0x%x", ddi_status));
		goto nxge_get_soft_properties_exit;
	}

	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
	    (caddr_t)nxgep->dmasparehandle,
	    sizeof (nxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "Binding spare handle to find system"
		    " burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto nxge_get_soft_properties_fail1;
	}

	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

nxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_system_dma_pages status = 0x%08x", status));
	return (status);
}
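/*
 * Note (illustrative): the short-lived "spare" DMA handle above exists
 * only so that ddi_dma_burstsizes() can report what the parent nexus
 * supports; the binding and the handle are released as soon as the
 * burst sizes have been recorded in sys_burst_sz.
 */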
static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));

	status = nxge_alloc_rx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		return (NXGE_ERROR);
	}

	status = nxge_alloc_tx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		nxge_free_rx_mem_pool(nxgep);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
	return (NXGE_OK);
}

static void
nxge_free_mem_pool(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));

	nxge_free_rx_mem_pool(nxgep);
	nxge_free_tx_mem_pool(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}

static nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
{
	int i, j;
	uint32_t ndmas, st_rdc;
	p_nxge_dma_pt_cfg_t p_all_cfgp;
	p_nxge_hw_pt_cfg_t p_cfgp;
	p_nxge_dma_pool_t dma_poolp;
	p_nxge_dma_common_t *dma_buf_p;
	p_nxge_dma_pool_t dma_cntl_poolp;
	p_nxge_dma_common_t *dma_cntl_p;
	size_t rx_buf_alloc_size;
	size_t rx_cntl_alloc_size;
	uint32_t *num_chunks;	/* per dma */
	nxge_status_t status = NXGE_OK;

	uint32_t nxge_port_rbr_size;
	uint32_t nxge_port_rbr_spare_size;
	uint32_t nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	st_rdc = p_cfgp->start_rdc;
	ndmas = p_cfgp->max_rdcs;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    " nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));

	/*
	 * Allocate memory for each receive DMA channel.
	 */
	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_cntl_poolp = (p_nxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(
	    sizeof (uint32_t) * ndmas, KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with the
	 * default block size.
	 * The rbr block counts are rounded up to a multiple of the
	 * batch count (16).
	 */
	nxge_port_rbr_size = p_all_cfgp->rbr_size;
	nxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!nxge_port_rbr_size) {
		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
	}
	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	p_all_cfgp->rbr_size = nxge_port_rbr_size;
	nxge_port_rbr_spare_size = nxge_rbr_spare_size;

	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	/*
	 * N2/NIU has limitations on the descriptor sizes: contiguous
	 * memory allocation for data buffers is limited to 4M
	 * (contig_mem_alloc), and the control buffers must be
	 * little-endian and allocated with the ddi/dki mem alloc
	 * functions.
	 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		nxge_port_rbr_spare_size = 0;
		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
		    (!ISP2(nxge_port_rbr_size))) {
			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
		}
		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
		    (!ISP2(nxge_port_rcr_size))) {
			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
		}
	}
#endif

	rx_buf_alloc_size = (nxgep->rx_default_block_size *
	    (nxge_port_rbr_size + nxge_port_rbr_spare_size));
	/*
	 * Addresses of receive block ring, receive completion ring and the
	 * mailbox must be all cache-aligned (64 bytes).
	 */
	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
	    "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
	    "nxge_port_rcr_size = %d "
	    "rx_cntl_alloc_size = %d",
	    nxge_port_rbr_size, nxge_port_rbr_spare_size,
	    nxge_port_rcr_size,
	    rx_cntl_alloc_size));

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		if (!ISP2(rx_buf_alloc_size)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_alloc_rx_mem_pool: "
			    " must be power of 2"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			goto nxge_alloc_rx_mem_pool_exit;
		}

		if (rx_buf_alloc_size > (1 << 22)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_alloc_rx_mem_pool: "
			    " limit size to 4M"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			goto nxge_alloc_rx_mem_pool_exit;
		}

		if (rx_cntl_alloc_size < 0x2000) {
			rx_cntl_alloc_size = 0x2000;
		}
	}
#endif
	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;

	/*
	 * Allocate memory for receive buffers and descriptor rings.
	 * Replace allocation functions with interface functions provided
	 * by the partition manager when it is available.
	 */
	/*
	 * Allocate memory for the receive buffer blocks.
	 */
	for (i = 0; i < ndmas; i++) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    " nxge_alloc_rx_mem_pool to alloc mem: "
		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
		    i, dma_buf_p[i], &dma_buf_p[i]));
		num_chunks[i] = 0;
		status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i],
		    rx_buf_alloc_size,
		    nxgep->rx_default_block_size, &num_chunks[i]);
		if (status != NXGE_OK) {
			break;
		}
		st_rdc++;
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    " nxge_alloc_rx_mem_pool DONE  alloc mem: "
		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
		    dma_buf_p[i], &dma_buf_p[i]));
	}
	if (i < ndmas) {
		goto nxge_alloc_rx_mem_fail1;
	}
1986 */ 1987 st_rdc = p_cfgp->start_rdc; 1988 for (j = 0; j < ndmas; j++) { 1989 status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j], 1990 rx_cntl_alloc_size); 1991 if (status != NXGE_OK) { 1992 break; 1993 } 1994 st_rdc++; 1995 } 1996 if (j < ndmas) { 1997 goto nxge_alloc_rx_mem_fail2; 1998 } 1999 2000 dma_poolp->ndmas = ndmas; 2001 dma_poolp->num_chunks = num_chunks; 2002 dma_poolp->buf_allocated = B_TRUE; 2003 nxgep->rx_buf_pool_p = dma_poolp; 2004 dma_poolp->dma_buf_pool_p = dma_buf_p; 2005 2006 dma_cntl_poolp->ndmas = ndmas; 2007 dma_cntl_poolp->buf_allocated = B_TRUE; 2008 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2009 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2010 2011 goto nxge_alloc_rx_mem_pool_exit; 2012 2013 nxge_alloc_rx_mem_fail2: 2014 /* Free control buffers */ 2015 j--; 2016 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2017 "==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j)); 2018 for (; j >= 0; j--) { 2019 nxge_free_rx_cntl_dma(nxgep, 2020 (p_nxge_dma_common_t)dma_cntl_p[j]); 2021 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2022 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", 2023 j)); 2024 } 2025 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2026 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j)); 2027 2028 nxge_alloc_rx_mem_fail1: 2029 /* Free data buffers */ 2030 i--; 2031 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2032 "==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i)); 2033 for (; i >= 0; i--) { 2034 nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2035 num_chunks[i]); 2036 } 2037 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2038 "==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i)); 2039 2040 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2041 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2042 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2043 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2044 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2045 2046 nxge_alloc_rx_mem_pool_exit: 2047 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2048 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2049 2050 return (status); 2051 } 2052 2053 static void 2054 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2055 { 2056 uint32_t i, ndmas; 2057 p_nxge_dma_pool_t dma_poolp; 2058 p_nxge_dma_common_t *dma_buf_p; 2059 p_nxge_dma_pool_t dma_cntl_poolp; 2060 p_nxge_dma_common_t *dma_cntl_p; 2061 uint32_t *num_chunks; 2062 2063 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2064 2065 dma_poolp = nxgep->rx_buf_pool_p; 2066 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2067 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2068 "<== nxge_free_rx_mem_pool " 2069 "(null rx buf pool or buf not allocated")); 2070 return; 2071 } 2072 2073 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2074 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2075 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2076 "<== nxge_free_rx_mem_pool " 2077 "(null rx cntl buf pool or cntl buf not allocated")); 2078 return; 2079 } 2080 2081 dma_buf_p = dma_poolp->dma_buf_pool_p; 2082 num_chunks = dma_poolp->num_chunks; 2083 2084 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2085 ndmas = dma_cntl_poolp->ndmas; 2086 2087 for (i = 0; i < ndmas; i++) { 2088 nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2089 } 2090 2091 for (i = 0; i < ndmas; i++) { 2092 nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]); 2093 } 2094 2095 for (i = 0; i < ndmas; i++) { 2096 KMEM_FREE(dma_buf_p[i], 2097 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2098 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2099 } 2100 2101 
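	/*
	 * The per-channel buffers and control areas have been freed above;
	 * now release the bookkeeping arrays and the pool descriptors
	 * themselves, in the reverse order of their allocation.
	 */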
KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2102 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2103 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2104 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2105 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2106 2107 nxgep->rx_buf_pool_p = NULL; 2108 nxgep->rx_cntl_pool_p = NULL; 2109 2110 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2111 } 2112 2113 2114 static nxge_status_t 2115 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2116 p_nxge_dma_common_t *dmap, 2117 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2118 { 2119 p_nxge_dma_common_t rx_dmap; 2120 nxge_status_t status = NXGE_OK; 2121 size_t total_alloc_size; 2122 size_t allocated = 0; 2123 int i, size_index, array_size; 2124 2125 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2126 2127 rx_dmap = (p_nxge_dma_common_t) 2128 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2129 KM_SLEEP); 2130 2131 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2132 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2133 dma_channel, alloc_size, block_size, dmap)); 2134 2135 total_alloc_size = alloc_size; 2136 2137 #if defined(RX_USE_RECLAIM_POST) 2138 total_alloc_size = alloc_size + alloc_size/4; 2139 #endif 2140 2141 i = 0; 2142 size_index = 0; 2143 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2144 while ((alloc_sizes[size_index] < alloc_size) && 2145 (size_index < array_size)) 2146 size_index++; 2147 if (size_index >= array_size) { 2148 size_index = array_size - 1; 2149 } 2150 2151 while ((allocated < total_alloc_size) && 2152 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2153 rx_dmap[i].dma_chunk_index = i; 2154 rx_dmap[i].block_size = block_size; 2155 rx_dmap[i].alength = alloc_sizes[size_index]; 2156 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2157 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2158 rx_dmap[i].dma_channel = dma_channel; 2159 rx_dmap[i].contig_alloc_type = B_FALSE; 2160 2161 /* 2162 * N2/NIU: data buffers must be contiguous as the driver 2163 * needs to call Hypervisor api to set up 2164 * logical pages. 
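		 * (This also explains the NXGE_DMA_BLOCK == 1 test below:
		 * the contiguous path presumes the whole buffer area is one
		 * chunk that contig_mem_alloc() can hand back as a single
		 * physically contiguous region.)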
2165 */ 2166 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2167 rx_dmap[i].contig_alloc_type = B_TRUE; 2168 } 2169 2170 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2171 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2172 "i %d nblocks %d alength %d", 2173 dma_channel, i, &rx_dmap[i], block_size, 2174 i, rx_dmap[i].nblocks, 2175 rx_dmap[i].alength)); 2176 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2177 &nxge_rx_dma_attr, 2178 rx_dmap[i].alength, 2179 &nxge_dev_buf_dma_acc_attr, 2180 DDI_DMA_READ | DDI_DMA_STREAMING, 2181 (p_nxge_dma_common_t)(&rx_dmap[i])); 2182 if (status != NXGE_OK) { 2183 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2184 " nxge_alloc_rx_buf_dma: Alloc Failed ")); 2185 size_index--; 2186 } else { 2187 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2188 " alloc_rx_buf_dma allocated rdc %d " 2189 "chunk %d size %x dvma %x bufp %llx ", 2190 dma_channel, i, rx_dmap[i].alength, 2191 rx_dmap[i].ioaddr_pp, &rx_dmap[i])); 2192 i++; 2193 allocated += alloc_sizes[size_index]; 2194 } 2195 } 2196 2197 2198 if (allocated < total_alloc_size) { 2199 goto nxge_alloc_rx_mem_fail1; 2200 } 2201 2202 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2203 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2204 dma_channel, i)); 2205 *num_chunks = i; 2206 *dmap = rx_dmap; 2207 2208 goto nxge_alloc_rx_mem_exit; 2209 2210 nxge_alloc_rx_mem_fail1: 2211 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2212 2213 nxge_alloc_rx_mem_exit: 2214 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2215 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2216 2217 return (status); 2218 } 2219 2220 /*ARGSUSED*/ 2221 static void 2222 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2223 uint32_t num_chunks) 2224 { 2225 int i; 2226 2227 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2228 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2229 2230 for (i = 0; i < num_chunks; i++) { 2231 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2232 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2233 i, dmap)); 2234 nxge_dma_mem_free(dmap++); 2235 } 2236 2237 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2238 } 2239 2240 /*ARGSUSED*/ 2241 static nxge_status_t 2242 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2243 p_nxge_dma_common_t *dmap, size_t size) 2244 { 2245 p_nxge_dma_common_t rx_dmap; 2246 nxge_status_t status = NXGE_OK; 2247 2248 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2249 2250 rx_dmap = (p_nxge_dma_common_t) 2251 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2252 2253 rx_dmap->contig_alloc_type = B_FALSE; 2254 2255 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2256 &nxge_desc_dma_attr, 2257 size, 2258 &nxge_dev_desc_dma_acc_attr, 2259 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2260 rx_dmap); 2261 if (status != NXGE_OK) { 2262 goto nxge_alloc_rx_cntl_dma_fail1; 2263 } 2264 2265 *dmap = rx_dmap; 2266 goto nxge_alloc_rx_cntl_dma_exit; 2267 2268 nxge_alloc_rx_cntl_dma_fail1: 2269 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2270 2271 nxge_alloc_rx_cntl_dma_exit: 2272 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2273 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2274 2275 return (status); 2276 } 2277 2278 /*ARGSUSED*/ 2279 static void 2280 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2281 { 2282 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2283 2284 nxge_dma_mem_free(dmap); 2285 2286 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2287 } 2288 2289 static nxge_status_t 2290 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2291 { 2292 nxge_status_t 
status = NXGE_OK; 2293 int i, j; 2294 uint32_t ndmas, st_tdc; 2295 p_nxge_dma_pt_cfg_t p_all_cfgp; 2296 p_nxge_hw_pt_cfg_t p_cfgp; 2297 p_nxge_dma_pool_t dma_poolp; 2298 p_nxge_dma_common_t *dma_buf_p; 2299 p_nxge_dma_pool_t dma_cntl_poolp; 2300 p_nxge_dma_common_t *dma_cntl_p; 2301 size_t tx_buf_alloc_size; 2302 size_t tx_cntl_alloc_size; 2303 uint32_t *num_chunks; /* per dma */ 2304 uint32_t bcopy_thresh; 2305 2306 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2307 2308 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2309 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2310 st_tdc = p_cfgp->start_tdc; 2311 ndmas = p_cfgp->max_tdcs; 2312 2313 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: " 2314 "p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d", 2315 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs)); 2316 /* 2317 * Allocate memory for each transmit DMA channel. 2318 */ 2319 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2320 KM_SLEEP); 2321 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2322 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2323 2324 dma_cntl_poolp = (p_nxge_dma_pool_t) 2325 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2326 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2327 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2328 2329 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2330 /* 2331 * N2/NIU has limitation on the descriptor sizes (contiguous 2332 * memory allocation on data buffers to 4M (contig_mem_alloc) 2333 * and little endian for control buffers (must use the ddi/dki mem alloc 2334 * function). The transmit ring is limited to 8K (includes the 2335 * mailbox). 2336 */ 2337 if (nxgep->niu_type == N2_NIU) { 2338 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2339 (!ISP2(nxge_tx_ring_size))) { 2340 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2341 } 2342 } 2343 #endif 2344 2345 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2346 2347 /* 2348 * Assume that each DMA channel will be configured with default 2349 * transmit bufer size for copying transmit data. 2350 * (For packet payload over this limit, packets will not be 2351 * copied.) 2352 */ 2353 if (nxgep->niu_type == N2_NIU) { 2354 bcopy_thresh = TX_BCOPY_SIZE; 2355 } else { 2356 bcopy_thresh = nxge_bcopy_thresh; 2357 } 2358 tx_buf_alloc_size = (bcopy_thresh * nxge_tx_ring_size); 2359 2360 /* 2361 * Addresses of transmit descriptor ring and the 2362 * mailbox must be all cache-aligned (64 bytes). 2363 */ 2364 tx_cntl_alloc_size = nxge_tx_ring_size; 2365 tx_cntl_alloc_size *= (sizeof (tx_desc_t)); 2366 tx_cntl_alloc_size += sizeof (txdma_mailbox_t); 2367 2368 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2369 if (nxgep->niu_type == N2_NIU) { 2370 if (!ISP2(tx_buf_alloc_size)) { 2371 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2372 "==> nxge_alloc_tx_mem_pool: " 2373 " must be power of 2")); 2374 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2375 goto nxge_alloc_tx_mem_pool_exit; 2376 } 2377 2378 if (tx_buf_alloc_size > (1 << 22)) { 2379 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2380 "==> nxge_alloc_tx_mem_pool: " 2381 " limit size to 4M")); 2382 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2383 goto nxge_alloc_tx_mem_pool_exit; 2384 } 2385 2386 if (tx_cntl_alloc_size < 0x2000) { 2387 tx_cntl_alloc_size = 0x2000; 2388 } 2389 } 2390 #endif 2391 2392 num_chunks = (uint32_t *)KMEM_ZALLOC( 2393 sizeof (uint32_t) * ndmas, KM_SLEEP); 2394 2395 /* 2396 * Allocate memory for transmit buffers and descriptor rings. 
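	 * As on the receive side, each of the ndmas channels gets
	 * tx_buf_alloc_size bytes of packet buffers and tx_cntl_alloc_size
	 * bytes of descriptor/mailbox space.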
2397 * Replace allocation functions with interface functions provided 2398 * by the partition manager when it is available. 2399 * 2400 * Allocate memory for the transmit buffer pool. 2401 */ 2402 for (i = 0; i < ndmas; i++) { 2403 num_chunks[i] = 0; 2404 status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i], 2405 tx_buf_alloc_size, 2406 bcopy_thresh, &num_chunks[i]); 2407 if (status != NXGE_OK) { 2408 break; 2409 } 2410 st_tdc++; 2411 } 2412 if (i < ndmas) { 2413 goto nxge_alloc_tx_mem_pool_fail1; 2414 } 2415 2416 st_tdc = p_cfgp->start_tdc; 2417 /* 2418 * Allocate memory for descriptor rings and mailbox. 2419 */ 2420 for (j = 0; j < ndmas; j++) { 2421 status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j], 2422 tx_cntl_alloc_size); 2423 if (status != NXGE_OK) { 2424 break; 2425 } 2426 st_tdc++; 2427 } 2428 if (j < ndmas) { 2429 goto nxge_alloc_tx_mem_pool_fail2; 2430 } 2431 2432 dma_poolp->ndmas = ndmas; 2433 dma_poolp->num_chunks = num_chunks; 2434 dma_poolp->buf_allocated = B_TRUE; 2435 dma_poolp->dma_buf_pool_p = dma_buf_p; 2436 nxgep->tx_buf_pool_p = dma_poolp; 2437 2438 dma_cntl_poolp->ndmas = ndmas; 2439 dma_cntl_poolp->buf_allocated = B_TRUE; 2440 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2441 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2442 2443 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2444 "==> nxge_alloc_tx_mem_pool: start_tdc %d " 2445 "ndmas %d poolp->ndmas %d", 2446 st_tdc, ndmas, dma_poolp->ndmas)); 2447 2448 goto nxge_alloc_tx_mem_pool_exit; 2449 2450 nxge_alloc_tx_mem_pool_fail2: 2451 /* Free control buffers */ 2452 j--; 2453 for (; j >= 0; j--) { 2454 nxge_free_tx_cntl_dma(nxgep, 2455 (p_nxge_dma_common_t)dma_cntl_p[j]); 2456 } 2457 2458 nxge_alloc_tx_mem_pool_fail1: 2459 /* Free data buffers */ 2460 i--; 2461 for (; i >= 0; i--) { 2462 nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2463 num_chunks[i]); 2464 } 2465 2466 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2467 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2468 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2469 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2470 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2471 2472 nxge_alloc_tx_mem_pool_exit: 2473 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2474 "<== nxge_alloc_tx_mem_pool:status 0x%08x", status)); 2475 2476 return (status); 2477 } 2478 2479 static nxge_status_t 2480 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2481 p_nxge_dma_common_t *dmap, size_t alloc_size, 2482 size_t block_size, uint32_t *num_chunks) 2483 { 2484 p_nxge_dma_common_t tx_dmap; 2485 nxge_status_t status = NXGE_OK; 2486 size_t total_alloc_size; 2487 size_t allocated = 0; 2488 int i, size_index, array_size; 2489 2490 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 2491 2492 tx_dmap = (p_nxge_dma_common_t) 2493 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2494 KM_SLEEP); 2495 2496 total_alloc_size = alloc_size; 2497 i = 0; 2498 size_index = 0; 2499 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2500 while ((alloc_sizes[size_index] < alloc_size) && 2501 (size_index < array_size)) 2502 size_index++; 2503 if (size_index >= array_size) { 2504 size_index = array_size - 1; 2505 } 2506 2507 while ((allocated < total_alloc_size) && 2508 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2509 2510 tx_dmap[i].dma_chunk_index = i; 2511 tx_dmap[i].block_size = block_size; 2512 tx_dmap[i].alength = alloc_sizes[size_index]; 2513 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2514 tx_dmap[i].nblocks = 
alloc_sizes[size_index] / block_size; 2515 tx_dmap[i].dma_channel = dma_channel; 2516 tx_dmap[i].contig_alloc_type = B_FALSE; 2517 2518 /* 2519 * N2/NIU: data buffers must be contiguous as the driver 2520 * needs to call Hypervisor api to set up 2521 * logical pages. 2522 */ 2523 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2524 tx_dmap[i].contig_alloc_type = B_TRUE; 2525 } 2526 2527 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2528 &nxge_tx_dma_attr, 2529 tx_dmap[i].alength, 2530 &nxge_dev_buf_dma_acc_attr, 2531 DDI_DMA_WRITE | DDI_DMA_STREAMING, 2532 (p_nxge_dma_common_t)(&tx_dmap[i])); 2533 if (status != NXGE_OK) { 2534 size_index--; 2535 } else { 2536 i++; 2537 allocated += alloc_sizes[size_index]; 2538 } 2539 } 2540 2541 if (allocated < total_alloc_size) { 2542 goto nxge_alloc_tx_mem_fail1; 2543 } 2544 2545 *num_chunks = i; 2546 *dmap = tx_dmap; 2547 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2548 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2549 *dmap, i)); 2550 goto nxge_alloc_tx_mem_exit; 2551 2552 nxge_alloc_tx_mem_fail1: 2553 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2554 2555 nxge_alloc_tx_mem_exit: 2556 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2557 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 2558 2559 return (status); 2560 } 2561 2562 /*ARGSUSED*/ 2563 static void 2564 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2565 uint32_t num_chunks) 2566 { 2567 int i; 2568 2569 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 2570 2571 for (i = 0; i < num_chunks; i++) { 2572 nxge_dma_mem_free(dmap++); 2573 } 2574 2575 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 2576 } 2577 2578 /*ARGSUSED*/ 2579 static nxge_status_t 2580 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2581 p_nxge_dma_common_t *dmap, size_t size) 2582 { 2583 p_nxge_dma_common_t tx_dmap; 2584 nxge_status_t status = NXGE_OK; 2585 2586 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 2587 tx_dmap = (p_nxge_dma_common_t) 2588 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2589 2590 tx_dmap->contig_alloc_type = B_FALSE; 2591 2592 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2593 &nxge_desc_dma_attr, 2594 size, 2595 &nxge_dev_desc_dma_acc_attr, 2596 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2597 tx_dmap); 2598 if (status != NXGE_OK) { 2599 goto nxge_alloc_tx_cntl_dma_fail1; 2600 } 2601 2602 *dmap = tx_dmap; 2603 goto nxge_alloc_tx_cntl_dma_exit; 2604 2605 nxge_alloc_tx_cntl_dma_fail1: 2606 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 2607 2608 nxge_alloc_tx_cntl_dma_exit: 2609 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2610 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 2611 2612 return (status); 2613 } 2614 2615 /*ARGSUSED*/ 2616 static void 2617 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2618 { 2619 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 2620 2621 nxge_dma_mem_free(dmap); 2622 2623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 2624 } 2625 2626 static void 2627 nxge_free_tx_mem_pool(p_nxge_t nxgep) 2628 { 2629 uint32_t i, ndmas; 2630 p_nxge_dma_pool_t dma_poolp; 2631 p_nxge_dma_common_t *dma_buf_p; 2632 p_nxge_dma_pool_t dma_cntl_poolp; 2633 p_nxge_dma_common_t *dma_cntl_p; 2634 uint32_t *num_chunks; 2635 2636 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool")); 2637 2638 dma_poolp = nxgep->tx_buf_pool_p; 2639 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2640 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2641 "<== nxge_free_tx_mem_pool 
" 2642 "(null rx buf pool or buf not allocated")); 2643 return; 2644 } 2645 2646 dma_cntl_poolp = nxgep->tx_cntl_pool_p; 2647 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2648 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2649 "<== nxge_free_tx_mem_pool " 2650 "(null tx cntl buf pool or cntl buf not allocated")); 2651 return; 2652 } 2653 2654 dma_buf_p = dma_poolp->dma_buf_pool_p; 2655 num_chunks = dma_poolp->num_chunks; 2656 2657 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2658 ndmas = dma_cntl_poolp->ndmas; 2659 2660 for (i = 0; i < ndmas; i++) { 2661 nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2662 } 2663 2664 for (i = 0; i < ndmas; i++) { 2665 nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]); 2666 } 2667 2668 for (i = 0; i < ndmas; i++) { 2669 KMEM_FREE(dma_buf_p[i], 2670 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2671 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2672 } 2673 2674 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2675 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2676 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2677 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2678 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2679 2680 nxgep->tx_buf_pool_p = NULL; 2681 nxgep->tx_cntl_pool_p = NULL; 2682 2683 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool")); 2684 } 2685 2686 /*ARGSUSED*/ 2687 static nxge_status_t 2688 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 2689 struct ddi_dma_attr *dma_attrp, 2690 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 2691 p_nxge_dma_common_t dma_p) 2692 { 2693 caddr_t kaddrp; 2694 int ddi_status = DDI_SUCCESS; 2695 boolean_t contig_alloc_type; 2696 2697 contig_alloc_type = dma_p->contig_alloc_type; 2698 2699 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 2700 /* 2701 * contig_alloc_type for contiguous memory only allowed 2702 * for N2/NIU. 
		 */
2704		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2705		    "nxge_dma_mem_alloc: alloc type not allowed (%d)",
2706		    dma_p->contig_alloc_type));
2707		return (NXGE_ERROR | NXGE_DDI_FAILED);
2708	}
2709
2710	dma_p->dma_handle = NULL;
2711	dma_p->acc_handle = NULL;
2712	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
2713	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
2714	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
2715	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2716	if (ddi_status != DDI_SUCCESS) {
2717		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2718		    "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2719		return (NXGE_ERROR | NXGE_DDI_FAILED);
2720	}
2721
2722	switch (contig_alloc_type) {
2723	case B_FALSE:
2724		ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length,
2725		    acc_attr_p,
2726		    xfer_flags,
2727		    DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2728		    &dma_p->acc_handle);
2729		if (ddi_status != DDI_SUCCESS) {
2730			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2731			    "nxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2732			ddi_dma_free_handle(&dma_p->dma_handle);
2733			dma_p->dma_handle = NULL;
2734			return (NXGE_ERROR | NXGE_DDI_FAILED);
2735		}
2736		if (dma_p->alength < length) {
2737			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2738			    "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
2739			    "< length."));
2740			ddi_dma_mem_free(&dma_p->acc_handle);
2741			ddi_dma_free_handle(&dma_p->dma_handle);
2742			dma_p->acc_handle = NULL;
2743			dma_p->dma_handle = NULL;
2744			return (NXGE_ERROR);
2745		}
2746
2747		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2748		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2749		    &dma_p->dma_cookie, &dma_p->ncookies);
2750		if (ddi_status != DDI_DMA_MAPPED) {
2751			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2752			    "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2753			    "(status 0x%x ncookies %d.)", ddi_status,
2754			    dma_p->ncookies));
2755			if (dma_p->acc_handle) {
2756				ddi_dma_mem_free(&dma_p->acc_handle);
2757				dma_p->acc_handle = NULL;
2758			}
2759			ddi_dma_free_handle(&dma_p->dma_handle);
2760			dma_p->dma_handle = NULL;
2761			return (NXGE_ERROR | NXGE_DDI_FAILED);
2762		}
2763
2764		if (dma_p->ncookies != 1) {
2765			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2766			    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
2767			    "> 1 cookie "
2768			    "(status 0x%x ncookies %d.)", ddi_status,
2769			    dma_p->ncookies));
2770			if (dma_p->acc_handle) {
2771				ddi_dma_mem_free(&dma_p->acc_handle);
2772				dma_p->acc_handle = NULL;
2773			}
2774			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2775			ddi_dma_free_handle(&dma_p->dma_handle);
2776			dma_p->dma_handle = NULL;
2777			return (NXGE_ERROR);
2778		}
2779		break;
2780
2781 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2782	case B_TRUE:
2783		kaddrp = (caddr_t)contig_mem_alloc(length);
2784		if (kaddrp == NULL) {
2785			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2786			    "nxge_dma_mem_alloc:contig_mem_alloc failed."));
2787			ddi_dma_free_handle(&dma_p->dma_handle);
2788			return (NXGE_ERROR | NXGE_DDI_FAILED);
2789		}
2790
2791		dma_p->alength = length;
2792		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2793		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2794		    &dma_p->dma_cookie, &dma_p->ncookies);
2795		if (ddi_status != DDI_DMA_MAPPED) {
2796			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2797			    "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2798			    "(status 0x%x ncookies %d.)", ddi_status,
2799			    dma_p->ncookies));
2800
2801			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2802			    "==> nxge_dma_mem_alloc: (not mapped) "
2803			    "length %lu (0x%x) "
2804			    "free contig kaddrp $%p "
2805			    "va_to_pa $%p",
2806			    length, length,
kaddrp, 2808 va_to_pa(kaddrp))); 2809 2810 2811 contig_mem_free((void *)kaddrp, length); 2812 ddi_dma_free_handle(&dma_p->dma_handle); 2813 2814 dma_p->dma_handle = NULL; 2815 dma_p->acc_handle = NULL; 2816 dma_p->alength = NULL; 2817 dma_p->kaddrp = NULL; 2818 2819 return (NXGE_ERROR | NXGE_DDI_FAILED); 2820 } 2821 2822 if (dma_p->ncookies != 1 || 2823 (dma_p->dma_cookie.dmac_laddress == NULL)) { 2824 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2825 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 2826 "cookie or " 2827 "dmac_laddress is NULL $%p size %d " 2828 " (status 0x%x ncookies %d.)", 2829 ddi_status, 2830 dma_p->dma_cookie.dmac_laddress, 2831 dma_p->dma_cookie.dmac_size, 2832 dma_p->ncookies)); 2833 2834 contig_mem_free((void *)kaddrp, length); 2835 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2836 ddi_dma_free_handle(&dma_p->dma_handle); 2837 2838 dma_p->alength = 0; 2839 dma_p->dma_handle = NULL; 2840 dma_p->acc_handle = NULL; 2841 dma_p->kaddrp = NULL; 2842 2843 return (NXGE_ERROR | NXGE_DDI_FAILED); 2844 } 2845 break; 2846 2847 #else 2848 case B_TRUE: 2849 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2850 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 2851 return (NXGE_ERROR | NXGE_DDI_FAILED); 2852 #endif 2853 } 2854 2855 dma_p->kaddrp = kaddrp; 2856 dma_p->last_kaddrp = (unsigned char *)kaddrp + 2857 dma_p->alength - RXBUF_64B_ALIGNED; 2858 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2859 dma_p->last_ioaddr_pp = 2860 (unsigned char *)dma_p->dma_cookie.dmac_laddress + 2861 dma_p->alength - RXBUF_64B_ALIGNED; 2862 2863 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 2864 2865 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2866 dma_p->orig_ioaddr_pp = 2867 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2868 dma_p->orig_alength = length; 2869 dma_p->orig_kaddrp = kaddrp; 2870 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 2871 #endif 2872 2873 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 2874 "dma buffer allocated: dma_p $%p " 2875 "return dmac_ladress from cookie $%p cookie dmac_size %d " 2876 "dma_p->ioaddr_p $%p " 2877 "dma_p->orig_ioaddr_p $%p " 2878 "orig_vatopa $%p " 2879 "alength %d (0x%x) " 2880 "kaddrp $%p " 2881 "length %d (0x%x)", 2882 dma_p, 2883 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 2884 dma_p->ioaddr_pp, 2885 dma_p->orig_ioaddr_pp, 2886 dma_p->orig_vatopa, 2887 dma_p->alength, dma_p->alength, 2888 kaddrp, 2889 length, length)); 2890 2891 return (NXGE_OK); 2892 } 2893 2894 static void 2895 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 2896 { 2897 if (dma_p->dma_handle != NULL) { 2898 if (dma_p->ncookies) { 2899 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2900 dma_p->ncookies = 0; 2901 } 2902 ddi_dma_free_handle(&dma_p->dma_handle); 2903 dma_p->dma_handle = NULL; 2904 } 2905 2906 if (dma_p->acc_handle != NULL) { 2907 ddi_dma_mem_free(&dma_p->acc_handle); 2908 dma_p->acc_handle = NULL; 2909 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 2910 } 2911 2912 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2913 if (dma_p->contig_alloc_type && 2914 dma_p->orig_kaddrp && dma_p->orig_alength) { 2915 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 2916 "kaddrp $%p (orig_kaddrp $%p)" 2917 "mem type %d ", 2918 "orig_alength %d " 2919 "alength 0x%x (%d)", 2920 dma_p->kaddrp, 2921 dma_p->orig_kaddrp, 2922 dma_p->contig_alloc_type, 2923 dma_p->orig_alength, 2924 dma_p->alength, dma_p->alength)); 2925 2926 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 2927 dma_p->orig_alength = NULL; 2928 dma_p->orig_kaddrp = 
NULL;
2929		dma_p->contig_alloc_type = B_FALSE;
2930	}
2931 #endif
2932	dma_p->kaddrp = NULL;
2933	dma_p->alength = NULL;
2934 }
2935
2936 /*
2937  * nxge_m_start() -- start transmitting and receiving.
2938  *
2939  * This function is called by the MAC layer when the first
2940  * stream is opened to prepare the hardware for transmitting
2941  * and receiving packets.
2942  */
2943 static int
2944 nxge_m_start(void *arg)
2945 {
2946	p_nxge_t nxgep = (p_nxge_t)arg;
2947
2948	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
2949
2950	MUTEX_ENTER(nxgep->genlock);
2951	if (nxge_init(nxgep) != NXGE_OK) {
2952		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2953		    "<== nxge_m_start: initialization failed"));
2954		MUTEX_EXIT(nxgep->genlock);
2955		return (EIO);
2956	}
2957
2958	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
2959		goto nxge_m_start_exit;
2960	/*
2961	 * Start the timer that checks for system errors and tx hangs.
2962	 */
2963	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
2964	    NXGE_CHECK_TIMER);
2965
2966	nxgep->link_notify = B_TRUE;
2967
2968	nxgep->nxge_mac_state = NXGE_MAC_STARTED;
2969
2970 nxge_m_start_exit:
2971	MUTEX_EXIT(nxgep->genlock);
2972	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
2973	return (0);
2974 }
2975
2976 /*
2977  * nxge_m_stop(): stop transmitting and receiving.
2978  */
2979 static void
2980 nxge_m_stop(void *arg)
2981 {
2982	p_nxge_t nxgep = (p_nxge_t)arg;
2983
2984	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
2985
2986	if (nxgep->nxge_timerid) {
2987		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
2988		nxgep->nxge_timerid = 0;
2989	}
2990
2991	MUTEX_ENTER(nxgep->genlock);
2992	nxge_uninit(nxgep);
2993
2994	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
2995
2996	MUTEX_EXIT(nxgep->genlock);
2997
2998	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
2999 }
3000
3001 static int
3002 nxge_m_unicst(void *arg, const uint8_t *macaddr)
3003 {
3004	p_nxge_t nxgep = (p_nxge_t)arg;
3005	struct ether_addr addrp;
3006
3007	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));
3008
3009	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
3010	if (nxge_set_mac_addr(nxgep, &addrp)) {
3011		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3012		    "<== nxge_m_unicst: set unicast failed"));
3013		return (EINVAL);
3014	}
3015
3016	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));
3017
3018	return (0);
3019 }
3020
3021 static int
3022 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3023 {
3024	p_nxge_t nxgep = (p_nxge_t)arg;
3025	struct ether_addr addrp;
3026
3027	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3028	    "==> nxge_m_multicst: add %d", add));
3029
3030	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3031	if (add) {
3032		if (nxge_add_mcast_addr(nxgep, &addrp)) {
3033			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3034			    "<== nxge_m_multicst: add multicast failed"));
3035			return (EINVAL);
3036		}
3037	} else {
3038		if (nxge_del_mcast_addr(nxgep, &addrp)) {
3039			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3040			    "<== nxge_m_multicst: del multicast failed"));
3041			return (EINVAL);
3042		}
3043	}
3044
3045	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3046
3047	return (0);
3048 }
3049
3050 static int
3051 nxge_m_promisc(void *arg, boolean_t on)
3052 {
3053	p_nxge_t nxgep = (p_nxge_t)arg;
3054
3055	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3056	    "==> nxge_m_promisc: on %d", on));
3057
3058	if (nxge_set_promisc(nxgep, on)) {
3059		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3060		    "<== nxge_m_promisc: set promisc failed"));
3061		return (EINVAL);
3062	}
3063
3064	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3065	    "<==
nxge_m_promisc: on %d", on)); 3066 3067 return (0); 3068 } 3069 3070 static void 3071 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3072 { 3073 p_nxge_t nxgep = (p_nxge_t)arg; 3074 struct iocblk *iocp; 3075 boolean_t need_privilege; 3076 int err; 3077 int cmd; 3078 3079 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3080 3081 iocp = (struct iocblk *)mp->b_rptr; 3082 iocp->ioc_error = 0; 3083 need_privilege = B_TRUE; 3084 cmd = iocp->ioc_cmd; 3085 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3086 switch (cmd) { 3087 default: 3088 miocnak(wq, mp, 0, EINVAL); 3089 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3090 return; 3091 3092 case LB_GET_INFO_SIZE: 3093 case LB_GET_INFO: 3094 case LB_GET_MODE: 3095 need_privilege = B_FALSE; 3096 break; 3097 case LB_SET_MODE: 3098 break; 3099 3100 case ND_GET: 3101 need_privilege = B_FALSE; 3102 break; 3103 case ND_SET: 3104 break; 3105 3106 case NXGE_GET_MII: 3107 case NXGE_PUT_MII: 3108 case NXGE_GET64: 3109 case NXGE_PUT64: 3110 case NXGE_GET_TX_RING_SZ: 3111 case NXGE_GET_TX_DESC: 3112 case NXGE_TX_SIDE_RESET: 3113 case NXGE_RX_SIDE_RESET: 3114 case NXGE_GLOBAL_RESET: 3115 case NXGE_RESET_MAC: 3116 case NXGE_TX_REGS_DUMP: 3117 case NXGE_RX_REGS_DUMP: 3118 case NXGE_INT_REGS_DUMP: 3119 case NXGE_VIR_INT_REGS_DUMP: 3120 case NXGE_PUT_TCAM: 3121 case NXGE_GET_TCAM: 3122 case NXGE_RTRACE: 3123 case NXGE_RDUMP: 3124 3125 need_privilege = B_FALSE; 3126 break; 3127 case NXGE_INJECT_ERR: 3128 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3129 nxge_err_inject(nxgep, wq, mp); 3130 break; 3131 } 3132 3133 if (need_privilege) { 3134 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3135 if (err != 0) { 3136 miocnak(wq, mp, 0, err); 3137 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3138 "<== nxge_m_ioctl: no priv")); 3139 return; 3140 } 3141 } 3142 3143 switch (cmd) { 3144 case ND_GET: 3145 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command")); 3146 case ND_SET: 3147 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command")); 3148 nxge_param_ioctl(nxgep, wq, mp, iocp); 3149 break; 3150 3151 case LB_GET_MODE: 3152 case LB_SET_MODE: 3153 case LB_GET_INFO_SIZE: 3154 case LB_GET_INFO: 3155 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3156 break; 3157 3158 case NXGE_GET_MII: 3159 case NXGE_PUT_MII: 3160 case NXGE_PUT_TCAM: 3161 case NXGE_GET_TCAM: 3162 case NXGE_GET64: 3163 case NXGE_PUT64: 3164 case NXGE_GET_TX_RING_SZ: 3165 case NXGE_GET_TX_DESC: 3166 case NXGE_TX_SIDE_RESET: 3167 case NXGE_RX_SIDE_RESET: 3168 case NXGE_GLOBAL_RESET: 3169 case NXGE_RESET_MAC: 3170 case NXGE_TX_REGS_DUMP: 3171 case NXGE_RX_REGS_DUMP: 3172 case NXGE_INT_REGS_DUMP: 3173 case NXGE_VIR_INT_REGS_DUMP: 3174 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3175 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3176 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3177 break; 3178 } 3179 3180 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3181 } 3182 3183 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3184 3185 static void 3186 nxge_m_resources(void *arg) 3187 { 3188 p_nxge_t nxgep = arg; 3189 mac_rx_fifo_t mrf; 3190 p_rx_rcr_rings_t rcr_rings; 3191 p_rx_rcr_ring_t *rcr_p; 3192 uint32_t i, ndmas; 3193 nxge_status_t status; 3194 3195 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 3196 3197 MUTEX_ENTER(nxgep->genlock); 3198 3199 /* 3200 * CR 6492541 Check to see if the drv_state has been initialized, 3201 * if not * call nxge_init(). 
3202 */ 3203 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3204 status = nxge_init(nxgep); 3205 if (status != NXGE_OK) 3206 goto nxge_m_resources_exit; 3207 } 3208 3209 mrf.mrf_type = MAC_RX_FIFO; 3210 mrf.mrf_blank = nxge_rx_hw_blank; 3211 mrf.mrf_arg = (void *)nxgep; 3212 3213 mrf.mrf_normal_blank_time = 128; 3214 mrf.mrf_normal_pkt_count = 8; 3215 rcr_rings = nxgep->rx_rcr_rings; 3216 rcr_p = rcr_rings->rcr_rings; 3217 ndmas = rcr_rings->ndmas; 3218 3219 /* 3220 * Export our receive resources to the MAC layer. 3221 */ 3222 for (i = 0; i < ndmas; i++) { 3223 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle = 3224 mac_resource_add(nxgep->mach, 3225 (mac_resource_t *)&mrf); 3226 3227 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3228 "==> nxge_m_resources: vdma %d dma %d " 3229 "rcrptr 0x%016llx mac_handle 0x%016llx", 3230 i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc, 3231 rcr_p[i], 3232 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle)); 3233 } 3234 3235 nxge_m_resources_exit: 3236 MUTEX_EXIT(nxgep->genlock); 3237 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources")); 3238 } 3239 3240 static void 3241 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory) 3242 { 3243 p_nxge_mmac_stats_t mmac_stats; 3244 int i; 3245 nxge_mmac_t *mmac_info; 3246 3247 mmac_info = &nxgep->nxge_mmac_info; 3248 3249 mmac_stats = &nxgep->statsp->mmac_stats; 3250 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 3251 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 3252 3253 for (i = 0; i < ETHERADDRL; i++) { 3254 if (factory) { 3255 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3256 = mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i]; 3257 } else { 3258 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3259 = mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i]; 3260 } 3261 } 3262 } 3263 3264 /* 3265 * nxge_altmac_set() -- Set an alternate MAC address 3266 */ 3267 static int 3268 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot) 3269 { 3270 uint8_t addrn; 3271 uint8_t portn; 3272 npi_mac_addr_t altmac; 3273 hostinfo_t mac_rdc; 3274 p_nxge_class_pt_cfg_t clscfgp; 3275 3276 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 3277 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 3278 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 3279 3280 portn = nxgep->mac.portnum; 3281 addrn = (uint8_t)slot - 1; 3282 3283 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn, 3284 addrn, &altmac) != NPI_SUCCESS) 3285 return (EIO); 3286 3287 /* 3288 * Set the rdc table number for the host info entry 3289 * for this mac address slot. 3290 */ 3291 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 3292 mac_rdc.value = 0; 3293 mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl; 3294 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 3295 3296 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 3297 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 3298 return (EIO); 3299 } 3300 3301 /* 3302 * Enable comparison with the alternate MAC address. 3303 * While the first alternate addr is enabled by bit 1 of register 3304 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 3305 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 3306 * accordingly before calling npi_mac_altaddr_entry. 
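	 * For example, alternate-address slot 1 is enabled with addrn 0 on
	 * an XMAC port (XMAC_PORT_0/XMAC_PORT_1) but with addrn 1 on a
	 * BMAC port.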
3307 */ 3308 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 3309 addrn = (uint8_t)slot - 1; 3310 else 3311 addrn = (uint8_t)slot; 3312 3313 if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn) 3314 != NPI_SUCCESS) 3315 return (EIO); 3316 3317 return (0); 3318 } 3319 3320 /* 3321 * nxeg_m_mmac_add() - find an unused address slot, set the address 3322 * value to the one specified, enable the port to start filtering on 3323 * the new MAC address. Returns 0 on success. 3324 */ 3325 static int 3326 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 3327 { 3328 p_nxge_t nxgep = arg; 3329 mac_addr_slot_t slot; 3330 nxge_mmac_t *mmac_info; 3331 int err; 3332 nxge_status_t status; 3333 3334 mutex_enter(nxgep->genlock); 3335 3336 /* 3337 * Make sure that nxge is initialized, if _start() has 3338 * not been called. 3339 */ 3340 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3341 status = nxge_init(nxgep); 3342 if (status != NXGE_OK) { 3343 mutex_exit(nxgep->genlock); 3344 return (ENXIO); 3345 } 3346 } 3347 3348 mmac_info = &nxgep->nxge_mmac_info; 3349 if (mmac_info->naddrfree == 0) { 3350 mutex_exit(nxgep->genlock); 3351 return (ENOSPC); 3352 } 3353 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3354 maddr->mma_addrlen)) { 3355 mutex_exit(nxgep->genlock); 3356 return (EINVAL); 3357 } 3358 /* 3359 * Search for the first available slot. Because naddrfree 3360 * is not zero, we are guaranteed to find one. 3361 * Slot 0 is for unique (primary) MAC. The first alternate 3362 * MAC slot is slot 1. 3363 * Each of the first two ports of Neptune has 16 alternate 3364 * MAC slots but only the first 7 (or 15) slots have assigned factory 3365 * MAC addresses. We first search among the slots without bundled 3366 * factory MACs. If we fail to find one in that range, then we 3367 * search the slots with bundled factory MACs. A factory MAC 3368 * will be wasted while the slot is used with a user MAC address. 3369 * But the slot could be used by factory MAC again after calling 3370 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 3371 */ 3372 if (mmac_info->num_factory_mmac < mmac_info->num_mmac) { 3373 for (slot = mmac_info->num_factory_mmac + 1; 3374 slot <= mmac_info->num_mmac; slot++) { 3375 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3376 break; 3377 } 3378 if (slot > mmac_info->num_mmac) { 3379 for (slot = 1; slot <= mmac_info->num_factory_mmac; 3380 slot++) { 3381 if (!(mmac_info->mac_pool[slot].flags 3382 & MMAC_SLOT_USED)) 3383 break; 3384 } 3385 } 3386 } else { 3387 for (slot = 1; slot <= mmac_info->num_mmac; slot++) { 3388 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3389 break; 3390 } 3391 } 3392 ASSERT(slot <= mmac_info->num_mmac); 3393 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) { 3394 mutex_exit(nxgep->genlock); 3395 return (err); 3396 } 3397 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 3398 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 3399 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 3400 mmac_info->naddrfree--; 3401 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3402 3403 maddr->mma_slot = slot; 3404 3405 mutex_exit(nxgep->genlock); 3406 return (0); 3407 } 3408 3409 /* 3410 * This function reserves an unused slot and programs the slot and the HW 3411 * with a factory mac address. 
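 * The caller may pass mma_slot == -1 to request the first free
 * factory-capable slot; an explicit slot number is validated against
 * the range [1, num_factory_mmac] below.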
3412 */ 3413 static int 3414 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 3415 { 3416 p_nxge_t nxgep = arg; 3417 mac_addr_slot_t slot; 3418 nxge_mmac_t *mmac_info; 3419 int err; 3420 nxge_status_t status; 3421 3422 mutex_enter(nxgep->genlock); 3423 3424 /* 3425 * Make sure that nxge is initialized, if _start() has 3426 * not been called. 3427 */ 3428 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3429 status = nxge_init(nxgep); 3430 if (status != NXGE_OK) { 3431 mutex_exit(nxgep->genlock); 3432 return (ENXIO); 3433 } 3434 } 3435 3436 mmac_info = &nxgep->nxge_mmac_info; 3437 if (mmac_info->naddrfree == 0) { 3438 mutex_exit(nxgep->genlock); 3439 return (ENOSPC); 3440 } 3441 3442 slot = maddr->mma_slot; 3443 if (slot == -1) { /* -1: Take the first available slot */ 3444 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 3445 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3446 break; 3447 } 3448 if (slot > mmac_info->num_factory_mmac) { 3449 mutex_exit(nxgep->genlock); 3450 return (ENOSPC); 3451 } 3452 } 3453 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 3454 /* 3455 * Do not support factory MAC at a slot greater than 3456 * num_factory_mmac even when there are available factory 3457 * MAC addresses because the alternate MACs are bundled with 3458 * slot[1] through slot[num_factory_mmac] 3459 */ 3460 mutex_exit(nxgep->genlock); 3461 return (EINVAL); 3462 } 3463 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3464 mutex_exit(nxgep->genlock); 3465 return (EBUSY); 3466 } 3467 /* Verify the address to be reserved */ 3468 if (!mac_unicst_verify(nxgep->mach, 3469 mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 3470 mutex_exit(nxgep->genlock); 3471 return (EINVAL); 3472 } 3473 if (err = nxge_altmac_set(nxgep, 3474 mmac_info->factory_mac_pool[slot], slot)) { 3475 mutex_exit(nxgep->genlock); 3476 return (err); 3477 } 3478 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 3479 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3480 mmac_info->naddrfree--; 3481 3482 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 3483 mutex_exit(nxgep->genlock); 3484 3485 /* Pass info back to the caller */ 3486 maddr->mma_slot = slot; 3487 maddr->mma_addrlen = ETHERADDRL; 3488 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3489 3490 return (0); 3491 } 3492 3493 /* 3494 * Remove the specified mac address and update the HW not to filter 3495 * the mac address anymore. 3496 */ 3497 static int 3498 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 3499 { 3500 p_nxge_t nxgep = arg; 3501 nxge_mmac_t *mmac_info; 3502 uint8_t addrn; 3503 uint8_t portn; 3504 int err = 0; 3505 nxge_status_t status; 3506 3507 mutex_enter(nxgep->genlock); 3508 3509 /* 3510 * Make sure that nxge is initialized, if _start() has 3511 * not been called. 
 */
3513	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3514		status = nxge_init(nxgep);
3515		if (status != NXGE_OK) {
3516			mutex_exit(nxgep->genlock);
3517			return (ENXIO);
3518		}
3519	}
3520
3521	mmac_info = &nxgep->nxge_mmac_info;
3522	if (slot < 1 || slot > mmac_info->num_mmac) {
3523		mutex_exit(nxgep->genlock);
3524		return (EINVAL);
3525	}
3526
3527	portn = nxgep->mac.portnum;
3528	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
3529		addrn = (uint8_t)slot - 1;
3530	else
3531		addrn = (uint8_t)slot;
3532
3533	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3534		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
3535		    == NPI_SUCCESS) {
3536			mmac_info->naddrfree++;
3537			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
3538			/*
3539			 * Regardless of whether the MAC we just stopped
3540			 * filtering is a user addr or a factory addr, we
3541			 * must set the MMAC_VENDOR_ADDR flag if this slot
3542			 * has an associated factory MAC to indicate that
3543			 * a factory MAC is available.
3544			 */
3545			if (slot <= mmac_info->num_factory_mmac) {
3546				mmac_info->mac_pool[slot].flags
3547				    |= MMAC_VENDOR_ADDR;
3548			}
3549			/*
3550			 * Clear mac_pool[slot].addr so that kstat shows 0
3551			 * alternate MAC address if the slot is not used.
3552			 * (But nxge_m_mmac_get returns the factory MAC even
3553			 * when the slot is not used!)
3554			 */
3555			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
3556			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3557		} else {
3558			err = EIO;
3559		}
3560	} else {
3561		err = EINVAL;
3562	}
3563
3564	mutex_exit(nxgep->genlock);
3565	return (err);
3566 }
3567
3568
3569 /*
3570  * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve().
3571  */
3572 static int
3573 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
3574 {
3575	p_nxge_t nxgep = arg;
3576	mac_addr_slot_t slot;
3577	nxge_mmac_t *mmac_info;
3578	int err = 0;
3579	nxge_status_t status;
3580
3581	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
3582	    maddr->mma_addrlen))
3583		return (EINVAL);
3584
3585	slot = maddr->mma_slot;
3586
3587	mutex_enter(nxgep->genlock);
3588
3589	/*
3590	 * Make sure that nxge is initialized, if _start() has
3591	 * not been called.
3592	 */
3593	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3594		status = nxge_init(nxgep);
3595		if (status != NXGE_OK) {
3596			mutex_exit(nxgep->genlock);
3597			return (ENXIO);
3598		}
3599	}
3600
3601	mmac_info = &nxgep->nxge_mmac_info;
3602	if (slot < 1 || slot > mmac_info->num_mmac) {
3603		mutex_exit(nxgep->genlock);
3604		return (EINVAL);
3605	}
3606	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3607		if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
3608		    == 0) {
3609			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
3610			    ETHERADDRL);
3611			/*
3612			 * Assume that the MAC passed down from the caller
3613			 * is not a factory MAC address (the user should
3614			 * call mmac_remove followed by mmac_reserve to use
3615			 * the factory MAC for this slot).
3616			 */
3617			mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
3618			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3619		}
3620	} else {
3621		err = EINVAL;
3622	}
3623	mutex_exit(nxgep->genlock);
3624	return (err);
3625 }
3626
3627 /*
3628  * nxge_m_mmac_get() - Get the MAC address and other information
3629  * related to the slot. mma_flags should be set to 0 in the call.
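 * (The driver overwrites mma_flags on return, so the zero is only a
 * clean starting point for the flag bits set below.)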
3630  * Note: although kstat shows MAC address as zero when a slot is
3631  * not used, Crossbow expects nxge_m_mmac_get to copy the factory MAC
3632  * to the caller as long as the slot is not using a user MAC address.
3633  * The following table shows the rules:
3634  *
3635  *				USED	VENDOR	mma_addr
3636  * ------------------------------------------------------------
3637  * (1) Slot uses a user MAC:	yes	no	user MAC
3638  * (2) Slot uses a factory MAC:	yes	yes	factory MAC
3639  * (3) Slot is not used but is
3640  *     factory MAC capable:	no	yes	factory MAC
3641  * (4) Slot is not used and is
3642  *     not factory MAC capable:	no	no	0
3643  * ------------------------------------------------------------
3644  */
3645 static int
3646 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
3647 {
3648	nxge_t *nxgep = arg;
3649	mac_addr_slot_t slot;
3650	nxge_mmac_t *mmac_info;
3651	nxge_status_t status;
3652
3653	slot = maddr->mma_slot;
3654
3655	mutex_enter(nxgep->genlock);
3656
3657	/*
3658	 * Make sure that nxge is initialized, if _start() has
3659	 * not been called.
3660	 */
3661	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3662		status = nxge_init(nxgep);
3663		if (status != NXGE_OK) {
3664			mutex_exit(nxgep->genlock);
3665			return (ENXIO);
3666		}
3667	}
3668
3669	mmac_info = &nxgep->nxge_mmac_info;
3670
3671	if (slot < 1 || slot > mmac_info->num_mmac) {
3672		mutex_exit(nxgep->genlock);
3673		return (EINVAL);
3674	}
3675	maddr->mma_flags = 0;
3676	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
3677		maddr->mma_flags |= MMAC_SLOT_USED;
3678
3679	if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
3680		maddr->mma_flags |= MMAC_VENDOR_ADDR;
3681		bcopy(mmac_info->factory_mac_pool[slot],
3682		    maddr->mma_addr, ETHERADDRL);
3683		maddr->mma_addrlen = ETHERADDRL;
3684	} else {
3685		if (maddr->mma_flags & MMAC_SLOT_USED) {
3686			bcopy(mmac_info->mac_pool[slot].addr,
3687			    maddr->mma_addr, ETHERADDRL);
3688			maddr->mma_addrlen = ETHERADDRL;
3689		} else {
3690			bzero(maddr->mma_addr, ETHERADDRL);
3691			maddr->mma_addrlen = 0;
3692		}
3693	}
3694	mutex_exit(nxgep->genlock);
3695	return (0);
3696 }
3697
3698
3699 static boolean_t
3700 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3701 {
3702	nxge_t *nxgep = arg;
3703	uint32_t *txflags = cap_data;
3704	multiaddress_capab_t *mmacp = cap_data;
3705
3706	switch (cap) {
3707	case MAC_CAPAB_HCKSUM:
3708		*txflags = HCKSUM_INET_PARTIAL;
3709		break;
3710	case MAC_CAPAB_POLL:
3711		/*
3712		 * There's nothing for us to fill in; simply returning
3713		 * B_TRUE to state that we support polling is sufficient.
3714		 */
3715		break;
3716
3717	case MAC_CAPAB_MULTIADDRESS:
3718		mutex_enter(nxgep->genlock);
3719
3720		mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
3721		mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
3722		mmacp->maddr_flag = 0; /* 0 is required by PSARC 2006/265 */
3723		/*
3724		 * maddr_handle is the driver's private data, passed back
3725		 * to the entry point functions as their arg.
3726		 */
3727		mmacp->maddr_handle = nxgep;
3728		mmacp->maddr_add = nxge_m_mmac_add;
3729		mmacp->maddr_remove = nxge_m_mmac_remove;
3730		mmacp->maddr_modify = nxge_m_mmac_modify;
3731		mmacp->maddr_get = nxge_m_mmac_get;
3732		mmacp->maddr_reserve = nxge_m_mmac_reserve;
3733
3734		mutex_exit(nxgep->genlock);
3735		break;
3736	default:
3737		return (B_FALSE);
3738	}
3739	return (B_TRUE);
3740 }
3741
3742 /*
3743  * Module loading and removing entry points.
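 * _init() registers with the MAC framework (mac_init_ops) before
 * mod_install(), and _fini() refuses to unload while receive buffers
 * are still loaned upstream (nxge_mblks_pending != 0).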
3744 */ 3745 3746 static struct cb_ops nxge_cb_ops = { 3747 nodev, /* cb_open */ 3748 nodev, /* cb_close */ 3749 nodev, /* cb_strategy */ 3750 nodev, /* cb_print */ 3751 nodev, /* cb_dump */ 3752 nodev, /* cb_read */ 3753 nodev, /* cb_write */ 3754 nodev, /* cb_ioctl */ 3755 nodev, /* cb_devmap */ 3756 nodev, /* cb_mmap */ 3757 nodev, /* cb_segmap */ 3758 nochpoll, /* cb_chpoll */ 3759 ddi_prop_op, /* cb_prop_op */ 3760 NULL, 3761 D_MP, /* cb_flag */ 3762 CB_REV, /* rev */ 3763 nodev, /* int (*cb_aread)() */ 3764 nodev /* int (*cb_awrite)() */ 3765 }; 3766 3767 static struct dev_ops nxge_dev_ops = { 3768 DEVO_REV, /* devo_rev */ 3769 0, /* devo_refcnt */ 3770 nulldev, 3771 nulldev, /* devo_identify */ 3772 nulldev, /* devo_probe */ 3773 nxge_attach, /* devo_attach */ 3774 nxge_detach, /* devo_detach */ 3775 nodev, /* devo_reset */ 3776 &nxge_cb_ops, /* devo_cb_ops */ 3777 (struct bus_ops *)NULL, /* devo_bus_ops */ 3778 ddi_power /* devo_power */ 3779 }; 3780 3781 extern struct mod_ops mod_driverops; 3782 3783 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet %I%" 3784 3785 /* 3786 * Module linkage information for the kernel. 3787 */ 3788 static struct modldrv nxge_modldrv = { 3789 &mod_driverops, 3790 NXGE_DESC_VER, 3791 &nxge_dev_ops 3792 }; 3793 3794 static struct modlinkage modlinkage = { 3795 MODREV_1, (void *) &nxge_modldrv, NULL 3796 }; 3797 3798 int 3799 _init(void) 3800 { 3801 int status; 3802 3803 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 3804 mac_init_ops(&nxge_dev_ops, "nxge"); 3805 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 3806 if (status != 0) { 3807 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 3808 "failed to init device soft state")); 3809 goto _init_exit; 3810 } 3811 3812 status = mod_install(&modlinkage); 3813 if (status != 0) { 3814 ddi_soft_state_fini(&nxge_list); 3815 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 3816 goto _init_exit; 3817 } 3818 3819 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 3820 3821 _init_exit: 3822 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 3823 3824 return (status); 3825 } 3826 3827 int 3828 _fini(void) 3829 { 3830 int status; 3831 3832 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 3833 3834 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 3835 3836 if (nxge_mblks_pending) 3837 return (EBUSY); 3838 3839 status = mod_remove(&modlinkage); 3840 if (status != DDI_SUCCESS) { 3841 NXGE_DEBUG_MSG((NULL, MOD_CTL, 3842 "Module removal failed 0x%08x", 3843 status)); 3844 goto _fini_exit; 3845 } 3846 3847 mac_fini_ops(&nxge_dev_ops); 3848 3849 ddi_soft_state_fini(&nxge_list); 3850 3851 MUTEX_DESTROY(&nxge_common_lock); 3852 _fini_exit: 3853 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 3854 3855 return (status); 3856 } 3857 3858 int 3859 _info(struct modinfo *modinfop) 3860 { 3861 int status; 3862 3863 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 3864 status = mod_info(&modlinkage, modinfop); 3865 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 3866 3867 return (status); 3868 } 3869 3870 /*ARGSUSED*/ 3871 static nxge_status_t 3872 nxge_add_intrs(p_nxge_t nxgep) 3873 { 3874 3875 int intr_types; 3876 int type = 0; 3877 int ddi_status = DDI_SUCCESS; 3878 nxge_status_t status = NXGE_OK; 3879 3880 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 3881 3882 nxgep->nxge_intr_type.intr_registered = B_FALSE; 3883 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 3884 nxgep->nxge_intr_type.msi_intx_cnt = 0; 3885 nxgep->nxge_intr_type.intr_added = 0; 3886 
nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
3887	nxgep->nxge_intr_type.intr_type = 0;
3888
3889	if (nxgep->niu_type == N2_NIU) {
3890		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
3891	} else if (nxge_msi_enable) {
3892		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
3893	}
3894
3895	/* Get the supported interrupt types */
3896	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
3897	    != DDI_SUCCESS) {
3898		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
3899		    "ddi_intr_get_supported_types failed: status 0x%08x",
3900		    ddi_status));
3901		return (NXGE_ERROR | NXGE_DDI_FAILED);
3902	}
3903	nxgep->nxge_intr_type.intr_types = intr_types;
3904
3905	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3906	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3907
3908	/*
3909	 * Solaris MSI-X is not supported yet; use MSI for now.
3910	 * nxge_msi_enable (default 1):
3911	 *	1 - MSI		2 - MSI-X	others - FIXED
3912	 */
3913	switch (nxge_msi_enable) {
3914	default:
3915		type = DDI_INTR_TYPE_FIXED;
3916		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
3917		    "use fixed (intx emulation) type %08x",
3918		    type));
3919		break;
3920
3921	case 2:
3922		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
3923		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3924		if (intr_types & DDI_INTR_TYPE_MSIX) {
3925			type = DDI_INTR_TYPE_MSIX;
3926			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3927			    "ddi_intr_get_supported_types: MSIX 0x%08x",
3928			    type));
3929		} else if (intr_types & DDI_INTR_TYPE_MSI) {
3930			type = DDI_INTR_TYPE_MSI;
3931			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3932			    "ddi_intr_get_supported_types: MSI 0x%08x",
3933			    type));
3934		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3935			type = DDI_INTR_TYPE_FIXED;
3936			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
3937			    "ddi_intr_get_supported_types: FIXED 0x%08x",
3938			    type));
3939		}
3940		break;
3941
3942	case 1:
3943		if (intr_types & DDI_INTR_TYPE_MSI) {
3944			type = DDI_INTR_TYPE_MSI;
3945			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
3946			    "ddi_intr_get_supported_types: MSI 0x%08x",
3947			    type));
3948		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
3949			type = DDI_INTR_TYPE_MSIX;
3950			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3951			    "ddi_intr_get_supported_types: MSIX 0x%08x",
3952			    type));
3953		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3954			type = DDI_INTR_TYPE_FIXED;
3955			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3956			    "ddi_intr_get_supported_types: FIXED 0x%08x",
3957			    type));
3958		}
3959	}
3960
3961	nxgep->nxge_intr_type.intr_type = type;
3962	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3963	    type == DDI_INTR_TYPE_FIXED) &&
3964	    nxgep->nxge_intr_type.niu_msi_enable) {
3965		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
3966			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3967			    " nxge_add_intrs: "
3968			    " nxge_add_intrs_adv failed: status 0x%08x",
3969			    status));
3970			return (status);
3971		} else {
3972			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3973			    "interrupts registered : type %d", type));
3974			nxgep->nxge_intr_type.intr_registered = B_TRUE;
3975
3976			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
3977			    "\nAdded advanced nxge add_intr_adv "
3978			    "intr type 0x%x\n", type));
3979
3980			return (status);
3981		}
3982	}
3983
3984	if (!nxgep->nxge_intr_type.intr_registered) {
3985		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
3986		    "failed to register interrupts"));
3987		return (NXGE_ERROR | NXGE_DDI_FAILED);
3988	}
3989
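	/*
	 * Success: vectors are allocated and handlers registered, but the
	 * interrupts themselves are enabled later (intr_enabled is still
	 * B_FALSE at this point).
	 */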
/*ARGSUSED*/
static nxge_status_t
nxge_add_soft_intrs(p_nxge_t nxgep)
{
    int ddi_status = DDI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));

    nxgep->resched_id = NULL;
    nxgep->resched_running = B_FALSE;
    ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
        &nxgep->resched_id,
        NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
    if (ddi_status != DDI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
            "ddi_add_softintr failed: status 0x%08x",
            ddi_status));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));

    return (status);
}

static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)
{
    int intr_type;
    p_nxge_intr_t intrp;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));

    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
    intr_type = intrp->intr_type;
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
        intr_type));

    switch (intr_type) {
    case DDI_INTR_TYPE_MSI:     /* 0x2 */
    case DDI_INTR_TYPE_MSIX:    /* 0x4 */
        return (nxge_add_intrs_adv_type(nxgep, intr_type));

    case DDI_INTR_TYPE_FIXED:   /* 0x1 */
        return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));

    default:
        return (NXGE_ERROR);
    }
}
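/*
 * nxge_add_intrs_adv_type() allocates MSI/MSI-X vectors with
 * DDI_INTR_ALLOC_NORMAL, so ddi_intr_alloc(9F) may grant fewer
 * vectors than requested; fixed interrupts use DDI_INTR_ALLOC_STRICT,
 * which either grants the full request or fails.  MSI (unlike MSI-X)
 * also requires a power-of-2 vector count, so the available count is
 * rounded down first, e.g. (illustrative values):
 *
 *    navail = 5;     5 & 4 == 4, so request 4 vectors
 *    navail = 13;    13 & 8 == 8, so request 8 vectors
 */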
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
    dev_info_t *dip = nxgep->dip;
    p_nxge_ldg_t ldgp;
    p_nxge_intr_t intrp;
    uint_t *inthandler;
    void *arg1, *arg2;
    int behavior;
    int nintrs, navail;
    int nactual, nrequired;
    int inum = 0;
    int x, y;
    int ddi_status = DDI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
    intrp->start_inum = 0;

    ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
    if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "ddi_intr_get_nintrs() failed, status: 0x%x, "
            "nintrs: %d", ddi_status, nintrs));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
    if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "ddi_intr_get_navail() failed, status: 0x%x, "
            "navail: %d", ddi_status, navail));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL,
        "ddi_intr_get_navail() returned: nintrs %d, navail %d",
        nintrs, navail));

    if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
        /* MSI count must be a power of 2; round navail down */
        if ((navail & 16) == 16) {
            navail = 16;
        } else if ((navail & 8) == 8) {
            navail = 8;
        } else if ((navail & 4) == 4) {
            navail = 4;
        } else if ((navail & 2) == 2) {
            navail = 2;
        } else {
            navail = 1;
        }
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
            "navail %d", nintrs, navail));
    }

    behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
        DDI_INTR_ALLOC_NORMAL);
    intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
    intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
    ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
        navail, &nactual, behavior);
    if (ddi_status != DDI_SUCCESS || nactual == 0) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_alloc() failed: %d",
            ddi_status));
        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
        (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_get_pri() failed: %d",
            ddi_status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    nrequired = 0;
    switch (nxgep->niu_type) {
    default:
        status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
        break;

    case N2_NIU:
        status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
        break;
    }

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_add_intrs_adv_type: nxge_ldgv_init "
            "failed: 0x%x", status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (status);
    }

    ldgp = nxgep->ldgvp->ldgp;
    for (x = 0; x < nrequired; x++, ldgp++) {
        ldgp->vector = (uint8_t)x;
        ldgp->intdata = SID_DATA(ldgp->func, x);
        arg1 = ldgp->ldvp;
        arg2 = nxgep;
        if (ldgp->nldvs == 1) {
            inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
            NXGE_DEBUG_MSG((nxgep, INT_CTL,
                "nxge_add_intrs_adv_type: "
                "arg1 0x%x arg2 0x%x: "
                "1-1 int handler (entry %d intdata 0x%x)\n",
                arg1, arg2,
                x, ldgp->intdata));
        } else if (ldgp->nldvs > 1) {
            inthandler = (uint_t *)ldgp->sys_intr_handler;
            NXGE_DEBUG_MSG((nxgep, INT_CTL,
                "nxge_add_intrs_adv_type: "
                "arg1 0x%x arg2 0x%x: "
                "nldvs %d int handler "
                "(entry %d intdata 0x%x)\n",
                arg1, arg2,
                ldgp->nldvs, x, ldgp->intdata));
        }

        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
            "htable 0x%llx", x, intrp->htable[x]));

        if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
            (ddi_intr_handler_t *)inthandler, arg1, arg2))
            != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_add_intrs_adv_type: failed #%d "
                "status 0x%x", x, ddi_status));
            for (y = 0; y < intrp->intr_added; y++) {
                (void) ddi_intr_remove_handler(
                    intrp->htable[y]);
            }
            /* Free already allocated interrupts */
            for (y = 0; y < nactual; y++) {
                (void) ddi_intr_free(intrp->htable[y]);
            }
            kmem_free(intrp->htable, intrp->intr_size);

            (void) nxge_ldgv_uninit(nxgep);

            return (NXGE_ERROR | NXGE_DDI_FAILED);
        }
        intrp->intr_added++;
    }

    intrp->msi_intx_cnt = nactual;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
        navail, nactual,
        intrp->msi_intx_cnt,
        intrp->intr_added));

    (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

    (void) nxge_intr_ldgv_init(nxgep);

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

    return (status);
}
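/*
 * nxge_add_intrs_adv_type_fix() is the fixed-interrupt variant of the
 * routine above.  The flow is the same; the notable differences are
 * that allocation uses DDI_INTR_ALLOC_STRICT and that the system
 * interrupt data is programmed via SID_DATA() only for non-N2
 * (Neptune) devices.
 */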
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
    dev_info_t *dip = nxgep->dip;
    p_nxge_ldg_t ldgp;
    p_nxge_intr_t intrp;
    uint_t *inthandler;
    void *arg1, *arg2;
    int behavior;
    int nintrs, navail;
    int nactual, nrequired;
    int inum = 0;
    int x, y;
    int ddi_status = DDI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
    intrp->start_inum = 0;

    ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
    if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "ddi_intr_get_nintrs() failed, status: 0x%x, "
            "nintrs: %d", ddi_status, nintrs));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
    if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "ddi_intr_get_navail() failed, status: 0x%x, "
            "navail: %d", ddi_status, navail));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL,
        "ddi_intr_get_navail() returned: nintrs %d, navail %d",
        nintrs, navail));

    behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
        DDI_INTR_ALLOC_NORMAL);
    intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
    intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
    ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
        navail, &nactual, behavior);
    if (ddi_status != DDI_SUCCESS || nactual == 0) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_alloc() failed: %d",
            ddi_status));
        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
        (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_get_pri() failed: %d",
            ddi_status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    nrequired = 0;
    switch (nxgep->niu_type) {
    default:
        status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
        break;

    case N2_NIU:
        status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
        break;
    }

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
            "failed: 0x%x", status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (status);
    }

    ldgp = nxgep->ldgvp->ldgp;
    for (x = 0; x < nrequired; x++, ldgp++) {
        ldgp->vector = (uint8_t)x;
        if (nxgep->niu_type != N2_NIU) {
            ldgp->intdata = SID_DATA(ldgp->func, x);
        }

        arg1 = ldgp->ldvp;
        arg2 = nxgep;
        if (ldgp->nldvs == 1) {
            inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
            NXGE_DEBUG_MSG((nxgep, INT_CTL,
                "nxge_add_intrs_adv_type_fix: "
                "1-1 int handler(%d) ldg %d ldv %d "
                "arg1 $%p arg2 $%p\n",
                x, ldgp->ldg, ldgp->ldvp->ldv,
                arg1, arg2));
        } else if (ldgp->nldvs > 1) {
            inthandler = (uint_t *)ldgp->sys_intr_handler;
            NXGE_DEBUG_MSG((nxgep, INT_CTL,
                "nxge_add_intrs_adv_type_fix: "
                "shared int handler(%d) nldvs %d ldg %d ldv %d "
                "arg1 0x%016llx arg2 0x%016llx\n",
                x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
                arg1, arg2));
        }

        if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
            (ddi_intr_handler_t *)inthandler, arg1, arg2))
            != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_add_intrs_adv_type_fix: failed #%d "
                "status 0x%x", x, ddi_status));
            for (y = 0; y < intrp->intr_added; y++) {
                (void) ddi_intr_remove_handler(
                    intrp->htable[y]);
            }
            /* Free already allocated interrupts */
            for (y = 0; y < nactual; y++) {
                (void) ddi_intr_free(intrp->htable[y]);
            }
            kmem_free(intrp->htable, intrp->intr_size);

            (void) nxge_ldgv_uninit(nxgep);

            return (NXGE_ERROR | NXGE_DDI_FAILED);
        }
        intrp->intr_added++;
    }

    intrp->msi_intx_cnt = nactual;

    (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

    status = nxge_intr_ldgv_init(nxgep);
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));

    return (status);
}
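/*
 * Teardown is the mirror image of nxge_add_intrs_adv_type*():
 * disable the vectors (as a block if DDI_INTR_FLAG_BLOCK is set),
 * remove the handlers, free the handles and the handle table, and
 * finally undo the logical-device-group setup via nxge_ldgv_uninit().
 */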
"1-1 int handler(%d) ldg %d ldv %d " 4335 "arg1 $%p arg2 $%p\n", 4336 x, ldgp->ldg, ldgp->ldvp->ldv, 4337 arg1, arg2)); 4338 } else if (ldgp->nldvs > 1) { 4339 inthandler = (uint_t *)ldgp->sys_intr_handler; 4340 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4341 "nxge_add_intrs_adv_type_fix: " 4342 "shared ldv %d int handler(%d) ldv %d ldg %d" 4343 "arg1 0x%016llx arg2 0x%016llx\n", 4344 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 4345 arg1, arg2)); 4346 } 4347 4348 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 4349 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 4350 != DDI_SUCCESS) { 4351 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4352 "==> nxge_add_intrs_adv_type_fix: failed #%d " 4353 "status 0x%x", x, ddi_status)); 4354 for (y = 0; y < intrp->intr_added; y++) { 4355 (void) ddi_intr_remove_handler( 4356 intrp->htable[y]); 4357 } 4358 for (y = 0; y < nactual; y++) { 4359 (void) ddi_intr_free(intrp->htable[y]); 4360 } 4361 /* Free already allocated intr */ 4362 kmem_free(intrp->htable, intrp->intr_size); 4363 4364 (void) nxge_ldgv_uninit(nxgep); 4365 4366 return (NXGE_ERROR | NXGE_DDI_FAILED); 4367 } 4368 intrp->intr_added++; 4369 } 4370 4371 intrp->msi_intx_cnt = nactual; 4372 4373 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 4374 4375 status = nxge_intr_ldgv_init(nxgep); 4376 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 4377 4378 return (status); 4379 } 4380 4381 static void 4382 nxge_remove_intrs(p_nxge_t nxgep) 4383 { 4384 int i, inum; 4385 p_nxge_intr_t intrp; 4386 4387 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 4388 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4389 if (!intrp->intr_registered) { 4390 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4391 "<== nxge_remove_intrs: interrupts not registered")); 4392 return; 4393 } 4394 4395 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 4396 4397 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4398 (void) ddi_intr_block_disable(intrp->htable, 4399 intrp->intr_added); 4400 } else { 4401 for (i = 0; i < intrp->intr_added; i++) { 4402 (void) ddi_intr_disable(intrp->htable[i]); 4403 } 4404 } 4405 4406 for (inum = 0; inum < intrp->intr_added; inum++) { 4407 if (intrp->htable[inum]) { 4408 (void) ddi_intr_remove_handler(intrp->htable[inum]); 4409 } 4410 } 4411 4412 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 4413 if (intrp->htable[inum]) { 4414 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 4415 "nxge_remove_intrs: ddi_intr_free inum %d " 4416 "msi_intx_cnt %d intr_added %d", 4417 inum, 4418 intrp->msi_intx_cnt, 4419 intrp->intr_added)); 4420 4421 (void) ddi_intr_free(intrp->htable[inum]); 4422 } 4423 } 4424 4425 kmem_free(intrp->htable, intrp->intr_size); 4426 intrp->intr_registered = B_FALSE; 4427 intrp->intr_enabled = B_FALSE; 4428 intrp->msi_intx_cnt = 0; 4429 intrp->intr_added = 0; 4430 4431 (void) nxge_ldgv_uninit(nxgep); 4432 4433 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 4434 } 4435 4436 /*ARGSUSED*/ 4437 static void 4438 nxge_remove_soft_intrs(p_nxge_t nxgep) 4439 { 4440 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs")); 4441 if (nxgep->resched_id) { 4442 ddi_remove_softintr(nxgep->resched_id); 4443 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4444 "==> nxge_remove_soft_intrs: removed")); 4445 nxgep->resched_id = NULL; 4446 } 4447 4448 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs")); 4449 } 4450 4451 /*ARGSUSED*/ 4452 static void 4453 nxge_intrs_enable(p_nxge_t nxgep) 4454 { 4455 p_nxge_intr_t intrp; 4456 int i; 4457 int status; 4458 4459 
/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
    p_nxge_intr_t intrp;
    int i;
    int status;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

    if (!intrp->intr_registered) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
            "interrupts are not registered"));
        return;
    }

    if (intrp->intr_enabled) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "<== nxge_intrs_enable: already enabled"));
        return;
    }

    if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
        status = ddi_intr_block_enable(intrp->htable,
            intrp->intr_added);
        NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
            "block enable - status 0x%x total inums #%d\n",
            status, intrp->intr_added));
        if (status == DDI_SUCCESS) {
            intrp->intr_enabled = B_TRUE;
        }
    } else {
        for (i = 0; i < intrp->intr_added; i++) {
            status = ddi_intr_enable(intrp->htable[i]);
            NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
                "ddi_intr_enable: status 0x%x "
                "total inums %d enable inum #%d\n",
                status, intrp->intr_added, i));
            if (status == DDI_SUCCESS) {
                intrp->intr_enabled = B_TRUE;
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
    p_nxge_intr_t intrp;
    int i;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

    if (!intrp->intr_registered) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
            "interrupts are not registered"));
        return;
    }

    if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
        (void) ddi_intr_block_disable(intrp->htable,
            intrp->intr_added);
    } else {
        for (i = 0; i < intrp->intr_added; i++) {
            (void) ddi_intr_disable(intrp->htable[i]);
        }
    }

    intrp->intr_enabled = B_FALSE;
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
    mac_register_t *macp;
    int status;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

    if ((macp = mac_alloc(MAC_VERSION)) == NULL)
        return (NXGE_ERROR);

    macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    macp->m_driver = nxgep;
    macp->m_dip = nxgep->dip;
    macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
    macp->m_callbacks = &nxge_m_callbacks;
    macp->m_min_sdu = 0;
    /* Max SDU: maxframesize less Ethernet header, FCS and VLAN tag */
    macp->m_max_sdu = nxgep->mac.maxframesize -
        sizeof (struct ether_header) - ETHERFCSL - 4;

    status = mac_register(macp, &nxgep->mach);
    mac_free(macp);

    if (status != 0) {
        cmn_err(CE_WARN,
            "!nxge_mac_register failed (status %d instance %d)",
            status, nxgep->instance);
        return (NXGE_ERROR);
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
        "(instance %d)", nxgep->instance));

    return (NXGE_OK);
}
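/*
 * nxge_err_inject() services the debug error-injection ioctl.  The
 * M_IOCTL payload (mp->b_cont) carries an err_inject_t selecting a
 * hardware block, an error id and, for the DMA blocks, a channel.
 * A minimal sketch of a user-level sender (the ioctl command name
 * NXGE_ERR_INJECT is hypothetical, shown for illustration only):
 *
 *    err_inject_t ei = { .blk_id = TXDMA_BLK_ID,
 *        .err_id = err, .chan = 0 };
 *    struct strioctl sioc = { NXGE_ERR_INJECT, 0, sizeof (ei),
 *        (char *)&ei };
 *    (void) ioctl(fd, I_STR, &sioc);
 */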
void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
    ssize_t size;
    mblk_t *nmp;
    uint8_t blk_id;
    uint8_t chan;
    uint32_t err_id;
    err_inject_t *eip;

    NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

    size = 1024;
    nmp = mp->b_cont;
    eip = (err_inject_t *)nmp->b_rptr;
    blk_id = eip->blk_id;
    err_id = eip->err_id;
    chan = eip->chan;
    cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
    cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
    cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
    /* Blocks with an empty case accept the ioctl but inject nothing */
    switch (blk_id) {
    case MAC_BLK_ID:
        break;
    case TXMAC_BLK_ID:
        break;
    case RXMAC_BLK_ID:
        break;
    case MIF_BLK_ID:
        break;
    case IPP_BLK_ID:
        nxge_ipp_inject_err(nxgep, err_id);
        break;
    case TXC_BLK_ID:
        nxge_txc_inject_err(nxgep, err_id);
        break;
    case TXDMA_BLK_ID:
        nxge_txdma_inject_err(nxgep, err_id, chan);
        break;
    case RXDMA_BLK_ID:
        nxge_rxdma_inject_err(nxgep, err_id, chan);
        break;
    case ZCP_BLK_ID:
        nxge_zcp_inject_err(nxgep, err_id);
        break;
    case ESPC_BLK_ID:
        break;
    case FFLP_BLK_ID:
        break;
    case PHY_BLK_ID:
        break;
    case ETHER_SERDES_BLK_ID:
        break;
    case PCIE_SERDES_BLK_ID:
        break;
    case VIR_BLK_ID:
        break;
    }

    nmp->b_wptr = nmp->b_rptr + size;
    NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

    miocack(wq, mp, (int)size, 0);
}

static int
nxge_init_common_dev(p_nxge_t nxgep)
{
    p_nxge_hw_list_t hw_p;
    dev_info_t *p_dip;

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

    p_dip = nxgep->p_dip;
    MUTEX_ENTER(&nxge_common_lock);
    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
        "==> nxge_init_common_dev: func # %d",
        nxgep->function_num));
    /*
     * Loop through the existing per-Neptune hardware list.
     */
    for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_init_common_device: func # %d "
            "hw_p $%p parent dip $%p",
            nxgep->function_num,
            hw_p,
            p_dip));
        if (hw_p->parent_devp == p_dip) {
            nxgep->nxge_hw_p = hw_p;
            hw_p->ndevs++;
            hw_p->nxge_p[nxgep->function_num] = nxgep;
            NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                "==> nxge_init_common_device: func # %d "
                "hw_p $%p parent dip $%p "
                "ndevs %d (found)",
                nxgep->function_num,
                hw_p,
                p_dip,
                hw_p->ndevs));
            break;
        }
    }

    if (hw_p == NULL) {
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_init_common_device: func # %d "
            "parent dip $%p (new)",
            nxgep->function_num,
            p_dip));
        hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
        hw_p->parent_devp = p_dip;
        hw_p->magic = NXGE_NEPTUNE_MAGIC;
        nxgep->nxge_hw_p = hw_p;
        hw_p->ndevs++;
        hw_p->nxge_p[nxgep->function_num] = nxgep;
        hw_p->next = nxge_hw_list;
        if (nxgep->niu_type == N2_NIU) {
            hw_p->niu_type = N2_NIU;
            hw_p->platform_type = P_NEPTUNE_NIU;
        } else {
            hw_p->niu_type = NIU_TYPE_NONE;
            hw_p->platform_type = P_NEPTUNE_ATLAS;
        }

        MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);

        nxge_hw_list = hw_p;

        (void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
    }

    MUTEX_EXIT(&nxge_common_lock);

    if (nxgep->niu_type != N2_NIU) {
        nxgep->niu_type = hw_p->niu_type;
        if (!NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type)) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "<== nxge_init_common_device"
                " Invalid Neptune type [0x%x]", nxgep->niu_type));
            return (NXGE_ERROR);
        }
    }

    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
        "==> nxge_init_common_device (nxge_hw_list) $%p",
        nxge_hw_list));
    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

    return (NXGE_OK);
}
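/*
 * Drop this function's reference on the shared per-device state.
 * The nxge_hw_list_t entry is reference-counted through ndevs; the
 * last function to detach destroys the shared mutexes, unlinks the
 * entry from nxge_hw_list and frees it.
 */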
static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
    p_nxge_hw_list_t hw_p, h_hw_p;
    dev_info_t *p_dip;

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
    if (nxgep->nxge_hw_p == NULL) {
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "<== nxge_uninit_common_device (no common)"));
        return;
    }

    MUTEX_ENTER(&nxge_common_lock);
    h_hw_p = nxge_hw_list;
    for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
        p_dip = hw_p->parent_devp;
        if (nxgep->nxge_hw_p == hw_p &&
            p_dip == nxgep->p_dip &&
            nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
            hw_p->magic == NXGE_NEPTUNE_MAGIC) {

            NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                "==> nxge_uninit_common_device: func # %d "
                "hw_p $%p parent dip $%p "
                "ndevs %d (found)",
                nxgep->function_num,
                hw_p,
                p_dip,
                hw_p->ndevs));

            nxgep->nxge_hw_p = NULL;
            if (hw_p->ndevs) {
                hw_p->ndevs--;
            }
            hw_p->nxge_p[nxgep->function_num] = NULL;
            if (!hw_p->ndevs) {
                MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
                MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
                MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
                MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
                MUTEX_DESTROY(&hw_p->nxge_mii_lock);
                NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                    "==> nxge_uninit_common_device: "
                    "func # %d "
                    "hw_p $%p parent dip $%p "
                    "ndevs %d (last)",
                    nxgep->function_num,
                    hw_p,
                    p_dip,
                    hw_p->ndevs));

                if (hw_p == nxge_hw_list) {
                    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                        "==> nxge_uninit_common_device: "
                        "remove head func # %d "
                        "hw_p $%p parent dip $%p "
                        "ndevs %d (head)",
                        nxgep->function_num,
                        hw_p,
                        p_dip,
                        hw_p->ndevs));
                    nxge_hw_list = hw_p->next;
                } else {
                    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                        "==> nxge_uninit_common_device: "
                        "remove middle func # %d "
                        "hw_p $%p parent dip $%p "
                        "ndevs %d (middle)",
                        nxgep->function_num,
                        hw_p,
                        p_dip,
                        hw_p->ndevs));
                    h_hw_p->next = hw_p->next;
                }

                KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
            }
            break;
        } else {
            h_hw_p = hw_p;
        }
    }

    MUTEX_EXIT(&nxge_common_lock);
    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
        "==> nxge_uninit_common_device (nxge_hw_list) $%p",
        nxge_hw_list));

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}

/*
 * Determine the number of ports from the given niu_type.
 * Returns the number of ports, or zero if the type is unknown.
 */
int
nxge_nports_from_niu_type(niu_type_t niu_type)
{
    int nports = 0;

    switch (niu_type) {
    case N2_NIU:
    case NEPTUNE_2_10GF:
        nports = 2;
        break;
    case NEPTUNE_4_1GC:
    case NEPTUNE_2_10GF_2_1GC:
    case NEPTUNE_1_10GF_3_1GC:
    case NEPTUNE_1_1GC_1_10GF_2_1GC:
        nports = 4;
        break;
    default:
        break;
    }

    return (nports);
}
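/*
 * Example (illustrative only; not part of the driver): a caller that
 * validates a port number against the configured NIU type might do
 *
 *    int nports = nxge_nports_from_niu_type(nxgep->niu_type);
 *
 *    if (nports == 0 || portn >= nports)
 *        return (NXGE_ERROR);
 *
 * where portn is a hypothetical port index supplied by the caller.
 */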