/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include	<sys/nxge/nxge_impl.h>
#include	<sys/pcie.h>

uint32_t	nxge_use_partition = 0;		/* debug partition flag */
uint32_t	nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */

/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 * and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t	nxge_msi_enable = 2;
#else
uint32_t	nxge_msi_enable = 1;
#endif

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t	nxge_rbr_spare_size = 0;
uint32_t	nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t	nxge_no_msg = B_TRUE;		/* control message display */
uint32_t	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t	nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t	nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t	nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t	nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t	nxge_jumbo_enable = B_FALSE;
uint16_t	nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t	nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t	nxge_tx_scheme = NXGE_USE_SERIAL;
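/*
 * For example (hypothetical values), the tunables above may be set in
 * /etc/system and take effect at the next boot:
 *
 *	set nxge:nxge_rbr_size = 0x800
 *	set nxge:nxge_jumbo_enable = 1
 *
 * or patched on a live system with mdb -kw (e.g. "nxge_jumbo_enable/W 1").
 */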
/*
 * Debugging flags:
 *		nxge_no_tx_lb : transmit load balancing
 *		nxge_tx_lb_policy: 0 - TCP port (default)
 *				   3 - DEST MAC
 */
uint32_t	nxge_no_tx_lb = 0;
uint32_t	nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t	nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

rtrace_t npi_rtracebuf;

#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};
#endif
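/*
 * The niu_hsvc descriptor above is passed to hsvc_register() in
 * nxge_attach() on N2/NIU systems so that the driver negotiates the NIU
 * hypervisor service group (major/minor version) before it makes use of
 * any hypervisor-managed NIU resources.
 */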
/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);

/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_unicst(void *, const uint8_t *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static void nxge_m_resources(void *);
mblk_t *nxge_m_tx(void *arg, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	mac_addr_slot_t slot);
static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
	boolean_t factory);
static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)

static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);

static mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	nxge_m_unicst,
	nxge_m_tx,
	nxge_m_resources,
	nxge_m_ioctl,
	nxge_m_getcapab
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);

/*
 * These global variables control the message output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level = 0;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system.  The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

nxge_os_mutex_t nxge_mii_lock;
static uint32_t nxge_mii_lock_init = 0;
nxge_os_mutex_t nxge_mdio_lock;
static uint32_t nxge_mdio_lock_init = 0;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t, ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *, ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};
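/*
 * Note that registers and descriptors are mapped little-endian
 * (DDI_STRUCTURE_LE_ACC above and in the descriptor attributes below),
 * while packet data buffers are mapped with DDI_STRUCTURE_BE_ACC.
 * DDI_STRICTORDER_ACC keeps the framework from reordering or merging
 * PIO accesses to the device.
 */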
/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes[] = {0x2000};
#else
size_t alloc_sizes[] = {0x1000, 0x2000, 0x4000, 0x8000,
	0x10000, 0x20000, 0x40000, 0x80000,
	0x100000, 0x200000, 0x400000, 0x800000, 0x1000000};
#endif
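/*
 * Note: unless USE_RX_BIG_BUF is defined, nxge_rx_dma_attr.dma_attr_align
 * above is overwritten with the system page size in
 * nxge_setup_system_dma_pages() below, so the receive buffer alignment
 * ultimately tracks the page size chosen there.
 */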
/*
 * nxge_attach:  Attach (or resume) a Neptune/NIU instance.
 */
static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
	    &nxge_dev_desc_dma_acc_attr, &nxge_rx_dma_attr);

	status = nxge_map_regs(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
	nxgep->mac.portnum = portn;
	if ((portn == 0) || (portn == 1))
		nxgep->mac.porttype = PORT_TYPE_XMAC;
	else
		nxgep->mac.porttype = PORT_TYPE_BMAC;
	/*
	 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
	 * internally, and the other 2 ports use BMAC (1G "Big" MAC).
	 * The two types of MACs have different characteristics.
	 */
	mmac_info = &nxgep->nxge_mmac_info;
	if (nxgep->function_num < 2) {
		mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
		mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
	} else {
		mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
		mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
	}
	/*
	 * Setup the ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);
	/*
	 * Setup the register tracing buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Read the VPD info from the EEPROM into a local data
	 * structure, and check the VPD info for validity.
	 */
	nxge_vpd_info_get(nxgep);

	status = nxge_xcvr_find(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
		    " Couldn't determine card type"
		    " .... exit "));
		goto nxge_attach_fail5;
	}

	status = nxge_get_config_properties(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed"));
		goto nxge_attach_fail;
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d "
			    "group: 0x%lx major: 0x%lx minor: 0x%lx "
			    "errno: %d",
			    niu_hsvc.hsvc_modname,
			    niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group,
			    niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor,
			    status));
			status = DDI_FAILURE;
			goto nxge_attach_fail;
		}

		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}
#endif

	nxge_hw_id_init(nxgep);
	nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}
	status = nxge_add_soft_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed"));
		goto nxge_attach_fail;
	}

	/*
	 * Enable interrupts.
	 */
	nxge_intrs_enable(nxgep);

	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
	    instance));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}
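/*
 * nxge_detach:  Detach, suspend (DDI_SUSPEND) or power-manage
 * (DDI_PM_SUSPEND) an instance.  Only DDI_DETACH unregisters from the
 * MAC layer and tears the instance down via nxge_unattach().
 */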
static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int status = DDI_SUCCESS;
	int instance;
	p_nxge_t nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/* remove soft interrupts */
	nxge_remove_soft_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	nxge_destroy_dev(nxgep);

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}
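/*
 * nxge_map_regs:  Map the device register sets.
 *
 * For Neptune (PCI-E), the register sets are:
 *	0 - PCI config space, 1 - device PIO registers,
 *	2 - MSI/MSI-X registers, 3 - virtualization (VIO) region.
 * For N2/NIU (FWARC 2006/556), "reg" starts at 1:
 *	1 - device PIO registers, 2 and 3 - virtualization regions.
 */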
static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	p_dev_regs_t dev_regs;
	char buf[MAXPATHLEN + 1];
	char *devname;
#ifdef NXGE_DEBUG
	char *sysname;
#endif
	off_t regsize;
	nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int *prop_val;
		uint_t prop_len;
		uint8_t func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg", &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;
		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d", func_num));
			nxgep->function_num = func_num;
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMPORTANT: workaround for a bit-swapping bug in the
		 * hardware which ends up with no-snoop enabled,
		 * resulting in DMA not being synchronized properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}
"==> nxge_unmap_regs: device registers")); 1096 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1097 nxgep->dev_regs->nxge_regh = NULL; 1098 } 1099 if (nxgep->dev_regs->nxge_msix_regh) { 1100 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1101 "==> nxge_unmap_regs: device interrupts")); 1102 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); 1103 nxgep->dev_regs->nxge_msix_regh = NULL; 1104 } 1105 if (nxgep->dev_regs->nxge_vir_regh) { 1106 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1107 "==> nxge_unmap_regs: vio region")); 1108 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); 1109 nxgep->dev_regs->nxge_vir_regh = NULL; 1110 } 1111 if (nxgep->dev_regs->nxge_vir2_regh) { 1112 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1113 "==> nxge_unmap_regs: vio2 region")); 1114 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); 1115 nxgep->dev_regs->nxge_vir2_regh = NULL; 1116 } 1117 1118 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); 1119 nxgep->dev_regs = NULL; 1120 } 1121 1122 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); 1123 } 1124 1125 static nxge_status_t 1126 nxge_setup_mutexes(p_nxge_t nxgep) 1127 { 1128 int ddi_status = DDI_SUCCESS; 1129 nxge_status_t status = NXGE_OK; 1130 nxge_classify_t *classify_ptr; 1131 int partition; 1132 1133 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); 1134 1135 /* 1136 * Get the interrupt cookie so the mutexes can be 1137 * Initialized. 1138 */ 1139 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, 1140 &nxgep->interrupt_cookie); 1141 if (ddi_status != DDI_SUCCESS) { 1142 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1143 "<== nxge_setup_mutexes: failed 0x%x", ddi_status)); 1144 goto nxge_setup_mutexes_exit; 1145 } 1146 1147 /* Initialize global mutex */ 1148 1149 if (nxge_mdio_lock_init == 0) { 1150 MUTEX_INIT(&nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 1151 } 1152 atomic_add_32(&nxge_mdio_lock_init, 1); 1153 1154 if (nxge_mii_lock_init == 0) { 1155 MUTEX_INIT(&nxge_mii_lock, NULL, MUTEX_DRIVER, NULL); 1156 } 1157 atomic_add_32(&nxge_mii_lock_init, 1); 1158 1159 nxgep->drv_state |= STATE_MDIO_LOCK_INIT; 1160 nxgep->drv_state |= STATE_MII_LOCK_INIT; 1161 1162 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL); 1163 MUTEX_INIT(&nxgep->poll_lock, NULL, 1164 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1165 1166 /* 1167 * Initialize mutexes for this device. 1168 */ 1169 MUTEX_INIT(nxgep->genlock, NULL, 1170 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1171 MUTEX_INIT(&nxgep->ouraddr_lock, NULL, 1172 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1173 MUTEX_INIT(&nxgep->mif_lock, NULL, 1174 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1175 RW_INIT(&nxgep->filter_lock, NULL, 1176 RW_DRIVER, (void *)nxgep->interrupt_cookie); 1177 1178 classify_ptr = &nxgep->classifier; 1179 /* 1180 * FFLP Mutexes are never used in interrupt context 1181 * as fflp operation can take very long time to 1182 * complete and hence not suitable to invoke from interrupt 1183 * handlers. 
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}
	if (nxgep->drv_state & STATE_MDIO_LOCK_INIT) {
		if (nxge_mdio_lock_init == 1) {
			MUTEX_DESTROY(&nxge_mdio_lock);
		}
		atomic_add_32(&nxge_mdio_lock_init, -1);
	}
	if (nxgep->drv_state & STATE_MII_LOCK_INIT) {
		if (nxge_mii_lock_init == 1) {
			MUTEX_DESTROY(&nxge_mii_lock);
		}
		atomic_add_32(&nxge_mii_lock_init, -1);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	/*
	 * Initialize and enable the TXC registers.
	 * (Globally enable the TX controller, enable the port,
	 * configure the dma channel bitmap, and configure the
	 * max burst size.)
	 */
	status = nxge_txc_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
		goto nxge_init_fail2;
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep);

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	(void) nxge_txc_uninit(nxgep);
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}

timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) ||
	    (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}
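/*
 * nxge_uninit:  The inverse of nxge_init().  Quiesce the hardware
 * (MACs, IPP, classifier, TX/RX DMA channels), then release the
 * memory pools allocated by nxge_init().
 */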
void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t regdata;
	int i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
	reg = buf[0];

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}

nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);
	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
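/*
 * Note that nxge_dump_packet() above returns a pointer to a single
 * static buffer (etherbuf), so it is not reentrant; callers must
 * consume the string before the next call.
 */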
#ifdef NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char *dev_ptr;
	ddi_acc_handle_t pci_config_handle;
	uint32_t regval;
	int i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first 0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(cfg_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}
}
#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

	nxge_intrs_disable(nxgep);
	nxge_destroy_dev(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

	nxgep->suspended = DDI_RESUME;
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);
	(void) nxge_tx_mac_enable(nxgep);
	nxge_intrs_enable(nxgep);
	nxgep->suspended = 0;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_resume status = 0x%x", status));
	return (status);
}
status " 1688 "(xcvr init 0x%08x)", status)); 1689 goto nxge_setup_dev_exit; 1690 } 1691 1692 nxge_setup_dev_exit: 1693 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1694 "<== nxge_setup_dev port %d status = 0x%08x", 1695 nxgep->mac.portnum, status)); 1696 1697 return (status); 1698 } 1699 1700 static void 1701 nxge_destroy_dev(p_nxge_t nxgep) 1702 { 1703 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 1704 1705 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1706 1707 (void) nxge_hw_stop(nxgep); 1708 1709 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 1710 } 1711 1712 static nxge_status_t 1713 nxge_setup_system_dma_pages(p_nxge_t nxgep) 1714 { 1715 int ddi_status = DDI_SUCCESS; 1716 uint_t count; 1717 ddi_dma_cookie_t cookie; 1718 uint_t iommu_pagesize; 1719 nxge_status_t status = NXGE_OK; 1720 1721 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 1722 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 1723 if (nxgep->niu_type != N2_NIU) { 1724 iommu_pagesize = dvma_pagesize(nxgep->dip); 1725 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1726 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1727 " default_block_size %d iommu_pagesize %d", 1728 nxgep->sys_page_sz, 1729 ddi_ptob(nxgep->dip, (ulong_t)1), 1730 nxgep->rx_default_block_size, 1731 iommu_pagesize)); 1732 1733 if (iommu_pagesize != 0) { 1734 if (nxgep->sys_page_sz == iommu_pagesize) { 1735 if (iommu_pagesize > 0x4000) 1736 nxgep->sys_page_sz = 0x4000; 1737 } else { 1738 if (nxgep->sys_page_sz > iommu_pagesize) 1739 nxgep->sys_page_sz = iommu_pagesize; 1740 } 1741 } 1742 } 1743 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 1744 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1745 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1746 "default_block_size %d page mask %d", 1747 nxgep->sys_page_sz, 1748 ddi_ptob(nxgep->dip, (ulong_t)1), 1749 nxgep->rx_default_block_size, 1750 nxgep->sys_page_mask)); 1751 1752 1753 switch (nxgep->sys_page_sz) { 1754 default: 1755 nxgep->sys_page_sz = 0x1000; 1756 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 1757 nxgep->rx_default_block_size = 0x1000; 1758 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 1759 break; 1760 case 0x1000: 1761 nxgep->rx_default_block_size = 0x1000; 1762 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 1763 break; 1764 case 0x2000: 1765 nxgep->rx_default_block_size = 0x2000; 1766 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 1767 break; 1768 case 0x4000: 1769 nxgep->rx_default_block_size = 0x4000; 1770 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 1771 break; 1772 case 0x8000: 1773 nxgep->rx_default_block_size = 0x8000; 1774 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 1775 break; 1776 } 1777 1778 #ifndef USE_RX_BIG_BUF 1779 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 1780 #else 1781 nxgep->rx_default_block_size = 0x2000; 1782 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 1783 #endif 1784 /* 1785 * Get the system DMA burst size. 
	/*
	 * Get the system DMA burst size.
	 */
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0, &nxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed "
		    " status 0x%x", ddi_status));
		goto nxge_get_soft_properties_exit;
	}

	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
	    (caddr_t)nxgep->dmasparehandle,
	    sizeof (nxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "Binding spare handle to find system"
		    " burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto nxge_get_soft_properties_fail1;
	}

	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

nxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_system_dma_pages status = 0x%08x", status));
	return (status);
}

static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));

	status = nxge_alloc_rx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		return (NXGE_ERROR);
	}

	status = nxge_alloc_tx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		nxge_free_rx_mem_pool(nxgep);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
	return (NXGE_OK);
}

static void
nxge_free_mem_pool(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));

	nxge_free_rx_mem_pool(nxgep);
	nxge_free_tx_mem_pool(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}

static nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
{
	int i, j;
	uint32_t ndmas, st_rdc;
	p_nxge_dma_pt_cfg_t p_all_cfgp;
	p_nxge_hw_pt_cfg_t p_cfgp;
	p_nxge_dma_pool_t dma_poolp;
	p_nxge_dma_common_t *dma_buf_p;
	p_nxge_dma_pool_t dma_cntl_poolp;
	p_nxge_dma_common_t *dma_cntl_p;
	size_t rx_buf_alloc_size;
	size_t rx_cntl_alloc_size;
	uint32_t *num_chunks;	/* per dma */
	nxge_status_t status = NXGE_OK;

	uint32_t nxge_port_rbr_size;
	uint32_t nxge_port_rbr_spare_size;
	uint32_t nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	st_rdc = p_cfgp->start_rdc;
	ndmas = p_cfgp->max_rdcs;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    " nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
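	/*
	 * Two pools are built below: dma_poolp tracks the per-channel
	 * packet buffer chunks (with num_chunks[] recording how many
	 * chunks each channel ended up with), while dma_cntl_poolp
	 * tracks the per-channel control memory (RBR, RCR and mailbox).
	 */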
	/*
	 * Allocate memory for each receive DMA channel.
	 */
	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_cntl_poolp = (p_nxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(
	    sizeof (uint32_t) * ndmas, KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with the
	 * default block size.
	 * The rbr block counts are rounded up to multiples of the
	 * post batch count (16).
	 */
	nxge_port_rbr_size = p_all_cfgp->rbr_size;
	nxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!nxge_port_rbr_size) {
		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
	}
	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	p_all_cfgp->rbr_size = nxge_port_rbr_size;
	nxge_port_rbr_spare_size = nxge_rbr_spare_size;

	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	/*
	 * N2/NIU has limitations on the descriptor sizes: data buffers
	 * use contiguous memory allocation (contig_mem_alloc), which is
	 * limited to 4M, and control buffers are little endian and must
	 * be allocated with the ddi/dki memory allocation functions.
	 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		nxge_port_rbr_spare_size = 0;
		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
		    (!ISP2(nxge_port_rbr_size))) {
			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
		}
		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
		    (!ISP2(nxge_port_rcr_size))) {
			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
		}
	}
#endif

	rx_buf_alloc_size = (nxgep->rx_default_block_size *
	    (nxge_port_rbr_size + nxge_port_rbr_spare_size));
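	/*
	 * For example (hypothetical numbers): with a 4 KB default block
	 * size, a 1024-entry RBR and no spare entries, the computation
	 * above requests 4 MB of buffer memory for the channel, which is
	 * exactly the contig_mem_alloc ceiling that the N2/NIU check
	 * below enforces.
	 */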
1956 */ 1957 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 1958 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 1959 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 1960 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 1961 1962 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 1963 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 1964 "nxge_port_rcr_size = %d " 1965 "rx_cntl_alloc_size = %d", 1966 nxge_port_rbr_size, nxge_port_rbr_spare_size, 1967 nxge_port_rcr_size, 1968 rx_cntl_alloc_size)); 1969 1970 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1971 if (nxgep->niu_type == N2_NIU) { 1972 if (!ISP2(rx_buf_alloc_size)) { 1973 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1974 "==> nxge_alloc_rx_mem_pool: " 1975 " must be power of 2")); 1976 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1977 goto nxge_alloc_rx_mem_pool_exit; 1978 } 1979 1980 if (rx_buf_alloc_size > (1 << 22)) { 1981 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1982 "==> nxge_alloc_rx_mem_pool: " 1983 " limit size to 4M")); 1984 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1985 goto nxge_alloc_rx_mem_pool_exit; 1986 } 1987 1988 if (rx_cntl_alloc_size < 0x2000) { 1989 rx_cntl_alloc_size = 0x2000; 1990 } 1991 } 1992 #endif 1993 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 1994 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 1995 1996 /* 1997 * Allocate memory for receive buffers and descriptor rings. 1998 * Replace allocation functions with interface functions provided 1999 * by the partition manager when it is available. 2000 */ 2001 /* 2002 * Allocate memory for the receive buffer blocks. 2003 */ 2004 for (i = 0; i < ndmas; i++) { 2005 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2006 " nxge_alloc_rx_mem_pool to alloc mem: " 2007 " dma %d dma_buf_p %llx &dma_buf_p %llx", 2008 i, dma_buf_p[i], &dma_buf_p[i])); 2009 num_chunks[i] = 0; 2010 status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i], 2011 rx_buf_alloc_size, 2012 nxgep->rx_default_block_size, &num_chunks[i]); 2013 if (status != NXGE_OK) { 2014 break; 2015 } 2016 st_rdc++; 2017 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2018 " nxge_alloc_rx_mem_pool DONE alloc mem: " 2019 "dma %d dma_buf_p %llx &dma_buf_p %llx", i, 2020 dma_buf_p[i], &dma_buf_p[i])); 2021 } 2022 if (i < ndmas) { 2023 goto nxge_alloc_rx_mem_fail1; 2024 } 2025 /* 2026 * Allocate memory for descriptor rings and mailbox. 
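 * (If any per-channel allocation below fails, control transfers to the fail labels, which unwind only the channels set up so far before freeing the bookkeeping arrays.)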
2027 */ 2028 st_rdc = p_cfgp->start_rdc; 2029 for (j = 0; j < ndmas; j++) { 2030 status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j], 2031 rx_cntl_alloc_size); 2032 if (status != NXGE_OK) { 2033 break; 2034 } 2035 st_rdc++; 2036 } 2037 if (j < ndmas) { 2038 goto nxge_alloc_rx_mem_fail2; 2039 } 2040 2041 dma_poolp->ndmas = ndmas; 2042 dma_poolp->num_chunks = num_chunks; 2043 dma_poolp->buf_allocated = B_TRUE; 2044 nxgep->rx_buf_pool_p = dma_poolp; 2045 dma_poolp->dma_buf_pool_p = dma_buf_p; 2046 2047 dma_cntl_poolp->ndmas = ndmas; 2048 dma_cntl_poolp->buf_allocated = B_TRUE; 2049 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2050 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2051 2052 goto nxge_alloc_rx_mem_pool_exit; 2053 2054 nxge_alloc_rx_mem_fail2: 2055 /* Free control buffers */ 2056 j--; 2057 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2058 "==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j)); 2059 for (; j >= 0; j--) { 2060 nxge_free_rx_cntl_dma(nxgep, 2061 (p_nxge_dma_common_t)dma_cntl_p[j]); 2062 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2063 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", 2064 j)); 2065 } 2066 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2067 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j)); 2068 2069 nxge_alloc_rx_mem_fail1: 2070 /* Free data buffers */ 2071 i--; 2072 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2073 "==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i)); 2074 for (; i >= 0; i--) { 2075 nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2076 num_chunks[i]); 2077 } 2078 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2079 "==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i)); 2080 2081 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2082 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2083 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2084 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2085 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2086 2087 nxge_alloc_rx_mem_pool_exit: 2088 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2089 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2090 2091 return (status); 2092 } 2093 2094 static void 2095 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2096 { 2097 uint32_t i, ndmas; 2098 p_nxge_dma_pool_t dma_poolp; 2099 p_nxge_dma_common_t *dma_buf_p; 2100 p_nxge_dma_pool_t dma_cntl_poolp; 2101 p_nxge_dma_common_t *dma_cntl_p; 2102 uint32_t *num_chunks; 2103 2104 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2105 2106 dma_poolp = nxgep->rx_buf_pool_p; 2107 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2108 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2109 "<== nxge_free_rx_mem_pool " 2110 "(null rx buf pool or buf not allocated)")); 2111 return; 2112 } 2113 2114 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2115 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2116 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2117 "<== nxge_free_rx_mem_pool " 2118 "(null rx cntl buf pool or cntl buf not allocated)")); 2119 return; 2120 } 2121 2122 dma_buf_p = dma_poolp->dma_buf_pool_p; 2123 num_chunks = dma_poolp->num_chunks; 2124 2125 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2126 ndmas = dma_cntl_poolp->ndmas; 2127 2128 for (i = 0; i < ndmas; i++) { 2129 nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2130 } 2131 2132 for (i = 0; i < ndmas; i++) { 2133 nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]); 2134 } 2135 2136 for (i = 0; i < ndmas; i++) { 2137 KMEM_FREE(dma_buf_p[i], 2138 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2139 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2140 } 2141 2142
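 /* All per-channel buffers are now freed; release the bookkeeping arrays and pool structures in the reverse order of their allocation. */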
KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2143 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2144 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2145 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2146 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2147 2148 nxgep->rx_buf_pool_p = NULL; 2149 nxgep->rx_cntl_pool_p = NULL; 2150 2151 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2152 } 2153 2154 2155 static nxge_status_t 2156 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2157 p_nxge_dma_common_t *dmap, 2158 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2159 { 2160 p_nxge_dma_common_t rx_dmap; 2161 nxge_status_t status = NXGE_OK; 2162 size_t total_alloc_size; 2163 size_t allocated = 0; 2164 int i, size_index, array_size; 2165 2166 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2167 2168 rx_dmap = (p_nxge_dma_common_t) 2169 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2170 KM_SLEEP); 2171 2172 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2173 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2174 dma_channel, alloc_size, block_size, dmap)); 2175 2176 total_alloc_size = alloc_size; 2177 2178 #if defined(RX_USE_RECLAIM_POST) 2179 total_alloc_size = alloc_size + alloc_size/4; 2180 #endif 2181 2182 i = 0; 2183 size_index = 0; 2184 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2185 while ((alloc_sizes[size_index] < alloc_size) && 2186 (size_index < array_size)) 2187 size_index++; 2188 if (size_index >= array_size) { 2189 size_index = array_size - 1; 2190 } 2191 2192 while ((allocated < total_alloc_size) && 2193 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2194 rx_dmap[i].dma_chunk_index = i; 2195 rx_dmap[i].block_size = block_size; 2196 rx_dmap[i].alength = alloc_sizes[size_index]; 2197 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2198 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2199 rx_dmap[i].dma_channel = dma_channel; 2200 rx_dmap[i].contig_alloc_type = B_FALSE; 2201 2202 /* 2203 * N2/NIU: data buffers must be contiguous as the driver 2204 * needs to call Hypervisor api to set up 2205 * logical pages. 
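 * (On sun4v this contiguous memory comes from contig_mem_alloc(); see the B_TRUE arm of nxge_dma_mem_alloc() below.)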
2206 */ 2207 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2208 rx_dmap[i].contig_alloc_type = B_TRUE; 2209 } 2210 2211 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2212 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2213 "i %d nblocks %d alength %d", 2214 dma_channel, i, &rx_dmap[i], block_size, 2215 i, rx_dmap[i].nblocks, 2216 rx_dmap[i].alength)); 2217 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2218 &nxge_rx_dma_attr, 2219 rx_dmap[i].alength, 2220 &nxge_dev_buf_dma_acc_attr, 2221 DDI_DMA_READ | DDI_DMA_STREAMING, 2222 (p_nxge_dma_common_t)(&rx_dmap[i])); 2223 if (status != NXGE_OK) { 2224 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2225 " nxge_alloc_rx_buf_dma: Alloc Failed ")); 2226 size_index--; 2227 } else { 2228 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2229 " alloc_rx_buf_dma allocated rdc %d " 2230 "chunk %d size %x dvma %x bufp %llx ", 2231 dma_channel, i, rx_dmap[i].alength, 2232 rx_dmap[i].ioaddr_pp, &rx_dmap[i])); 2233 i++; 2234 allocated += alloc_sizes[size_index]; 2235 } 2236 } 2237 2238 2239 if (allocated < total_alloc_size) { 2240 goto nxge_alloc_rx_mem_fail1; 2241 } 2242 2243 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2244 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2245 dma_channel, i)); 2246 *num_chunks = i; 2247 *dmap = rx_dmap; 2248 2249 goto nxge_alloc_rx_mem_exit; 2250 2251 nxge_alloc_rx_mem_fail1: 2252 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2253 2254 nxge_alloc_rx_mem_exit: 2255 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2256 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2257 2258 return (status); 2259 } 2260 2261 /*ARGSUSED*/ 2262 static void 2263 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2264 uint32_t num_chunks) 2265 { 2266 int i; 2267 2268 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2269 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2270 2271 for (i = 0; i < num_chunks; i++) { 2272 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2273 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2274 i, dmap)); 2275 nxge_dma_mem_free(dmap++); 2276 } 2277 2278 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2279 } 2280 2281 /*ARGSUSED*/ 2282 static nxge_status_t 2283 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2284 p_nxge_dma_common_t *dmap, size_t size) 2285 { 2286 p_nxge_dma_common_t rx_dmap; 2287 nxge_status_t status = NXGE_OK; 2288 2289 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2290 2291 rx_dmap = (p_nxge_dma_common_t) 2292 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2293 2294 rx_dmap->contig_alloc_type = B_FALSE; 2295 2296 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2297 &nxge_desc_dma_attr, 2298 size, 2299 &nxge_dev_desc_dma_acc_attr, 2300 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2301 rx_dmap); 2302 if (status != NXGE_OK) { 2303 goto nxge_alloc_rx_cntl_dma_fail1; 2304 } 2305 2306 *dmap = rx_dmap; 2307 goto nxge_alloc_rx_cntl_dma_exit; 2308 2309 nxge_alloc_rx_cntl_dma_fail1: 2310 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2311 2312 nxge_alloc_rx_cntl_dma_exit: 2313 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2314 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2315 2316 return (status); 2317 } 2318 2319 /*ARGSUSED*/ 2320 static void 2321 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2322 { 2323 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2324 2325 nxge_dma_mem_free(dmap); 2326 2327 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2328 } 2329 2330 static nxge_status_t 2331 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2332 { 2333 nxge_status_t 
status = NXGE_OK; 2334 int i, j; 2335 uint32_t ndmas, st_tdc; 2336 p_nxge_dma_pt_cfg_t p_all_cfgp; 2337 p_nxge_hw_pt_cfg_t p_cfgp; 2338 p_nxge_dma_pool_t dma_poolp; 2339 p_nxge_dma_common_t *dma_buf_p; 2340 p_nxge_dma_pool_t dma_cntl_poolp; 2341 p_nxge_dma_common_t *dma_cntl_p; 2342 size_t tx_buf_alloc_size; 2343 size_t tx_cntl_alloc_size; 2344 uint32_t *num_chunks; /* per dma */ 2345 uint32_t bcopy_thresh; 2346 2347 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2348 2349 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2350 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2351 st_tdc = p_cfgp->start_tdc; 2352 ndmas = p_cfgp->max_tdcs; 2353 2354 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: " 2355 "p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d", 2356 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs)); 2357 /* 2358 * Allocate memory for each transmit DMA channel. 2359 */ 2360 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2361 KM_SLEEP); 2362 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2363 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2364 2365 dma_cntl_poolp = (p_nxge_dma_pool_t) 2366 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2367 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2368 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2369 2370 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2371 /* 2372 * The N2/NIU limits descriptor allocations: contiguous memory 2373 * for data buffers is capped at 4M (contig_mem_alloc), and 2374 * control buffers must be little endian and allocated with the 2375 * ddi/dki mem alloc functions. The transmit ring is limited 2376 * to 8K (including the mailbox). 2377 */ 2378 if (nxgep->niu_type == N2_NIU) { 2379 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2380 (!ISP2(nxge_tx_ring_size))) { 2381 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2382 } 2383 } 2384 #endif 2385 2386 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2387 2388 /* 2389 * Assume that each DMA channel will be configured with the default 2390 * transmit buffer size for copying transmit data. 2391 * (Packets whose payload exceeds this limit are not 2392 * copied.) 2393 */ 2394 if (nxgep->niu_type == N2_NIU) { 2395 bcopy_thresh = TX_BCOPY_SIZE; 2396 } else { 2397 bcopy_thresh = nxge_bcopy_thresh; 2398 } 2399 tx_buf_alloc_size = (bcopy_thresh * nxge_tx_ring_size); 2400 2401 /* 2402 * Addresses of transmit descriptor ring and the 2403 * mailbox must be all cache-aligned (64 bytes). 2404 */ 2405 tx_cntl_alloc_size = nxge_tx_ring_size; 2406 tx_cntl_alloc_size *= (sizeof (tx_desc_t)); 2407 tx_cntl_alloc_size += sizeof (txdma_mailbox_t); 2408 2409 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2410 if (nxgep->niu_type == N2_NIU) { 2411 if (!ISP2(tx_buf_alloc_size)) { 2412 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2413 "==> nxge_alloc_tx_mem_pool: " 2414 " must be power of 2")); 2415 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2416 goto nxge_alloc_tx_mem_pool_exit; 2417 } 2418 2419 if (tx_buf_alloc_size > (1 << 22)) { 2420 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2421 "==> nxge_alloc_tx_mem_pool: " 2422 " limit size to 4M")); 2423 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2424 goto nxge_alloc_tx_mem_pool_exit; 2425 } 2426 2427 if (tx_cntl_alloc_size < 0x2000) { 2428 tx_cntl_alloc_size = 0x2000; 2429 } 2430 } 2431 #endif 2432 2433 num_chunks = (uint32_t *)KMEM_ZALLOC( 2434 sizeof (uint32_t) * ndmas, KM_SLEEP); 2435 2436 /* 2437 * Allocate memory for transmit buffers and descriptor rings.
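 * (Sizing note: tx_buf_alloc_size above works out to bcopy_thresh * tx_ring_size bytes, i.e. room for one copy buffer per transmit descriptor.)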
2438 * Replace allocation functions with interface functions provided 2439 * by the partition manager when it is available. 2440 * 2441 * Allocate memory for the transmit buffer pool. 2442 */ 2443 for (i = 0; i < ndmas; i++) { 2444 num_chunks[i] = 0; 2445 status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i], 2446 tx_buf_alloc_size, 2447 bcopy_thresh, &num_chunks[i]); 2448 if (status != NXGE_OK) { 2449 break; 2450 } 2451 st_tdc++; 2452 } 2453 if (i < ndmas) { 2454 goto nxge_alloc_tx_mem_pool_fail1; 2455 } 2456 2457 st_tdc = p_cfgp->start_tdc; 2458 /* 2459 * Allocate memory for descriptor rings and mailbox. 2460 */ 2461 for (j = 0; j < ndmas; j++) { 2462 status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j], 2463 tx_cntl_alloc_size); 2464 if (status != NXGE_OK) { 2465 break; 2466 } 2467 st_tdc++; 2468 } 2469 if (j < ndmas) { 2470 goto nxge_alloc_tx_mem_pool_fail2; 2471 } 2472 2473 dma_poolp->ndmas = ndmas; 2474 dma_poolp->num_chunks = num_chunks; 2475 dma_poolp->buf_allocated = B_TRUE; 2476 dma_poolp->dma_buf_pool_p = dma_buf_p; 2477 nxgep->tx_buf_pool_p = dma_poolp; 2478 2479 dma_cntl_poolp->ndmas = ndmas; 2480 dma_cntl_poolp->buf_allocated = B_TRUE; 2481 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2482 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2483 2484 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2485 "==> nxge_alloc_tx_mem_pool: start_tdc %d " 2486 "ndmas %d poolp->ndmas %d", 2487 st_tdc, ndmas, dma_poolp->ndmas)); 2488 2489 goto nxge_alloc_tx_mem_pool_exit; 2490 2491 nxge_alloc_tx_mem_pool_fail2: 2492 /* Free control buffers */ 2493 j--; 2494 for (; j >= 0; j--) { 2495 nxge_free_tx_cntl_dma(nxgep, 2496 (p_nxge_dma_common_t)dma_cntl_p[j]); 2497 } 2498 2499 nxge_alloc_tx_mem_pool_fail1: 2500 /* Free data buffers */ 2501 i--; 2502 for (; i >= 0; i--) { 2503 nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2504 num_chunks[i]); 2505 } 2506 2507 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2508 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2509 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2510 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2511 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2512 2513 nxge_alloc_tx_mem_pool_exit: 2514 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2515 "<== nxge_alloc_tx_mem_pool:status 0x%08x", status)); 2516 2517 return (status); 2518 } 2519 2520 static nxge_status_t 2521 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2522 p_nxge_dma_common_t *dmap, size_t alloc_size, 2523 size_t block_size, uint32_t *num_chunks) 2524 { 2525 p_nxge_dma_common_t tx_dmap; 2526 nxge_status_t status = NXGE_OK; 2527 size_t total_alloc_size; 2528 size_t allocated = 0; 2529 int i, size_index, array_size; 2530 2531 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 2532 2533 tx_dmap = (p_nxge_dma_common_t) 2534 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2535 KM_SLEEP); 2536 2537 total_alloc_size = alloc_size; 2538 i = 0; 2539 size_index = 0; 2540 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2541 while ((alloc_sizes[size_index] < alloc_size) && 2542 (size_index < array_size)) 2543 size_index++; 2544 if (size_index >= array_size) { 2545 size_index = array_size - 1; 2546 } 2547 2548 while ((allocated < total_alloc_size) && 2549 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2550 2551 tx_dmap[i].dma_chunk_index = i; 2552 tx_dmap[i].block_size = block_size; 2553 tx_dmap[i].alength = alloc_sizes[size_index]; 2554 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2555 tx_dmap[i].nblocks = 
alloc_sizes[size_index] / block_size; 2556 tx_dmap[i].dma_channel = dma_channel; 2557 tx_dmap[i].contig_alloc_type = B_FALSE; 2558 2559 /* 2560 * N2/NIU: data buffers must be contiguous as the driver 2561 * needs to call Hypervisor api to set up 2562 * logical pages. 2563 */ 2564 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2565 tx_dmap[i].contig_alloc_type = B_TRUE; 2566 } 2567 2568 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2569 &nxge_tx_dma_attr, 2570 tx_dmap[i].alength, 2571 &nxge_dev_buf_dma_acc_attr, 2572 DDI_DMA_WRITE | DDI_DMA_STREAMING, 2573 (p_nxge_dma_common_t)(&tx_dmap[i])); 2574 if (status != NXGE_OK) { 2575 size_index--; 2576 } else { 2577 i++; 2578 allocated += alloc_sizes[size_index]; 2579 } 2580 } 2581 2582 if (allocated < total_alloc_size) { 2583 goto nxge_alloc_tx_mem_fail1; 2584 } 2585 2586 *num_chunks = i; 2587 *dmap = tx_dmap; 2588 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2589 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2590 *dmap, i)); 2591 goto nxge_alloc_tx_mem_exit; 2592 2593 nxge_alloc_tx_mem_fail1: 2594 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2595 2596 nxge_alloc_tx_mem_exit: 2597 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2598 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 2599 2600 return (status); 2601 } 2602 2603 /*ARGSUSED*/ 2604 static void 2605 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2606 uint32_t num_chunks) 2607 { 2608 int i; 2609 2610 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 2611 2612 for (i = 0; i < num_chunks; i++) { 2613 nxge_dma_mem_free(dmap++); 2614 } 2615 2616 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 2617 } 2618 2619 /*ARGSUSED*/ 2620 static nxge_status_t 2621 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2622 p_nxge_dma_common_t *dmap, size_t size) 2623 { 2624 p_nxge_dma_common_t tx_dmap; 2625 nxge_status_t status = NXGE_OK; 2626 2627 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 2628 tx_dmap = (p_nxge_dma_common_t) 2629 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2630 2631 tx_dmap->contig_alloc_type = B_FALSE; 2632 2633 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2634 &nxge_desc_dma_attr, 2635 size, 2636 &nxge_dev_desc_dma_acc_attr, 2637 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2638 tx_dmap); 2639 if (status != NXGE_OK) { 2640 goto nxge_alloc_tx_cntl_dma_fail1; 2641 } 2642 2643 *dmap = tx_dmap; 2644 goto nxge_alloc_tx_cntl_dma_exit; 2645 2646 nxge_alloc_tx_cntl_dma_fail1: 2647 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 2648 2649 nxge_alloc_tx_cntl_dma_exit: 2650 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2651 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 2652 2653 return (status); 2654 } 2655 2656 /*ARGSUSED*/ 2657 static void 2658 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2659 { 2660 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 2661 2662 nxge_dma_mem_free(dmap); 2663 2664 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 2665 } 2666 2667 static void 2668 nxge_free_tx_mem_pool(p_nxge_t nxgep) 2669 { 2670 uint32_t i, ndmas; 2671 p_nxge_dma_pool_t dma_poolp; 2672 p_nxge_dma_common_t *dma_buf_p; 2673 p_nxge_dma_pool_t dma_cntl_poolp; 2674 p_nxge_dma_common_t *dma_cntl_p; 2675 uint32_t *num_chunks; 2676 2677 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool")); 2678 2679 dma_poolp = nxgep->tx_buf_pool_p; 2680 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2681 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2682 "<== nxge_free_tx_mem_pool 
" 2683 "(null rx buf pool or buf not allocated")); 2684 return; 2685 } 2686 2687 dma_cntl_poolp = nxgep->tx_cntl_pool_p; 2688 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2689 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2690 "<== nxge_free_tx_mem_pool " 2691 "(null tx cntl buf pool or cntl buf not allocated")); 2692 return; 2693 } 2694 2695 dma_buf_p = dma_poolp->dma_buf_pool_p; 2696 num_chunks = dma_poolp->num_chunks; 2697 2698 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2699 ndmas = dma_cntl_poolp->ndmas; 2700 2701 for (i = 0; i < ndmas; i++) { 2702 nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2703 } 2704 2705 for (i = 0; i < ndmas; i++) { 2706 nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]); 2707 } 2708 2709 for (i = 0; i < ndmas; i++) { 2710 KMEM_FREE(dma_buf_p[i], 2711 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2712 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2713 } 2714 2715 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2716 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2717 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2718 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2719 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2720 2721 nxgep->tx_buf_pool_p = NULL; 2722 nxgep->tx_cntl_pool_p = NULL; 2723 2724 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool")); 2725 } 2726 2727 /*ARGSUSED*/ 2728 static nxge_status_t 2729 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 2730 struct ddi_dma_attr *dma_attrp, 2731 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 2732 p_nxge_dma_common_t dma_p) 2733 { 2734 caddr_t kaddrp; 2735 int ddi_status = DDI_SUCCESS; 2736 boolean_t contig_alloc_type; 2737 2738 contig_alloc_type = dma_p->contig_alloc_type; 2739 2740 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 2741 /* 2742 * contig_alloc_type for contiguous memory only allowed 2743 * for N2/NIU. 
2744 */ 2745 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2746 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 2747 dma_p->contig_alloc_type)); 2748 return (NXGE_ERROR | NXGE_DDI_FAILED); 2749 } 2750 2751 dma_p->dma_handle = NULL; 2752 dma_p->acc_handle = NULL; 2753 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 2754 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 2755 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 2756 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 2757 if (ddi_status != DDI_SUCCESS) { 2758 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2759 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 2760 return (NXGE_ERROR | NXGE_DDI_FAILED); 2761 } 2762 2763 switch (contig_alloc_type) { 2764 case B_FALSE: 2765 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, 2766 acc_attr_p, 2767 xfer_flags, 2768 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 2769 &dma_p->acc_handle); 2770 if (ddi_status != DDI_SUCCESS) { 2771 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2772 "nxge_dma_mem_alloc:ddi_dma_mem_alloc failed")); 2773 ddi_dma_free_handle(&dma_p->dma_handle); 2774 dma_p->dma_handle = NULL; 2775 return (NXGE_ERROR | NXGE_DDI_FAILED); 2776 } 2777 if (dma_p->alength < length) { 2778 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2779 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 2780 "< length.")); 2781 ddi_dma_mem_free(&dma_p->acc_handle); 2782 ddi_dma_free_handle(&dma_p->dma_handle); 2783 dma_p->acc_handle = NULL; 2784 dma_p->dma_handle = NULL; 2785 return (NXGE_ERROR); 2786 } 2787 2788 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2789 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2790 &dma_p->dma_cookie, &dma_p->ncookies); 2791 if (ddi_status != DDI_DMA_MAPPED) { 2792 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2793 "nxge_dma_mem_alloc:ddi_dma_addr_bind failed " 2794 "(status 0x%x ncookies %d.)", ddi_status, 2795 dma_p->ncookies)); 2796 if (dma_p->acc_handle) { 2797 ddi_dma_mem_free(&dma_p->acc_handle); 2798 dma_p->acc_handle = NULL; 2799 } 2800 ddi_dma_free_handle(&dma_p->dma_handle); 2801 dma_p->dma_handle = NULL; 2802 return (NXGE_ERROR | NXGE_DDI_FAILED); 2803 } 2804 2805 if (dma_p->ncookies != 1) { 2806 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2807 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 2808 "> 1 cookie " 2809 "(status 0x%x ncookies %d.)", ddi_status, 2810 dma_p->ncookies)); 2811 if (dma_p->acc_handle) { 2812 ddi_dma_mem_free(&dma_p->acc_handle); 2813 dma_p->acc_handle = NULL; 2814 } 2815 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2816 ddi_dma_free_handle(&dma_p->dma_handle); 2817 dma_p->dma_handle = NULL; 2818 return (NXGE_ERROR); 2819 } 2820 break; 2821 2822 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2823 case B_TRUE: 2824 kaddrp = (caddr_t)contig_mem_alloc(length); 2825 if (kaddrp == NULL) { 2826 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2827 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 2828 ddi_dma_free_handle(&dma_p->dma_handle); 2829 return (NXGE_ERROR | NXGE_DDI_FAILED); 2830 } 2831 2832 dma_p->alength = length; 2833 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2834 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2835 &dma_p->dma_cookie, &dma_p->ncookies); 2836 if (ddi_status != DDI_DMA_MAPPED) { 2837 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2838 "nxge_dma_mem_alloc:ddi_dma_addr_bind failed " 2839 "(status 0x%x ncookies %d.)", ddi_status, 2840 dma_p->ncookies)); 2841 2842 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2843 "==> nxge_dma_mem_alloc: (not mapped)" 2844 "length %lu (0x%x) " 2845 "free contig kaddrp $%p " 2846 "va_to_pa $%p", 2847 length, length, 2848
kaddrp, 2849 va_to_pa(kaddrp))); 2850 2851 2852 contig_mem_free((void *)kaddrp, length); 2853 ddi_dma_free_handle(&dma_p->dma_handle); 2854 2855 dma_p->dma_handle = NULL; 2856 dma_p->acc_handle = NULL; 2857 dma_p->alength = 0; 2858 dma_p->kaddrp = NULL; 2859 2860 return (NXGE_ERROR | NXGE_DDI_FAILED); 2861 } 2862 2863 if (dma_p->ncookies != 1 || 2864 (dma_p->dma_cookie.dmac_laddress == 0)) { 2865 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2866 "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 " 2867 "cookie or " 2868 "dmac_laddress is NULL $%p size %d " 2869 " (status 0x%x ncookies %d.)", 2870 ddi_status, 2871 dma_p->dma_cookie.dmac_laddress, 2872 dma_p->dma_cookie.dmac_size, 2873 dma_p->ncookies)); 2874 2875 contig_mem_free((void *)kaddrp, length); 2876 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2877 ddi_dma_free_handle(&dma_p->dma_handle); 2878 2879 dma_p->alength = 0; 2880 dma_p->dma_handle = NULL; 2881 dma_p->acc_handle = NULL; 2882 dma_p->kaddrp = NULL; 2883 2884 return (NXGE_ERROR | NXGE_DDI_FAILED); 2885 } 2886 break; 2887 2888 #else 2889 case B_TRUE: 2890 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2891 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 2892 return (NXGE_ERROR | NXGE_DDI_FAILED); 2893 #endif 2894 } 2895 2896 dma_p->kaddrp = kaddrp; 2897 dma_p->last_kaddrp = (unsigned char *)kaddrp + 2898 dma_p->alength - RXBUF_64B_ALIGNED; 2899 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2900 dma_p->last_ioaddr_pp = 2901 (unsigned char *)dma_p->dma_cookie.dmac_laddress + 2902 dma_p->alength - RXBUF_64B_ALIGNED; 2903 2904 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 2905 2906 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2907 dma_p->orig_ioaddr_pp = 2908 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2909 dma_p->orig_alength = length; 2910 dma_p->orig_kaddrp = kaddrp; 2911 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 2912 #endif 2913 2914 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 2915 "dma buffer allocated: dma_p $%p " 2916 "return dmac_laddress from cookie $%p cookie dmac_size %d " 2917 "dma_p->ioaddr_p $%p " 2918 "dma_p->orig_ioaddr_p $%p " 2919 "orig_vatopa $%p " 2920 "alength %d (0x%x) " 2921 "kaddrp $%p " 2922 "length %d (0x%x)", 2923 dma_p, 2924 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 2925 dma_p->ioaddr_pp, 2926 dma_p->orig_ioaddr_pp, 2927 dma_p->orig_vatopa, 2928 dma_p->alength, dma_p->alength, 2929 kaddrp, 2930 length, length)); 2931 2932 return (NXGE_OK); 2933 } 2934 2935 static void 2936 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 2937 { 2938 if (dma_p->dma_handle != NULL) { 2939 if (dma_p->ncookies) { 2940 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2941 dma_p->ncookies = 0; 2942 } 2943 ddi_dma_free_handle(&dma_p->dma_handle); 2944 dma_p->dma_handle = NULL; 2945 } 2946 2947 if (dma_p->acc_handle != NULL) { 2948 ddi_dma_mem_free(&dma_p->acc_handle); 2949 dma_p->acc_handle = NULL; 2950 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 2951 } 2952 2953 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2954 if (dma_p->contig_alloc_type && 2955 dma_p->orig_kaddrp && dma_p->orig_alength) { 2956 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 2957 "kaddrp $%p (orig_kaddrp $%p) " 2958 "mem type %d " 2959 "orig_alength %d " 2960 "alength 0x%x (%d)", 2961 dma_p->kaddrp, 2962 dma_p->orig_kaddrp, 2963 dma_p->contig_alloc_type, 2964 dma_p->orig_alength, 2965 dma_p->alength, dma_p->alength)); 2966 2967 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 2968 dma_p->orig_alength = 0; 2969 dma_p->orig_kaddrp =
NULL; 2970 dma_p->contig_alloc_type = B_FALSE; 2971 } 2972 #endif 2973 dma_p->kaddrp = NULL; 2974 dma_p->alength = 0; 2975 } 2976 2977 /* 2978 * nxge_m_start() -- start transmitting and receiving. 2979 * 2980 * This function is called by the MAC layer when the first 2981 * stream is opened, to prepare the hardware for sending 2982 * and receiving packets. 2983 */ 2984 static int 2985 nxge_m_start(void *arg) 2986 { 2987 p_nxge_t nxgep = (p_nxge_t)arg; 2988 2989 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 2990 2991 MUTEX_ENTER(nxgep->genlock); 2992 if (nxge_init(nxgep) != NXGE_OK) { 2993 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2994 "<== nxge_m_start: initialization failed")); 2995 MUTEX_EXIT(nxgep->genlock); 2996 return (EIO); 2997 } 2998 2999 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) 3000 goto nxge_m_start_exit; 3001 /* 3002 * Start timer to check the system error and tx hangs 3003 */ 3004 nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state, 3005 NXGE_CHECK_TIMER); 3006 3007 nxgep->link_notify = B_TRUE; 3008 3009 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 3010 3011 nxge_m_start_exit: 3012 MUTEX_EXIT(nxgep->genlock); 3013 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3014 return (0); 3015 } 3016 3017 /* 3018 * nxge_m_stop(): stop transmitting and receiving. 3019 */ 3020 static void 3021 nxge_m_stop(void *arg) 3022 { 3023 p_nxge_t nxgep = (p_nxge_t)arg; 3024 3025 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3026 3027 if (nxgep->nxge_timerid) { 3028 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3029 nxgep->nxge_timerid = 0; 3030 } 3031 3032 MUTEX_ENTER(nxgep->genlock); 3033 nxge_uninit(nxgep); 3034 3035 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3036 3037 MUTEX_EXIT(nxgep->genlock); 3038 3039 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3040 } 3041 3042 static int 3043 nxge_m_unicst(void *arg, const uint8_t *macaddr) 3044 { 3045 p_nxge_t nxgep = (p_nxge_t)arg; 3046 struct ether_addr addrp; 3047 3048 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst")); 3049 3050 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 3051 if (nxge_set_mac_addr(nxgep, &addrp)) { 3052 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3053 "<== nxge_m_unicst: set unicast failed")); 3054 return (EINVAL); 3055 } 3056 3057 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst")); 3058 3059 return (0); 3060 } 3061 3062 static int 3063 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3064 { 3065 p_nxge_t nxgep = (p_nxge_t)arg; 3066 struct ether_addr addrp; 3067 3068 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3069 "==> nxge_m_multicst: add %d", add)); 3070 3071 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3072 if (add) { 3073 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3074 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3075 "<== nxge_m_multicst: add multicast failed")); 3076 return (EINVAL); 3077 } 3078 } else { 3079 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3080 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3081 "<== nxge_m_multicst: del multicast failed")); 3082 return (EINVAL); 3083 } 3084 } 3085 3086 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3087 3088 return (0); 3089 } 3090 3091 static int 3092 nxge_m_promisc(void *arg, boolean_t on) 3093 { 3094 p_nxge_t nxgep = (p_nxge_t)arg; 3095 3096 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3097 "==> nxge_m_promisc: on %d", on)); 3098 3099 if (nxge_set_promisc(nxgep, on)) { 3100 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3101 "<== nxge_m_promisc: set promisc failed")); 3102 return (EINVAL); 3103 } 3104 3105 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3106 "<==
nxge_m_promisc: on %d", on)); 3107 3108 return (0); 3109 } 3110 3111 static void 3112 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3113 { 3114 p_nxge_t nxgep = (p_nxge_t)arg; 3115 struct iocblk *iocp; 3116 boolean_t need_privilege; 3117 int err; 3118 int cmd; 3119 3120 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3121 3122 iocp = (struct iocblk *)mp->b_rptr; 3123 iocp->ioc_error = 0; 3124 need_privilege = B_TRUE; 3125 cmd = iocp->ioc_cmd; 3126 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3127 switch (cmd) { 3128 default: 3129 miocnak(wq, mp, 0, EINVAL); 3130 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3131 return; 3132 3133 case LB_GET_INFO_SIZE: 3134 case LB_GET_INFO: 3135 case LB_GET_MODE: 3136 need_privilege = B_FALSE; 3137 break; 3138 case LB_SET_MODE: 3139 break; 3140 3141 case ND_GET: 3142 need_privilege = B_FALSE; 3143 break; 3144 case ND_SET: 3145 break; 3146 3147 case NXGE_GET_MII: 3148 case NXGE_PUT_MII: 3149 case NXGE_GET64: 3150 case NXGE_PUT64: 3151 case NXGE_GET_TX_RING_SZ: 3152 case NXGE_GET_TX_DESC: 3153 case NXGE_TX_SIDE_RESET: 3154 case NXGE_RX_SIDE_RESET: 3155 case NXGE_GLOBAL_RESET: 3156 case NXGE_RESET_MAC: 3157 case NXGE_TX_REGS_DUMP: 3158 case NXGE_RX_REGS_DUMP: 3159 case NXGE_INT_REGS_DUMP: 3160 case NXGE_VIR_INT_REGS_DUMP: 3161 case NXGE_PUT_TCAM: 3162 case NXGE_GET_TCAM: 3163 case NXGE_RTRACE: 3164 case NXGE_RDUMP: 3165 3166 need_privilege = B_FALSE; 3167 break; 3168 case NXGE_INJECT_ERR: 3169 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3170 nxge_err_inject(nxgep, wq, mp); 3171 break; 3172 } 3173 3174 if (need_privilege) { 3175 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3176 if (err != 0) { 3177 miocnak(wq, mp, 0, err); 3178 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3179 "<== nxge_m_ioctl: no priv")); 3180 return; 3181 } 3182 } 3183 3184 switch (cmd) { 3185 case ND_GET: 3186 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command")); 3187 case ND_SET: 3188 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command")); 3189 nxge_param_ioctl(nxgep, wq, mp, iocp); 3190 break; 3191 3192 case LB_GET_MODE: 3193 case LB_SET_MODE: 3194 case LB_GET_INFO_SIZE: 3195 case LB_GET_INFO: 3196 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3197 break; 3198 3199 case NXGE_GET_MII: 3200 case NXGE_PUT_MII: 3201 case NXGE_PUT_TCAM: 3202 case NXGE_GET_TCAM: 3203 case NXGE_GET64: 3204 case NXGE_PUT64: 3205 case NXGE_GET_TX_RING_SZ: 3206 case NXGE_GET_TX_DESC: 3207 case NXGE_TX_SIDE_RESET: 3208 case NXGE_RX_SIDE_RESET: 3209 case NXGE_GLOBAL_RESET: 3210 case NXGE_RESET_MAC: 3211 case NXGE_TX_REGS_DUMP: 3212 case NXGE_RX_REGS_DUMP: 3213 case NXGE_INT_REGS_DUMP: 3214 case NXGE_VIR_INT_REGS_DUMP: 3215 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3216 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3217 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3218 break; 3219 } 3220 3221 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3222 } 3223 3224 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3225 3226 static void 3227 nxge_m_resources(void *arg) 3228 { 3229 p_nxge_t nxgep = arg; 3230 mac_rx_fifo_t mrf; 3231 p_rx_rcr_rings_t rcr_rings; 3232 p_rx_rcr_ring_t *rcr_p; 3233 uint32_t i, ndmas; 3234 nxge_status_t status; 3235 3236 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 3237 3238 MUTEX_ENTER(nxgep->genlock); 3239 3240 /* 3241 * CR 6492541 Check to see if the drv_state has been initialized, 3242 * if not * call nxge_init(). 
3243 */ 3244 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3245 status = nxge_init(nxgep); 3246 if (status != NXGE_OK) 3247 goto nxge_m_resources_exit; 3248 } 3249 3250 mrf.mrf_type = MAC_RX_FIFO; 3251 mrf.mrf_blank = nxge_rx_hw_blank; 3252 mrf.mrf_arg = (void *)nxgep; 3253 3254 mrf.mrf_normal_blank_time = 128; 3255 mrf.mrf_normal_pkt_count = 8; 3256 rcr_rings = nxgep->rx_rcr_rings; 3257 rcr_p = rcr_rings->rcr_rings; 3258 ndmas = rcr_rings->ndmas; 3259 3260 /* 3261 * Export our receive resources to the MAC layer. 3262 */ 3263 for (i = 0; i < ndmas; i++) { 3264 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle = 3265 mac_resource_add(nxgep->mach, 3266 (mac_resource_t *)&mrf); 3267 3268 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3269 "==> nxge_m_resources: vdma %d dma %d " 3270 "rcrptr 0x%016llx mac_handle 0x%016llx", 3271 i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc, 3272 rcr_p[i], 3273 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle)); 3274 } 3275 3276 nxge_m_resources_exit: 3277 MUTEX_EXIT(nxgep->genlock); 3278 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources")); 3279 } 3280 3281 static void 3282 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory) 3283 { 3284 p_nxge_mmac_stats_t mmac_stats; 3285 int i; 3286 nxge_mmac_t *mmac_info; 3287 3288 mmac_info = &nxgep->nxge_mmac_info; 3289 3290 mmac_stats = &nxgep->statsp->mmac_stats; 3291 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 3292 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 3293 3294 for (i = 0; i < ETHERADDRL; i++) { 3295 if (factory) { 3296 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3297 = mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i]; 3298 } else { 3299 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3300 = mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i]; 3301 } 3302 } 3303 } 3304 3305 /* 3306 * nxge_altmac_set() -- Set an alternate MAC address 3307 */ 3308 static int 3309 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot) 3310 { 3311 uint8_t addrn; 3312 uint8_t portn; 3313 npi_mac_addr_t altmac; 3314 hostinfo_t mac_rdc; 3315 p_nxge_class_pt_cfg_t clscfgp; 3316 3317 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 3318 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 3319 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 3320 3321 portn = nxgep->mac.portnum; 3322 addrn = (uint8_t)slot - 1; 3323 3324 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn, 3325 addrn, &altmac) != NPI_SUCCESS) 3326 return (EIO); 3327 3328 /* 3329 * Set the rdc table number for the host info entry 3330 * for this mac address slot. 3331 */ 3332 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 3333 mac_rdc.value = 0; 3334 mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl; 3335 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 3336 3337 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 3338 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 3339 return (EIO); 3340 } 3341 3342 /* 3343 * Enable comparison with the alternate MAC address. 3344 * While the first alternate addr is enabled by bit 1 of register 3345 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 3346 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 3347 * accordingly before calling npi_mac_altaddr_entry. 
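 * For example, alternate address slot 1 maps to compare-enable bit 0 on an XMAC port but to bit 1 on a BMAC port, which is why addrn below is slot - 1 only for the two XMAC ports.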
3348 */ 3349 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 3350 addrn = (uint8_t)slot - 1; 3351 else 3352 addrn = (uint8_t)slot; 3353 3354 if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn) 3355 != NPI_SUCCESS) 3356 return (EIO); 3357 3358 return (0); 3359 } 3360 3361 /* 3362 * nxge_m_mmac_add() - find an unused address slot, set the address 3363 * value to the one specified, enable the port to start filtering on 3364 * the new MAC address. Returns 0 on success. 3365 */ 3366 static int 3367 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 3368 { 3369 p_nxge_t nxgep = arg; 3370 mac_addr_slot_t slot; 3371 nxge_mmac_t *mmac_info; 3372 int err; 3373 nxge_status_t status; 3374 3375 mutex_enter(nxgep->genlock); 3376 3377 /* 3378 * Make sure that nxge is initialized, if _start() has 3379 * not been called. 3380 */ 3381 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3382 status = nxge_init(nxgep); 3383 if (status != NXGE_OK) { 3384 mutex_exit(nxgep->genlock); 3385 return (ENXIO); 3386 } 3387 } 3388 3389 mmac_info = &nxgep->nxge_mmac_info; 3390 if (mmac_info->naddrfree == 0) { 3391 mutex_exit(nxgep->genlock); 3392 return (ENOSPC); 3393 } 3394 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3395 maddr->mma_addrlen)) { 3396 mutex_exit(nxgep->genlock); 3397 return (EINVAL); 3398 } 3399 /* 3400 * Search for the first available slot. Because naddrfree 3401 * is not zero, we are guaranteed to find one. 3402 * Slot 0 is for the unique (primary) MAC. The first alternate 3403 * MAC slot is slot 1. 3404 * Each of the first two ports of Neptune has 16 alternate 3405 * MAC slots but only the first 7 (or 15) slots have assigned factory 3406 * MAC addresses. We first search among the slots without bundled 3407 * factory MACs. If we fail to find one in that range, then we 3408 * search the slots with bundled factory MACs. A factory MAC 3409 * will be wasted while the slot is used with a user MAC address. 3410 * But the slot could be used by factory MAC again after calling 3411 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 3412 */ 3413 if (mmac_info->num_factory_mmac < mmac_info->num_mmac) { 3414 for (slot = mmac_info->num_factory_mmac + 1; 3415 slot <= mmac_info->num_mmac; slot++) { 3416 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3417 break; 3418 } 3419 if (slot > mmac_info->num_mmac) { 3420 for (slot = 1; slot <= mmac_info->num_factory_mmac; 3421 slot++) { 3422 if (!(mmac_info->mac_pool[slot].flags 3423 & MMAC_SLOT_USED)) 3424 break; 3425 } 3426 } 3427 } else { 3428 for (slot = 1; slot <= mmac_info->num_mmac; slot++) { 3429 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3430 break; 3431 } 3432 } 3433 ASSERT(slot <= mmac_info->num_mmac); 3434 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) { 3435 mutex_exit(nxgep->genlock); 3436 return (err); 3437 } 3438 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 3439 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 3440 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 3441 mmac_info->naddrfree--; 3442 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3443 3444 maddr->mma_slot = slot; 3445 3446 mutex_exit(nxgep->genlock); 3447 return (0); 3448 } 3449 3450 /* 3451 * This function reserves an unused slot and programs the slot and the HW 3452 * with a factory mac address.
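 * The caller may pass mma_slot == -1 to take the first free factory slot, or an explicit slot in the range [1, num_factory_mmac].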
3453 */ 3454 static int 3455 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 3456 { 3457 p_nxge_t nxgep = arg; 3458 mac_addr_slot_t slot; 3459 nxge_mmac_t *mmac_info; 3460 int err; 3461 nxge_status_t status; 3462 3463 mutex_enter(nxgep->genlock); 3464 3465 /* 3466 * Make sure that nxge is initialized, if _start() has 3467 * not been called. 3468 */ 3469 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3470 status = nxge_init(nxgep); 3471 if (status != NXGE_OK) { 3472 mutex_exit(nxgep->genlock); 3473 return (ENXIO); 3474 } 3475 } 3476 3477 mmac_info = &nxgep->nxge_mmac_info; 3478 if (mmac_info->naddrfree == 0) { 3479 mutex_exit(nxgep->genlock); 3480 return (ENOSPC); 3481 } 3482 3483 slot = maddr->mma_slot; 3484 if (slot == -1) { /* -1: Take the first available slot */ 3485 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 3486 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3487 break; 3488 } 3489 if (slot > mmac_info->num_factory_mmac) { 3490 mutex_exit(nxgep->genlock); 3491 return (ENOSPC); 3492 } 3493 } 3494 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 3495 /* 3496 * Do not support factory MAC at a slot greater than 3497 * num_factory_mmac even when there are available factory 3498 * MAC addresses because the alternate MACs are bundled with 3499 * slot[1] through slot[num_factory_mmac] 3500 */ 3501 mutex_exit(nxgep->genlock); 3502 return (EINVAL); 3503 } 3504 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3505 mutex_exit(nxgep->genlock); 3506 return (EBUSY); 3507 } 3508 /* Verify the address to be reserved */ 3509 if (!mac_unicst_verify(nxgep->mach, 3510 mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 3511 mutex_exit(nxgep->genlock); 3512 return (EINVAL); 3513 } 3514 if (err = nxge_altmac_set(nxgep, 3515 mmac_info->factory_mac_pool[slot], slot)) { 3516 mutex_exit(nxgep->genlock); 3517 return (err); 3518 } 3519 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 3520 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3521 mmac_info->naddrfree--; 3522 3523 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 3524 mutex_exit(nxgep->genlock); 3525 3526 /* Pass info back to the caller */ 3527 maddr->mma_slot = slot; 3528 maddr->mma_addrlen = ETHERADDRL; 3529 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3530 3531 return (0); 3532 } 3533 3534 /* 3535 * Remove the specified mac address and update the HW not to filter 3536 * the mac address anymore. 3537 */ 3538 static int 3539 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 3540 { 3541 p_nxge_t nxgep = arg; 3542 nxge_mmac_t *mmac_info; 3543 uint8_t addrn; 3544 uint8_t portn; 3545 int err = 0; 3546 nxge_status_t status; 3547 3548 mutex_enter(nxgep->genlock); 3549 3550 /* 3551 * Make sure that nxge is initialized, if _start() has 3552 * not been called. 
3553 */ 3554 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3555 status = nxge_init(nxgep); 3556 if (status != NXGE_OK) { 3557 mutex_exit(nxgep->genlock); 3558 return (ENXIO); 3559 } 3560 } 3561 3562 mmac_info = &nxgep->nxge_mmac_info; 3563 if (slot < 1 || slot > mmac_info->num_mmac) { 3564 mutex_exit(nxgep->genlock); 3565 return (EINVAL); 3566 } 3567 3568 portn = nxgep->mac.portnum; 3569 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 3570 addrn = (uint8_t)slot - 1; 3571 else 3572 addrn = (uint8_t)slot; 3573 3574 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3575 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 3576 == NPI_SUCCESS) { 3577 mmac_info->naddrfree++; 3578 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 3579 /* 3580 * Regardless of whether the MAC we just stopped filtering 3581 * is a user addr or a factory addr, we must set 3582 * the MMAC_VENDOR_ADDR flag if this slot has an 3583 * associated factory MAC to indicate that a factory 3584 * MAC is available. 3585 */ 3586 if (slot <= mmac_info->num_factory_mmac) { 3587 mmac_info->mac_pool[slot].flags 3588 |= MMAC_VENDOR_ADDR; 3589 } 3590 /* 3591 * Clear mac_pool[slot].addr so that kstat shows 0 3592 * alternate MAC address if the slot is not used. 3593 * (But nxge_m_mmac_get returns the factory MAC even 3594 * when the slot is not used!) 3595 */ 3596 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 3597 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3598 } else { 3599 err = EIO; 3600 } 3601 } else { 3602 err = EINVAL; 3603 } 3604 3605 mutex_exit(nxgep->genlock); 3606 return (err); 3607 } 3608 3609 3610 /* 3611 * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve(). 3612 */ 3613 static int 3614 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr) 3615 { 3616 p_nxge_t nxgep = arg; 3617 mac_addr_slot_t slot; 3618 nxge_mmac_t *mmac_info; 3619 int err = 0; 3620 nxge_status_t status; 3621 3622 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3623 maddr->mma_addrlen)) 3624 return (EINVAL); 3625 3626 slot = maddr->mma_slot; 3627 3628 mutex_enter(nxgep->genlock); 3629 3630 /* 3631 * Make sure that nxge is initialized, if _start() has 3632 * not been called. 3633 */ 3634 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3635 status = nxge_init(nxgep); 3636 if (status != NXGE_OK) { 3637 mutex_exit(nxgep->genlock); 3638 return (ENXIO); 3639 } 3640 } 3641 3642 mmac_info = &nxgep->nxge_mmac_info; 3643 if (slot < 1 || slot > mmac_info->num_mmac) { 3644 mutex_exit(nxgep->genlock); 3645 return (EINVAL); 3646 } 3647 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3648 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) 3649 == 0) { 3650 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, 3651 ETHERADDRL); 3652 /* 3653 * Assume that the MAC passed down from the caller 3654 * is not a factory MAC address (the user should 3655 * call mmac_remove followed by mmac_reserve to 3656 * use the factory MAC for this slot). 3657 */ 3658 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 3659 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3660 } 3661 } else { 3662 err = EINVAL; 3663 } 3664 mutex_exit(nxgep->genlock); 3665 return (err); 3666 } 3667 3668 /* 3669 * nxge_m_mmac_get() - Get the MAC address and other information 3670 * related to the slot. mma_flags should be set to 0 in the call.
3671 * Note: although kstat shows MAC address as zero when a slot is 3672 * not used, Crossbow expects nxge_m_mmac_get to copy the factory MAC 3673 * to the caller as long as the slot is not using a user MAC address. 3674 * The following table shows the rules: 3675 * 3676 * USED VENDOR mma_addr 3677 * ------------------------------------------------------------ 3678 * (1) Slot uses a user MAC: yes no user MAC 3679 * (2) Slot uses a factory MAC: yes yes factory MAC 3680 * (3) Slot is not used but is 3681 * factory MAC capable: no yes factory MAC 3682 * (4) Slot is not used and is 3683 * not factory MAC capable: no no 0 3684 * ------------------------------------------------------------ 3685 */ 3686 static int 3687 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr) 3688 { 3689 nxge_t *nxgep = arg; 3690 mac_addr_slot_t slot; 3691 nxge_mmac_t *mmac_info; 3692 nxge_status_t status; 3693 3694 slot = maddr->mma_slot; 3695 3696 mutex_enter(nxgep->genlock); 3697 3698 /* 3699 * Make sure that nxge is initialized, if _start() has 3700 * not been called. 3701 */ 3702 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3703 status = nxge_init(nxgep); 3704 if (status != NXGE_OK) { 3705 mutex_exit(nxgep->genlock); 3706 return (ENXIO); 3707 } 3708 } 3709 3710 mmac_info = &nxgep->nxge_mmac_info; 3711 3712 if (slot < 1 || slot > mmac_info->num_mmac) { 3713 mutex_exit(nxgep->genlock); 3714 return (EINVAL); 3715 } 3716 maddr->mma_flags = 0; 3717 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) 3718 maddr->mma_flags |= MMAC_SLOT_USED; 3719 3720 if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) { 3721 maddr->mma_flags |= MMAC_VENDOR_ADDR; 3722 bcopy(mmac_info->factory_mac_pool[slot], 3723 maddr->mma_addr, ETHERADDRL); 3724 maddr->mma_addrlen = ETHERADDRL; 3725 } else { 3726 if (maddr->mma_flags & MMAC_SLOT_USED) { 3727 bcopy(mmac_info->mac_pool[slot].addr, 3728 maddr->mma_addr, ETHERADDRL); 3729 maddr->mma_addrlen = ETHERADDRL; 3730 } else { 3731 bzero(maddr->mma_addr, ETHERADDRL); 3732 maddr->mma_addrlen = 0; 3733 } 3734 } 3735 mutex_exit(nxgep->genlock); 3736 return (0); 3737 } 3738 3739 3740 static boolean_t 3741 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3742 { 3743 nxge_t *nxgep = arg; 3744 uint32_t *txflags = cap_data; 3745 multiaddress_capab_t *mmacp = cap_data; 3746 3747 switch (cap) { 3748 case MAC_CAPAB_HCKSUM: 3749 *txflags = HCKSUM_INET_PARTIAL; 3750 break; 3751 case MAC_CAPAB_POLL: 3752 /* 3753 * There's nothing for us to fill in; simply returning 3754 * B_TRUE to state that we support polling is sufficient. 3755 */ 3756 break; 3757 3758 case MAC_CAPAB_MULTIADDRESS: 3759 mutex_enter(nxgep->genlock); 3760 3761 mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac; 3762 mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree; 3763 mmacp->maddr_flag = 0; /* 0 is required by PSARC/2006/265 */ 3764 /* 3765 * maddr_handle is the driver's private data, passed back to 3766 * entry point functions as arg. 3767 */ 3768 mmacp->maddr_handle = nxgep; 3769 mmacp->maddr_add = nxge_m_mmac_add; 3770 mmacp->maddr_remove = nxge_m_mmac_remove; 3771 mmacp->maddr_modify = nxge_m_mmac_modify; 3772 mmacp->maddr_get = nxge_m_mmac_get; 3773 mmacp->maddr_reserve = nxge_m_mmac_reserve; 3774 3775 mutex_exit(nxgep->genlock); 3776 break; 3777 default: 3778 return (B_FALSE); 3779 } 3780 return (B_TRUE); 3781 } 3782 3783 /* 3784 * Module loading and removing entry points.
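 * (The standard pattern: _init() initializes the soft state list and registers the module through mac_init_ops()/mod_install(); _fini() refuses to unload while receive buffers are still loaned upstream (nxge_mblks_pending) and otherwise tears everything down in reverse.)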
3785 */ 3786 3787 static struct cb_ops nxge_cb_ops = { 3788 nodev, /* cb_open */ 3789 nodev, /* cb_close */ 3790 nodev, /* cb_strategy */ 3791 nodev, /* cb_print */ 3792 nodev, /* cb_dump */ 3793 nodev, /* cb_read */ 3794 nodev, /* cb_write */ 3795 nodev, /* cb_ioctl */ 3796 nodev, /* cb_devmap */ 3797 nodev, /* cb_mmap */ 3798 nodev, /* cb_segmap */ 3799 nochpoll, /* cb_chpoll */ 3800 ddi_prop_op, /* cb_prop_op */ 3801 NULL, 3802 D_MP, /* cb_flag */ 3803 CB_REV, /* rev */ 3804 nodev, /* int (*cb_aread)() */ 3805 nodev /* int (*cb_awrite)() */ 3806 }; 3807 3808 static struct dev_ops nxge_dev_ops = { 3809 DEVO_REV, /* devo_rev */ 3810 0, /* devo_refcnt */ 3811 nulldev, 3812 nulldev, /* devo_identify */ 3813 nulldev, /* devo_probe */ 3814 nxge_attach, /* devo_attach */ 3815 nxge_detach, /* devo_detach */ 3816 nodev, /* devo_reset */ 3817 &nxge_cb_ops, /* devo_cb_ops */ 3818 (struct bus_ops *)NULL, /* devo_bus_ops */ 3819 ddi_power /* devo_power */ 3820 }; 3821 3822 extern struct mod_ops mod_driverops; 3823 3824 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 3825 3826 /* 3827 * Module linkage information for the kernel. 3828 */ 3829 static struct modldrv nxge_modldrv = { 3830 &mod_driverops, 3831 NXGE_DESC_VER, 3832 &nxge_dev_ops 3833 }; 3834 3835 static struct modlinkage modlinkage = { 3836 MODREV_1, (void *) &nxge_modldrv, NULL 3837 }; 3838 3839 int 3840 _init(void) 3841 { 3842 int status; 3843 3844 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 3845 mac_init_ops(&nxge_dev_ops, "nxge"); 3846 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 3847 if (status != 0) { 3848 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 3849 "failed to init device soft state")); 3850 goto _init_exit; 3851 } 3852 3853 status = mod_install(&modlinkage); 3854 if (status != 0) { 3855 ddi_soft_state_fini(&nxge_list); 3856 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 3857 goto _init_exit; 3858 } 3859 3860 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 3861 3862 _init_exit: 3863 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 3864 3865 return (status); 3866 } 3867 3868 int 3869 _fini(void) 3870 { 3871 int status; 3872 3873 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 3874 3875 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 3876 3877 if (nxge_mblks_pending) 3878 return (EBUSY); 3879 3880 status = mod_remove(&modlinkage); 3881 if (status != DDI_SUCCESS) { 3882 NXGE_DEBUG_MSG((NULL, MOD_CTL, 3883 "Module removal failed 0x%08x", 3884 status)); 3885 goto _fini_exit; 3886 } 3887 3888 mac_fini_ops(&nxge_dev_ops); 3889 3890 ddi_soft_state_fini(&nxge_list); 3891 3892 MUTEX_DESTROY(&nxge_common_lock); 3893 _fini_exit: 3894 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 3895 3896 return (status); 3897 } 3898 3899 int 3900 _info(struct modinfo *modinfop) 3901 { 3902 int status; 3903 3904 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 3905 status = mod_info(&modlinkage, modinfop); 3906 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 3907 3908 return (status); 3909 } 3910 3911 /*ARGSUSED*/ 3912 static nxge_status_t 3913 nxge_add_intrs(p_nxge_t nxgep) 3914 { 3915 3916 int intr_types; 3917 int type = 0; 3918 int ddi_status = DDI_SUCCESS; 3919 nxge_status_t status = NXGE_OK; 3920 3921 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 3922 3923 nxgep->nxge_intr_type.intr_registered = B_FALSE; 3924 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 3925 nxgep->nxge_intr_type.msi_intx_cnt = 0; 3926 nxgep->nxge_intr_type.intr_added = 0; 3927 
3927 	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
3928 	nxgep->nxge_intr_type.intr_type = 0;
3929
3930 	if (nxgep->niu_type == N2_NIU) {
3931 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
3932 	} else if (nxge_msi_enable) {
3933 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
3934 	}
3935
3936 	/* Get the supported interrupt types */
3937 	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
3938 	    != DDI_SUCCESS) {
3939 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
3940 		    "ddi_intr_get_supported_types failed: status 0x%08x",
3941 		    ddi_status));
3942 		return (NXGE_ERROR | NXGE_DDI_FAILED);
3943 	}
3944 	nxgep->nxge_intr_type.intr_types = intr_types;
3945
3946 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3947 	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3948
3949 	/*
3950 	 * nxge_msi_enable selects the interrupt type to request:
3951 	 * 1 - MSI, 2 - MSI-X, others - FIXED (INTx emulation).
3952 	 * (MSI-X availability is platform dependent; see PSARC/2007/453.)
3953 	 */
3954 	switch (nxge_msi_enable) {
3955 	default:
3956 		type = DDI_INTR_TYPE_FIXED;
3957 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
3958 		    "use fixed (intx emulation) type %08x",
3959 		    type));
3960 		break;
3961
3962 	case 2:
3963 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
3964 		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3965 		if (intr_types & DDI_INTR_TYPE_MSIX) {
3966 			type = DDI_INTR_TYPE_MSIX;
3967 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3968 			    "ddi_intr_get_supported_types: MSIX 0x%08x",
3969 			    type));
3970 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
3971 			type = DDI_INTR_TYPE_MSI;
3972 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3973 			    "ddi_intr_get_supported_types: MSI 0x%08x",
3974 			    type));
3975 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3976 			type = DDI_INTR_TYPE_FIXED;
3977 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
3978 			    "ddi_intr_get_supported_types: FIXED 0x%08x",
3979 			    type));
3980 		}
3981 		break;
3982
3983 	case 1:
3984 		if (intr_types & DDI_INTR_TYPE_MSI) {
3985 			type = DDI_INTR_TYPE_MSI;
3986 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
3987 			    "ddi_intr_get_supported_types: MSI 0x%08x",
3988 			    type));
3989 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
3990 			type = DDI_INTR_TYPE_MSIX;
3991 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3992 			    "ddi_intr_get_supported_types: MSIX 0x%08x",
3993 			    type));
3994 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3995 			type = DDI_INTR_TYPE_FIXED;
3996 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
3997 			    "ddi_intr_get_supported_types: FIXED 0x%08x",
3998 			    type));
3999 		}
4000 	}
4001
4002 	nxgep->nxge_intr_type.intr_type = type;
4003 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
4004 	    type == DDI_INTR_TYPE_FIXED) &&
4005 	    nxgep->nxge_intr_type.niu_msi_enable) {
4006 		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
4007 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4008 			    " nxge_add_intrs: "
4009 			    " nxge_add_intrs_adv failed: status 0x%08x",
4010 			    status));
4011 			return (status);
4012 		} else {
4013 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
4014 			    "interrupts registered : type %d", type));
4015 			nxgep->nxge_intr_type.intr_registered = B_TRUE;
4016
4017 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
4018 			    "\nAdded advanced nxge add_intr_adv "
4019 			    "intr type 0x%x\n", type));
4020
4021 			return (status);
4022 		}
4023 	}
4024
4025 	if (!nxgep->nxge_intr_type.intr_registered) {
4026 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
4027 		    "failed to register interrupts"));
4028 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4029 	}
4030
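	/*
	 * Illustrative outcome of the selection above (a sketch, not an
	 * exhaustive matrix): if ddi_intr_get_supported_types() reported
	 * (DDI_INTR_TYPE_FIXED | DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX),
	 * then nxge_msi_enable == 2 picks MSI-X, 1 picks MSI, and any
	 * other value falls back to FIXED (INTx emulation). The tunable
	 * can be set in /etc/system, e.g.:
	 *
	 *	set nxge:nxge_msi_enable = 1
	 */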
4031 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
4032 	return (status);
4033 }
4034
4035 /*ARGSUSED*/
4036 static nxge_status_t
4037 nxge_add_soft_intrs(p_nxge_t nxgep)
4038 {
4039
4040 	int		ddi_status = DDI_SUCCESS;
4041 	nxge_status_t	status = NXGE_OK;
4042
4043 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));
4044
4045 	nxgep->resched_id = NULL;
4046 	nxgep->resched_running = B_FALSE;
4047 	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
4048 	    &nxgep->resched_id,
4049 	    NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
4050 	if (ddi_status != DDI_SUCCESS) {
4051 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
4052 		    "ddi_add_softintr failed: status 0x%08x",
4053 		    ddi_status));
4054 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4055 	}
4056
4057 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));
4058
4059 	return (status);
4060 }
4061
4062 static nxge_status_t
4063 nxge_add_intrs_adv(p_nxge_t nxgep)
4064 {
4065 	int		intr_type;
4066 	p_nxge_intr_t	intrp;
4067
4068 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
4069
4070 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4071 	intr_type = intrp->intr_type;
4072 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
4073 	    intr_type));
4074
4075 	switch (intr_type) {
4076 	case DDI_INTR_TYPE_MSI:		/* 0x2 */
4077 	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
4078 		return (nxge_add_intrs_adv_type(nxgep, intr_type));
4079
4080 	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
4081 		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
4082
4083 	default:
4084 		return (NXGE_ERROR);
4085 	}
4086 }
4087
4088
4089 /*ARGSUSED*/
4090 static nxge_status_t
4091 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
4092 {
4093 	dev_info_t	*dip = nxgep->dip;
4094 	p_nxge_ldg_t	ldgp;
4095 	p_nxge_intr_t	intrp;
4096 	uint_t		*inthandler;
4097 	void		*arg1, *arg2;
4098 	int		behavior;
4099 	int		nintrs, navail, nrequest;
4100 	int		nactual, nrequired;
4101 	int		inum = 0;
4102 	int		x, y;
4103 	int		ddi_status = DDI_SUCCESS;
4104 	nxge_status_t	status = NXGE_OK;
4105
4106 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
4107 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4108 	intrp->start_inum = 0;
4109
4110 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
4111 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
4112 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4113 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
4114 		    "nintrs: %d", ddi_status, nintrs));
4115 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4116 	}
4117
4118 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
4119 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
4120 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4121 		    "ddi_intr_get_navail() failed, status: 0x%x, "
4122 		    "navail: %d", ddi_status, navail));
4123 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4124 	}
4125
4126 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
4127 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
4128 	    nintrs, navail));
4129
4130 	/* PSARC/2007/453 MSI-X interrupt limit override */
4131 	if (int_type == DDI_INTR_TYPE_MSIX) {
4132 		nrequest = nxge_create_msi_property(nxgep);
4133 		if (nrequest < navail) {
4134 			navail = nrequest;
4135 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
4136 			    "nxge_add_intrs_adv_type: nintrs %d "
4137 			    "navail %d (nrequest %d)",
4138 			    nintrs, navail, nrequest));
4139 		}
4140 	}
4141
4142 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
4143 		/* MSI counts must be a power of 2; round down (e.g. 6 -> 4) */
4144 		if ((navail & 16) == 16) {
4145 			navail = 16;
4146 		} else if ((navail & 8) == 8) {
4147 			navail = 8;
4148 		} else if ((navail & 4) == 4) {
4149 			navail = 4;
4150 		} else if ((navail & 2) == 2) {
4151 			navail = 2;
4152 		} else {
4153 			navail = 1;
4154 		}
4155 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
4156 		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
4157 		    "navail %d", nintrs, navail));
4158 	}
4159
4160 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4161 	    DDI_INTR_ALLOC_NORMAL);
4162 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4163 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
4164 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4165 	    navail, &nactual, behavior);
4166 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
4167 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4168 		    " ddi_intr_alloc() failed: %d",
4169 		    ddi_status));
4170 		kmem_free(intrp->htable, intrp->intr_size);
4171 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4172 	}
4173
4174 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4175 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4176 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4177 		    " ddi_intr_get_pri() failed: %d",
4178 		    ddi_status));
4179 		/* Free already allocated interrupts */
4180 		for (y = 0; y < nactual; y++) {
4181 			(void) ddi_intr_free(intrp->htable[y]);
4182 		}
4183
4184 		kmem_free(intrp->htable, intrp->intr_size);
4185 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4186 	}
4187
4188 	nrequired = 0;
4189 	switch (nxgep->niu_type) {
4190 	default:
4191 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
4192 		break;
4193
4194 	case N2_NIU:
4195 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
4196 		break;
4197 	}
4198
4199 	if (status != NXGE_OK) {
4200 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4201 		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
4202 		    "failed: 0x%x", status));
4203 		/* Free already allocated interrupts */
4204 		for (y = 0; y < nactual; y++) {
4205 			(void) ddi_intr_free(intrp->htable[y]);
4206 		}
4207
4208 		kmem_free(intrp->htable, intrp->intr_size);
4209 		return (status);
4210 	}
4211
4212 	ldgp = nxgep->ldgvp->ldgp;
4213 	for (x = 0; x < nrequired; x++, ldgp++) {
4214 		ldgp->vector = (uint8_t)x;
4215 		ldgp->intdata = SID_DATA(ldgp->func, x);
4216 		arg1 = ldgp->ldvp;
4217 		arg2 = nxgep;
4218 		if (ldgp->nldvs == 1) {
4219 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4220 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
4221 			    "nxge_add_intrs_adv_type: "
4222 			    "arg1 0x%x arg2 0x%x: "
4223 			    "1-1 int handler (entry %d intdata 0x%x)\n",
4224 			    arg1, arg2,
4225 			    x, ldgp->intdata));
4226 		} else if (ldgp->nldvs > 1) {
4227 			inthandler = (uint_t *)ldgp->sys_intr_handler;
4228 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
4229 			    "nxge_add_intrs_adv_type: "
4230 			    "arg1 0x%x arg2 0x%x: "
4231 			    "nldvs %d int handler "
4232 			    "(entry %d intdata 0x%x)\n",
4233 			    arg1, arg2,
4234 			    ldgp->nldvs, x, ldgp->intdata));
4235 		}
4236
4237 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
4238 		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
4239 		    "htable 0x%llx", x, intrp->htable[x]));
4240
4241 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4242 		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
4243 		    != DDI_SUCCESS) {
4244 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4245 			    "==> nxge_add_intrs_adv_type: failed #%d "
4246 			    "status 0x%x", x, ddi_status));
4247 			for (y = 0; y < intrp->intr_added; y++) {
4248 				(void) ddi_intr_remove_handler(
4249 				    intrp->htable[y]);
4250 			}
4251 			/* Free already allocated intr */
4252 			for (y = 0; y < nactual; y++) {
4253 				(void) ddi_intr_free(intrp->htable[y]);
4254 			}
4255 			kmem_free(intrp->htable, intrp->intr_size);
4256
4257 			(void) nxge_ldgv_uninit(nxgep);
4258
4259 			return (NXGE_ERROR | NXGE_DDI_FAILED);
4260 		}
4261 		intrp->intr_added++;
4262 	}
4263
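	/*
	 * Worked example for the flow above (illustrative numbers only):
	 * with int_type == DDI_INTR_TYPE_MSI and navail == 6, the
	 * power-of-2 rounding leaves navail == 4; ddi_intr_alloc() may
	 * still grant fewer (nactual <= 4), and nxge_ldgv_init() then
	 * spreads the logical devices over nrequired groups, each of
	 * which gets exactly one ddi_intr_add_handler() call in the
	 * loop above.
	 */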
4264 	intrp->msi_intx_cnt = nactual;
4265
4266 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
4267 	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
4268 	    navail, nactual,
4269 	    intrp->msi_intx_cnt,
4270 	    intrp->intr_added));
4271
4272 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4273
4274 	(void) nxge_intr_ldgv_init(nxgep);
4275
4276 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));
4277
4278 	return (status);
4279 }
4280
4281 /*ARGSUSED*/
4282 static nxge_status_t
4283 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
4284 {
4285 	dev_info_t	*dip = nxgep->dip;
4286 	p_nxge_ldg_t	ldgp;
4287 	p_nxge_intr_t	intrp;
4288 	uint_t		*inthandler;
4289 	void		*arg1, *arg2;
4290 	int		behavior;
4291 	int		nintrs, navail;
4292 	int		nactual, nrequired;
4293 	int		inum = 0;
4294 	int		x, y;
4295 	int		ddi_status = DDI_SUCCESS;
4296 	nxge_status_t	status = NXGE_OK;
4297
4298 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
4299 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
4300 	intrp->start_inum = 0;
4301
4302 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
4303 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
4304 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4305 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
4306 		    "nintrs: %d", ddi_status, nintrs));
4307 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4308 	}
4309
4310 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
4311 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
4312 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4313 		    "ddi_intr_get_navail() failed, status: 0x%x, "
4314 		    "navail: %d", ddi_status, navail));
4315 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4316 	}
4317
4318 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
4319 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
4320 	    nintrs, navail));
4321
4322 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4323 	    DDI_INTR_ALLOC_NORMAL);
4324 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4325 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
4326 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4327 	    navail, &nactual, behavior);
4328 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
4329 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4330 		    " ddi_intr_alloc() failed: %d",
4331 		    ddi_status));
4332 		kmem_free(intrp->htable, intrp->intr_size);
4333 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4334 	}
4335
4336 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4337 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4338 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4339 		    " ddi_intr_get_pri() failed: %d",
4340 		    ddi_status));
4341 		/* Free already allocated interrupts */
4342 		for (y = 0; y < nactual; y++) {
4343 			(void) ddi_intr_free(intrp->htable[y]);
4344 		}
4345
4346 		kmem_free(intrp->htable, intrp->intr_size);
4347 		return (NXGE_ERROR | NXGE_DDI_FAILED);
4348 	}
4349
4350 	nrequired = 0;
4351 	switch (nxgep->niu_type) {
4352 	default:
4353 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
4354 		break;
4355
4356 	case N2_NIU:
4357 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
4358 		break;
4359 	}
4360
4361 	if (status != NXGE_OK) {
4362 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4363 		    "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
4364 		    "failed: 0x%x", status));
4365 		/* Free already allocated interrupts */
4366 		for (y = 0; y < nactual; y++) {
4367 			(void) ddi_intr_free(intrp->htable[y]);
4368 		}
4369
4370 		kmem_free(intrp->htable, intrp->intr_size);
4371 		return (status);
4372 	}
4373
4374 	ldgp = nxgep->ldgvp->ldgp;
4375 	for (x = 0; x < nrequired; x++, ldgp++) {
4376 		ldgp->vector = (uint8_t)x;
4377 		if (nxgep->niu_type != N2_NIU) {
4378 			ldgp->intdata = SID_DATA(ldgp->func, x);
4379 		}
4380
4381 		arg1 = ldgp->ldvp;
4382 		arg2 = nxgep;
4383 		if (ldgp->nldvs == 1) {
4384 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4385 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
4386 			    "nxge_add_intrs_adv_type_fix: "
4387 			    "1-1 int handler(%d) ldg %d ldv %d "
4388 			    "arg1 $%p arg2 $%p\n",
4389 			    x, ldgp->ldg, ldgp->ldvp->ldv,
4390 			    arg1, arg2));
4391 		} else if (ldgp->nldvs > 1) {
4392 			inthandler = (uint_t *)ldgp->sys_intr_handler;
4393 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
4394 			    "nxge_add_intrs_adv_type_fix: "
4395 			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
4396 			    "arg1 0x%016llx arg2 0x%016llx\n",
4397 			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4398 			    arg1, arg2));
4399 		}
4400
4401 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4402 		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
4403 		    != DDI_SUCCESS) {
4404 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4405 			    "==> nxge_add_intrs_adv_type_fix: failed #%d "
4406 			    "status 0x%x", x, ddi_status));
4407 			for (y = 0; y < intrp->intr_added; y++) {
4408 				(void) ddi_intr_remove_handler(
4409 				    intrp->htable[y]);
4410 			}
4411 			/* Free already allocated intr */
4412 			for (y = 0; y < nactual; y++) {
4413 				(void) ddi_intr_free(intrp->htable[y]);
4414 			}
4415 			kmem_free(intrp->htable, intrp->intr_size);
4416
4417 			(void) nxge_ldgv_uninit(nxgep);
4418
4419 			return (NXGE_ERROR | NXGE_DDI_FAILED);
4420 		}
4421 		intrp->intr_added++;
4422 	}
4423
4424 	intrp->msi_intx_cnt = nactual;
4425
4426 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4427
4428 	status = nxge_intr_ldgv_init(nxgep);
4429 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));
4430
4431 	return (status);
4432 }
4433
4434 static void
4435 nxge_remove_intrs(p_nxge_t nxgep)
4436 {
4437 	int		i, inum;
4438 	p_nxge_intr_t	intrp;
4439
4440
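	/*
	 * Teardown mirrors nxge_add_intrs_adv_type{,_fix}() in reverse;
	 * a condensed sketch of the order used below:
	 *
	 *	ddi_intr_block_disable() or per-vector ddi_intr_disable()
	 *	ddi_intr_remove_handler()	(intr_added handlers)
	 *	ddi_intr_free()			(msi_intx_cnt vectors)
	 *	kmem_free(htable)
	 *	nxge_ldgv_uninit()
	 *	ddi_prop_remove("#msix-request")
	 */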
NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 4441 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4442 if (!intrp->intr_registered) { 4443 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4444 "<== nxge_remove_intrs: interrupts not registered")); 4445 return; 4446 } 4447 4448 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 4449 4450 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4451 (void) ddi_intr_block_disable(intrp->htable, 4452 intrp->intr_added); 4453 } else { 4454 for (i = 0; i < intrp->intr_added; i++) { 4455 (void) ddi_intr_disable(intrp->htable[i]); 4456 } 4457 } 4458 4459 for (inum = 0; inum < intrp->intr_added; inum++) { 4460 if (intrp->htable[inum]) { 4461 (void) ddi_intr_remove_handler(intrp->htable[inum]); 4462 } 4463 } 4464 4465 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 4466 if (intrp->htable[inum]) { 4467 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 4468 "nxge_remove_intrs: ddi_intr_free inum %d " 4469 "msi_intx_cnt %d intr_added %d", 4470 inum, 4471 intrp->msi_intx_cnt, 4472 intrp->intr_added)); 4473 4474 (void) ddi_intr_free(intrp->htable[inum]); 4475 } 4476 } 4477 4478 kmem_free(intrp->htable, intrp->intr_size); 4479 intrp->intr_registered = B_FALSE; 4480 intrp->intr_enabled = B_FALSE; 4481 intrp->msi_intx_cnt = 0; 4482 intrp->intr_added = 0; 4483 4484 (void) nxge_ldgv_uninit(nxgep); 4485 4486 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 4487 "#msix-request"); 4488 4489 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 4490 } 4491 4492 /*ARGSUSED*/ 4493 static void 4494 nxge_remove_soft_intrs(p_nxge_t nxgep) 4495 { 4496 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs")); 4497 if (nxgep->resched_id) { 4498 ddi_remove_softintr(nxgep->resched_id); 4499 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4500 "==> nxge_remove_soft_intrs: removed")); 4501 nxgep->resched_id = NULL; 4502 } 4503 4504 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs")); 4505 } 4506 4507 /*ARGSUSED*/ 4508 static void 4509 nxge_intrs_enable(p_nxge_t nxgep) 4510 { 4511 p_nxge_intr_t intrp; 4512 int i; 4513 int status; 4514 4515 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 4516 4517 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4518 4519 if (!intrp->intr_registered) { 4520 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 4521 "interrupts are not registered")); 4522 return; 4523 } 4524 4525 if (intrp->intr_enabled) { 4526 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4527 "<== nxge_intrs_enable: already enabled")); 4528 return; 4529 } 4530 4531 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4532 status = ddi_intr_block_enable(intrp->htable, 4533 intrp->intr_added); 4534 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 4535 "block enable - status 0x%x total inums #%d\n", 4536 status, intrp->intr_added)); 4537 } else { 4538 for (i = 0; i < intrp->intr_added; i++) { 4539 status = ddi_intr_enable(intrp->htable[i]); 4540 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 4541 "ddi_intr_enable:enable - status 0x%x " 4542 "total inums %d enable inum #%d\n", 4543 status, intrp->intr_added, i)); 4544 if (status == DDI_SUCCESS) { 4545 intrp->intr_enabled = B_TRUE; 4546 } 4547 } 4548 } 4549 4550 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable")); 4551 } 4552 4553 /*ARGSUSED*/ 4554 static void 4555 nxge_intrs_disable(p_nxge_t nxgep) 4556 { 4557 p_nxge_intr_t intrp; 4558 int i; 4559 4560 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable")); 4561 4562 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4563 4564 if (!intrp->intr_registered) { 4565 
NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: " 4566 "interrupts are not registered")); 4567 return; 4568 } 4569 4570 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4571 (void) ddi_intr_block_disable(intrp->htable, 4572 intrp->intr_added); 4573 } else { 4574 for (i = 0; i < intrp->intr_added; i++) { 4575 (void) ddi_intr_disable(intrp->htable[i]); 4576 } 4577 } 4578 4579 intrp->intr_enabled = B_FALSE; 4580 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable")); 4581 } 4582 4583 static nxge_status_t 4584 nxge_mac_register(p_nxge_t nxgep) 4585 { 4586 mac_register_t *macp; 4587 int status; 4588 4589 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register")); 4590 4591 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 4592 return (NXGE_ERROR); 4593 4594 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 4595 macp->m_driver = nxgep; 4596 macp->m_dip = nxgep->dip; 4597 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; 4598 macp->m_callbacks = &nxge_m_callbacks; 4599 macp->m_min_sdu = 0; 4600 macp->m_max_sdu = nxgep->mac.maxframesize - 4601 sizeof (struct ether_header) - ETHERFCSL - 4; 4602 4603 status = mac_register(macp, &nxgep->mach); 4604 mac_free(macp); 4605 4606 if (status != 0) { 4607 cmn_err(CE_WARN, 4608 "!nxge_mac_register failed (status %d instance %d)", 4609 status, nxgep->instance); 4610 return (NXGE_ERROR); 4611 } 4612 4613 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success " 4614 "(instance %d)", nxgep->instance)); 4615 4616 return (NXGE_OK); 4617 } 4618 4619 void 4620 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp) 4621 { 4622 ssize_t size; 4623 mblk_t *nmp; 4624 uint8_t blk_id; 4625 uint8_t chan; 4626 uint32_t err_id; 4627 err_inject_t *eip; 4628 4629 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject")); 4630 4631 size = 1024; 4632 nmp = mp->b_cont; 4633 eip = (err_inject_t *)nmp->b_rptr; 4634 blk_id = eip->blk_id; 4635 err_id = eip->err_id; 4636 chan = eip->chan; 4637 cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id); 4638 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id); 4639 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 4640 switch (blk_id) { 4641 case MAC_BLK_ID: 4642 break; 4643 case TXMAC_BLK_ID: 4644 break; 4645 case RXMAC_BLK_ID: 4646 break; 4647 case MIF_BLK_ID: 4648 break; 4649 case IPP_BLK_ID: 4650 nxge_ipp_inject_err(nxgep, err_id); 4651 break; 4652 case TXC_BLK_ID: 4653 nxge_txc_inject_err(nxgep, err_id); 4654 break; 4655 case TXDMA_BLK_ID: 4656 nxge_txdma_inject_err(nxgep, err_id, chan); 4657 break; 4658 case RXDMA_BLK_ID: 4659 nxge_rxdma_inject_err(nxgep, err_id, chan); 4660 break; 4661 case ZCP_BLK_ID: 4662 nxge_zcp_inject_err(nxgep, err_id); 4663 break; 4664 case ESPC_BLK_ID: 4665 break; 4666 case FFLP_BLK_ID: 4667 break; 4668 case PHY_BLK_ID: 4669 break; 4670 case ETHER_SERDES_BLK_ID: 4671 break; 4672 case PCIE_SERDES_BLK_ID: 4673 break; 4674 case VIR_BLK_ID: 4675 break; 4676 } 4677 4678 nmp->b_wptr = nmp->b_rptr + size; 4679 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 4680 4681 miocack(wq, mp, (int)size, 0); 4682 } 4683 4684 static int 4685 nxge_init_common_dev(p_nxge_t nxgep) 4686 { 4687 p_nxge_hw_list_t hw_p; 4688 dev_info_t *p_dip; 4689 4690 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 4691 4692 p_dip = nxgep->p_dip; 4693 MUTEX_ENTER(&nxge_common_lock); 4694 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4695 "==> nxge_init_common_dev:func # %d", 4696 nxgep->function_num)); 4697 /* 4698 * Loop through existing per neptune hardware list. 
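 * Each nxge_hw_list_t entry is shared by every function (port) behind
 * the same parent dip. As an illustrative walk-through (example
 * numbers, not additional logic): on a 4-port Neptune, function 0
 * finds no match, so it zallocs a new entry, creates the per-chip
 * locks and leaves ndevs at 1; functions 1 through 3 then match on
 * parent_devp and simply bump ndevs to 2, 3 and 4.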
4699 */ 4700 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 4701 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4702 "==> nxge_init_common_device:func # %d " 4703 "hw_p $%p parent dip $%p", 4704 nxgep->function_num, 4705 hw_p, 4706 p_dip)); 4707 if (hw_p->parent_devp == p_dip) { 4708 nxgep->nxge_hw_p = hw_p; 4709 hw_p->ndevs++; 4710 hw_p->nxge_p[nxgep->function_num] = nxgep; 4711 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4712 "==> nxge_init_common_device:func # %d " 4713 "hw_p $%p parent dip $%p " 4714 "ndevs %d (found)", 4715 nxgep->function_num, 4716 hw_p, 4717 p_dip, 4718 hw_p->ndevs)); 4719 break; 4720 } 4721 } 4722 4723 if (hw_p == NULL) { 4724 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4725 "==> nxge_init_common_device:func # %d " 4726 "parent dip $%p (new)", 4727 nxgep->function_num, 4728 p_dip)); 4729 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 4730 hw_p->parent_devp = p_dip; 4731 hw_p->magic = NXGE_NEPTUNE_MAGIC; 4732 nxgep->nxge_hw_p = hw_p; 4733 hw_p->ndevs++; 4734 hw_p->nxge_p[nxgep->function_num] = nxgep; 4735 hw_p->next = nxge_hw_list; 4736 if (nxgep->niu_type == N2_NIU) { 4737 hw_p->niu_type = N2_NIU; 4738 hw_p->platform_type = P_NEPTUNE_NIU; 4739 } else { 4740 hw_p->niu_type = NIU_TYPE_NONE; 4741 hw_p->platform_type = P_NEPTUNE_NONE; 4742 } 4743 4744 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 4745 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 4746 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 4747 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 4748 MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL); 4749 4750 nxge_hw_list = hw_p; 4751 4752 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list); 4753 } 4754 4755 MUTEX_EXIT(&nxge_common_lock); 4756 4757 nxgep->platform_type = hw_p->platform_type; 4758 if (nxgep->niu_type != N2_NIU) { 4759 nxgep->niu_type = hw_p->niu_type; 4760 } 4761 4762 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4763 "==> nxge_init_common_device (nxge_hw_list) $%p", 4764 nxge_hw_list)); 4765 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device")); 4766 4767 return (NXGE_OK); 4768 } 4769 4770 static void 4771 nxge_uninit_common_dev(p_nxge_t nxgep) 4772 { 4773 p_nxge_hw_list_t hw_p, h_hw_p; 4774 dev_info_t *p_dip; 4775 4776 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device")); 4777 if (nxgep->nxge_hw_p == NULL) { 4778 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4779 "<== nxge_uninit_common_device (no common)")); 4780 return; 4781 } 4782 4783 MUTEX_ENTER(&nxge_common_lock); 4784 h_hw_p = nxge_hw_list; 4785 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 4786 p_dip = hw_p->parent_devp; 4787 if (nxgep->nxge_hw_p == hw_p && 4788 p_dip == nxgep->p_dip && 4789 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC && 4790 hw_p->magic == NXGE_NEPTUNE_MAGIC) { 4791 4792 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4793 "==> nxge_uninit_common_device:func # %d " 4794 "hw_p $%p parent dip $%p " 4795 "ndevs %d (found)", 4796 nxgep->function_num, 4797 hw_p, 4798 p_dip, 4799 hw_p->ndevs)); 4800 4801 nxgep->nxge_hw_p = NULL; 4802 if (hw_p->ndevs) { 4803 hw_p->ndevs--; 4804 } 4805 hw_p->nxge_p[nxgep->function_num] = NULL; 4806 if (!hw_p->ndevs) { 4807 MUTEX_DESTROY(&hw_p->nxge_vlan_lock); 4808 MUTEX_DESTROY(&hw_p->nxge_tcam_lock); 4809 MUTEX_DESTROY(&hw_p->nxge_cfg_lock); 4810 MUTEX_DESTROY(&hw_p->nxge_mdio_lock); 4811 MUTEX_DESTROY(&hw_p->nxge_mii_lock); 4812 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4813 "==> nxge_uninit_common_device: " 4814 "func # %d " 4815 "hw_p $%p parent dip $%p " 4816 "ndevs %d (last)", 4817 nxgep->function_num, 4818 hw_p, 4819 p_dip, 
4820 				    hw_p->ndevs));
4821
4822 				if (hw_p == nxge_hw_list) {
4823 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4824 					    "==> nxge_uninit_common_device:"
4825 					    "remove head func # %d "
4826 					    "hw_p $%p parent dip $%p "
4827 					    "ndevs %d (head)",
4828 					    nxgep->function_num,
4829 					    hw_p,
4830 					    p_dip,
4831 					    hw_p->ndevs));
4832 					nxge_hw_list = hw_p->next;
4833 				} else {
4834 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4835 					    "==> nxge_uninit_common_device:"
4836 					    "remove middle func # %d "
4837 					    "hw_p $%p parent dip $%p "
4838 					    "ndevs %d (middle)",
4839 					    nxgep->function_num,
4840 					    hw_p,
4841 					    p_dip,
4842 					    hw_p->ndevs));
4843 					h_hw_p->next = hw_p->next;
4844 				}
4845
4846 				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
4847 			}
4848 			break;
4849 		} else {
4850 			h_hw_p = hw_p;
4851 		}
4852 	}
4853
4854 	MUTEX_EXIT(&nxge_common_lock);
4855 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4856 	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
4857 	    nxge_hw_list));
4858
4859 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
4860 }
4861
4862 /*
4863  * Determines the number of ports from the niu_type or the platform type.
4864  * Returns the number of ports, or zero on failure.
4865  */
4866
4867 int
4868 nxge_get_nports(p_nxge_t nxgep)
4869 {
4870 	int nports = 0;
4871
4872 	switch (nxgep->niu_type) {
4873 	case N2_NIU:
4874 	case NEPTUNE_2_10GF:
4875 		nports = 2;
4876 		break;
4877 	case NEPTUNE_4_1GC:
4878 	case NEPTUNE_2_10GF_2_1GC:
4879 	case NEPTUNE_1_10GF_3_1GC:
4880 	case NEPTUNE_1_1GC_1_10GF_2_1GC:
4881 		nports = 4;
4882 		break;
4883 	default:
4884 		switch (nxgep->platform_type) {
4885 		case P_NEPTUNE_NIU:
4886 		case P_NEPTUNE_ATLAS_2PORT:
4887 			nports = 2;
4888 			break;
4889 		case P_NEPTUNE_ATLAS_4PORT:
4890 		case P_NEPTUNE_MARAMBA_P0:
4891 		case P_NEPTUNE_MARAMBA_P1:
4892 			nports = 4;
4893 			break;
4894 		default:
4895 			break;
4896 		}
4897 		break;
4898 	}
4899
4900 	return (nports);
4901 }
4902
4903 /*
4904  * The following two functions are to support
4905  * PSARC/2007/453 MSI-X interrupt limit override.
4906  */
4907 static int
4908 nxge_create_msi_property(p_nxge_t nxgep)
4909 {
4910 	int	nmsi;
4911 	extern	int ncpus;
4912
4913 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_create_msi_property"));
4914
4915 	switch (nxgep->mac.portmode) {
4916 	case PORT_10G_COPPER:
4917 	case PORT_10G_FIBER:
4918 		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
4919 		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
4920 		/*
4921 		 * Request at most 8 MSI-X vectors. If the system
4922 		 * has fewer than 8 CPUs, request one MSI-X vector
4923 		 * per CPU instead.
4924 		 */
4925 		if (ncpus >= NXGE_MSIX_REQUEST_10G) {
4926 			nmsi = NXGE_MSIX_REQUEST_10G;
4927 		} else {
4928 			nmsi = ncpus;
4929 		}
4930 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4931 		    "==> nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
4932 		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
4933 		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
4934 		break;
4935
4936 	default:
4937 		nmsi = NXGE_MSIX_REQUEST_1G;
4938 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
4939 		    "==> nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
4940 		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
4941 		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
4942 		break;
4943 	}
4944
4945 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_create_msi_property"));
4946 	return (nmsi);
4947 }
4948
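/*
 * End-to-end example of the PSARC/2007/453 override (illustrative
 * numbers, assuming a 10G port on a 16-CPU system): this routine
 * creates the "#msix-request" property and, per the comment above,
 * returns nmsi == NXGE_MSIX_REQUEST_10G (8); nxge_add_intrs_adv_type()
 * then clamps what it asks of ddi_intr_alloc():
 *
 *	nrequest = nxge_create_msi_property(nxgep);
 *	if (nrequest < navail)
 *		navail = nrequest;
 *
 * nxge_remove_intrs() removes the property again at teardown.
 */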