/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include	<sys/nxge/nxge_impl.h>
#include	<sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 *  and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t nxge_msi_enable = 2;
#else
uint32_t nxge_msi_enable = 1;
#endif

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t nxge_jumbo_enable = B_FALSE;
uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/*
 * Debugging flags:
 *	nxge_no_tx_lb:		transmit load balancing
 *	nxge_tx_lb_policy:	0 - TCP port (default)
 *				3 - DEST MAC
 */
uint32_t nxge_no_tx_lb = 0;
uint32_t nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Add tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

rtrace_t npi_rtracebuf;
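/*
 * Usage note for the tunable parameters declared above (illustrative;
 * the variable names are authoritative but the example values are
 * hypothetical).  They can be set at boot time via /etc/system, e.g.:
 *
 *	set nxge:nxge_jumbo_enable = 1
 *	set nxge:nxge_max_rx_pkts = 512
 */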
#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef	NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *,
	size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *,
	size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_unicst(void *, const uint8_t *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static void nxge_m_resources(void *);
mblk_t *nxge_m_tx(void *arg, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	mac_addr_slot_t slot);
static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
	boolean_t factory);
static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)

static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	nxge_m_unicst,
	nxge_m_tx,
	nxge_m_resources,
	nxge_m_ioctl,
	nxge_m_getcapab
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level = 0;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

nxge_os_mutex_t nxge_mii_lock;
static uint32_t nxge_mii_lock_init = 0;
nxge_os_mutex_t nxge_mdio_lock;
static uint32_t nxge_mdio_lock_init = 0;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t,
	ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *,
	ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};
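/*
 * Note: the driver uses three sets of device access attributes -- the
 * PIO register attributes above and the two DMA attribute sets below.
 * They differ only in endianness: registers and descriptors are
 * accessed little-endian, packet data buffers big-endian.
 */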
/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * dma chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that the fewest number of dma chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes [] = {0x2000};
#else
size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
		0x10000, 0x20000, 0x40000, 0x80000,
		0x100000, 0x200000, 0x400000, 0x800000, 0x1000000};
#endif

/*
 * nxge_attach - attach or resume an instance of the device.
 */
static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t	nxgep = NULL;
	int		instance;
	int		status = DDI_SUCCESS;
	uint8_t		portn;
	nxge_mmac_t	*mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr,
	    &nxge_rx_dma_attr);

	status = nxge_map_regs(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
	nxgep->mac.portnum = portn;
	if ((portn == 0) || (portn == 1))
		nxgep->mac.porttype = PORT_TYPE_XMAC;
	else
		nxgep->mac.porttype = PORT_TYPE_BMAC;
	/*
	 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
	 * internally; the remaining 2 ports use BMAC (1G "Big" MAC).
	 * The two types of MACs have different characteristics.
	 */
	mmac_info = &nxgep->nxge_mmac_info;
	if (nxgep->function_num < 2) {
		mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
		mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
	} else {
		mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
		mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Read the vpd info from the eeprom into a local data
	 * structure and check the VPD info for validity.
	 */
	nxge_vpd_info_get(nxgep);

	status = nxge_xcvr_find(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
		    " Couldn't determine card type"
		    " .... exit "));
		goto nxge_attach_fail5;
	}

	status = nxge_get_config_properties(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed"));
		goto nxge_attach_fail;
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: "
			    "%s: cannot negotiate "
			    "hypervisor services "
			    "revision %d "
			    "group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx "
			    "errno: %d",
			    niu_hsvc.hsvc_modname,
			    niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group,
			    niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor,
			    status));
			status = DDI_FAILURE;
			goto nxge_attach_fail;
		}

		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}
#endif

	nxge_hw_id_init(nxgep);
	nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}
	status = nxge_add_soft_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed"));
		goto nxge_attach_fail;
	}

	/*
	 * Enable interrupts.
	 */
	nxge_intrs_enable(nxgep);

	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
	    instance));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	/* use the local instance; nxgep may be NULL at this point */
	ddi_soft_state_free(nxge_list, instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		status = DDI_SUCCESS;
	int		instance;
	p_nxge_t	nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}
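/*
 * nxge_unattach() below tears down everything nxge_attach() set up, in
 * order: common-device state, the hardware-check timer, the sun4v
 * hypervisor service, interrupts, the device itself, ndd parameters,
 * kstats, mutexes, device properties, and the register mappings,
 * before freeing the soft state.
 */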
static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/* remove soft interrupts */
	nxge_remove_soft_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	nxge_destroy_dev(nxgep);

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;
	char		buf[MAXPATHLEN + 1];
	char		*devname;
#ifdef	NXGE_DEBUG
	char		*sysname;
#endif
	off_t		regsize;
	nxge_status_t	status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t		pci_offset;
	uint16_t	pcie_devctl;
#endif

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int		*prop_val;
		uint_t		prop_len;
		uint8_t		func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;
		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * Workaround for a HW bit-swapping bug which leaves
		 * no-snoop enabled and results in DMA not being
		 * synchronized properly: clear the no-snoop bit and
		 * enable relaxed ordering.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		/* free the vio handle (was erroneously nxge_regh) */
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
"==> nxge_unmap_regs: device registers")); 1096 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1097 nxgep->dev_regs->nxge_regh = NULL; 1098 } 1099 if (nxgep->dev_regs->nxge_msix_regh) { 1100 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1101 "==> nxge_unmap_regs: device interrupts")); 1102 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); 1103 nxgep->dev_regs->nxge_msix_regh = NULL; 1104 } 1105 if (nxgep->dev_regs->nxge_vir_regh) { 1106 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1107 "==> nxge_unmap_regs: vio region")); 1108 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); 1109 nxgep->dev_regs->nxge_vir_regh = NULL; 1110 } 1111 if (nxgep->dev_regs->nxge_vir2_regh) { 1112 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1113 "==> nxge_unmap_regs: vio2 region")); 1114 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); 1115 nxgep->dev_regs->nxge_vir2_regh = NULL; 1116 } 1117 1118 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); 1119 nxgep->dev_regs = NULL; 1120 } 1121 1122 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); 1123 } 1124 1125 static nxge_status_t 1126 nxge_setup_mutexes(p_nxge_t nxgep) 1127 { 1128 int ddi_status = DDI_SUCCESS; 1129 nxge_status_t status = NXGE_OK; 1130 nxge_classify_t *classify_ptr; 1131 int partition; 1132 1133 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); 1134 1135 /* 1136 * Get the interrupt cookie so the mutexes can be 1137 * Initialized. 1138 */ 1139 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, 1140 &nxgep->interrupt_cookie); 1141 if (ddi_status != DDI_SUCCESS) { 1142 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1143 "<== nxge_setup_mutexes: failed 0x%x", ddi_status)); 1144 goto nxge_setup_mutexes_exit; 1145 } 1146 1147 /* Initialize global mutex */ 1148 1149 if (nxge_mdio_lock_init == 0) { 1150 MUTEX_INIT(&nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 1151 } 1152 atomic_add_32(&nxge_mdio_lock_init, 1); 1153 1154 if (nxge_mii_lock_init == 0) { 1155 MUTEX_INIT(&nxge_mii_lock, NULL, MUTEX_DRIVER, NULL); 1156 } 1157 atomic_add_32(&nxge_mii_lock_init, 1); 1158 1159 nxgep->drv_state |= STATE_MDIO_LOCK_INIT; 1160 nxgep->drv_state |= STATE_MII_LOCK_INIT; 1161 1162 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL); 1163 MUTEX_INIT(&nxgep->poll_lock, NULL, 1164 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1165 1166 /* 1167 * Initialize mutexes for this device. 1168 */ 1169 MUTEX_INIT(nxgep->genlock, NULL, 1170 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1171 MUTEX_INIT(&nxgep->ouraddr_lock, NULL, 1172 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1173 MUTEX_INIT(&nxgep->mif_lock, NULL, 1174 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1175 RW_INIT(&nxgep->filter_lock, NULL, 1176 RW_DRIVER, (void *)nxgep->interrupt_cookie); 1177 1178 classify_ptr = &nxgep->classifier; 1179 /* 1180 * FFLP Mutexes are never used in interrupt context 1181 * as fflp operation can take very long time to 1182 * complete and hence not suitable to invoke from interrupt 1183 * handlers. 
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int		partition;
	nxge_classify_t	*classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}
	if (nxgep->drv_state & STATE_MDIO_LOCK_INIT) {
		if (nxge_mdio_lock_init == 1) {
			MUTEX_DESTROY(&nxge_mdio_lock);
		}
		atomic_add_32(&nxge_mdio_lock_init, -1);
	}
	if (nxgep->drv_state & STATE_MII_LOCK_INIT) {
		if (nxge_mii_lock_init == 1) {
			MUTEX_DESTROY(&nxge_mii_lock);
		}
		atomic_add_32(&nxge_mii_lock_init, -1);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}
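/*
 * nxge_init() below brings the hardware up in a fixed order:
 * buffer/descriptor memory pools, TXC, TX DMA channels, RX DMA
 * channels, classification (TCAM/FCRAM), ZCP, IPP, the MAC block, and
 * finally interrupts.  The failure labels unwind those steps in
 * reverse.
 */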
nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	/*
	 * Initialize and enable TXC registers
	 * (Globally enable TX controller,
	 *  enable a port, configure dma channel bitmap,
	 *  configure the max burst size).
	 */
	status = nxge_txc_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
		goto nxge_init_fail2;
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep);

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	(void) nxge_txc_uninit(nxgep);
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}


timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) ||
	    (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}
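/*
 * Usage sketch for the timer helpers above (illustrative; the callback
 * name is hypothetical):
 *
 *	nxgep->nxge_timerid = nxge_start_timer(nxgep, some_check_func, 1000);
 *	...
 *	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
 *	nxgep->nxge_timerid = 0;
 *
 * nxge_start_timer() refuses to arm a timeout while the instance is
 * suspended.
 */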
void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	regdata;
	int		i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
	reg = buf[0];

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}


nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char		msg_buffer[1048];
	char		prefix_buffer[32];
	int		instance;
	uint64_t	debug_level;
	int		cmn_level = CE_CONT;
	va_list		ap;

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);

	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t		*ap = (uchar_t *)addr;
	int		i;
	static char	etherbuf[1024];
	char		*cp = etherbuf;
	char		digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
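/*
 * Usage sketch for nxge_dump_packet() above (illustrative):
 *
 *	NXGE_DEBUG_MSG((nxgep, RX_CTL, "pkt: %s",
 *	    nxge_dump_packet((char *)mp->b_rptr, 60)));
 *
 * The result is formatted into a single static buffer, so the function
 * is not reentrant.
 */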
#ifdef	NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t	cfg_handle;
	p_pci_cfg_t		cfg_ptr;
	ddi_acc_handle_t	dev_handle;
	char			*dev_ptr;
	ddi_acc_handle_t	pci_config_handle;
	uint32_t		regval;
	int			i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first 0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(cfg_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}
}

#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

	nxge_intrs_disable(nxgep);
	nxge_destroy_dev(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

	nxgep->suspended = DDI_RESUME;
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);
	(void) nxge_tx_mac_enable(nxgep);
	nxge_intrs_enable(nxgep);
	nxgep->suspended = 0;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_resume status = 0x%x", status));
	return (status);
}

static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
	    nxgep->mac.portnum));

	status = nxge_link_init(nxgep);

	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "port%d Bad register acc handle", nxgep->mac.portnum));
		status = NXGE_ERROR;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_setup_dev status "
		    "(xcvr init 0x%08x)", status));
		goto nxge_setup_dev_exit;
	}

nxge_setup_dev_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_dev port %d status = 0x%08x",
	    nxgep->mac.portnum, status));

	return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	(void) nxge_hw_stop(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}

static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
	int			ddi_status = DDI_SUCCESS;
	uint_t			count;
	ddi_dma_cookie_t	cookie;
	uint_t			iommu_pagesize;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
	if (nxgep->niu_type != N2_NIU) {
		iommu_pagesize = dvma_pagesize(nxgep->dip);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
		    " default_block_size %d iommu_pagesize %d",
		    nxgep->sys_page_sz,
		    ddi_ptob(nxgep->dip, (ulong_t)1),
		    nxgep->rx_default_block_size,
		    iommu_pagesize));

		if (iommu_pagesize != 0) {
			if (nxgep->sys_page_sz == iommu_pagesize) {
				if (iommu_pagesize > 0x4000)
					nxgep->sys_page_sz = 0x4000;
			} else {
				if (nxgep->sys_page_sz > iommu_pagesize)
					nxgep->sys_page_sz = iommu_pagesize;
			}
		}
	}
	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    nxgep->sys_page_sz,
	    ddi_ptob(nxgep->dip, (ulong_t)1),
	    nxgep->rx_default_block_size,
	    nxgep->sys_page_mask));

	switch (nxgep->sys_page_sz) {
	default:
		nxgep->sys_page_sz = 0x1000;
		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		nxgep->rx_default_block_size = 0x2000;
		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	case 0x4000:
		nxgep->rx_default_block_size = 0x4000;
		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
		break;
	case 0x8000:
		nxgep->rx_default_block_size = 0x8000;
		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
		break;
	}

#ifndef USE_RX_BIG_BUF
	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
	nxgep->rx_default_block_size = 0x2000;
	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
	/*
	 * Get the system DMA burst size.
	 */
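	/*
	 * Note: the code below allocates and binds a throwaway ("spare")
	 * DMA handle purely so that ddi_dma_burstsizes() can report the
	 * burst sizes the system supports; the handle is unbound and
	 * freed again immediately afterwards.
	 */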
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &nxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed "
		    " status 0x%x", ddi_status));
		goto nxge_get_soft_properties_exit;
	}

	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
	    (caddr_t)nxgep->dmasparehandle,
	    sizeof (nxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "Binding spare handle to find system"
		    " burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto nxge_get_soft_properties_fail1;
	}

	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

nxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_system_dma_pages status = 0x%08x", status));
	return (status);
}

static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));

	status = nxge_alloc_rx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		return (NXGE_ERROR);
	}

	status = nxge_alloc_tx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		nxge_free_rx_mem_pool(nxgep);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
	return (NXGE_OK);
}

static void
nxge_free_mem_pool(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));

	nxge_free_rx_mem_pool(nxgep);
	nxge_free_tx_mem_pool(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}

static nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
{
	int			i, j;
	uint32_t		ndmas, st_rdc;
	p_nxge_dma_pt_cfg_t	p_all_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	p_nxge_dma_pool_t	dma_poolp;
	p_nxge_dma_common_t	*dma_buf_p;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_nxge_dma_common_t	*dma_cntl_p;
	size_t			rx_buf_alloc_size;
	size_t			rx_cntl_alloc_size;
	uint32_t		*num_chunks;	/* per dma */
	nxge_status_t		status = NXGE_OK;

	uint32_t		nxge_port_rbr_size;
	uint32_t		nxge_port_rbr_spare_size;
	uint32_t		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	st_rdc = p_cfgp->start_rdc;
	ndmas = p_cfgp->max_rdcs;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    " nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
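	/*
	 * Note: two pools are built below.  dma_buf_p[] holds the
	 * per-channel packet data buffers, dma_cntl_p[] holds the
	 * per-channel control memory (descriptor rings and mailbox),
	 * and num_chunks[] records how many chunks each channel's
	 * buffer pool was carved into.
	 */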
	/*
	 * Allocate memory for each receive DMA channel.
	 */
	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_cntl_poolp = (p_nxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(
	    sizeof (uint32_t) * ndmas, KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with the
	 * default block size.
	 * The rbr block counts are rounded up to a multiple of the
	 * batch count (16).
	 */
	nxge_port_rbr_size = p_all_cfgp->rbr_size;
	nxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!nxge_port_rbr_size) {
		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
	}
	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	p_all_cfgp->rbr_size = nxge_port_rbr_size;
	nxge_port_rbr_spare_size = nxge_rbr_spare_size;

	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	/*
	 * N2/NIU has limitations on the descriptor sizes: contiguous
	 * memory allocation on data buffers is limited to 4M
	 * (contig_mem_alloc), and control buffers must be little-endian
	 * (allocated with the ddi/dki mem alloc functions).
	 */
#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		nxge_port_rbr_spare_size = 0;
		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
		    (!ISP2(nxge_port_rbr_size))) {
			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
		}
		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
		    (!ISP2(nxge_port_rcr_size))) {
			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
		}
	}
#endif

	rx_buf_alloc_size = (nxgep->rx_default_block_size *
	    (nxge_port_rbr_size + nxge_port_rbr_spare_size));
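	/*
	 * Worked example (illustrative values): with a 4 KB default
	 * block size and an RBR of 1024 entries plus 0 spares, each
	 * channel's buffer pool is 0x1000 * 1024 = 4 MB.
	 */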
1964 */ 1965 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 1966 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 1967 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 1968 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 1969 1970 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 1971 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 1972 "nxge_port_rcr_size = %d " 1973 "rx_cntl_alloc_size = %d", 1974 nxge_port_rbr_size, nxge_port_rbr_spare_size, 1975 nxge_port_rcr_size, 1976 rx_cntl_alloc_size)); 1977 1978 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1979 if (nxgep->niu_type == N2_NIU) { 1980 if (!ISP2(rx_buf_alloc_size)) { 1981 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1982 "==> nxge_alloc_rx_mem_pool: " 1983 " must be power of 2")); 1984 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1985 goto nxge_alloc_rx_mem_pool_exit; 1986 } 1987 1988 if (rx_buf_alloc_size > (1 << 22)) { 1989 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1990 "==> nxge_alloc_rx_mem_pool: " 1991 " limit size to 4M")); 1992 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1993 goto nxge_alloc_rx_mem_pool_exit; 1994 } 1995 1996 if (rx_cntl_alloc_size < 0x2000) { 1997 rx_cntl_alloc_size = 0x2000; 1998 } 1999 } 2000 #endif 2001 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2002 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2003 2004 /* 2005 * Allocate memory for receive buffers and descriptor rings. 2006 * Replace allocation functions with interface functions provided 2007 * by the partition manager when it is available. 2008 */ 2009 /* 2010 * Allocate memory for the receive buffer blocks. 2011 */ 2012 for (i = 0; i < ndmas; i++) { 2013 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2014 " nxge_alloc_rx_mem_pool to alloc mem: " 2015 " dma %d dma_buf_p %llx &dma_buf_p %llx", 2016 i, dma_buf_p[i], &dma_buf_p[i])); 2017 num_chunks[i] = 0; 2018 status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i], 2019 rx_buf_alloc_size, 2020 nxgep->rx_default_block_size, &num_chunks[i]); 2021 if (status != NXGE_OK) { 2022 break; 2023 } 2024 st_rdc++; 2025 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2026 " nxge_alloc_rx_mem_pool DONE alloc mem: " 2027 "dma %d dma_buf_p %llx &dma_buf_p %llx", i, 2028 dma_buf_p[i], &dma_buf_p[i])); 2029 } 2030 if (i < ndmas) { 2031 goto nxge_alloc_rx_mem_fail1; 2032 } 2033 /* 2034 * Allocate memory for descriptor rings and mailbox. 
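	 * On a per-channel failure, the allocation loops below unwind in
	 * reverse: control areas for channels [0 .. j-1] are freed first,
	 * then data buffers for channels [0 .. i-1] (see the fail2/fail1
	 * labels further down).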
2035 */ 2036 st_rdc = p_cfgp->start_rdc; 2037 for (j = 0; j < ndmas; j++) { 2038 status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j], 2039 rx_cntl_alloc_size); 2040 if (status != NXGE_OK) { 2041 break; 2042 } 2043 st_rdc++; 2044 } 2045 if (j < ndmas) { 2046 goto nxge_alloc_rx_mem_fail2; 2047 } 2048 2049 dma_poolp->ndmas = ndmas; 2050 dma_poolp->num_chunks = num_chunks; 2051 dma_poolp->buf_allocated = B_TRUE; 2052 nxgep->rx_buf_pool_p = dma_poolp; 2053 dma_poolp->dma_buf_pool_p = dma_buf_p; 2054 2055 dma_cntl_poolp->ndmas = ndmas; 2056 dma_cntl_poolp->buf_allocated = B_TRUE; 2057 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2058 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2059 2060 goto nxge_alloc_rx_mem_pool_exit; 2061 2062 nxge_alloc_rx_mem_fail2: 2063 /* Free control buffers */ 2064 j--; 2065 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2066 "==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j)); 2067 for (; j >= 0; j--) { 2068 nxge_free_rx_cntl_dma(nxgep, 2069 (p_nxge_dma_common_t)dma_cntl_p[j]); 2070 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2071 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", 2072 j)); 2073 } 2074 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2075 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j)); 2076 2077 nxge_alloc_rx_mem_fail1: 2078 /* Free data buffers */ 2079 i--; 2080 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2081 "==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i)); 2082 for (; i >= 0; i--) { 2083 nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2084 num_chunks[i]); 2085 } 2086 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2087 "==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i)); 2088 2089 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2090 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2091 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2092 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2093 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2094 2095 nxge_alloc_rx_mem_pool_exit: 2096 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2097 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2098 2099 return (status); 2100 } 2101 2102 static void 2103 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2104 { 2105 uint32_t i, ndmas; 2106 p_nxge_dma_pool_t dma_poolp; 2107 p_nxge_dma_common_t *dma_buf_p; 2108 p_nxge_dma_pool_t dma_cntl_poolp; 2109 p_nxge_dma_common_t *dma_cntl_p; 2110 uint32_t *num_chunks; 2111 2112 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2113 2114 dma_poolp = nxgep->rx_buf_pool_p; 2115 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2116 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2117 "<== nxge_free_rx_mem_pool " 2118 "(null rx buf pool or buf not allocated")); 2119 return; 2120 } 2121 2122 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2123 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2124 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2125 "<== nxge_free_rx_mem_pool " 2126 "(null rx cntl buf pool or cntl buf not allocated")); 2127 return; 2128 } 2129 2130 dma_buf_p = dma_poolp->dma_buf_pool_p; 2131 num_chunks = dma_poolp->num_chunks; 2132 2133 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2134 ndmas = dma_cntl_poolp->ndmas; 2135 2136 for (i = 0; i < ndmas; i++) { 2137 nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2138 } 2139 2140 for (i = 0; i < ndmas; i++) { 2141 nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]); 2142 } 2143 2144 for (i = 0; i < ndmas; i++) { 2145 KMEM_FREE(dma_buf_p[i], 2146 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2147 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2148 } 2149 2150 
KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2151 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2152 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2153 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2154 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2155 2156 nxgep->rx_buf_pool_p = NULL; 2157 nxgep->rx_cntl_pool_p = NULL; 2158 2159 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2160 } 2161 2162 2163 static nxge_status_t 2164 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2165 p_nxge_dma_common_t *dmap, 2166 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2167 { 2168 p_nxge_dma_common_t rx_dmap; 2169 nxge_status_t status = NXGE_OK; 2170 size_t total_alloc_size; 2171 size_t allocated = 0; 2172 int i, size_index, array_size; 2173 2174 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2175 2176 rx_dmap = (p_nxge_dma_common_t) 2177 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2178 KM_SLEEP); 2179 2180 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2181 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2182 dma_channel, alloc_size, block_size, dmap)); 2183 2184 total_alloc_size = alloc_size; 2185 2186 #if defined(RX_USE_RECLAIM_POST) 2187 total_alloc_size = alloc_size + alloc_size/4; 2188 #endif 2189 2190 i = 0; 2191 size_index = 0; 2192 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2193 while ((alloc_sizes[size_index] < alloc_size) && 2194 (size_index < array_size)) 2195 size_index++; 2196 if (size_index >= array_size) { 2197 size_index = array_size - 1; 2198 } 2199 2200 while ((allocated < total_alloc_size) && 2201 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2202 rx_dmap[i].dma_chunk_index = i; 2203 rx_dmap[i].block_size = block_size; 2204 rx_dmap[i].alength = alloc_sizes[size_index]; 2205 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2206 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2207 rx_dmap[i].dma_channel = dma_channel; 2208 rx_dmap[i].contig_alloc_type = B_FALSE; 2209 2210 /* 2211 * N2/NIU: data buffers must be contiguous as the driver 2212 * needs to call Hypervisor api to set up 2213 * logical pages. 
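		 * When contig_alloc_type is B_TRUE, nxge_dma_mem_alloc()
		 * (later in this file) obtains the backing memory with
		 * contig_mem_alloc() instead of ddi_dma_mem_alloc(), and
		 * the resulting mapping must resolve to a single DMA cookie.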
2214 */ 2215 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2216 rx_dmap[i].contig_alloc_type = B_TRUE; 2217 } 2218 2219 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2220 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2221 "i %d nblocks %d alength %d", 2222 dma_channel, i, &rx_dmap[i], block_size, 2223 i, rx_dmap[i].nblocks, 2224 rx_dmap[i].alength)); 2225 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2226 &nxge_rx_dma_attr, 2227 rx_dmap[i].alength, 2228 &nxge_dev_buf_dma_acc_attr, 2229 DDI_DMA_READ | DDI_DMA_STREAMING, 2230 (p_nxge_dma_common_t)(&rx_dmap[i])); 2231 if (status != NXGE_OK) { 2232 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2233 " nxge_alloc_rx_buf_dma: Alloc Failed ")); 2234 size_index--; 2235 } else { 2236 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2237 " alloc_rx_buf_dma allocated rdc %d " 2238 "chunk %d size %x dvma %x bufp %llx ", 2239 dma_channel, i, rx_dmap[i].alength, 2240 rx_dmap[i].ioaddr_pp, &rx_dmap[i])); 2241 i++; 2242 allocated += alloc_sizes[size_index]; 2243 } 2244 } 2245 2246 2247 if (allocated < total_alloc_size) { 2248 goto nxge_alloc_rx_mem_fail1; 2249 } 2250 2251 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2252 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2253 dma_channel, i)); 2254 *num_chunks = i; 2255 *dmap = rx_dmap; 2256 2257 goto nxge_alloc_rx_mem_exit; 2258 2259 nxge_alloc_rx_mem_fail1: 2260 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2261 2262 nxge_alloc_rx_mem_exit: 2263 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2264 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2265 2266 return (status); 2267 } 2268 2269 /*ARGSUSED*/ 2270 static void 2271 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2272 uint32_t num_chunks) 2273 { 2274 int i; 2275 2276 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2277 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2278 2279 for (i = 0; i < num_chunks; i++) { 2280 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2281 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2282 i, dmap)); 2283 nxge_dma_mem_free(dmap++); 2284 } 2285 2286 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2287 } 2288 2289 /*ARGSUSED*/ 2290 static nxge_status_t 2291 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2292 p_nxge_dma_common_t *dmap, size_t size) 2293 { 2294 p_nxge_dma_common_t rx_dmap; 2295 nxge_status_t status = NXGE_OK; 2296 2297 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2298 2299 rx_dmap = (p_nxge_dma_common_t) 2300 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2301 2302 rx_dmap->contig_alloc_type = B_FALSE; 2303 2304 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2305 &nxge_desc_dma_attr, 2306 size, 2307 &nxge_dev_desc_dma_acc_attr, 2308 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2309 rx_dmap); 2310 if (status != NXGE_OK) { 2311 goto nxge_alloc_rx_cntl_dma_fail1; 2312 } 2313 2314 *dmap = rx_dmap; 2315 goto nxge_alloc_rx_cntl_dma_exit; 2316 2317 nxge_alloc_rx_cntl_dma_fail1: 2318 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2319 2320 nxge_alloc_rx_cntl_dma_exit: 2321 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2322 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2323 2324 return (status); 2325 } 2326 2327 /*ARGSUSED*/ 2328 static void 2329 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2330 { 2331 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2332 2333 nxge_dma_mem_free(dmap); 2334 2335 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2336 } 2337 2338 static nxge_status_t 2339 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2340 { 2341 nxge_status_t 
status = NXGE_OK;
2342 	int i, j;
2343 	uint32_t ndmas, st_tdc;
2344 	p_nxge_dma_pt_cfg_t p_all_cfgp;
2345 	p_nxge_hw_pt_cfg_t p_cfgp;
2346 	p_nxge_dma_pool_t dma_poolp;
2347 	p_nxge_dma_common_t *dma_buf_p;
2348 	p_nxge_dma_pool_t dma_cntl_poolp;
2349 	p_nxge_dma_common_t *dma_cntl_p;
2350 	size_t tx_buf_alloc_size;
2351 	size_t tx_cntl_alloc_size;
2352 	uint32_t *num_chunks; /* per dma */
2353 	uint32_t bcopy_thresh;
2354 
2355 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
2356 
2357 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2358 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2359 	st_tdc = p_cfgp->start_tdc;
2360 	ndmas = p_cfgp->max_tdcs;
2361 
2362 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: "
2363 	    "p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d",
2364 	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs));
2365 	/*
2366 	 * Allocate memory for each transmit DMA channel.
2367 	 */
2368 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2369 	    KM_SLEEP);
2370 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2371 	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
2372 
2373 	dma_cntl_poolp = (p_nxge_dma_pool_t)
2374 	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2375 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2376 	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
2377 
2378 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2379 	/*
2380 	 * N2/NIU limits these allocations: data buffers must come from
2381 	 * contiguous memory (contig_mem_alloc) and are capped at 4M, while
2382 	 * control buffers require little-endian access and must use the
2383 	 * ddi/dki mem alloc functions. The transmit ring is limited to 8K
2384 	 * (includes the mailbox).
2385 	 */
2386 	if (nxgep->niu_type == N2_NIU) {
2387 		if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
2388 		    (!ISP2(nxge_tx_ring_size))) {
2389 			nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
2390 		}
2391 	}
2392 #endif
2393 
2394 	nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
2395 
2396 	/*
2397 	 * Assume that each DMA channel will be configured with the default
2398 	 * transmit buffer size for copying transmit data.
2399 	 * (For packet payload over this limit, packets will not be
2400 	 * copied.)
2401 	 */
2402 	if (nxgep->niu_type == N2_NIU) {
2403 		bcopy_thresh = TX_BCOPY_SIZE;
2404 	} else {
2405 		bcopy_thresh = nxge_bcopy_thresh;
2406 	}
2407 	tx_buf_alloc_size = (bcopy_thresh * nxge_tx_ring_size);
2408 
2409 	/*
2410 	 * Addresses of transmit descriptor ring and the
2411 	 * mailbox must be all cache-aligned (64 bytes).
2412 	 */
2413 	tx_cntl_alloc_size = nxge_tx_ring_size;
2414 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
2415 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
2416 
2417 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2418 	if (nxgep->niu_type == N2_NIU) {
2419 		if (!ISP2(tx_buf_alloc_size)) {
2420 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2421 			    "==> nxge_alloc_tx_mem_pool: "
2422 			    " must be power of 2"));
2423 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2424 			goto nxge_alloc_tx_mem_pool_exit;
2425 		}
2426 
2427 		if (tx_buf_alloc_size > (1 << 22)) {
2428 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2429 			    "==> nxge_alloc_tx_mem_pool: "
2430 			    " limit size to 4M"));
2431 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2432 			goto nxge_alloc_tx_mem_pool_exit;
2433 		}
2434 
2435 		if (tx_cntl_alloc_size < 0x2000) {
2436 			tx_cntl_alloc_size = 0x2000;
2437 		}
2438 	}
2439 #endif
2440 
2441 	num_chunks = (uint32_t *)KMEM_ZALLOC(
2442 	    sizeof (uint32_t) * ndmas, KM_SLEEP);
2443 
2444 	/*
2445 	 * Allocate memory for transmit buffers and descriptor rings.
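	 * (Sizing example, illustrative only: a hypothetical bcopy_thresh
	 * of 2 KB with a 1024-entry transmit ring would make the
	 * tx_buf_alloc_size computed above come to 2 MB per channel.)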
2446 * Replace allocation functions with interface functions provided 2447 * by the partition manager when it is available. 2448 * 2449 * Allocate memory for the transmit buffer pool. 2450 */ 2451 for (i = 0; i < ndmas; i++) { 2452 num_chunks[i] = 0; 2453 status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i], 2454 tx_buf_alloc_size, 2455 bcopy_thresh, &num_chunks[i]); 2456 if (status != NXGE_OK) { 2457 break; 2458 } 2459 st_tdc++; 2460 } 2461 if (i < ndmas) { 2462 goto nxge_alloc_tx_mem_pool_fail1; 2463 } 2464 2465 st_tdc = p_cfgp->start_tdc; 2466 /* 2467 * Allocate memory for descriptor rings and mailbox. 2468 */ 2469 for (j = 0; j < ndmas; j++) { 2470 status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j], 2471 tx_cntl_alloc_size); 2472 if (status != NXGE_OK) { 2473 break; 2474 } 2475 st_tdc++; 2476 } 2477 if (j < ndmas) { 2478 goto nxge_alloc_tx_mem_pool_fail2; 2479 } 2480 2481 dma_poolp->ndmas = ndmas; 2482 dma_poolp->num_chunks = num_chunks; 2483 dma_poolp->buf_allocated = B_TRUE; 2484 dma_poolp->dma_buf_pool_p = dma_buf_p; 2485 nxgep->tx_buf_pool_p = dma_poolp; 2486 2487 dma_cntl_poolp->ndmas = ndmas; 2488 dma_cntl_poolp->buf_allocated = B_TRUE; 2489 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2490 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2491 2492 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2493 "==> nxge_alloc_tx_mem_pool: start_tdc %d " 2494 "ndmas %d poolp->ndmas %d", 2495 st_tdc, ndmas, dma_poolp->ndmas)); 2496 2497 goto nxge_alloc_tx_mem_pool_exit; 2498 2499 nxge_alloc_tx_mem_pool_fail2: 2500 /* Free control buffers */ 2501 j--; 2502 for (; j >= 0; j--) { 2503 nxge_free_tx_cntl_dma(nxgep, 2504 (p_nxge_dma_common_t)dma_cntl_p[j]); 2505 } 2506 2507 nxge_alloc_tx_mem_pool_fail1: 2508 /* Free data buffers */ 2509 i--; 2510 for (; i >= 0; i--) { 2511 nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2512 num_chunks[i]); 2513 } 2514 2515 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2516 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2517 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2518 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2519 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2520 2521 nxge_alloc_tx_mem_pool_exit: 2522 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2523 "<== nxge_alloc_tx_mem_pool:status 0x%08x", status)); 2524 2525 return (status); 2526 } 2527 2528 static nxge_status_t 2529 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2530 p_nxge_dma_common_t *dmap, size_t alloc_size, 2531 size_t block_size, uint32_t *num_chunks) 2532 { 2533 p_nxge_dma_common_t tx_dmap; 2534 nxge_status_t status = NXGE_OK; 2535 size_t total_alloc_size; 2536 size_t allocated = 0; 2537 int i, size_index, array_size; 2538 2539 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 2540 2541 tx_dmap = (p_nxge_dma_common_t) 2542 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2543 KM_SLEEP); 2544 2545 total_alloc_size = alloc_size; 2546 i = 0; 2547 size_index = 0; 2548 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2549 while ((alloc_sizes[size_index] < alloc_size) && 2550 (size_index < array_size)) 2551 size_index++; 2552 if (size_index >= array_size) { 2553 size_index = array_size - 1; 2554 } 2555 2556 while ((allocated < total_alloc_size) && 2557 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2558 2559 tx_dmap[i].dma_chunk_index = i; 2560 tx_dmap[i].block_size = block_size; 2561 tx_dmap[i].alength = alloc_sizes[size_index]; 2562 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2563 tx_dmap[i].nblocks = 
alloc_sizes[size_index] / block_size; 2564 tx_dmap[i].dma_channel = dma_channel; 2565 tx_dmap[i].contig_alloc_type = B_FALSE; 2566 2567 /* 2568 * N2/NIU: data buffers must be contiguous as the driver 2569 * needs to call Hypervisor api to set up 2570 * logical pages. 2571 */ 2572 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2573 tx_dmap[i].contig_alloc_type = B_TRUE; 2574 } 2575 2576 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2577 &nxge_tx_dma_attr, 2578 tx_dmap[i].alength, 2579 &nxge_dev_buf_dma_acc_attr, 2580 DDI_DMA_WRITE | DDI_DMA_STREAMING, 2581 (p_nxge_dma_common_t)(&tx_dmap[i])); 2582 if (status != NXGE_OK) { 2583 size_index--; 2584 } else { 2585 i++; 2586 allocated += alloc_sizes[size_index]; 2587 } 2588 } 2589 2590 if (allocated < total_alloc_size) { 2591 goto nxge_alloc_tx_mem_fail1; 2592 } 2593 2594 *num_chunks = i; 2595 *dmap = tx_dmap; 2596 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2597 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2598 *dmap, i)); 2599 goto nxge_alloc_tx_mem_exit; 2600 2601 nxge_alloc_tx_mem_fail1: 2602 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2603 2604 nxge_alloc_tx_mem_exit: 2605 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2606 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 2607 2608 return (status); 2609 } 2610 2611 /*ARGSUSED*/ 2612 static void 2613 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2614 uint32_t num_chunks) 2615 { 2616 int i; 2617 2618 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 2619 2620 for (i = 0; i < num_chunks; i++) { 2621 nxge_dma_mem_free(dmap++); 2622 } 2623 2624 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 2625 } 2626 2627 /*ARGSUSED*/ 2628 static nxge_status_t 2629 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2630 p_nxge_dma_common_t *dmap, size_t size) 2631 { 2632 p_nxge_dma_common_t tx_dmap; 2633 nxge_status_t status = NXGE_OK; 2634 2635 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 2636 tx_dmap = (p_nxge_dma_common_t) 2637 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2638 2639 tx_dmap->contig_alloc_type = B_FALSE; 2640 2641 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2642 &nxge_desc_dma_attr, 2643 size, 2644 &nxge_dev_desc_dma_acc_attr, 2645 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2646 tx_dmap); 2647 if (status != NXGE_OK) { 2648 goto nxge_alloc_tx_cntl_dma_fail1; 2649 } 2650 2651 *dmap = tx_dmap; 2652 goto nxge_alloc_tx_cntl_dma_exit; 2653 2654 nxge_alloc_tx_cntl_dma_fail1: 2655 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 2656 2657 nxge_alloc_tx_cntl_dma_exit: 2658 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2659 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 2660 2661 return (status); 2662 } 2663 2664 /*ARGSUSED*/ 2665 static void 2666 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2667 { 2668 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 2669 2670 nxge_dma_mem_free(dmap); 2671 2672 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 2673 } 2674 2675 static void 2676 nxge_free_tx_mem_pool(p_nxge_t nxgep) 2677 { 2678 uint32_t i, ndmas; 2679 p_nxge_dma_pool_t dma_poolp; 2680 p_nxge_dma_common_t *dma_buf_p; 2681 p_nxge_dma_pool_t dma_cntl_poolp; 2682 p_nxge_dma_common_t *dma_cntl_p; 2683 uint32_t *num_chunks; 2684 2685 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool")); 2686 2687 dma_poolp = nxgep->tx_buf_pool_p; 2688 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2689 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2690 "<== nxge_free_tx_mem_pool 
" 2691 "(null rx buf pool or buf not allocated")); 2692 return; 2693 } 2694 2695 dma_cntl_poolp = nxgep->tx_cntl_pool_p; 2696 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2697 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2698 "<== nxge_free_tx_mem_pool " 2699 "(null tx cntl buf pool or cntl buf not allocated")); 2700 return; 2701 } 2702 2703 dma_buf_p = dma_poolp->dma_buf_pool_p; 2704 num_chunks = dma_poolp->num_chunks; 2705 2706 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2707 ndmas = dma_cntl_poolp->ndmas; 2708 2709 for (i = 0; i < ndmas; i++) { 2710 nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2711 } 2712 2713 for (i = 0; i < ndmas; i++) { 2714 nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]); 2715 } 2716 2717 for (i = 0; i < ndmas; i++) { 2718 KMEM_FREE(dma_buf_p[i], 2719 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2720 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2721 } 2722 2723 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2724 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2725 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2726 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2727 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2728 2729 nxgep->tx_buf_pool_p = NULL; 2730 nxgep->tx_cntl_pool_p = NULL; 2731 2732 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool")); 2733 } 2734 2735 /*ARGSUSED*/ 2736 static nxge_status_t 2737 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 2738 struct ddi_dma_attr *dma_attrp, 2739 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 2740 p_nxge_dma_common_t dma_p) 2741 { 2742 caddr_t kaddrp; 2743 int ddi_status = DDI_SUCCESS; 2744 boolean_t contig_alloc_type; 2745 2746 contig_alloc_type = dma_p->contig_alloc_type; 2747 2748 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 2749 /* 2750 * contig_alloc_type for contiguous memory only allowed 2751 * for N2/NIU. 
2752 		 */
2753 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2754 		    "nxge_dma_mem_alloc: alloc type not allowed (%d)",
2755 		    dma_p->contig_alloc_type));
2756 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2757 	}
2758 
2759 	dma_p->dma_handle = NULL;
2760 	dma_p->acc_handle = NULL;
2761 	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
2762 	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
2763 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
2764 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2765 	if (ddi_status != DDI_SUCCESS) {
2766 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2767 		    "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2768 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2769 	}
2770 
2771 	switch (contig_alloc_type) {
2772 	case B_FALSE:
2773 		ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length,
2774 		    acc_attr_p,
2775 		    xfer_flags,
2776 		    DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2777 		    &dma_p->acc_handle);
2778 		if (ddi_status != DDI_SUCCESS) {
2779 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2780 			    "nxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2781 			ddi_dma_free_handle(&dma_p->dma_handle);
2782 			dma_p->dma_handle = NULL;
2783 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2784 		}
2785 		if (dma_p->alength < length) {
2786 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2787 			    "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
2788 			    "< length."));
2789 			ddi_dma_mem_free(&dma_p->acc_handle);
2790 			ddi_dma_free_handle(&dma_p->dma_handle);
2791 			dma_p->acc_handle = NULL;
2792 			dma_p->dma_handle = NULL;
2793 			return (NXGE_ERROR);
2794 		}
2795 
2796 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2797 		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2798 		    &dma_p->dma_cookie, &dma_p->ncookies);
2799 		if (ddi_status != DDI_DMA_MAPPED) {
2800 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2801 			    "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2802 			    "(status 0x%x ncookies %d.)", ddi_status,
2803 			    dma_p->ncookies));
2804 			if (dma_p->acc_handle) {
2805 				ddi_dma_mem_free(&dma_p->acc_handle);
2806 				dma_p->acc_handle = NULL;
2807 			}
2808 			ddi_dma_free_handle(&dma_p->dma_handle);
2809 			dma_p->dma_handle = NULL;
2810 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2811 		}
2812 
2813 		if (dma_p->ncookies != 1) {
2814 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2815 			    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
2816 			    "> 1 cookie "
2817 			    "(status 0x%x ncookies %d.)", ddi_status,
2818 			    dma_p->ncookies));
2819 			if (dma_p->acc_handle) {
2820 				ddi_dma_mem_free(&dma_p->acc_handle);
2821 				dma_p->acc_handle = NULL;
2822 			}
2823 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2824 			ddi_dma_free_handle(&dma_p->dma_handle);
2825 			dma_p->dma_handle = NULL;
2826 			return (NXGE_ERROR);
2827 		}
2828 		break;
2829 
2830 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2831 	case B_TRUE:
2832 		kaddrp = (caddr_t)contig_mem_alloc(length);
2833 		if (kaddrp == NULL) {
2834 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2835 			    "nxge_dma_mem_alloc:contig_mem_alloc failed."));
2836 			ddi_dma_free_handle(&dma_p->dma_handle);
2837 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2838 		}
2839 
2840 		dma_p->alength = length;
2841 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2842 		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2843 		    &dma_p->dma_cookie, &dma_p->ncookies);
2844 		if (ddi_status != DDI_DMA_MAPPED) {
2845 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2846 			    "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2847 			    "(status 0x%x ncookies %d.)", ddi_status,
2848 			    dma_p->ncookies));
2849 
2850 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2851 			    "==> nxge_dma_mem_alloc: (not mapped)"
2852 			    "length %lu (0x%x) "
2853 			    "free contig kaddrp $%p "
2854 			    "va_to_pa $%p",
2855 			    length, length,
2856 
kaddrp, 2857 va_to_pa(kaddrp))); 2858 2859 2860 contig_mem_free((void *)kaddrp, length); 2861 ddi_dma_free_handle(&dma_p->dma_handle); 2862 2863 dma_p->dma_handle = NULL; 2864 dma_p->acc_handle = NULL; 2865 dma_p->alength = NULL; 2866 dma_p->kaddrp = NULL; 2867 2868 return (NXGE_ERROR | NXGE_DDI_FAILED); 2869 } 2870 2871 if (dma_p->ncookies != 1 || 2872 (dma_p->dma_cookie.dmac_laddress == NULL)) { 2873 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2874 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 2875 "cookie or " 2876 "dmac_laddress is NULL $%p size %d " 2877 " (status 0x%x ncookies %d.)", 2878 ddi_status, 2879 dma_p->dma_cookie.dmac_laddress, 2880 dma_p->dma_cookie.dmac_size, 2881 dma_p->ncookies)); 2882 2883 contig_mem_free((void *)kaddrp, length); 2884 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2885 ddi_dma_free_handle(&dma_p->dma_handle); 2886 2887 dma_p->alength = 0; 2888 dma_p->dma_handle = NULL; 2889 dma_p->acc_handle = NULL; 2890 dma_p->kaddrp = NULL; 2891 2892 return (NXGE_ERROR | NXGE_DDI_FAILED); 2893 } 2894 break; 2895 2896 #else 2897 case B_TRUE: 2898 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2899 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 2900 return (NXGE_ERROR | NXGE_DDI_FAILED); 2901 #endif 2902 } 2903 2904 dma_p->kaddrp = kaddrp; 2905 dma_p->last_kaddrp = (unsigned char *)kaddrp + 2906 dma_p->alength - RXBUF_64B_ALIGNED; 2907 #if defined(__i386) 2908 dma_p->ioaddr_pp = 2909 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 2910 #else 2911 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2912 #endif 2913 dma_p->last_ioaddr_pp = 2914 #if defined(__i386) 2915 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress + 2916 #else 2917 (unsigned char *)dma_p->dma_cookie.dmac_laddress + 2918 #endif 2919 dma_p->alength - RXBUF_64B_ALIGNED; 2920 2921 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 2922 2923 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2924 dma_p->orig_ioaddr_pp = 2925 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2926 dma_p->orig_alength = length; 2927 dma_p->orig_kaddrp = kaddrp; 2928 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 2929 #endif 2930 2931 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 2932 "dma buffer allocated: dma_p $%p " 2933 "return dmac_ladress from cookie $%p cookie dmac_size %d " 2934 "dma_p->ioaddr_p $%p " 2935 "dma_p->orig_ioaddr_p $%p " 2936 "orig_vatopa $%p " 2937 "alength %d (0x%x) " 2938 "kaddrp $%p " 2939 "length %d (0x%x)", 2940 dma_p, 2941 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 2942 dma_p->ioaddr_pp, 2943 dma_p->orig_ioaddr_pp, 2944 dma_p->orig_vatopa, 2945 dma_p->alength, dma_p->alength, 2946 kaddrp, 2947 length, length)); 2948 2949 return (NXGE_OK); 2950 } 2951 2952 static void 2953 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 2954 { 2955 if (dma_p->dma_handle != NULL) { 2956 if (dma_p->ncookies) { 2957 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2958 dma_p->ncookies = 0; 2959 } 2960 ddi_dma_free_handle(&dma_p->dma_handle); 2961 dma_p->dma_handle = NULL; 2962 } 2963 2964 if (dma_p->acc_handle != NULL) { 2965 ddi_dma_mem_free(&dma_p->acc_handle); 2966 dma_p->acc_handle = NULL; 2967 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 2968 } 2969 2970 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2971 if (dma_p->contig_alloc_type && 2972 dma_p->orig_kaddrp && dma_p->orig_alength) { 2973 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 2974 "kaddrp $%p (orig_kaddrp $%p)" 2975 "mem type %d ", 2976 "orig_alength %d " 2977 "alength 0x%x (%d)", 2978 
dma_p->kaddrp,
2979 		    dma_p->orig_kaddrp,
2980 		    dma_p->contig_alloc_type,
2981 		    dma_p->orig_alength,
2982 		    dma_p->alength, dma_p->alength));
2983 
2984 		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
2985 		dma_p->orig_alength = 0;
2986 		dma_p->orig_kaddrp = NULL;
2987 		dma_p->contig_alloc_type = B_FALSE;
2988 	}
2989 #endif
2990 	dma_p->kaddrp = NULL;
2991 	dma_p->alength = 0;
2992 }
2993 
2994 /*
2995  * nxge_m_start() -- start transmitting and receiving.
2996  *
2997  * This function is called by the MAC layer when the first
2998  * stream is opened to prepare the hardware for sending
2999  * and receiving packets.
3000  */
3001 static int
3002 nxge_m_start(void *arg)
3003 {
3004 	p_nxge_t nxgep = (p_nxge_t)arg;
3005 
3006 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3007 
3008 	MUTEX_ENTER(nxgep->genlock);
3009 	if (nxge_init(nxgep) != NXGE_OK) {
3010 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3011 		    "<== nxge_m_start: initialization failed"));
3012 		MUTEX_EXIT(nxgep->genlock);
3013 		return (EIO);
3014 	}
3015 
3016 	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
3017 		goto nxge_m_start_exit;
3018 	/*
3019 	 * Start timer to check the system error and tx hangs
3020 	 */
3021 	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
3022 	    NXGE_CHECK_TIMER);
3023 
3024 	nxgep->link_notify = B_TRUE;
3025 
3026 	nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3027 
3028 nxge_m_start_exit:
3029 	MUTEX_EXIT(nxgep->genlock);
3030 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3031 	return (0);
3032 }
3033 
3034 /*
3035  * nxge_m_stop(): stop transmitting and receiving.
3036  */
3037 static void
3038 nxge_m_stop(void *arg)
3039 {
3040 	p_nxge_t nxgep = (p_nxge_t)arg;
3041 
3042 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
3043 
3044 	if (nxgep->nxge_timerid) {
3045 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
3046 		nxgep->nxge_timerid = 0;
3047 	}
3048 
3049 	MUTEX_ENTER(nxgep->genlock);
3050 	nxge_uninit(nxgep);
3051 
3052 	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
3053 
3054 	MUTEX_EXIT(nxgep->genlock);
3055 
3056 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
3057 }
3058 
3059 static int
3060 nxge_m_unicst(void *arg, const uint8_t *macaddr)
3061 {
3062 	p_nxge_t nxgep = (p_nxge_t)arg;
3063 	struct ether_addr addrp;
3064 
3065 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));
3066 
3067 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
3068 	if (nxge_set_mac_addr(nxgep, &addrp)) {
3069 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3070 		    "<== nxge_m_unicst: set unicast failed"));
3071 		return (EINVAL);
3072 	}
3073 
3074 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));
3075 
3076 	return (0);
3077 }
3078 
3079 static int
3080 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3081 {
3082 	p_nxge_t nxgep = (p_nxge_t)arg;
3083 	struct ether_addr addrp;
3084 
3085 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3086 	    "==> nxge_m_multicst: add %d", add));
3087 
3088 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3089 	if (add) {
3090 		if (nxge_add_mcast_addr(nxgep, &addrp)) {
3091 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3092 			    "<== nxge_m_multicst: add multicast failed"));
3093 			return (EINVAL);
3094 		}
3095 	} else {
3096 		if (nxge_del_mcast_addr(nxgep, &addrp)) {
3097 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3098 			    "<== nxge_m_multicst: del multicast failed"));
3099 			return (EINVAL);
3100 		}
3101 	}
3102 
3103 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3104 
3105 	return (0);
3106 }
3107 
3108 static int
3109 nxge_m_promisc(void *arg, boolean_t on)
3110 {
3111 	p_nxge_t nxgep = (p_nxge_t)arg;
3112 
3113 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3114
"==> nxge_m_promisc: on %d", on)); 3115 3116 if (nxge_set_promisc(nxgep, on)) { 3117 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3118 "<== nxge_m_promisc: set promisc failed")); 3119 return (EINVAL); 3120 } 3121 3122 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3123 "<== nxge_m_promisc: on %d", on)); 3124 3125 return (0); 3126 } 3127 3128 static void 3129 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3130 { 3131 p_nxge_t nxgep = (p_nxge_t)arg; 3132 struct iocblk *iocp; 3133 boolean_t need_privilege; 3134 int err; 3135 int cmd; 3136 3137 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3138 3139 iocp = (struct iocblk *)mp->b_rptr; 3140 iocp->ioc_error = 0; 3141 need_privilege = B_TRUE; 3142 cmd = iocp->ioc_cmd; 3143 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3144 switch (cmd) { 3145 default: 3146 miocnak(wq, mp, 0, EINVAL); 3147 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3148 return; 3149 3150 case LB_GET_INFO_SIZE: 3151 case LB_GET_INFO: 3152 case LB_GET_MODE: 3153 need_privilege = B_FALSE; 3154 break; 3155 case LB_SET_MODE: 3156 break; 3157 3158 case ND_GET: 3159 need_privilege = B_FALSE; 3160 break; 3161 case ND_SET: 3162 break; 3163 3164 case NXGE_GET_MII: 3165 case NXGE_PUT_MII: 3166 case NXGE_GET64: 3167 case NXGE_PUT64: 3168 case NXGE_GET_TX_RING_SZ: 3169 case NXGE_GET_TX_DESC: 3170 case NXGE_TX_SIDE_RESET: 3171 case NXGE_RX_SIDE_RESET: 3172 case NXGE_GLOBAL_RESET: 3173 case NXGE_RESET_MAC: 3174 case NXGE_TX_REGS_DUMP: 3175 case NXGE_RX_REGS_DUMP: 3176 case NXGE_INT_REGS_DUMP: 3177 case NXGE_VIR_INT_REGS_DUMP: 3178 case NXGE_PUT_TCAM: 3179 case NXGE_GET_TCAM: 3180 case NXGE_RTRACE: 3181 case NXGE_RDUMP: 3182 3183 need_privilege = B_FALSE; 3184 break; 3185 case NXGE_INJECT_ERR: 3186 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3187 nxge_err_inject(nxgep, wq, mp); 3188 break; 3189 } 3190 3191 if (need_privilege) { 3192 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3193 if (err != 0) { 3194 miocnak(wq, mp, 0, err); 3195 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3196 "<== nxge_m_ioctl: no priv")); 3197 return; 3198 } 3199 } 3200 3201 switch (cmd) { 3202 case ND_GET: 3203 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command")); 3204 case ND_SET: 3205 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command")); 3206 nxge_param_ioctl(nxgep, wq, mp, iocp); 3207 break; 3208 3209 case LB_GET_MODE: 3210 case LB_SET_MODE: 3211 case LB_GET_INFO_SIZE: 3212 case LB_GET_INFO: 3213 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3214 break; 3215 3216 case NXGE_GET_MII: 3217 case NXGE_PUT_MII: 3218 case NXGE_PUT_TCAM: 3219 case NXGE_GET_TCAM: 3220 case NXGE_GET64: 3221 case NXGE_PUT64: 3222 case NXGE_GET_TX_RING_SZ: 3223 case NXGE_GET_TX_DESC: 3224 case NXGE_TX_SIDE_RESET: 3225 case NXGE_RX_SIDE_RESET: 3226 case NXGE_GLOBAL_RESET: 3227 case NXGE_RESET_MAC: 3228 case NXGE_TX_REGS_DUMP: 3229 case NXGE_RX_REGS_DUMP: 3230 case NXGE_INT_REGS_DUMP: 3231 case NXGE_VIR_INT_REGS_DUMP: 3232 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3233 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3234 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3235 break; 3236 } 3237 3238 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3239 } 3240 3241 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3242 3243 static void 3244 nxge_m_resources(void *arg) 3245 { 3246 p_nxge_t nxgep = arg; 3247 mac_rx_fifo_t mrf; 3248 p_rx_rcr_rings_t rcr_rings; 3249 p_rx_rcr_ring_t *rcr_p; 3250 uint32_t i, ndmas; 3251 nxge_status_t status; 3252 3253 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 3254 3255 
MUTEX_ENTER(nxgep->genlock); 3256 3257 /* 3258 * CR 6492541 Check to see if the drv_state has been initialized, 3259 * if not * call nxge_init(). 3260 */ 3261 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3262 status = nxge_init(nxgep); 3263 if (status != NXGE_OK) 3264 goto nxge_m_resources_exit; 3265 } 3266 3267 mrf.mrf_type = MAC_RX_FIFO; 3268 mrf.mrf_blank = nxge_rx_hw_blank; 3269 mrf.mrf_arg = (void *)nxgep; 3270 3271 mrf.mrf_normal_blank_time = 128; 3272 mrf.mrf_normal_pkt_count = 8; 3273 rcr_rings = nxgep->rx_rcr_rings; 3274 rcr_p = rcr_rings->rcr_rings; 3275 ndmas = rcr_rings->ndmas; 3276 3277 /* 3278 * Export our receive resources to the MAC layer. 3279 */ 3280 for (i = 0; i < ndmas; i++) { 3281 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle = 3282 mac_resource_add(nxgep->mach, 3283 (mac_resource_t *)&mrf); 3284 3285 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3286 "==> nxge_m_resources: vdma %d dma %d " 3287 "rcrptr 0x%016llx mac_handle 0x%016llx", 3288 i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc, 3289 rcr_p[i], 3290 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle)); 3291 } 3292 3293 nxge_m_resources_exit: 3294 MUTEX_EXIT(nxgep->genlock); 3295 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources")); 3296 } 3297 3298 static void 3299 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory) 3300 { 3301 p_nxge_mmac_stats_t mmac_stats; 3302 int i; 3303 nxge_mmac_t *mmac_info; 3304 3305 mmac_info = &nxgep->nxge_mmac_info; 3306 3307 mmac_stats = &nxgep->statsp->mmac_stats; 3308 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 3309 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 3310 3311 for (i = 0; i < ETHERADDRL; i++) { 3312 if (factory) { 3313 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3314 = mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i]; 3315 } else { 3316 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3317 = mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i]; 3318 } 3319 } 3320 } 3321 3322 /* 3323 * nxge_altmac_set() -- Set an alternate MAC address 3324 */ 3325 static int 3326 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot) 3327 { 3328 uint8_t addrn; 3329 uint8_t portn; 3330 npi_mac_addr_t altmac; 3331 hostinfo_t mac_rdc; 3332 p_nxge_class_pt_cfg_t clscfgp; 3333 3334 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 3335 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 3336 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 3337 3338 portn = nxgep->mac.portnum; 3339 addrn = (uint8_t)slot - 1; 3340 3341 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn, 3342 addrn, &altmac) != NPI_SUCCESS) 3343 return (EIO); 3344 3345 /* 3346 * Set the rdc table number for the host info entry 3347 * for this mac address slot. 3348 */ 3349 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 3350 mac_rdc.value = 0; 3351 mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl; 3352 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 3353 3354 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 3355 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 3356 return (EIO); 3357 } 3358 3359 /* 3360 * Enable comparison with the alternate MAC address. 3361 * While the first alternate addr is enabled by bit 1 of register 3362 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 3363 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 3364 * accordingly before calling npi_mac_altaddr_entry. 
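	 * For example, alternate-MAC slot 1 maps to compare bit 0
	 * (addrn = slot - 1) on an XMAC port but to compare bit 1
	 * (addrn = slot) on a BMAC port, which is the conversion
	 * performed below.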
3365 	 */
3366 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
3367 		addrn = (uint8_t)slot - 1;
3368 	else
3369 		addrn = (uint8_t)slot;
3370 
3371 	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
3372 	    != NPI_SUCCESS)
3373 		return (EIO);
3374 
3375 	return (0);
3376 }
3377 
3378 /*
3379  * nxge_m_mmac_add() - find an unused address slot, set the address
3380  * value to the one specified, enable the port to start filtering on
3381  * the new MAC address. Returns 0 on success.
3382  */
3383 static int
3384 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
3385 {
3386 	p_nxge_t nxgep = arg;
3387 	mac_addr_slot_t slot;
3388 	nxge_mmac_t *mmac_info;
3389 	int err;
3390 	nxge_status_t status;
3391 
3392 	mutex_enter(nxgep->genlock);
3393 
3394 	/*
3395 	 * Make sure that nxge is initialized, if _start() has
3396 	 * not been called.
3397 	 */
3398 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3399 		status = nxge_init(nxgep);
3400 		if (status != NXGE_OK) {
3401 			mutex_exit(nxgep->genlock);
3402 			return (ENXIO);
3403 		}
3404 	}
3405 
3406 	mmac_info = &nxgep->nxge_mmac_info;
3407 	if (mmac_info->naddrfree == 0) {
3408 		mutex_exit(nxgep->genlock);
3409 		return (ENOSPC);
3410 	}
3411 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
3412 	    maddr->mma_addrlen)) {
3413 		mutex_exit(nxgep->genlock);
3414 		return (EINVAL);
3415 	}
3416 	/*
3417 	 * Search for the first available slot. Because naddrfree
3418 	 * is not zero, we are guaranteed to find one.
3419 	 * Slot 0 is for unique (primary) MAC. The first alternate
3420 	 * MAC slot is slot 1.
3421 	 * Each of the first two ports of Neptune has 16 alternate
3422 	 * MAC slots but only the first 7 (or 15) slots have assigned factory
3423 	 * MAC addresses. We first search among the slots without bundled
3424 	 * factory MACs. If we fail to find one in that range, then we
3425 	 * search the slots with bundled factory MACs. A factory MAC
3426 	 * will be wasted while the slot is used with a user MAC address.
3427 	 * But the slot could be used by factory MAC again after calling
3428 	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
3429 	 */
3430 	if (mmac_info->num_factory_mmac < mmac_info->num_mmac) {
3431 		for (slot = mmac_info->num_factory_mmac + 1;
3432 		    slot <= mmac_info->num_mmac; slot++) {
3433 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3434 				break;
3435 		}
3436 		if (slot > mmac_info->num_mmac) {
3437 			for (slot = 1; slot <= mmac_info->num_factory_mmac;
3438 			    slot++) {
3439 				if (!(mmac_info->mac_pool[slot].flags
3440 				    & MMAC_SLOT_USED))
3441 					break;
3442 			}
3443 		}
3444 	} else {
3445 		for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
3446 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3447 				break;
3448 		}
3449 	}
3450 	ASSERT(slot <= mmac_info->num_mmac);
3451 	if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) {
3452 		mutex_exit(nxgep->genlock);
3453 		return (err);
3454 	}
3455 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
3456 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
3457 	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
3458 	mmac_info->naddrfree--;
3459 	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3460 
3461 	maddr->mma_slot = slot;
3462 
3463 	mutex_exit(nxgep->genlock);
3464 	return (0);
3465 }
3466 
3467 /*
3468  * This function reserves an unused slot and programs the slot and the HW
3469  * with a factory mac address.
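 * A caller passes maddr->mma_slot == -1 to request the first free
 * factory slot; otherwise the requested slot must lie in
 * [1 .. num_factory_mmac], since only those slots have factory
 * addresses bundled with them.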
3470 */ 3471 static int 3472 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 3473 { 3474 p_nxge_t nxgep = arg; 3475 mac_addr_slot_t slot; 3476 nxge_mmac_t *mmac_info; 3477 int err; 3478 nxge_status_t status; 3479 3480 mutex_enter(nxgep->genlock); 3481 3482 /* 3483 * Make sure that nxge is initialized, if _start() has 3484 * not been called. 3485 */ 3486 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3487 status = nxge_init(nxgep); 3488 if (status != NXGE_OK) { 3489 mutex_exit(nxgep->genlock); 3490 return (ENXIO); 3491 } 3492 } 3493 3494 mmac_info = &nxgep->nxge_mmac_info; 3495 if (mmac_info->naddrfree == 0) { 3496 mutex_exit(nxgep->genlock); 3497 return (ENOSPC); 3498 } 3499 3500 slot = maddr->mma_slot; 3501 if (slot == -1) { /* -1: Take the first available slot */ 3502 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 3503 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3504 break; 3505 } 3506 if (slot > mmac_info->num_factory_mmac) { 3507 mutex_exit(nxgep->genlock); 3508 return (ENOSPC); 3509 } 3510 } 3511 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 3512 /* 3513 * Do not support factory MAC at a slot greater than 3514 * num_factory_mmac even when there are available factory 3515 * MAC addresses because the alternate MACs are bundled with 3516 * slot[1] through slot[num_factory_mmac] 3517 */ 3518 mutex_exit(nxgep->genlock); 3519 return (EINVAL); 3520 } 3521 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3522 mutex_exit(nxgep->genlock); 3523 return (EBUSY); 3524 } 3525 /* Verify the address to be reserved */ 3526 if (!mac_unicst_verify(nxgep->mach, 3527 mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 3528 mutex_exit(nxgep->genlock); 3529 return (EINVAL); 3530 } 3531 if (err = nxge_altmac_set(nxgep, 3532 mmac_info->factory_mac_pool[slot], slot)) { 3533 mutex_exit(nxgep->genlock); 3534 return (err); 3535 } 3536 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 3537 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3538 mmac_info->naddrfree--; 3539 3540 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 3541 mutex_exit(nxgep->genlock); 3542 3543 /* Pass info back to the caller */ 3544 maddr->mma_slot = slot; 3545 maddr->mma_addrlen = ETHERADDRL; 3546 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3547 3548 return (0); 3549 } 3550 3551 /* 3552 * Remove the specified mac address and update the HW not to filter 3553 * the mac address anymore. 3554 */ 3555 static int 3556 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 3557 { 3558 p_nxge_t nxgep = arg; 3559 nxge_mmac_t *mmac_info; 3560 uint8_t addrn; 3561 uint8_t portn; 3562 int err = 0; 3563 nxge_status_t status; 3564 3565 mutex_enter(nxgep->genlock); 3566 3567 /* 3568 * Make sure that nxge is initialized, if _start() has 3569 * not been called. 
3570 	 */
3571 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3572 		status = nxge_init(nxgep);
3573 		if (status != NXGE_OK) {
3574 			mutex_exit(nxgep->genlock);
3575 			return (ENXIO);
3576 		}
3577 	}
3578 
3579 	mmac_info = &nxgep->nxge_mmac_info;
3580 	if (slot < 1 || slot > mmac_info->num_mmac) {
3581 		mutex_exit(nxgep->genlock);
3582 		return (EINVAL);
3583 	}
3584 
3585 	portn = nxgep->mac.portnum;
3586 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
3587 		addrn = (uint8_t)slot - 1;
3588 	else
3589 		addrn = (uint8_t)slot;
3590 
3591 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3592 		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
3593 		    == NPI_SUCCESS) {
3594 			mmac_info->naddrfree++;
3595 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
3596 			/*
3597 			 * Regardless of whether the MAC we just stopped
3598 			 * filtering is a user addr or a factory addr, we must
3599 			 * set the MMAC_VENDOR_ADDR flag if this slot has an
3600 			 * associated factory MAC to indicate that a factory
3601 			 * MAC is available.
3602 			 */
3603 			if (slot <= mmac_info->num_factory_mmac) {
3604 				mmac_info->mac_pool[slot].flags
3605 				    |= MMAC_VENDOR_ADDR;
3606 			}
3607 			/*
3608 			 * Clear mac_pool[slot].addr so that kstat shows 0
3609 			 * alternate MAC address if the slot is not used.
3610 			 * (But nxge_m_mmac_get returns the factory MAC even
3611 			 * when the slot is not used!)
3612 			 */
3613 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
3614 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3615 		} else {
3616 			err = EIO;
3617 		}
3618 	} else {
3619 		err = EINVAL;
3620 	}
3621 
3622 	mutex_exit(nxgep->genlock);
3623 	return (err);
3624 }
3625 
3626 
3627 /*
3628  * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve().
3629  */
3630 static int
3631 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
3632 {
3633 	p_nxge_t nxgep = arg;
3634 	mac_addr_slot_t slot;
3635 	nxge_mmac_t *mmac_info;
3636 	int err = 0;
3637 	nxge_status_t status;
3638 
3639 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
3640 	    maddr->mma_addrlen))
3641 		return (EINVAL);
3642 
3643 	slot = maddr->mma_slot;
3644 
3645 	mutex_enter(nxgep->genlock);
3646 
3647 	/*
3648 	 * Make sure that nxge is initialized, if _start() has
3649 	 * not been called.
3650 	 */
3651 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3652 		status = nxge_init(nxgep);
3653 		if (status != NXGE_OK) {
3654 			mutex_exit(nxgep->genlock);
3655 			return (ENXIO);
3656 		}
3657 	}
3658 
3659 	mmac_info = &nxgep->nxge_mmac_info;
3660 	if (slot < 1 || slot > mmac_info->num_mmac) {
3661 		mutex_exit(nxgep->genlock);
3662 		return (EINVAL);
3663 	}
3664 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3665 		if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
3666 		    != 0) {
3667 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
3668 			    ETHERADDRL);
3669 			/*
3670 			 * Assume that the MAC passed down from the caller
3671 			 * is not a factory MAC address (The user should
3672 			 * call mmac_remove followed by mmac_reserve if
3673 			 * they want to use the factory MAC for this slot).
3674 			 */
3675 			mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
3676 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3677 		}
3678 	} else {
3679 		err = EINVAL;
3680 	}
3681 	mutex_exit(nxgep->genlock);
3682 	return (err);
3683 }
3684 
3685 /*
3686  * nxge_m_mmac_get() - Get the MAC address and other information
3687  * related to the slot. mma_flags should be set to 0 in the call.
3688  * Note: although kstat shows MAC address as zero when a slot is
3689  * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC
3690  * to the caller as long as the slot is not using a user MAC address.
3691  * The following table shows the rules:
3692  *
3693  *				USED	VENDOR	mma_addr
3694  * ------------------------------------------------------------
3695  * (1) Slot uses a user MAC:	yes	no	user MAC
3696  * (2) Slot uses a factory MAC:	yes	yes	factory MAC
3697  * (3) Slot is not used but is
3698  *     factory MAC capable:	no	yes	factory MAC
3699  * (4) Slot is not used and is
3700  *     not factory MAC capable:	no	no	0
3701  * ------------------------------------------------------------
3702  */
3703 static int
3704 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
3705 {
3706 	nxge_t *nxgep = arg;
3707 	mac_addr_slot_t slot;
3708 	nxge_mmac_t *mmac_info;
3709 	nxge_status_t status;
3710 
3711 	slot = maddr->mma_slot;
3712 
3713 	mutex_enter(nxgep->genlock);
3714 
3715 	/*
3716 	 * Make sure that nxge is initialized, if _start() has
3717 	 * not been called.
3718 	 */
3719 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3720 		status = nxge_init(nxgep);
3721 		if (status != NXGE_OK) {
3722 			mutex_exit(nxgep->genlock);
3723 			return (ENXIO);
3724 		}
3725 	}
3726 
3727 	mmac_info = &nxgep->nxge_mmac_info;
3728 
3729 	if (slot < 1 || slot > mmac_info->num_mmac) {
3730 		mutex_exit(nxgep->genlock);
3731 		return (EINVAL);
3732 	}
3733 	maddr->mma_flags = 0;
3734 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
3735 		maddr->mma_flags |= MMAC_SLOT_USED;
3736 
3737 	if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
3738 		maddr->mma_flags |= MMAC_VENDOR_ADDR;
3739 		bcopy(mmac_info->factory_mac_pool[slot],
3740 		    maddr->mma_addr, ETHERADDRL);
3741 		maddr->mma_addrlen = ETHERADDRL;
3742 	} else {
3743 		if (maddr->mma_flags & MMAC_SLOT_USED) {
3744 			bcopy(mmac_info->mac_pool[slot].addr,
3745 			    maddr->mma_addr, ETHERADDRL);
3746 			maddr->mma_addrlen = ETHERADDRL;
3747 		} else {
3748 			bzero(maddr->mma_addr, ETHERADDRL);
3749 			maddr->mma_addrlen = 0;
3750 		}
3751 	}
3752 	mutex_exit(nxgep->genlock);
3753 	return (0);
3754 }
3755 
3756 
3757 static boolean_t
3758 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3759 {
3760 	nxge_t *nxgep = arg;
3761 	uint32_t *txflags = cap_data;
3762 	multiaddress_capab_t *mmacp = cap_data;
3763 
3764 	switch (cap) {
3765 	case MAC_CAPAB_HCKSUM:
3766 		*txflags = HCKSUM_INET_PARTIAL;
3767 		break;
3768 	case MAC_CAPAB_POLL:
3769 		/*
3770 		 * There's nothing for us to fill in; simply returning
3771 		 * B_TRUE stating that we support polling is sufficient.
3772 		 */
3773 		break;
3774 
3775 	case MAC_CAPAB_MULTIADDRESS:
3776 		mutex_enter(nxgep->genlock);
3777 
3778 		mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
3779 		mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
3780 		mmacp->maddr_flag = 0;	/* 0 is required by PSARC/2006/265 */
3781 		/*
3782 		 * maddr_handle is driver's private data, passed back to
3783 		 * entry point functions as arg.
3784 		 */
3785 		mmacp->maddr_handle = nxgep;
3786 		mmacp->maddr_add = nxge_m_mmac_add;
3787 		mmacp->maddr_remove = nxge_m_mmac_remove;
3788 		mmacp->maddr_modify = nxge_m_mmac_modify;
3789 		mmacp->maddr_get = nxge_m_mmac_get;
3790 		mmacp->maddr_reserve = nxge_m_mmac_reserve;
3791 
3792 		mutex_exit(nxgep->genlock);
3793 		break;
3794 	default:
3795 		return (B_FALSE);
3796 	}
3797 	return (B_TRUE);
3798 }
3799 
3800 /*
3801  * Module loading and removing entry points.
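 * Note the ordering below: _init() calls mac_init_ops() before
 * mod_install() and tears the soft state down again on failure,
 * while _fini() refuses to unload (EBUSY) while any mblks are
 * still outstanding, and only then undoes _init() in reverse.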
3802 */ 3803 3804 static struct cb_ops nxge_cb_ops = { 3805 nodev, /* cb_open */ 3806 nodev, /* cb_close */ 3807 nodev, /* cb_strategy */ 3808 nodev, /* cb_print */ 3809 nodev, /* cb_dump */ 3810 nodev, /* cb_read */ 3811 nodev, /* cb_write */ 3812 nodev, /* cb_ioctl */ 3813 nodev, /* cb_devmap */ 3814 nodev, /* cb_mmap */ 3815 nodev, /* cb_segmap */ 3816 nochpoll, /* cb_chpoll */ 3817 ddi_prop_op, /* cb_prop_op */ 3818 NULL, 3819 D_MP, /* cb_flag */ 3820 CB_REV, /* rev */ 3821 nodev, /* int (*cb_aread)() */ 3822 nodev /* int (*cb_awrite)() */ 3823 }; 3824 3825 static struct dev_ops nxge_dev_ops = { 3826 DEVO_REV, /* devo_rev */ 3827 0, /* devo_refcnt */ 3828 nulldev, 3829 nulldev, /* devo_identify */ 3830 nulldev, /* devo_probe */ 3831 nxge_attach, /* devo_attach */ 3832 nxge_detach, /* devo_detach */ 3833 nodev, /* devo_reset */ 3834 &nxge_cb_ops, /* devo_cb_ops */ 3835 (struct bus_ops *)NULL, /* devo_bus_ops */ 3836 ddi_power /* devo_power */ 3837 }; 3838 3839 extern struct mod_ops mod_driverops; 3840 3841 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 3842 3843 /* 3844 * Module linkage information for the kernel. 3845 */ 3846 static struct modldrv nxge_modldrv = { 3847 &mod_driverops, 3848 NXGE_DESC_VER, 3849 &nxge_dev_ops 3850 }; 3851 3852 static struct modlinkage modlinkage = { 3853 MODREV_1, (void *) &nxge_modldrv, NULL 3854 }; 3855 3856 int 3857 _init(void) 3858 { 3859 int status; 3860 3861 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 3862 mac_init_ops(&nxge_dev_ops, "nxge"); 3863 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 3864 if (status != 0) { 3865 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 3866 "failed to init device soft state")); 3867 goto _init_exit; 3868 } 3869 3870 status = mod_install(&modlinkage); 3871 if (status != 0) { 3872 ddi_soft_state_fini(&nxge_list); 3873 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 3874 goto _init_exit; 3875 } 3876 3877 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 3878 3879 _init_exit: 3880 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 3881 3882 return (status); 3883 } 3884 3885 int 3886 _fini(void) 3887 { 3888 int status; 3889 3890 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 3891 3892 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 3893 3894 if (nxge_mblks_pending) 3895 return (EBUSY); 3896 3897 status = mod_remove(&modlinkage); 3898 if (status != DDI_SUCCESS) { 3899 NXGE_DEBUG_MSG((NULL, MOD_CTL, 3900 "Module removal failed 0x%08x", 3901 status)); 3902 goto _fini_exit; 3903 } 3904 3905 mac_fini_ops(&nxge_dev_ops); 3906 3907 ddi_soft_state_fini(&nxge_list); 3908 3909 MUTEX_DESTROY(&nxge_common_lock); 3910 _fini_exit: 3911 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 3912 3913 return (status); 3914 } 3915 3916 int 3917 _info(struct modinfo *modinfop) 3918 { 3919 int status; 3920 3921 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 3922 status = mod_info(&modlinkage, modinfop); 3923 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 3924 3925 return (status); 3926 } 3927 3928 /*ARGSUSED*/ 3929 static nxge_status_t 3930 nxge_add_intrs(p_nxge_t nxgep) 3931 { 3932 3933 int intr_types; 3934 int type = 0; 3935 int ddi_status = DDI_SUCCESS; 3936 nxge_status_t status = NXGE_OK; 3937 3938 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 3939 3940 nxgep->nxge_intr_type.intr_registered = B_FALSE; 3941 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 3942 nxgep->nxge_intr_type.msi_intx_cnt = 0; 3943 nxgep->nxge_intr_type.intr_added = 0; 3944 
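	/*
	 * niu_msi_enable (set just below) tracks the nxge_msi_enable
	 * tunable: advanced (non-fixed-emulation) registration is
	 * attempted for N2/NIU, or whenever the tunable is nonzero
	 * (1 prefers MSI, 2 prefers MSI-X, anything else falls back
	 * to fixed interrupts).
	 */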
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs(p_nxge_t nxgep)
{
	int		intr_types;
	int		type = 0;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));

	nxgep->nxge_intr_type.intr_registered = B_FALSE;
	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
	nxgep->nxge_intr_type.msi_intx_cnt = 0;
	nxgep->nxge_intr_type.intr_added = 0;
	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
	nxgep->nxge_intr_type.intr_type = 0;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	} else if (nxge_msi_enable) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
	    != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}
	nxgep->nxge_intr_type.intr_types = intr_types;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * nxge_msi_enable selects the preferred interrupt type:
	 *	1 - prefer MSI, 2 - prefer MSI-X, others - FIXED.
	 * The driver falls back to whatever the platform supports.
	 */
	switch (nxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "use fixed (intx emulation) type %08x", type));
		break;

	case 2:
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
		break;

	case 1:
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
		break;
	}

	nxgep->nxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    nxgep->nxge_intr_type.niu_msi_enable) {
		if ((status = nxge_add_intrs_adv(nxgep)) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_add_intrs: "
			    " nxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "interrupts registered : type %d", type));
			nxgep->nxge_intr_type.intr_registered = B_TRUE;

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "\nAdded advanced nxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!nxgep->nxge_intr_type.intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
		    "failed to register interrupts"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
	return (status);
}
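/*
 * The selection logic above reduces to "walk a preference list and take
 * the first interrupt type the platform supports."  A hedged sketch of
 * that idea; the example_* helper and the NXGE_EXAMPLES guard are
 * hypothetical, not part of the driver.  For nxge_msi_enable == 2 the
 * list would be { DDI_INTR_TYPE_MSIX, DDI_INTR_TYPE_MSI,
 * DDI_INTR_TYPE_FIXED }; for == 1, MSI comes first.
 */
#ifdef NXGE_EXAMPLES
static int
example_pick_intr_type(int supported, const int *prefs, int nprefs)
{
	int i;

	for (i = 0; i < nprefs; i++) {
		if (supported & prefs[i])
			return (prefs[i]);	/* first supported type wins */
	}
	return (DDI_INTR_TYPE_FIXED);		/* INTx emulation always works */
}
#endif	/* NXGE_EXAMPLES */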
/*ARGSUSED*/
static nxge_status_t
nxge_add_soft_intrs(p_nxge_t nxgep)
{
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));

	nxgep->resched_id = NULL;
	nxgep->resched_running = B_FALSE;
	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
	    &nxgep->resched_id,
	    NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
		    "ddi_add_softintr failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));

	return (status);
}

static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)
{
	int		intr_type;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intr_type = intrp->intr_type;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		return (nxge_add_intrs_adv_type(nxgep, intr_type));

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));

	default:
		return (NXGE_ERROR);
	}
}


/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail, nrequest;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = nxge_create_msi_property(nxgep);
		if (nrequest < navail) {
			navail = nrequest;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/*
		 * MSI requires a power-of-2 vector count; round navail
		 * down to the largest power of 2 it contains.  (MSI is
		 * capped at 32 vectors, so navail is at most 31 here.)
		 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}
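	/*
	 * Worked example of the rounding above (editorial note, not
	 * driver logic): navail = 13 (0b1101) has bit 8 set, so the
	 * chain picks 8; navail = 6 (0b0110) clears bits 16 and 8 but
	 * has bit 4 set, so it picks 4.  Testing the bits from high to
	 * low is equivalent to taking the highest set bit, i.e. the
	 * largest power of 2 that is <= navail, for any navail in
	 * [1, 31].
	 */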
	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "nldvs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: "
		    "ddi_intr_add_handler(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			/* Remove handlers installed so far */
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated interrupts */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual,
	    intrp->msi_intx_cnt,
	    intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	(void) nxge_intr_ldgv_init(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}
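/*
 * The failure paths in nxge_add_intrs_adv_type() all unwind in strict
 * reverse order of setup: remove any handlers already added, free every
 * allocated vector, free the handle table, then tear down the logical
 * device groups.  A hedged sketch of a helper that could consolidate
 * that duplicated unwind code; the example_* name and NXGE_EXAMPLES
 * guard are hypothetical.
 */
#ifdef NXGE_EXAMPLES
static void
example_unwind_intrs(p_nxge_t nxgep, p_nxge_intr_t intrp, int nactual)
{
	int y;

	for (y = 0; y < intrp->intr_added; y++)		/* 1: handlers */
		(void) ddi_intr_remove_handler(intrp->htable[y]);
	for (y = 0; y < nactual; y++)			/* 2: vectors */
		(void) ddi_intr_free(intrp->htable[y]);
	kmem_free(intrp->htable, intrp->intr_size);	/* 3: handle table */
	(void) nxge_ldgv_uninit(nxgep);			/* 4: LDG state */
}
#endif	/* NXGE_EXAMPLES */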
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));
	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		if (nxgep->niu_type != N2_NIU) {
			ldgp->intdata = SID_DATA(ldgp->func, x);
		}

		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "1-1 int handler(%d) ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "shared ldv %d int handler(%d) ldv %d ldg %d "
			    "arg1 0x%016llx arg2 0x%016llx\n",
			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		}

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type_fix: failed #%d "
			    "status 0x%x", x, ddi_status));
			/* Remove handlers installed so far */
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated interrupts */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	status = nxge_intr_ldgv_init(nxgep);
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));

	return (status);
}
static void
nxge_remove_intrs(p_nxge_t nxgep)
{
	int		i, inum;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_remove_intrs: interrupts not registered"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs: advanced"));

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum,
			    intrp->msi_intx_cnt,
			    intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) nxge_ldgv_uninit(nxgep);

	(void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
	    "#msix-request");

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
}

/*ARGSUSED*/
static void
nxge_remove_soft_intrs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
	if (nxgep->resched_id) {
		ddi_remove_softintr(nxgep->resched_id);
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_remove_soft_intrs: removed"));
		nxgep->resched_id = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
}

/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;
	int		status;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
		/* Mark enabled on success, as the per-vector path does. */
		if (status == DDI_SUCCESS) {
			intrp->intr_enabled = B_TRUE;
		}
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}
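/*
 * DDI_INTR_FLAG_BLOCK (reported by ddi_intr_get_cap()) means the vectors
 * can only be enabled and disabled as a group, which is why the enable,
 * disable, and remove paths above all branch the same way.  A hedged
 * sketch of that shared idiom; the example_* helper and NXGE_EXAMPLES
 * guard are hypothetical.
 */
#ifdef NXGE_EXAMPLES
static void
example_set_intrs(p_nxge_intr_t intrp, boolean_t enable)
{
	int i;

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* All-or-nothing: the group shares one enable state. */
		if (enable)
			(void) ddi_intr_block_enable(intrp->htable,
			    intrp->intr_added);
		else
			(void) ddi_intr_block_disable(intrp->htable,
			    intrp->intr_added);
	} else {
		/* Vectors can be toggled individually. */
		for (i = 0; i < intrp->intr_added; i++) {
			if (enable)
				(void) ddi_intr_enable(intrp->htable[i]);
			else
				(void) ddi_intr_disable(intrp->htable[i]);
		}
	}
}
#endif	/* NXGE_EXAMPLES */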
/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t	*macp;
	int		status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	/* Maximum payload: frame size less header, FCS, and VLAN tag. */
	macp->m_max_sdu = nxgep->mac.maxframesize -
	    sizeof (struct ether_header) - ETHERFCSL - 4;

	status = mac_register(macp, &nxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}

void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t		size;
	mblk_t		*nmp;
	uint8_t		blk_id;
	uint8_t		chan;
	uint32_t	err_id;
	err_inject_t	*eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;
	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
	switch (blk_id) {
	case MAC_BLK_ID:
	case TXMAC_BLK_ID:
	case RXMAC_BLK_ID:
	case MIF_BLK_ID:
		/* No error injection implemented for these blocks yet. */
		break;
	case IPP_BLK_ID:
		nxge_ipp_inject_err(nxgep, err_id);
		break;
	case TXC_BLK_ID:
		nxge_txc_inject_err(nxgep, err_id);
		break;
	case TXDMA_BLK_ID:
		nxge_txdma_inject_err(nxgep, err_id, chan);
		break;
	case RXDMA_BLK_ID:
		nxge_rxdma_inject_err(nxgep, err_id, chan);
		break;
	case ZCP_BLK_ID:
		nxge_zcp_inject_err(nxgep, err_id);
		break;
	case ESPC_BLK_ID:
	case FFLP_BLK_ID:
	case PHY_BLK_ID:
	case ETHER_SERDES_BLK_ID:
	case PCIE_SERDES_BLK_ID:
	case VIR_BLK_ID:
		/* No error injection implemented for these blocks yet. */
		break;
	default:
		cmn_err(CE_NOTE, "!unknown blk_id 0x%x\n", blk_id);
		break;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

	miocack(wq, mp, (int)size, 0);
}
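/*
 * nxge_err_inject() consumes an M_IOCTL message whose b_cont block
 * carries an err_inject_t.  A hedged sketch of how a test harness might
 * build that payload block; the example_* name and NXGE_EXAMPLES guard
 * are hypothetical, and the real ioctl command value lives in the
 * driver headers.
 */
#ifdef NXGE_EXAMPLES
static mblk_t *
example_build_err_inject(uint8_t blk_id, uint8_t chan, uint32_t err_id)
{
	mblk_t		*mp;
	err_inject_t	*eip;

	if ((mp = allocb(sizeof (err_inject_t), BPRI_MED)) == NULL)
		return (NULL);
	eip = (err_inject_t *)mp->b_rptr;
	eip->blk_id = blk_id;	/* e.g. TXDMA_BLK_ID */
	eip->chan = chan;	/* DMA channel, where relevant */
	eip->err_id = err_id;	/* block-specific error code */
	mp->b_wptr = mp->b_rptr + sizeof (err_inject_t);
	return (mp);		/* link as b_cont of the M_IOCTL mblk */
}
#endif	/* NXGE_EXAMPLES */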
static int
nxge_init_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

	p_dip = nxgep->p_dip;
	MUTEX_ENTER(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_dev: func # %d",
	    nxgep->function_num));
	/*
	 * Loop through the existing per-Neptune hardware list.
	 */
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device: func # %d "
		    "hw_p $%p parent dip $%p",
		    nxgep->function_num,
		    hw_p,
		    p_dip));
		if (hw_p->parent_devp == p_dip) {
			nxgep->nxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->nxge_p[nxgep->function_num] = nxgep;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_init_common_device: func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device: func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num,
		    p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;
		if (nxgep->niu_type == N2_NIU) {
			hw_p->niu_type = N2_NIU;
			hw_p->platform_type = P_NEPTUNE_NIU;
		} else {
			hw_p->niu_type = NIU_TYPE_NONE;
			hw_p->platform_type = P_NEPTUNE_NONE;
		}

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;

		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
	}

	MUTEX_EXIT(&nxge_common_lock);

	nxgep->platform_type = hw_p->platform_type;
	if (nxgep->niu_type != N2_NIU) {
		nxgep->niu_type = hw_p->niu_type;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}
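/*
 * All functions (ports) of one Neptune ASIC share a single
 * nxge_hw_list_t entry, keyed by the parent dev_info node and
 * reference-counted through ndevs.  A hedged sketch of the
 * find-or-create idiom used above; the example_* name and the
 * NXGE_EXAMPLES guard are hypothetical.
 */
#ifdef NXGE_EXAMPLES
static p_nxge_hw_list_t
example_find_hw(dev_info_t *parent)
{
	p_nxge_hw_list_t hw_p;

	/* Caller holds nxge_common_lock. */
	for (hw_p = nxge_hw_list; hw_p != NULL; hw_p = hw_p->next) {
		if (hw_p->parent_devp == parent)
			return (hw_p);	/* existing entry; caller bumps ndevs */
	}
	return (NULL);			/* caller allocates a new entry */
}
#endif	/* NXGE_EXAMPLES */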
static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p, h_hw_p;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device: func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));

			nxgep->nxge_hw_p = NULL;
			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				MUTEX_DESTROY(&hw_p->nxge_mii_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num,
				    hw_p,
				    p_dip,
				    hw_p->ndevs));

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device: "
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device: "
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}

/*
 * Determine the number of ports from the niu_type or the platform type.
 * Returns the number of ports, or zero on failure.
 */
int
nxge_get_nports(p_nxge_t nxgep)
{
	int nports = 0;

	switch (nxgep->niu_type) {
	case N2_NIU:
	case NEPTUNE_2_10GF:
		nports = 2;
		break;
	case NEPTUNE_4_1GC:
	case NEPTUNE_2_10GF_2_1GC:
	case NEPTUNE_1_10GF_3_1GC:
	case NEPTUNE_1_1GC_1_10GF_2_1GC:
		nports = 4;
		break;
	default:
		switch (nxgep->platform_type) {
		case P_NEPTUNE_NIU:
		case P_NEPTUNE_ATLAS_2PORT:
			nports = 2;
			break;
		case P_NEPTUNE_ATLAS_4PORT:
		case P_NEPTUNE_MARAMBA_P0:
		case P_NEPTUNE_MARAMBA_P1:
			nports = 4;
			break;
		default:
			break;
		}
		break;
	}

	return (nports);
}

/*
 * The following two functions are to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
	int	nmsi;
	extern int ncpus;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_create_msi_property"));

	switch (nxgep->mac.portmode) {
	case PORT_10G_COPPER:
	case PORT_10G_FIBER:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		/*
		 * At most NXGE_MSIX_REQUEST_10G (8) MSI-X vectors are
		 * requested; if there are fewer than 8 CPUs, request
		 * one vector per CPU instead.
		 */
		if (ncpus >= NXGE_MSIX_REQUEST_10G) {
			nmsi = NXGE_MSIX_REQUEST_10G;
		} else {
			nmsi = ncpus;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;

	default:
		nmsi = NXGE_MSIX_REQUEST_1G;
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_create_msi_property"));
	return (nmsi);
}
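/*
 * The vector-count policy above is simply min(ncpus, 8) for 10G ports.
 * Worked example: a 4-CPU host requests 4 MSI-X vectors for a 10G port;
 * a 32-CPU host requests 8.  A hedged sketch of the same policy; the
 * example_* name and the NXGE_EXAMPLES guard are hypothetical.
 */
#ifdef NXGE_EXAMPLES
static int
example_msix_request(int ncpus_avail, int is_10g_port)
{
	if (!is_10g_port)
		return (NXGE_MSIX_REQUEST_1G);
	/* min(ncpus, NXGE_MSIX_REQUEST_10G) */
	return (ncpus_avail >= NXGE_MSIX_REQUEST_10G ?
	    NXGE_MSIX_REQUEST_10G : ncpus_avail);
}
#endif	/* NXGE_EXAMPLES */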