/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include	<sys/nxge/nxge_impl.h>
#include	<sys/pcie.h>

uint32_t	nxge_use_partition = 0;		/* debug partition flag */
uint32_t	nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 *  and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t	nxge_msi_enable = 2;
#else
uint32_t	nxge_msi_enable = 1;
#endif

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t	nxge_rbr_spare_size = 0;
uint32_t	nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t	nxge_no_msg = B_TRUE;		/* control message display */
uint32_t	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t	nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t	nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t	nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t	nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t	nxge_jumbo_enable = B_FALSE;
uint16_t	nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t	nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t	nxge_tx_scheme = NXGE_USE_SERIAL;

/* MAX LSO size */
#define	NXGE_LSO_MAXLEN	65535
/* Enable Software LSO flag */
uint32_t	nxge_lso_enable = 1;
uint32_t	nxge_lso_max = NXGE_LSO_MAXLEN;

/*
 * Debugging flags:
 *	nxge_no_tx_lb	 : transmit load balancing
 *	nxge_tx_lb_policy: 0 - TCP port (default)
 *			   3 - DEST MAC
 */
uint32_t	nxge_no_tx_lb = 0;
uint32_t	nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t	nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
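/*
 * Most of the globals above are meant to be tuned without rebuilding
 * the driver.  As an illustrative (hypothetical) example, the receive
 * ring sizes could be overridden at boot with /etc/system entries:
 *
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_rcr_size = 4096
 *
 * or patched in a running kernel with mdb/adb.
 */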
rtrace_t npi_rtracebuf;

#if	defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef	NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *,
	size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *,
	size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);

/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_unicst(void *, const uint8_t *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static void nxge_m_resources(void *);
mblk_t *nxge_m_tx(void *arg, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	mac_addr_slot_t slot);
static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
	boolean_t factory);
static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)

static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	nxge_m_unicst,
	nxge_m_tx,
	nxge_m_resources,
	nxge_m_ioctl,
	nxge_m_getcapab
};
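/*
 * For reference, the callbacks above are handed to the MAC layer when
 * the driver registers itself.  The following is a condensed,
 * illustrative sketch of that registration under the GLDv3 API of this
 * era (the real work is done by nxge_mac_register(), declared above;
 * the field values here are assumptions for illustration only):
 *
 *	mac_register_t	*macp;
 *	int		err;
 *
 *	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
 *		return (NXGE_ERROR);
 *	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
 *	macp->m_driver = nxgep;
 *	macp->m_dip = nxgep->dip;
 *	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
 *	macp->m_callbacks = &nxge_m_callbacks;
 *	macp->m_min_sdu = 0;
 *	macp->m_max_sdu = mtu;		// hypothetical MTU value
 *	err = mac_register(macp, &nxgep->mach);
 *	mac_free(macp);
 */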
void
nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level = 0;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void			*nxge_list = NULL;

void			*nxge_hw_list = NULL;
nxge_os_mutex_t		nxge_common_lock;

nxge_os_mutex_t		nxge_mii_lock;
static uint32_t		nxge_mii_lock_init = 0;
nxge_os_mutex_t		nxge_mdio_lock;
static uint32_t		nxge_mdio_lock_init = 0;

extern uint64_t		npi_debug_level;

extern nxge_status_t	nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t	nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t	nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t	nxge_intr_ldgv_init(p_nxge_t);
extern void		nxge_fm_init(p_nxge_t,
				ddi_device_acc_attr_t *,
				ddi_device_acc_attr_t *,
				ddi_dma_attr_t *);
extern void		nxge_fm_fini(p_nxge_t);
extern npi_status_t	npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * dma chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes[] = {0x2000};
#else
size_t alloc_sizes[] = {0x1000, 0x2000, 0x4000, 0x8000,
		0x10000, 0x20000, 0x40000, 0x80000,
		0x100000, 0x200000, 0x400000, 0x800000,
		0x1000000, 0x2000000, 0x4000000};
#endif
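/*
 * The attribute structures above constrain every DMA allocation the
 * driver makes.  A minimal sketch of the standard DDI sequence they
 * feed into (names other than the attribute/access structures are
 * hypothetical, and error unwinding is omitted for brevity):
 *
 *	ddi_dma_handle_t	dma_handle;
 *	ddi_acc_handle_t	acc_handle;
 *	caddr_t			kaddr;
 *	size_t			real_len;
 *	ddi_dma_cookie_t	cookie;
 *	uint_t			ccount;
 *
 *	(void) ddi_dma_alloc_handle(dip, &nxge_tx_dma_attr,
 *	    DDI_DMA_DONTWAIT, NULL, &dma_handle);
 *	(void) ddi_dma_mem_alloc(dma_handle, size,
 *	    &nxge_dev_desc_dma_acc_attr, DDI_DMA_CONSISTENT,
 *	    DDI_DMA_DONTWAIT, NULL, &kaddr, &real_len, &acc_handle);
 *	(void) ddi_dma_addr_bind_handle(dma_handle, NULL, kaddr,
 *	    real_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
 *	    DDI_DMA_DONTWAIT, NULL, &cookie, &ccount);
 *
 * nxge_dma_mem_alloc() (prototyped above) performs this sequence with
 * the proper error handling.
 */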
/*
 * attach(9E) -- Attach a device to the system.
 *
 * Called once for each board successfully probed, and again for
 * DDI_RESUME and DDI_PM_RESUME.
 */
static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t	nxgep = NULL;
	int		instance;
	int		status = DDI_SUCCESS;
	uint8_t		portn;
	nxge_mmac_t	*mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr,
	    &nxge_rx_dma_attr);

	status = nxge_map_regs(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
	nxgep->mac.portnum = portn;
	if ((portn == 0) || (portn == 1))
		nxgep->mac.porttype = PORT_TYPE_XMAC;
	else
		nxgep->mac.porttype = PORT_TYPE_BMAC;
	/*
	 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
	 * internally, and the remaining 2 ports use BMAC (1G "Big" MAC).
	 * The two types of MACs have different characteristics.
	 */
	mmac_info = &nxgep->nxge_mmac_info;
	if (nxgep->function_num < 2) {
		mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
		mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
	} else {
		mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
		mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);
	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Read the vpd info from the eeprom into local data
	 * structures and check the VPD info for validity.
	 */
	nxge_vpd_info_get(nxgep);

	status = nxge_xcvr_find(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
		    " Couldn't determine card type"
		    " .... exit "));
		goto nxge_attach_fail5;
	}

	status = nxge_get_config_properties(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed"));
		goto nxge_attach_fail;
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

#if	defined(sun4v)
	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d "
			    "group: 0x%lx major: 0x%lx minor: 0x%lx "
			    "errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			status = DDI_FAILURE;
			goto nxge_attach_fail;
		}

		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}
#endif

	nxge_hw_id_init(nxgep);
	nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}
	status = nxge_add_soft_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed"));
		goto nxge_attach_fail;
	}

	/*
	 * Enable interrupts.
	 */
	nxge_intrs_enable(nxgep);

	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
	    instance));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}
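/*
 * detach(9E) -- Detach a device from the system, or suspend it.
 *
 * DDI_DETACH unregisters from the MAC layer and releases everything
 * acquired in nxge_attach(); DDI_SUSPEND and DDI_PM_SUSPEND only
 * quiesce the hardware, keeping the soft state for a later resume.
 */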
static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		status = DDI_SUCCESS;
	int		instance;
	p_nxge_t	nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}
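/*
 * nxge_unattach() -- Release everything acquired by nxge_attach(), in
 * roughly the reverse order of acquisition: hypervisor services and
 * interrupts first, then the device, ndd parameters, kstats, mutexes,
 * properties, register mappings, and finally the soft state itself.
 */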
static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

#if	defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/* remove soft interrupts */
	nxge_remove_soft_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	nxge_destroy_dev(nxgep);

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

static char n2_siu_name[] = "niu";
static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;
	char		buf[MAXPATHLEN + 1];
	char		*devname;
#ifdef	NXGE_DEBUG
	char		*sysname;
#endif
	off_t		regsize;
	nxge_status_t	status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t		pci_offset;
	uint16_t	pcie_devctl;
#endif

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int		*prop_val;
		uint_t		prop_len;
		uint8_t		func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMPORTANT: workaround for a hardware bit-swapping bug
		 * that leaves no-snoop enabled, which in turn results in
		 * DMA not being synchronized properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;
	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}
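/*
 * nxge_unmap_regs() -- Undo nxge_map_regs(): free whichever register
 * mappings were set up (PCI config, device, MSI-X, and VIO regions)
 * and release the dev_regs structure.
 */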
1102 "==> nxge_unmap_regs: device registers")); 1103 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1104 nxgep->dev_regs->nxge_regh = NULL; 1105 } 1106 if (nxgep->dev_regs->nxge_msix_regh) { 1107 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1108 "==> nxge_unmap_regs: device interrupts")); 1109 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); 1110 nxgep->dev_regs->nxge_msix_regh = NULL; 1111 } 1112 if (nxgep->dev_regs->nxge_vir_regh) { 1113 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1114 "==> nxge_unmap_regs: vio region")); 1115 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); 1116 nxgep->dev_regs->nxge_vir_regh = NULL; 1117 } 1118 if (nxgep->dev_regs->nxge_vir2_regh) { 1119 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1120 "==> nxge_unmap_regs: vio2 region")); 1121 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); 1122 nxgep->dev_regs->nxge_vir2_regh = NULL; 1123 } 1124 1125 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); 1126 nxgep->dev_regs = NULL; 1127 } 1128 1129 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); 1130 } 1131 1132 static nxge_status_t 1133 nxge_setup_mutexes(p_nxge_t nxgep) 1134 { 1135 int ddi_status = DDI_SUCCESS; 1136 nxge_status_t status = NXGE_OK; 1137 nxge_classify_t *classify_ptr; 1138 int partition; 1139 1140 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); 1141 1142 /* 1143 * Get the interrupt cookie so the mutexes can be 1144 * Initialized. 1145 */ 1146 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, 1147 &nxgep->interrupt_cookie); 1148 if (ddi_status != DDI_SUCCESS) { 1149 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1150 "<== nxge_setup_mutexes: failed 0x%x", ddi_status)); 1151 goto nxge_setup_mutexes_exit; 1152 } 1153 1154 /* Initialize global mutex */ 1155 1156 if (nxge_mdio_lock_init == 0) { 1157 MUTEX_INIT(&nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 1158 } 1159 atomic_add_32(&nxge_mdio_lock_init, 1); 1160 1161 if (nxge_mii_lock_init == 0) { 1162 MUTEX_INIT(&nxge_mii_lock, NULL, MUTEX_DRIVER, NULL); 1163 } 1164 atomic_add_32(&nxge_mii_lock_init, 1); 1165 1166 nxgep->drv_state |= STATE_MDIO_LOCK_INIT; 1167 nxgep->drv_state |= STATE_MII_LOCK_INIT; 1168 1169 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL); 1170 MUTEX_INIT(&nxgep->poll_lock, NULL, 1171 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1172 1173 /* 1174 * Initialize mutexes for this device. 1175 */ 1176 MUTEX_INIT(nxgep->genlock, NULL, 1177 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1178 MUTEX_INIT(&nxgep->ouraddr_lock, NULL, 1179 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1180 MUTEX_INIT(&nxgep->mif_lock, NULL, 1181 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1182 RW_INIT(&nxgep->filter_lock, NULL, 1183 RW_DRIVER, (void *)nxgep->interrupt_cookie); 1184 1185 classify_ptr = &nxgep->classifier; 1186 /* 1187 * FFLP Mutexes are never used in interrupt context 1188 * as fflp operation can take very long time to 1189 * complete and hence not suitable to invoke from interrupt 1190 * handlers. 
static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int		partition;
	nxge_classify_t	*classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}
	if (nxgep->drv_state & STATE_MDIO_LOCK_INIT) {
		if (nxge_mdio_lock_init == 1) {
			MUTEX_DESTROY(&nxge_mdio_lock);
		}
		atomic_add_32(&nxge_mdio_lock_init, -1);
	}
	if (nxgep->drv_state & STATE_MII_LOCK_INIT) {
		if (nxge_mii_lock_init == 1) {
			MUTEX_DESTROY(&nxge_mii_lock);
		}
		atomic_add_32(&nxge_mii_lock_init, -1);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}
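/*
 * nxge_init() -- Bring the hardware to an operational state.
 *
 * The bring-up order matters: buffer pools are allocated first, then
 * the TXC block, TXDMA and RXDMA channels, classification (TCAM and
 * FCRAM), ZCP, IPP and the MAC block, and finally interrupts are
 * enabled.  Each failure label below unwinds only the stages that
 * completed successfully.
 */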
nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	/*
	 * Initialize and enable TXC registers
	 * (Globally enable TX controller,
	 * enable a port, configure dma channel bitmap,
	 * configure the max burst size).
	 */
	status = nxge_txc_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
		goto nxge_init_fail2;
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep);

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	(void) nxge_txc_uninit(nxgep);
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}
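/*
 * nxge_start_timer()/nxge_stop_timer() wrap timeout(9F)/untimeout(9F)
 * and suppress new timeouts while the instance is suspended.  A typical
 * (illustrative) use, with a hypothetical callback nxge_check_hw_state:
 *
 *	nxgep->nxge_timerid =
 *	    nxge_start_timer(nxgep, nxge_check_hw_state, 1000);
 *	...
 *	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
 *	nxgep->nxge_timerid = 0;
 */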
timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) ||
	    (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}

void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	regdata;
	int		i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
#if defined(__i386)
	reg = (size_t)buf[0];
#else
	reg = buf[0];
#endif

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}

nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char		msg_buffer[1048];
	char		prefix_buffer[32];
	int		instance;
	uint64_t	debug_level;
	int		cmn_level = CE_CONT;
	va_list		ap;

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);
	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t		*ap = (uchar_t *)addr;
	int		i;
	static char	etherbuf[1024];
	char		*cp = etherbuf;
	char		digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
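/*
 * Example (illustrative) use of nxge_dump_packet() from a debug path:
 *
 *	NXGE_DEBUG_MSG((nxgep, RX_CTL, "pkt: %s",
 *	    nxge_dump_packet((char *)mp->b_rptr, 32)));
 *
 * Note that the returned buffer is static, so the function is not safe
 * to call from concurrent contexts.
 */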
#ifdef	NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t	cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char		*dev_ptr;
	ddi_acc_handle_t pci_config_handle;
	uint32_t	regval;
	int		i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first 0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}
}
#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

	nxge_intrs_disable(nxgep);
	nxge_destroy_dev(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

	nxgep->suspended = DDI_RESUME;
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);
	(void) nxge_tx_mac_enable(nxgep);
	nxge_intrs_enable(nxgep);
	nxgep->suspended = 0;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_resume status = 0x%x", status));
	return (status);
}
static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
	    nxgep->mac.portnum));

	status = nxge_link_init(nxgep);

	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "port%d Bad register acc handle", nxgep->mac.portnum));
		status = NXGE_ERROR;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_setup_dev status "
		    "(xcvr init 0x%08x)", status));
		goto nxge_setup_dev_exit;
	}

nxge_setup_dev_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_dev port %d status = 0x%08x",
	    nxgep->mac.portnum, status));

	return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	(void) nxge_hw_stop(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}
static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
	int			ddi_status = DDI_SUCCESS;
	uint_t			count;
	ddi_dma_cookie_t	cookie;
	uint_t			iommu_pagesize;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
	if (nxgep->niu_type != N2_NIU) {
		iommu_pagesize = dvma_pagesize(nxgep->dip);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
		    " default_block_size %d iommu_pagesize %d",
		    nxgep->sys_page_sz,
		    ddi_ptob(nxgep->dip, (ulong_t)1),
		    nxgep->rx_default_block_size,
		    iommu_pagesize));

		if (iommu_pagesize != 0) {
			if (nxgep->sys_page_sz == iommu_pagesize) {
				if (iommu_pagesize > 0x4000)
					nxgep->sys_page_sz = 0x4000;
			} else {
				if (nxgep->sys_page_sz > iommu_pagesize)
					nxgep->sys_page_sz = iommu_pagesize;
			}
		}
	}
	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    nxgep->sys_page_sz,
	    ddi_ptob(nxgep->dip, (ulong_t)1),
	    nxgep->rx_default_block_size,
	    nxgep->sys_page_mask));

	switch (nxgep->sys_page_sz) {
	default:
		nxgep->sys_page_sz = 0x1000;
		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		nxgep->rx_default_block_size = 0x2000;
		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	case 0x4000:
		nxgep->rx_default_block_size = 0x4000;
		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
		break;
	case 0x8000:
		nxgep->rx_default_block_size = 0x8000;
		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
		break;
	}

#ifndef USE_RX_BIG_BUF
	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
	nxgep->rx_default_block_size = 0x2000;
	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
	/*
	 * Get the system DMA burst size by binding a spare DMA handle
	 * and reading the burst sizes it was granted.
	 */
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &nxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed "
		    " status 0x%x", ddi_status));
		goto nxge_get_soft_properties_exit;
	}

	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
	    (caddr_t)nxgep->dmasparehandle,
	    sizeof (nxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "Binding spare handle to find system"
		    " burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto nxge_get_soft_properties_fail1;
	}

	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

nxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_system_dma_pages status = 0x%08x", status));
	return (status);
}

static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));

	status = nxge_alloc_rx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		return (NXGE_ERROR);
	}

	status = nxge_alloc_tx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		nxge_free_rx_mem_pool(nxgep);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
	return (NXGE_OK);
}

static void
nxge_free_mem_pool(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));

	nxge_free_rx_mem_pool(nxgep);
	nxge_free_tx_mem_pool(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}
static nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
{
	int			i, j;
	uint32_t		ndmas, st_rdc;
	p_nxge_dma_pt_cfg_t	p_all_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	p_nxge_dma_pool_t	dma_poolp;
	p_nxge_dma_common_t	*dma_buf_p;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_nxge_dma_common_t	*dma_cntl_p;
	size_t			rx_buf_alloc_size;
	size_t			rx_cntl_alloc_size;
	uint32_t		*num_chunks;	/* per dma */
	nxge_status_t		status = NXGE_OK;

	uint32_t		nxge_port_rbr_size;
	uint32_t		nxge_port_rbr_spare_size;
	uint32_t		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	st_rdc = p_cfgp->start_rdc;
	ndmas = p_cfgp->max_rdcs;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    " nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));

	/*
	 * Allocate memory for each receive DMA channel.
	 */
	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_cntl_poolp = (p_nxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(
	    sizeof (uint32_t) * ndmas, KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with the
	 * default block size.
	 * The rbr block count is rounded up to a multiple of the post
	 * batch count (16).
	 */
	nxge_port_rbr_size = p_all_cfgp->rbr_size;
	nxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!nxge_port_rbr_size) {
		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
	}
	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
	}
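	/*
	 * Worked example of the rounding above: a configured rbr_size of
	 * 1000 is not a multiple of NXGE_RXDMA_POST_BATCH (16), so it is
	 * rounded up to 16 * (1000 / 16 + 1) = 1008 blocks.
	 */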
1989 */ 1990 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 1991 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 1992 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 1993 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 1994 1995 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 1996 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 1997 "nxge_port_rcr_size = %d " 1998 "rx_cntl_alloc_size = %d", 1999 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2000 nxge_port_rcr_size, 2001 rx_cntl_alloc_size)); 2002 2003 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2004 if (nxgep->niu_type == N2_NIU) { 2005 if (!ISP2(rx_buf_alloc_size)) { 2006 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2007 "==> nxge_alloc_rx_mem_pool: " 2008 " must be power of 2")); 2009 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2010 goto nxge_alloc_rx_mem_pool_exit; 2011 } 2012 2013 if (rx_buf_alloc_size > (1 << 22)) { 2014 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2015 "==> nxge_alloc_rx_mem_pool: " 2016 " limit size to 4M")); 2017 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2018 goto nxge_alloc_rx_mem_pool_exit; 2019 } 2020 2021 if (rx_cntl_alloc_size < 0x2000) { 2022 rx_cntl_alloc_size = 0x2000; 2023 } 2024 } 2025 #endif 2026 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2027 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2028 2029 /* 2030 * Allocate memory for receive buffers and descriptor rings. 2031 * Replace allocation functions with interface functions provided 2032 * by the partition manager when it is available. 2033 */ 2034 /* 2035 * Allocate memory for the receive buffer blocks. 2036 */ 2037 for (i = 0; i < ndmas; i++) { 2038 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2039 " nxge_alloc_rx_mem_pool to alloc mem: " 2040 " dma %d dma_buf_p %llx &dma_buf_p %llx", 2041 i, dma_buf_p[i], &dma_buf_p[i])); 2042 num_chunks[i] = 0; 2043 status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i], 2044 rx_buf_alloc_size, 2045 nxgep->rx_default_block_size, &num_chunks[i]); 2046 if (status != NXGE_OK) { 2047 break; 2048 } 2049 st_rdc++; 2050 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2051 " nxge_alloc_rx_mem_pool DONE alloc mem: " 2052 "dma %d dma_buf_p %llx &dma_buf_p %llx", i, 2053 dma_buf_p[i], &dma_buf_p[i])); 2054 } 2055 if (i < ndmas) { 2056 goto nxge_alloc_rx_mem_fail1; 2057 } 2058 /* 2059 * Allocate memory for descriptor rings and mailbox. 
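 * If a per-channel allocation below fails, control jumps to the
 * nxge_alloc_rx_mem_fail2/fail1 labels, which unwind the channels
 * already allocated in reverse order and then release the
 * bookkeeping arrays themselves.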
2060 */ 2061 st_rdc = p_cfgp->start_rdc; 2062 for (j = 0; j < ndmas; j++) { 2063 status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j], 2064 rx_cntl_alloc_size); 2065 if (status != NXGE_OK) { 2066 break; 2067 } 2068 st_rdc++; 2069 } 2070 if (j < ndmas) { 2071 goto nxge_alloc_rx_mem_fail2; 2072 } 2073 2074 dma_poolp->ndmas = ndmas; 2075 dma_poolp->num_chunks = num_chunks; 2076 dma_poolp->buf_allocated = B_TRUE; 2077 nxgep->rx_buf_pool_p = dma_poolp; 2078 dma_poolp->dma_buf_pool_p = dma_buf_p; 2079 2080 dma_cntl_poolp->ndmas = ndmas; 2081 dma_cntl_poolp->buf_allocated = B_TRUE; 2082 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2083 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2084 2085 goto nxge_alloc_rx_mem_pool_exit; 2086 2087 nxge_alloc_rx_mem_fail2: 2088 /* Free control buffers */ 2089 j--; 2090 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2091 "==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j)); 2092 for (; j >= 0; j--) { 2093 nxge_free_rx_cntl_dma(nxgep, 2094 (p_nxge_dma_common_t)dma_cntl_p[j]); 2095 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2096 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", 2097 j)); 2098 } 2099 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2100 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j)); 2101 2102 nxge_alloc_rx_mem_fail1: 2103 /* Free data buffers */ 2104 i--; 2105 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2106 "==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i)); 2107 for (; i >= 0; i--) { 2108 nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2109 num_chunks[i]); 2110 } 2111 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2112 "==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i)); 2113 2114 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2115 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2116 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2117 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2118 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2119 2120 nxge_alloc_rx_mem_pool_exit: 2121 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2122 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2123 2124 return (status); 2125 } 2126 2127 static void 2128 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2129 { 2130 uint32_t i, ndmas; 2131 p_nxge_dma_pool_t dma_poolp; 2132 p_nxge_dma_common_t *dma_buf_p; 2133 p_nxge_dma_pool_t dma_cntl_poolp; 2134 p_nxge_dma_common_t *dma_cntl_p; 2135 uint32_t *num_chunks; 2136 2137 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2138 2139 dma_poolp = nxgep->rx_buf_pool_p; 2140 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2141 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2142 "<== nxge_free_rx_mem_pool " 2143 "(null rx buf pool or buf not allocated")); 2144 return; 2145 } 2146 2147 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2148 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2149 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2150 "<== nxge_free_rx_mem_pool " 2151 "(null rx cntl buf pool or cntl buf not allocated")); 2152 return; 2153 } 2154 2155 dma_buf_p = dma_poolp->dma_buf_pool_p; 2156 num_chunks = dma_poolp->num_chunks; 2157 2158 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2159 ndmas = dma_cntl_poolp->ndmas; 2160 2161 for (i = 0; i < ndmas; i++) { 2162 nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2163 } 2164 2165 for (i = 0; i < ndmas; i++) { 2166 nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]); 2167 } 2168 2169 for (i = 0; i < ndmas; i++) { 2170 KMEM_FREE(dma_buf_p[i], 2171 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2172 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2173 } 2174 2175 
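	/* All per-channel buffers are freed; release the bookkeeping arrays. */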
KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2176 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2177 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2178 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2179 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2180 2181 nxgep->rx_buf_pool_p = NULL; 2182 nxgep->rx_cntl_pool_p = NULL; 2183 2184 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2185 } 2186 2187 2188 static nxge_status_t 2189 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2190 p_nxge_dma_common_t *dmap, 2191 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2192 { 2193 p_nxge_dma_common_t rx_dmap; 2194 nxge_status_t status = NXGE_OK; 2195 size_t total_alloc_size; 2196 size_t allocated = 0; 2197 int i, size_index, array_size; 2198 2199 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2200 2201 rx_dmap = (p_nxge_dma_common_t) 2202 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2203 KM_SLEEP); 2204 2205 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2206 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2207 dma_channel, alloc_size, block_size, dmap)); 2208 2209 total_alloc_size = alloc_size; 2210 2211 #if defined(RX_USE_RECLAIM_POST) 2212 total_alloc_size = alloc_size + alloc_size/4; 2213 #endif 2214 2215 i = 0; 2216 size_index = 0; 2217 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2218 while ((alloc_sizes[size_index] < alloc_size) && 2219 (size_index < array_size)) 2220 size_index++; 2221 if (size_index >= array_size) { 2222 size_index = array_size - 1; 2223 } 2224 2225 while ((allocated < total_alloc_size) && 2226 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2227 rx_dmap[i].dma_chunk_index = i; 2228 rx_dmap[i].block_size = block_size; 2229 rx_dmap[i].alength = alloc_sizes[size_index]; 2230 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2231 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2232 rx_dmap[i].dma_channel = dma_channel; 2233 rx_dmap[i].contig_alloc_type = B_FALSE; 2234 2235 /* 2236 * N2/NIU: data buffers must be contiguous as the driver 2237 * needs to call Hypervisor api to set up 2238 * logical pages. 
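 *
 * The loop below fills the request from a ladder of chunk sizes:
 * size_index starts at the first alloc_sizes[] entry that covers
 * alloc_size (or the largest entry if none does) and steps down one
 * entry whenever an allocation fails, until total_alloc_size bytes
 * have been gathered or NXGE_DMA_BLOCK chunks are in use.  A minimal
 * sketch of the same technique (the size table is illustrative):
 *
 *	size_t sizes[] = { 0x100000, 0x400000, 0x800000 };
 *	int idx = first entry >= wanted, else last;
 *	while (allocated < wanted && idx >= 0 && nchunks < MAX) {
 *		if (alloc_chunk(sizes[idx]) fails)
 *			idx--;			try a smaller chunk
 *		else
 *			allocated += sizes[idx], nchunks++;
 *	}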
2239 */ 2240 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2241 rx_dmap[i].contig_alloc_type = B_TRUE; 2242 } 2243 2244 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2245 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2246 "i %d nblocks %d alength %d", 2247 dma_channel, i, &rx_dmap[i], block_size, 2248 i, rx_dmap[i].nblocks, 2249 rx_dmap[i].alength)); 2250 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2251 &nxge_rx_dma_attr, 2252 rx_dmap[i].alength, 2253 &nxge_dev_buf_dma_acc_attr, 2254 DDI_DMA_READ | DDI_DMA_STREAMING, 2255 (p_nxge_dma_common_t)(&rx_dmap[i])); 2256 if (status != NXGE_OK) { 2257 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2258 " nxge_alloc_rx_buf_dma: Alloc Failed ")); 2259 size_index--; 2260 } else { 2261 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2262 " alloc_rx_buf_dma allocated rdc %d " 2263 "chunk %d size %x dvma %x bufp %llx ", 2264 dma_channel, i, rx_dmap[i].alength, 2265 rx_dmap[i].ioaddr_pp, &rx_dmap[i])); 2266 i++; 2267 allocated += alloc_sizes[size_index]; 2268 } 2269 } 2270 2271 2272 if (allocated < total_alloc_size) { 2273 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2274 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2275 "allocated 0x%x requested 0x%x", 2276 dma_channel, 2277 allocated, total_alloc_size)); 2278 status = NXGE_ERROR; 2279 goto nxge_alloc_rx_mem_fail1; 2280 } 2281 2282 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2283 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2284 "allocated 0x%x requested 0x%x", 2285 dma_channel, 2286 allocated, total_alloc_size)); 2287 2288 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2289 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2290 dma_channel, i)); 2291 *num_chunks = i; 2292 *dmap = rx_dmap; 2293 2294 goto nxge_alloc_rx_mem_exit; 2295 2296 nxge_alloc_rx_mem_fail1: 2297 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2298 2299 nxge_alloc_rx_mem_exit: 2300 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2301 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2302 2303 return (status); 2304 } 2305 2306 /*ARGSUSED*/ 2307 static void 2308 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2309 uint32_t num_chunks) 2310 { 2311 int i; 2312 2313 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2314 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2315 2316 for (i = 0; i < num_chunks; i++) { 2317 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2318 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2319 i, dmap)); 2320 nxge_dma_mem_free(dmap++); 2321 } 2322 2323 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma")); 2324 } 2325 2326 /*ARGSUSED*/ 2327 static nxge_status_t 2328 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2329 p_nxge_dma_common_t *dmap, size_t size) 2330 { 2331 p_nxge_dma_common_t rx_dmap; 2332 nxge_status_t status = NXGE_OK; 2333 2334 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2335 2336 rx_dmap = (p_nxge_dma_common_t) 2337 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2338 2339 rx_dmap->contig_alloc_type = B_FALSE; 2340 2341 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2342 &nxge_desc_dma_attr, 2343 size, 2344 &nxge_dev_desc_dma_acc_attr, 2345 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2346 rx_dmap); 2347 if (status != NXGE_OK) { 2348 goto nxge_alloc_rx_cntl_dma_fail1; 2349 } 2350 2351 *dmap = rx_dmap; 2352 goto nxge_alloc_rx_cntl_dma_exit; 2353 2354 nxge_alloc_rx_cntl_dma_fail1: 2355 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2356 2357 nxge_alloc_rx_cntl_dma_exit: 2358 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2359 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2360 2361 return 
(status); 2362 } 2363 2364 /*ARGSUSED*/ 2365 static void 2366 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2367 { 2368 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2369 2370 nxge_dma_mem_free(dmap); 2371 2372 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2373 } 2374 2375 static nxge_status_t 2376 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2377 { 2378 nxge_status_t status = NXGE_OK; 2379 int i, j; 2380 uint32_t ndmas, st_tdc; 2381 p_nxge_dma_pt_cfg_t p_all_cfgp; 2382 p_nxge_hw_pt_cfg_t p_cfgp; 2383 p_nxge_dma_pool_t dma_poolp; 2384 p_nxge_dma_common_t *dma_buf_p; 2385 p_nxge_dma_pool_t dma_cntl_poolp; 2386 p_nxge_dma_common_t *dma_cntl_p; 2387 size_t tx_buf_alloc_size; 2388 size_t tx_cntl_alloc_size; 2389 uint32_t *num_chunks; /* per dma */ 2390 uint32_t bcopy_thresh; 2391 2392 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2393 2394 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2395 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2396 st_tdc = p_cfgp->start_tdc; 2397 ndmas = p_cfgp->max_tdcs; 2398 2399 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: " 2400 "p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d", 2401 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs)); 2402 /* 2403 * Allocate memory for each transmit DMA channel. 2404 */ 2405 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2406 KM_SLEEP); 2407 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2408 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2409 2410 dma_cntl_poolp = (p_nxge_dma_pool_t) 2411 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2412 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2413 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2414 2415 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2416 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2417 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2418 "set to default %d", 2419 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2420 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2421 } 2422 2423 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2424 /* 2425 * N2/NIU has limitations on the descriptor sizes: contiguous 2426 * memory allocation for data buffers (contig_mem_alloc) is limited 2427 * to 4M, and control buffers must be little endian and allocated with 2428 * the ddi/dki memory allocation functions. The transmit ring is limited 2429 * to 8K (includes the mailbox). 2430 */ 2431 if (nxgep->niu_type == N2_NIU) { 2432 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2433 (!ISP2(nxge_tx_ring_size))) { 2434 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2435 } 2436 } 2437 #endif 2438 2439 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2440 2441 /* 2442 * Assume that each DMA channel will be configured with the default 2443 * transmit buffer size for copying transmit data. 2444 * (Packets with payloads over this limit will not be 2445 * copied.) 2446 */ 2447 if (nxgep->niu_type == N2_NIU) { 2448 bcopy_thresh = TX_BCOPY_SIZE; 2449 } else { 2450 bcopy_thresh = nxge_bcopy_thresh; 2451 } 2452 tx_buf_alloc_size = (bcopy_thresh * nxge_tx_ring_size); 2453 2454 /* 2455 * Addresses of the transmit descriptor ring and the 2456 * mailbox must all be cache-aligned (64 bytes). 
2457 */ 2458 tx_cntl_alloc_size = nxge_tx_ring_size; 2459 tx_cntl_alloc_size *= (sizeof (tx_desc_t)); 2460 tx_cntl_alloc_size += sizeof (txdma_mailbox_t); 2461 2462 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2463 if (nxgep->niu_type == N2_NIU) { 2464 if (!ISP2(tx_buf_alloc_size)) { 2465 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2466 "==> nxge_alloc_tx_mem_pool: " 2467 " must be power of 2")); 2468 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2469 goto nxge_alloc_tx_mem_pool_exit; 2470 } 2471 2472 if (tx_buf_alloc_size > (1 << 22)) { 2473 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2474 "==> nxge_alloc_tx_mem_pool: " 2475 " limit size to 4M")); 2476 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2477 goto nxge_alloc_tx_mem_pool_exit; 2478 } 2479 2480 if (tx_cntl_alloc_size < 0x2000) { 2481 tx_cntl_alloc_size = 0x2000; 2482 } 2483 } 2484 #endif 2485 2486 num_chunks = (uint32_t *)KMEM_ZALLOC( 2487 sizeof (uint32_t) * ndmas, KM_SLEEP); 2488 2489 /* 2490 * Allocate memory for transmit buffers and descriptor rings. 2491 * Replace allocation functions with interface functions provided 2492 * by the partition manager when it is available. 2493 * 2494 * Allocate memory for the transmit buffer pool. 2495 */ 2496 for (i = 0; i < ndmas; i++) { 2497 num_chunks[i] = 0; 2498 status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i], 2499 tx_buf_alloc_size, 2500 bcopy_thresh, &num_chunks[i]); 2501 if (status != NXGE_OK) { 2502 break; 2503 } 2504 st_tdc++; 2505 } 2506 if (i < ndmas) { 2507 goto nxge_alloc_tx_mem_pool_fail1; 2508 } 2509 2510 st_tdc = p_cfgp->start_tdc; 2511 /* 2512 * Allocate memory for descriptor rings and mailbox. 2513 */ 2514 for (j = 0; j < ndmas; j++) { 2515 status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j], 2516 tx_cntl_alloc_size); 2517 if (status != NXGE_OK) { 2518 break; 2519 } 2520 st_tdc++; 2521 } 2522 if (j < ndmas) { 2523 goto nxge_alloc_tx_mem_pool_fail2; 2524 } 2525 2526 dma_poolp->ndmas = ndmas; 2527 dma_poolp->num_chunks = num_chunks; 2528 dma_poolp->buf_allocated = B_TRUE; 2529 dma_poolp->dma_buf_pool_p = dma_buf_p; 2530 nxgep->tx_buf_pool_p = dma_poolp; 2531 2532 dma_cntl_poolp->ndmas = ndmas; 2533 dma_cntl_poolp->buf_allocated = B_TRUE; 2534 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2535 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2536 2537 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2538 "==> nxge_alloc_tx_mem_pool: start_tdc %d " 2539 "ndmas %d poolp->ndmas %d", 2540 st_tdc, ndmas, dma_poolp->ndmas)); 2541 2542 goto nxge_alloc_tx_mem_pool_exit; 2543 2544 nxge_alloc_tx_mem_pool_fail2: 2545 /* Free control buffers */ 2546 j--; 2547 for (; j >= 0; j--) { 2548 nxge_free_tx_cntl_dma(nxgep, 2549 (p_nxge_dma_common_t)dma_cntl_p[j]); 2550 } 2551 2552 nxge_alloc_tx_mem_pool_fail1: 2553 /* Free data buffers */ 2554 i--; 2555 for (; i >= 0; i--) { 2556 nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2557 num_chunks[i]); 2558 } 2559 2560 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2561 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2562 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2563 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2564 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2565 2566 nxge_alloc_tx_mem_pool_exit: 2567 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2568 "<== nxge_alloc_tx_mem_pool:status 0x%08x", status)); 2569 2570 return (status); 2571 } 2572 2573 static nxge_status_t 2574 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2575 p_nxge_dma_common_t *dmap, size_t alloc_size, 2576 size_t block_size, uint32_t *num_chunks) 
2577 { 2578 p_nxge_dma_common_t tx_dmap; 2579 nxge_status_t status = NXGE_OK; 2580 size_t total_alloc_size; 2581 size_t allocated = 0; 2582 int i, size_index, array_size; 2583 2584 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 2585 2586 tx_dmap = (p_nxge_dma_common_t) 2587 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2588 KM_SLEEP); 2589 2590 total_alloc_size = alloc_size; 2591 i = 0; 2592 size_index = 0; 2593 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2594 while ((alloc_sizes[size_index] < alloc_size) && 2595 (size_index < array_size)) 2596 size_index++; 2597 if (size_index >= array_size) { 2598 size_index = array_size - 1; 2599 } 2600 2601 while ((allocated < total_alloc_size) && 2602 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2603 2604 tx_dmap[i].dma_chunk_index = i; 2605 tx_dmap[i].block_size = block_size; 2606 tx_dmap[i].alength = alloc_sizes[size_index]; 2607 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2608 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2609 tx_dmap[i].dma_channel = dma_channel; 2610 tx_dmap[i].contig_alloc_type = B_FALSE; 2611 2612 /* 2613 * N2/NIU: data buffers must be contiguous as the driver 2614 * needs to call Hypervisor api to set up 2615 * logical pages. 2616 */ 2617 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2618 tx_dmap[i].contig_alloc_type = B_TRUE; 2619 } 2620 2621 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2622 &nxge_tx_dma_attr, 2623 tx_dmap[i].alength, 2624 &nxge_dev_buf_dma_acc_attr, 2625 DDI_DMA_WRITE | DDI_DMA_STREAMING, 2626 (p_nxge_dma_common_t)(&tx_dmap[i])); 2627 if (status != NXGE_OK) { 2628 size_index--; 2629 } else { 2630 i++; 2631 allocated += alloc_sizes[size_index]; 2632 } 2633 } 2634 2635 if (allocated < total_alloc_size) { 2636 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2637 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 2638 "allocated 0x%x requested 0x%x", 2639 dma_channel, 2640 allocated, total_alloc_size)); 2641 status = NXGE_ERROR; 2642 goto nxge_alloc_tx_mem_fail1; 2643 } 2644 2645 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2646 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 2647 "allocated 0x%x requested 0x%x", 2648 dma_channel, 2649 allocated, total_alloc_size)); 2650 2651 *num_chunks = i; 2652 *dmap = tx_dmap; 2653 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2654 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2655 *dmap, i)); 2656 goto nxge_alloc_tx_mem_exit; 2657 2658 nxge_alloc_tx_mem_fail1: 2659 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2660 2661 nxge_alloc_tx_mem_exit: 2662 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2663 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 2664 2665 return (status); 2666 } 2667 2668 /*ARGSUSED*/ 2669 static void 2670 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2671 uint32_t num_chunks) 2672 { 2673 int i; 2674 2675 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 2676 2677 for (i = 0; i < num_chunks; i++) { 2678 nxge_dma_mem_free(dmap++); 2679 } 2680 2681 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 2682 } 2683 2684 /*ARGSUSED*/ 2685 static nxge_status_t 2686 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2687 p_nxge_dma_common_t *dmap, size_t size) 2688 { 2689 p_nxge_dma_common_t tx_dmap; 2690 nxge_status_t status = NXGE_OK; 2691 2692 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 2693 tx_dmap = (p_nxge_dma_common_t) 2694 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2695 2696 tx_dmap->contig_alloc_type = B_FALSE; 2697 
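	/*
	 * The descriptor/mailbox area is mapped bi-directional and
	 * cache-coherent (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), unlike the
	 * streaming data buffers allocated above.
	 */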
2698 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2699 &nxge_desc_dma_attr, 2700 size, 2701 &nxge_dev_desc_dma_acc_attr, 2702 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2703 tx_dmap); 2704 if (status != NXGE_OK) { 2705 goto nxge_alloc_tx_cntl_dma_fail1; 2706 } 2707 2708 *dmap = tx_dmap; 2709 goto nxge_alloc_tx_cntl_dma_exit; 2710 2711 nxge_alloc_tx_cntl_dma_fail1: 2712 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 2713 2714 nxge_alloc_tx_cntl_dma_exit: 2715 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2716 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 2717 2718 return (status); 2719 } 2720 2721 /*ARGSUSED*/ 2722 static void 2723 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2724 { 2725 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 2726 2727 nxge_dma_mem_free(dmap); 2728 2729 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 2730 } 2731 2732 static void 2733 nxge_free_tx_mem_pool(p_nxge_t nxgep) 2734 { 2735 uint32_t i, ndmas; 2736 p_nxge_dma_pool_t dma_poolp; 2737 p_nxge_dma_common_t *dma_buf_p; 2738 p_nxge_dma_pool_t dma_cntl_poolp; 2739 p_nxge_dma_common_t *dma_cntl_p; 2740 uint32_t *num_chunks; 2741 2742 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool")); 2743 2744 dma_poolp = nxgep->tx_buf_pool_p; 2745 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2746 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2747 "<== nxge_free_tx_mem_pool " 2748 "(null tx buf pool or buf not allocated")); 2749 return; 2750 } 2751 2752 dma_cntl_poolp = nxgep->tx_cntl_pool_p; 2753 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2754 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2755 "<== nxge_free_tx_mem_pool " 2756 "(null tx cntl buf pool or cntl buf not allocated")); 2757 return; 2758 } 2759 2760 dma_buf_p = dma_poolp->dma_buf_pool_p; 2761 num_chunks = dma_poolp->num_chunks; 2762 2763 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2764 ndmas = dma_cntl_poolp->ndmas; 2765 2766 for (i = 0; i < ndmas; i++) { 2767 nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2768 } 2769 2770 for (i = 0; i < ndmas; i++) { 2771 nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]); 2772 } 2773 2774 for (i = 0; i < ndmas; i++) { 2775 KMEM_FREE(dma_buf_p[i], 2776 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2777 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2778 } 2779 2780 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2781 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2782 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2783 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2784 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2785 2786 nxgep->tx_buf_pool_p = NULL; 2787 nxgep->tx_cntl_pool_p = NULL; 2788 2789 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool")); 2790 } 2791 2792 /*ARGSUSED*/ 2793 static nxge_status_t 2794 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 2795 struct ddi_dma_attr *dma_attrp, 2796 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 2797 p_nxge_dma_common_t dma_p) 2798 { 2799 caddr_t kaddrp; 2800 int ddi_status = DDI_SUCCESS; 2801 boolean_t contig_alloc_type; 2802 2803 contig_alloc_type = dma_p->contig_alloc_type; 2804 2805 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 2806 /* 2807 * contig_alloc_type for contiguous memory is only allowed 2808 * for N2/NIU. 
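 *
 * Two allocation paths follow: the B_FALSE arm uses the standard
 * ddi_dma_mem_alloc()/ddi_dma_addr_bind_handle() pair, while the
 * B_TRUE (sun4v) arm obtains physically contiguous memory with
 * contig_mem_alloc() before binding it.  In either case the buffer
 * must bind to exactly one DMA cookie, or the allocation is undone.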
2809 */ 2810 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2811 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 2812 dma_p->contig_alloc_type)); 2813 return (NXGE_ERROR | NXGE_DDI_FAILED); 2814 } 2815 2816 dma_p->dma_handle = NULL; 2817 dma_p->acc_handle = NULL; 2818 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 2819 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 2820 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 2821 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 2822 if (ddi_status != DDI_SUCCESS) { 2823 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2824 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 2825 return (NXGE_ERROR | NXGE_DDI_FAILED); 2826 } 2827 2828 switch (contig_alloc_type) { 2829 case B_FALSE: 2830 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, 2831 acc_attr_p, 2832 xfer_flags, 2833 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 2834 &dma_p->acc_handle); 2835 if (ddi_status != DDI_SUCCESS) { 2836 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2837 "nxge_dma_mem_alloc:ddi_dma_mem_alloc failed")); 2838 ddi_dma_free_handle(&dma_p->dma_handle); 2839 dma_p->dma_handle = NULL; 2840 return (NXGE_ERROR | NXGE_DDI_FAILED); 2841 } 2842 if (dma_p->alength < length) { 2843 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2844 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 2845 "< length.")); 2846 ddi_dma_mem_free(&dma_p->acc_handle); 2847 ddi_dma_free_handle(&dma_p->dma_handle); 2848 dma_p->acc_handle = NULL; 2849 dma_p->dma_handle = NULL; 2850 return (NXGE_ERROR); 2851 } 2852 2853 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2854 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2855 &dma_p->dma_cookie, &dma_p->ncookies); 2856 if (ddi_status != DDI_DMA_MAPPED) { 2857 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2858 "nxge_dma_mem_alloc:ddi_dma_addr_bind failed " 2859 "(status 0x%x ncookies %d.)", ddi_status, 2860 dma_p->ncookies)); 2861 if (dma_p->acc_handle) { 2862 ddi_dma_mem_free(&dma_p->acc_handle); 2863 dma_p->acc_handle = NULL; 2864 } 2865 ddi_dma_free_handle(&dma_p->dma_handle); 2866 dma_p->dma_handle = NULL; 2867 return (NXGE_ERROR | NXGE_DDI_FAILED); 2868 } 2869 2870 if (dma_p->ncookies != 1) { 2871 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2872 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 2873 "> 1 cookie " 2874 "(status 0x%x ncookies %d.)", ddi_status, 2875 dma_p->ncookies)); 2876 if (dma_p->acc_handle) { 2877 ddi_dma_mem_free(&dma_p->acc_handle); 2878 dma_p->acc_handle = NULL; 2879 } 2880 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2881 ddi_dma_free_handle(&dma_p->dma_handle); 2882 dma_p->dma_handle = NULL; 2883 return (NXGE_ERROR); 2884 } 2885 break; 2886 2887 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2888 case B_TRUE: 2889 kaddrp = (caddr_t)contig_mem_alloc(length); 2890 if (kaddrp == NULL) { 2891 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2892 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 2893 ddi_dma_free_handle(&dma_p->dma_handle); 2894 return (NXGE_ERROR | NXGE_DDI_FAILED); 2895 } 2896 2897 dma_p->alength = length; 2898 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2899 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2900 &dma_p->dma_cookie, &dma_p->ncookies); 2901 if (ddi_status != DDI_DMA_MAPPED) { 2902 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2903 "nxge_dma_mem_alloc:ddi_dma_addr_bind failed " 2904 "(status 0x%x ncookies %d.)", ddi_status, 2905 dma_p->ncookies)); 2906 2907 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2908 "==> nxge_dma_mem_alloc: (not mapped)" 2909 "length %lu (0x%x) " 2910 "free contig kaddrp $%p " 2911 "va_to_pa $%p", 2912 length, length, 2913 
kaddrp, 2914 va_to_pa(kaddrp))); 2915 2916 2917 contig_mem_free((void *)kaddrp, length); 2918 ddi_dma_free_handle(&dma_p->dma_handle); 2919 2920 dma_p->dma_handle = NULL; 2921 dma_p->acc_handle = NULL; 2922 dma_p->alength = 0; 2923 dma_p->kaddrp = NULL; 2924 2925 return (NXGE_ERROR | NXGE_DDI_FAILED); 2926 } 2927 2928 if (dma_p->ncookies != 1 || 2929 (dma_p->dma_cookie.dmac_laddress == 0)) { 2930 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2931 "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 " 2932 "cookie or " 2933 "dmac_laddress is NULL $%p size %d " 2934 " (status 0x%x ncookies %d.)", 2935 ddi_status, 2936 dma_p->dma_cookie.dmac_laddress, 2937 dma_p->dma_cookie.dmac_size, 2938 dma_p->ncookies)); 2939 2940 contig_mem_free((void *)kaddrp, length); 2941 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2942 ddi_dma_free_handle(&dma_p->dma_handle); 2943 2944 dma_p->alength = 0; 2945 dma_p->dma_handle = NULL; 2946 dma_p->acc_handle = NULL; 2947 dma_p->kaddrp = NULL; 2948 2949 return (NXGE_ERROR | NXGE_DDI_FAILED); 2950 } 2951 break; 2952 2953 #else 2954 case B_TRUE: 2955 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2956 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 2957 return (NXGE_ERROR | NXGE_DDI_FAILED); 2958 #endif 2959 } 2960 2961 dma_p->kaddrp = kaddrp; 2962 dma_p->last_kaddrp = (unsigned char *)kaddrp + 2963 dma_p->alength - RXBUF_64B_ALIGNED; 2964 #if defined(__i386) 2965 dma_p->ioaddr_pp = 2966 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 2967 #else 2968 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2969 #endif 2970 dma_p->last_ioaddr_pp = 2971 #if defined(__i386) 2972 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress + 2973 #else 2974 (unsigned char *)dma_p->dma_cookie.dmac_laddress + 2975 #endif 2976 dma_p->alength - RXBUF_64B_ALIGNED; 2977 2978 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 2979 2980 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2981 dma_p->orig_ioaddr_pp = 2982 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2983 dma_p->orig_alength = length; 2984 dma_p->orig_kaddrp = kaddrp; 2985 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 2986 #endif 2987 2988 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 2989 "dma buffer allocated: dma_p $%p " 2990 "return dmac_laddress from cookie $%p cookie dmac_size %d " 2991 "dma_p->ioaddr_p $%p " 2992 "dma_p->orig_ioaddr_p $%p " 2993 "orig_vatopa $%p " 2994 "alength %d (0x%x) " 2995 "kaddrp $%p " 2996 "length %d (0x%x)", 2997 dma_p, 2998 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 2999 dma_p->ioaddr_pp, 3000 dma_p->orig_ioaddr_pp, 3001 dma_p->orig_vatopa, 3002 dma_p->alength, dma_p->alength, 3003 kaddrp, 3004 length, length)); 3005 3006 return (NXGE_OK); 3007 } 3008 3009 static void 3010 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 3011 { 3012 if (dma_p->dma_handle != NULL) { 3013 if (dma_p->ncookies) { 3014 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3015 dma_p->ncookies = 0; 3016 } 3017 ddi_dma_free_handle(&dma_p->dma_handle); 3018 dma_p->dma_handle = NULL; 3019 } 3020 3021 if (dma_p->acc_handle != NULL) { 3022 ddi_dma_mem_free(&dma_p->acc_handle); 3023 dma_p->acc_handle = NULL; 3024 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3025 } 3026 3027 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3028 if (dma_p->contig_alloc_type && 3029 dma_p->orig_kaddrp && dma_p->orig_alength) { 3030 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3031 "kaddrp $%p (orig_kaddrp $%p)" 3032 "mem type %d " 3033 "orig_alength %d " 3034 "alength 0x%x (%d)", 3035 
dma_p->kaddrp, 3036 dma_p->orig_kaddrp, 3037 dma_p->contig_alloc_type, 3038 dma_p->orig_alength, 3039 dma_p->alength, dma_p->alength)); 3040 3041 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 3042 dma_p->orig_alength = 0; 3043 dma_p->orig_kaddrp = NULL; 3044 dma_p->contig_alloc_type = B_FALSE; 3045 } 3046 #endif 3047 dma_p->kaddrp = NULL; 3048 dma_p->alength = 0; 3049 } 3050 3051 /* 3052 * nxge_m_start() -- start transmitting and receiving. 3053 * 3054 * This function is called by the MAC layer when the first 3055 * stream is opened, to prepare the hardware for sending 3056 * and receiving packets. 3057 */ 3058 static int 3059 nxge_m_start(void *arg) 3060 { 3061 p_nxge_t nxgep = (p_nxge_t)arg; 3062 3063 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 3064 3065 MUTEX_ENTER(nxgep->genlock); 3066 if (nxge_init(nxgep) != NXGE_OK) { 3067 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3068 "<== nxge_m_start: initialization failed")); 3069 MUTEX_EXIT(nxgep->genlock); 3070 return (EIO); 3071 } 3072 3073 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) 3074 goto nxge_m_start_exit; 3075 /* 3076 * Start the timer that checks for system errors and tx hangs. 3077 */ 3078 nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state, 3079 NXGE_CHECK_TIMER); 3080 3081 nxgep->link_notify = B_TRUE; 3082 3083 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 3084 3085 nxge_m_start_exit: 3086 MUTEX_EXIT(nxgep->genlock); 3087 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3088 return (0); 3089 } 3090 3091 /* 3092 * nxge_m_stop(): stop transmitting and receiving. 3093 */ 3094 static void 3095 nxge_m_stop(void *arg) 3096 { 3097 p_nxge_t nxgep = (p_nxge_t)arg; 3098 3099 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3100 3101 if (nxgep->nxge_timerid) { 3102 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3103 nxgep->nxge_timerid = 0; 3104 } 3105 3106 MUTEX_ENTER(nxgep->genlock); 3107 nxge_uninit(nxgep); 3108 3109 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3110 3111 MUTEX_EXIT(nxgep->genlock); 3112 3113 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3114 } 3115 3116 static int 3117 nxge_m_unicst(void *arg, const uint8_t *macaddr) 3118 { 3119 p_nxge_t nxgep = (p_nxge_t)arg; 3120 struct ether_addr addrp; 3121 3122 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst")); 3123 3124 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 3125 if (nxge_set_mac_addr(nxgep, &addrp)) { 3126 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3127 "<== nxge_m_unicst: set unicast failed")); 3128 return (EINVAL); 3129 } 3130 3131 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst")); 3132 3133 return (0); 3134 } 3135 3136 static int 3137 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3138 { 3139 p_nxge_t nxgep = (p_nxge_t)arg; 3140 struct ether_addr addrp; 3141 3142 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3143 "==> nxge_m_multicst: add %d", add)); 3144 3145 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3146 if (add) { 3147 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3148 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3149 "<== nxge_m_multicst: add multicast failed")); 3150 return (EINVAL); 3151 } 3152 } else { 3153 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3154 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3155 "<== nxge_m_multicst: del multicast failed")); 3156 return (EINVAL); 3157 } 3158 } 3159 3160 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3161 3162 return (0); 3163 } 3164 3165 static int 3166 nxge_m_promisc(void *arg, boolean_t on) 3167 { 3168 p_nxge_t nxgep = (p_nxge_t)arg; 3169 3170 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3171 
"==> nxge_m_promisc: on %d", on)); 3172 3173 if (nxge_set_promisc(nxgep, on)) { 3174 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3175 "<== nxge_m_promisc: set promisc failed")); 3176 return (EINVAL); 3177 } 3178 3179 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3180 "<== nxge_m_promisc: on %d", on)); 3181 3182 return (0); 3183 } 3184 3185 static void 3186 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3187 { 3188 p_nxge_t nxgep = (p_nxge_t)arg; 3189 struct iocblk *iocp; 3190 boolean_t need_privilege; 3191 int err; 3192 int cmd; 3193 3194 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3195 3196 iocp = (struct iocblk *)mp->b_rptr; 3197 iocp->ioc_error = 0; 3198 need_privilege = B_TRUE; 3199 cmd = iocp->ioc_cmd; 3200 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3201 switch (cmd) { 3202 default: 3203 miocnak(wq, mp, 0, EINVAL); 3204 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3205 return; 3206 3207 case LB_GET_INFO_SIZE: 3208 case LB_GET_INFO: 3209 case LB_GET_MODE: 3210 need_privilege = B_FALSE; 3211 break; 3212 case LB_SET_MODE: 3213 break; 3214 3215 case ND_GET: 3216 need_privilege = B_FALSE; 3217 break; 3218 case ND_SET: 3219 break; 3220 3221 case NXGE_GET_MII: 3222 case NXGE_PUT_MII: 3223 case NXGE_GET64: 3224 case NXGE_PUT64: 3225 case NXGE_GET_TX_RING_SZ: 3226 case NXGE_GET_TX_DESC: 3227 case NXGE_TX_SIDE_RESET: 3228 case NXGE_RX_SIDE_RESET: 3229 case NXGE_GLOBAL_RESET: 3230 case NXGE_RESET_MAC: 3231 case NXGE_TX_REGS_DUMP: 3232 case NXGE_RX_REGS_DUMP: 3233 case NXGE_INT_REGS_DUMP: 3234 case NXGE_VIR_INT_REGS_DUMP: 3235 case NXGE_PUT_TCAM: 3236 case NXGE_GET_TCAM: 3237 case NXGE_RTRACE: 3238 case NXGE_RDUMP: 3239 3240 need_privilege = B_FALSE; 3241 break; 3242 case NXGE_INJECT_ERR: 3243 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3244 nxge_err_inject(nxgep, wq, mp); 3245 break; 3246 } 3247 3248 if (need_privilege) { 3249 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3250 if (err != 0) { 3251 miocnak(wq, mp, 0, err); 3252 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3253 "<== nxge_m_ioctl: no priv")); 3254 return; 3255 } 3256 } 3257 3258 switch (cmd) { 3259 case ND_GET: 3260 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command")); 3261 case ND_SET: 3262 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command")); 3263 nxge_param_ioctl(nxgep, wq, mp, iocp); 3264 break; 3265 3266 case LB_GET_MODE: 3267 case LB_SET_MODE: 3268 case LB_GET_INFO_SIZE: 3269 case LB_GET_INFO: 3270 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3271 break; 3272 3273 case NXGE_GET_MII: 3274 case NXGE_PUT_MII: 3275 case NXGE_PUT_TCAM: 3276 case NXGE_GET_TCAM: 3277 case NXGE_GET64: 3278 case NXGE_PUT64: 3279 case NXGE_GET_TX_RING_SZ: 3280 case NXGE_GET_TX_DESC: 3281 case NXGE_TX_SIDE_RESET: 3282 case NXGE_RX_SIDE_RESET: 3283 case NXGE_GLOBAL_RESET: 3284 case NXGE_RESET_MAC: 3285 case NXGE_TX_REGS_DUMP: 3286 case NXGE_RX_REGS_DUMP: 3287 case NXGE_INT_REGS_DUMP: 3288 case NXGE_VIR_INT_REGS_DUMP: 3289 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3290 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3291 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3292 break; 3293 } 3294 3295 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3296 } 3297 3298 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3299 3300 static void 3301 nxge_m_resources(void *arg) 3302 { 3303 p_nxge_t nxgep = arg; 3304 mac_rx_fifo_t mrf; 3305 p_rx_rcr_rings_t rcr_rings; 3306 p_rx_rcr_ring_t *rcr_p; 3307 uint32_t i, ndmas; 3308 nxge_status_t status; 3309 3310 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 3311 3312 
MUTEX_ENTER(nxgep->genlock); 3313 3314 /* 3315 * CR 6492541 Check to see if the drv_state has been initialized, 3316 * if not * call nxge_init(). 3317 */ 3318 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3319 status = nxge_init(nxgep); 3320 if (status != NXGE_OK) 3321 goto nxge_m_resources_exit; 3322 } 3323 3324 mrf.mrf_type = MAC_RX_FIFO; 3325 mrf.mrf_blank = nxge_rx_hw_blank; 3326 mrf.mrf_arg = (void *)nxgep; 3327 3328 mrf.mrf_normal_blank_time = 128; 3329 mrf.mrf_normal_pkt_count = 8; 3330 rcr_rings = nxgep->rx_rcr_rings; 3331 rcr_p = rcr_rings->rcr_rings; 3332 ndmas = rcr_rings->ndmas; 3333 3334 /* 3335 * Export our receive resources to the MAC layer. 3336 */ 3337 for (i = 0; i < ndmas; i++) { 3338 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle = 3339 mac_resource_add(nxgep->mach, 3340 (mac_resource_t *)&mrf); 3341 3342 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3343 "==> nxge_m_resources: vdma %d dma %d " 3344 "rcrptr 0x%016llx mac_handle 0x%016llx", 3345 i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc, 3346 rcr_p[i], 3347 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle)); 3348 } 3349 3350 nxge_m_resources_exit: 3351 MUTEX_EXIT(nxgep->genlock); 3352 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources")); 3353 } 3354 3355 static void 3356 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory) 3357 { 3358 p_nxge_mmac_stats_t mmac_stats; 3359 int i; 3360 nxge_mmac_t *mmac_info; 3361 3362 mmac_info = &nxgep->nxge_mmac_info; 3363 3364 mmac_stats = &nxgep->statsp->mmac_stats; 3365 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 3366 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 3367 3368 for (i = 0; i < ETHERADDRL; i++) { 3369 if (factory) { 3370 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3371 = mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i]; 3372 } else { 3373 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3374 = mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i]; 3375 } 3376 } 3377 } 3378 3379 /* 3380 * nxge_altmac_set() -- Set an alternate MAC address 3381 */ 3382 static int 3383 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot) 3384 { 3385 uint8_t addrn; 3386 uint8_t portn; 3387 npi_mac_addr_t altmac; 3388 hostinfo_t mac_rdc; 3389 p_nxge_class_pt_cfg_t clscfgp; 3390 3391 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 3392 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 3393 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 3394 3395 portn = nxgep->mac.portnum; 3396 addrn = (uint8_t)slot - 1; 3397 3398 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn, 3399 addrn, &altmac) != NPI_SUCCESS) 3400 return (EIO); 3401 3402 /* 3403 * Set the rdc table number for the host info entry 3404 * for this mac address slot. 3405 */ 3406 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 3407 mac_rdc.value = 0; 3408 mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl; 3409 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 3410 3411 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 3412 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 3413 return (EIO); 3414 } 3415 3416 /* 3417 * Enable comparison with the alternate MAC address. 3418 * While the first alternate addr is enabled by bit 1 of register 3419 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 3420 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 3421 * accordingly before calling npi_mac_altaddr_entry. 
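 *
 * For example, for slot 1 (the first alternate address):
 *
 *	MAC type	slot	addrn passed to npi_mac_altaddr_enable
 *	XMAC		1	0
 *	BMAC		1	1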
3422 */ 3423 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 3424 addrn = (uint8_t)slot - 1; 3425 else 3426 addrn = (uint8_t)slot; 3427 3428 if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn) 3429 != NPI_SUCCESS) 3430 return (EIO); 3431 3432 return (0); 3433 } 3434 3435 /* 3436 * nxge_m_mmac_add() - find an unused address slot, set the address 3437 * value to the one specified, and enable the port to start filtering on 3438 * the new MAC address. Returns 0 on success. 3439 */ 3440 static int 3441 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 3442 { 3443 p_nxge_t nxgep = arg; 3444 mac_addr_slot_t slot; 3445 nxge_mmac_t *mmac_info; 3446 int err; 3447 nxge_status_t status; 3448 3449 mutex_enter(nxgep->genlock); 3450 3451 /* 3452 * Make sure that nxge is initialized, if _start() has 3453 * not been called. 3454 */ 3455 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3456 status = nxge_init(nxgep); 3457 if (status != NXGE_OK) { 3458 mutex_exit(nxgep->genlock); 3459 return (ENXIO); 3460 } 3461 } 3462 3463 mmac_info = &nxgep->nxge_mmac_info; 3464 if (mmac_info->naddrfree == 0) { 3465 mutex_exit(nxgep->genlock); 3466 return (ENOSPC); 3467 } 3468 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3469 maddr->mma_addrlen)) { 3470 mutex_exit(nxgep->genlock); 3471 return (EINVAL); 3472 } 3473 /* 3474 * Search for the first available slot. Because naddrfree 3475 * is not zero, we are guaranteed to find one. 3476 * Slot 0 is for the unique (primary) MAC. The first alternate 3477 * MAC slot is slot 1. 3478 * Each of the first two ports of Neptune has 16 alternate 3479 * MAC slots but only the first 7 (or 15) slots have assigned factory 3480 * MAC addresses. We first search among the slots without bundled 3481 * factory MACs. If we fail to find one in that range, then we 3482 * search the slots with bundled factory MACs. A factory MAC 3483 * will be wasted while the slot is used with a user MAC address. 3484 * But the slot could be used by the factory MAC again after calling 3485 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 3486 */ 3487 if (mmac_info->num_factory_mmac < mmac_info->num_mmac) { 3488 for (slot = mmac_info->num_factory_mmac + 1; 3489 slot <= mmac_info->num_mmac; slot++) { 3490 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3491 break; 3492 } 3493 if (slot > mmac_info->num_mmac) { 3494 for (slot = 1; slot <= mmac_info->num_factory_mmac; 3495 slot++) { 3496 if (!(mmac_info->mac_pool[slot].flags 3497 & MMAC_SLOT_USED)) 3498 break; 3499 } 3500 } 3501 } else { 3502 for (slot = 1; slot <= mmac_info->num_mmac; slot++) { 3503 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3504 break; 3505 } 3506 } 3507 ASSERT(slot <= mmac_info->num_mmac); 3508 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) { 3509 mutex_exit(nxgep->genlock); 3510 return (err); 3511 } 3512 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 3513 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 3514 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 3515 mmac_info->naddrfree--; 3516 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3517 3518 maddr->mma_slot = slot; 3519 3520 mutex_exit(nxgep->genlock); 3521 return (0); 3522 } 3523 3524 /* 3525 * This function reserves an unused slot and programs the slot and the HW 3526 * with a factory mac address. 
3527 */ 3528 static int 3529 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 3530 { 3531 p_nxge_t nxgep = arg; 3532 mac_addr_slot_t slot; 3533 nxge_mmac_t *mmac_info; 3534 int err; 3535 nxge_status_t status; 3536 3537 mutex_enter(nxgep->genlock); 3538 3539 /* 3540 * Make sure that nxge is initialized, if _start() has 3541 * not been called. 3542 */ 3543 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3544 status = nxge_init(nxgep); 3545 if (status != NXGE_OK) { 3546 mutex_exit(nxgep->genlock); 3547 return (ENXIO); 3548 } 3549 } 3550 3551 mmac_info = &nxgep->nxge_mmac_info; 3552 if (mmac_info->naddrfree == 0) { 3553 mutex_exit(nxgep->genlock); 3554 return (ENOSPC); 3555 } 3556 3557 slot = maddr->mma_slot; 3558 if (slot == -1) { /* -1: Take the first available slot */ 3559 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 3560 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3561 break; 3562 } 3563 if (slot > mmac_info->num_factory_mmac) { 3564 mutex_exit(nxgep->genlock); 3565 return (ENOSPC); 3566 } 3567 } 3568 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 3569 /* 3570 * Do not support factory MAC at a slot greater than 3571 * num_factory_mmac even when there are available factory 3572 * MAC addresses because the alternate MACs are bundled with 3573 * slot[1] through slot[num_factory_mmac] 3574 */ 3575 mutex_exit(nxgep->genlock); 3576 return (EINVAL); 3577 } 3578 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3579 mutex_exit(nxgep->genlock); 3580 return (EBUSY); 3581 } 3582 /* Verify the address to be reserved */ 3583 if (!mac_unicst_verify(nxgep->mach, 3584 mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 3585 mutex_exit(nxgep->genlock); 3586 return (EINVAL); 3587 } 3588 if (err = nxge_altmac_set(nxgep, 3589 mmac_info->factory_mac_pool[slot], slot)) { 3590 mutex_exit(nxgep->genlock); 3591 return (err); 3592 } 3593 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 3594 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3595 mmac_info->naddrfree--; 3596 3597 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 3598 mutex_exit(nxgep->genlock); 3599 3600 /* Pass info back to the caller */ 3601 maddr->mma_slot = slot; 3602 maddr->mma_addrlen = ETHERADDRL; 3603 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3604 3605 return (0); 3606 } 3607 3608 /* 3609 * Remove the specified mac address and update the HW not to filter 3610 * the mac address anymore. 3611 */ 3612 static int 3613 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 3614 { 3615 p_nxge_t nxgep = arg; 3616 nxge_mmac_t *mmac_info; 3617 uint8_t addrn; 3618 uint8_t portn; 3619 int err = 0; 3620 nxge_status_t status; 3621 3622 mutex_enter(nxgep->genlock); 3623 3624 /* 3625 * Make sure that nxge is initialized, if _start() has 3626 * not been called. 
3627 */ 3628 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3629 status = nxge_init(nxgep); 3630 if (status != NXGE_OK) { 3631 mutex_exit(nxgep->genlock); 3632 return (ENXIO); 3633 } 3634 } 3635 3636 mmac_info = &nxgep->nxge_mmac_info; 3637 if (slot < 1 || slot > mmac_info->num_mmac) { 3638 mutex_exit(nxgep->genlock); 3639 return (EINVAL); 3640 } 3641 3642 portn = nxgep->mac.portnum; 3643 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 3644 addrn = (uint8_t)slot - 1; 3645 else 3646 addrn = (uint8_t)slot; 3647 3648 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3649 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 3650 == NPI_SUCCESS) { 3651 mmac_info->naddrfree++; 3652 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 3653 /* 3654 * Regardless of whether the MAC we just stopped filtering 3655 * is a user addr or a factory addr, we must set 3656 * the MMAC_VENDOR_ADDR flag if this slot has an 3657 * associated factory MAC to indicate that a factory 3658 * MAC is available. 3659 */ 3660 if (slot <= mmac_info->num_factory_mmac) { 3661 mmac_info->mac_pool[slot].flags 3662 |= MMAC_VENDOR_ADDR; 3663 } 3664 /* 3665 * Clear mac_pool[slot].addr so that kstat shows 0 3666 * alternate MAC address if the slot is not used. 3667 * (But nxge_m_mmac_get returns the factory MAC even 3668 * when the slot is not used!) 3669 */ 3670 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 3671 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3672 } else { 3673 err = EIO; 3674 } 3675 } else { 3676 err = EINVAL; 3677 } 3678 3679 mutex_exit(nxgep->genlock); 3680 return (err); 3681 } 3682 3683 3684 /* 3685 * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve(). 3686 */ 3687 static int 3688 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr) 3689 { 3690 p_nxge_t nxgep = arg; 3691 mac_addr_slot_t slot; 3692 nxge_mmac_t *mmac_info; 3693 int err = 0; 3694 nxge_status_t status; 3695 3696 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3697 maddr->mma_addrlen)) 3698 return (EINVAL); 3699 3700 slot = maddr->mma_slot; 3701 3702 mutex_enter(nxgep->genlock); 3703 3704 /* 3705 * Make sure that nxge is initialized, if _start() has 3706 * not been called. 3707 */ 3708 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3709 status = nxge_init(nxgep); 3710 if (status != NXGE_OK) { 3711 mutex_exit(nxgep->genlock); 3712 return (ENXIO); 3713 } 3714 } 3715 3716 mmac_info = &nxgep->nxge_mmac_info; 3717 if (slot < 1 || slot > mmac_info->num_mmac) { 3718 mutex_exit(nxgep->genlock); 3719 return (EINVAL); 3720 } 3721 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3722 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) 3723 == 0) { 3724 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, 3725 ETHERADDRL); 3726 /* 3727 * Assume that the MAC passed down from the caller 3728 * is not a factory MAC address (the user should 3729 * call mmac_remove followed by mmac_reserve if 3730 * he wants to use the factory MAC for this slot). 3731 */ 3732 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 3733 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3734 } 3735 } else { 3736 err = EINVAL; 3737 } 3738 mutex_exit(nxgep->genlock); 3739 return (err); 3740 } 3741 3742 /* 3743 * nxge_m_mmac_get() - Get the MAC address and other information 3744 * related to the slot. mma_flags should be set to 0 in the call. 
* Note: although kstat shows MAC address as zero when a slot is 3746 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC 3747 * to the caller as long as the slot is not using a user MAC address. 3748 * The following table shows the rules: 3749 * 3750 * USED VENDOR mma_addr 3751 * ------------------------------------------------------------ 3752 * (1) Slot uses a user MAC: yes no user MAC 3753 * (2) Slot uses a factory MAC: yes yes factory MAC 3754 * (3) Slot is not used but is 3755 * factory MAC capable: no yes factory MAC 3756 * (4) Slot is not used and is 3757 * not factory MAC capable: no no 0 3758 * ------------------------------------------------------------ 3759 */ 3760 static int 3761 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr) 3762 { 3763 nxge_t *nxgep = arg; 3764 mac_addr_slot_t slot; 3765 nxge_mmac_t *mmac_info; 3766 nxge_status_t status; 3767 3768 slot = maddr->mma_slot; 3769 3770 mutex_enter(nxgep->genlock); 3771 3772 /* 3773 * Make sure that nxge is initialized, if _start() has 3774 * not been called. 3775 */ 3776 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3777 status = nxge_init(nxgep); 3778 if (status != NXGE_OK) { 3779 mutex_exit(nxgep->genlock); 3780 return (ENXIO); 3781 } 3782 } 3783 3784 mmac_info = &nxgep->nxge_mmac_info; 3785 3786 if (slot < 1 || slot > mmac_info->num_mmac) { 3787 mutex_exit(nxgep->genlock); 3788 return (EINVAL); 3789 } 3790 maddr->mma_flags = 0; 3791 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) 3792 maddr->mma_flags |= MMAC_SLOT_USED; 3793 3794 if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) { 3795 maddr->mma_flags |= MMAC_VENDOR_ADDR; 3796 bcopy(mmac_info->factory_mac_pool[slot], 3797 maddr->mma_addr, ETHERADDRL); 3798 maddr->mma_addrlen = ETHERADDRL; 3799 } else { 3800 if (maddr->mma_flags & MMAC_SLOT_USED) { 3801 bcopy(mmac_info->mac_pool[slot].addr, 3802 maddr->mma_addr, ETHERADDRL); 3803 maddr->mma_addrlen = ETHERADDRL; 3804 } else { 3805 bzero(maddr->mma_addr, ETHERADDRL); 3806 maddr->mma_addrlen = 0; 3807 } 3808 } 3809 mutex_exit(nxgep->genlock); 3810 return (0); 3811 } 3812 3813 3814 static boolean_t 3815 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3816 { 3817 nxge_t *nxgep = arg; 3818 uint32_t *txflags = cap_data; 3819 multiaddress_capab_t *mmacp = cap_data; 3820 3821 switch (cap) { 3822 case MAC_CAPAB_HCKSUM: 3823 *txflags = HCKSUM_INET_PARTIAL; 3824 break; 3825 case MAC_CAPAB_POLL: 3826 /* 3827 * There's nothing for us to fill in, simply returning 3828 * B_TRUE stating that we support polling is sufficient. 3829 */ 3830 break; 3831 3832 case MAC_CAPAB_MULTIADDRESS: 3833 mutex_enter(nxgep->genlock); 3834 3835 mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac; 3836 mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree; 3837 mmacp->maddr_flag = 0; /* 0 is required by PSARC 2006/265 */ 3838 /* 3839 * maddr_handle is driver's private data, passed back to 3840 * entry point functions as arg. 
3841 */ 3842 mmacp->maddr_handle = nxgep; 3843 mmacp->maddr_add = nxge_m_mmac_add; 3844 mmacp->maddr_remove = nxge_m_mmac_remove; 3845 mmacp->maddr_modify = nxge_m_mmac_modify; 3846 mmacp->maddr_get = nxge_m_mmac_get; 3847 mmacp->maddr_reserve = nxge_m_mmac_reserve; 3848 3849 mutex_exit(nxgep->genlock); 3850 break; 3851 case MAC_CAPAB_LSO: { 3852 mac_capab_lso_t *cap_lso = cap_data; 3853 3854 if (nxge_lso_enable) { 3855 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 3856 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 3857 nxge_lso_max = NXGE_LSO_MAXLEN; 3858 } 3859 cap_lso->lso_basic_tcp_ipv4.lso_max = nxge_lso_max; 3860 break; 3861 } else { 3862 return (B_FALSE); 3863 } 3864 } 3865 3866 default: 3867 return (B_FALSE); 3868 } 3869 return (B_TRUE); 3870 } 3871 3872 /* 3873 * Module loading and removing entry points. 3874 */ 3875 3876 static struct cb_ops nxge_cb_ops = { 3877 nodev, /* cb_open */ 3878 nodev, /* cb_close */ 3879 nodev, /* cb_strategy */ 3880 nodev, /* cb_print */ 3881 nodev, /* cb_dump */ 3882 nodev, /* cb_read */ 3883 nodev, /* cb_write */ 3884 nodev, /* cb_ioctl */ 3885 nodev, /* cb_devmap */ 3886 nodev, /* cb_mmap */ 3887 nodev, /* cb_segmap */ 3888 nochpoll, /* cb_chpoll */ 3889 ddi_prop_op, /* cb_prop_op */ 3890 NULL, 3891 D_MP, /* cb_flag */ 3892 CB_REV, /* rev */ 3893 nodev, /* int (*cb_aread)() */ 3894 nodev /* int (*cb_awrite)() */ 3895 }; 3896 3897 static struct dev_ops nxge_dev_ops = { 3898 DEVO_REV, /* devo_rev */ 3899 0, /* devo_refcnt */ 3900 nulldev, 3901 nulldev, /* devo_identify */ 3902 nulldev, /* devo_probe */ 3903 nxge_attach, /* devo_attach */ 3904 nxge_detach, /* devo_detach */ 3905 nodev, /* devo_reset */ 3906 &nxge_cb_ops, /* devo_cb_ops */ 3907 (struct bus_ops *)NULL, /* devo_bus_ops */ 3908 ddi_power /* devo_power */ 3909 }; 3910 3911 extern struct mod_ops mod_driverops; 3912 3913 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 3914 3915 /* 3916 * Module linkage information for the kernel. 
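 *
 * The modldrv/modlinkage pair below is what _init() registers via
 * mod_install() and _fini() tears down via mod_remove(); _info()
 * simply forwards to mod_info() with the same linkage.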
3917 */ 3918 static struct modldrv nxge_modldrv = { 3919 &mod_driverops, 3920 NXGE_DESC_VER, 3921 &nxge_dev_ops 3922 }; 3923 3924 static struct modlinkage modlinkage = { 3925 MODREV_1, (void *) &nxge_modldrv, NULL 3926 }; 3927 3928 int 3929 _init(void) 3930 { 3931 int status; 3932 3933 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 3934 mac_init_ops(&nxge_dev_ops, "nxge"); 3935 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 3936 if (status != 0) { 3937 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 3938 "failed to init device soft state")); 3939 goto _init_exit; 3940 } 3941 status = mod_install(&modlinkage); 3942 if (status != 0) { 3943 ddi_soft_state_fini(&nxge_list); 3944 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 3945 goto _init_exit; 3946 } 3947 3948 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 3949 3950 _init_exit: 3951 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 3952 3953 return (status); 3954 } 3955 3956 int 3957 _fini(void) 3958 { 3959 int status; 3960 3961 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 3962 3963 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 3964 3965 if (nxge_mblks_pending) 3966 return (EBUSY); 3967 3968 status = mod_remove(&modlinkage); 3969 if (status != DDI_SUCCESS) { 3970 NXGE_DEBUG_MSG((NULL, MOD_CTL, 3971 "Module removal failed 0x%08x", 3972 status)); 3973 goto _fini_exit; 3974 } 3975 3976 mac_fini_ops(&nxge_dev_ops); 3977 3978 ddi_soft_state_fini(&nxge_list); 3979 3980 MUTEX_DESTROY(&nxge_common_lock); 3981 _fini_exit: 3982 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 3983 3984 return (status); 3985 } 3986 3987 int 3988 _info(struct modinfo *modinfop) 3989 { 3990 int status; 3991 3992 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 3993 status = mod_info(&modlinkage, modinfop); 3994 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 3995 3996 return (status); 3997 } 3998 3999 /*ARGSUSED*/ 4000 static nxge_status_t 4001 nxge_add_intrs(p_nxge_t nxgep) 4002 { 4003 4004 int intr_types; 4005 int type = 0; 4006 int ddi_status = DDI_SUCCESS; 4007 nxge_status_t status = NXGE_OK; 4008 4009 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 4010 4011 nxgep->nxge_intr_type.intr_registered = B_FALSE; 4012 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 4013 nxgep->nxge_intr_type.msi_intx_cnt = 0; 4014 nxgep->nxge_intr_type.intr_added = 0; 4015 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 4016 nxgep->nxge_intr_type.intr_type = 0; 4017 4018 if (nxgep->niu_type == N2_NIU) { 4019 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 4020 } else if (nxge_msi_enable) { 4021 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 4022 } 4023 4024 /* Get the supported interrupt types */ 4025 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 4026 != DDI_SUCCESS) { 4027 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 4028 "ddi_intr_get_supported_types failed: status 0x%08x", 4029 ddi_status)); 4030 return (NXGE_ERROR | NXGE_DDI_FAILED); 4031 } 4032 nxgep->nxge_intr_type.intr_types = intr_types; 4033 4034 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 4035 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 4036 4037 /* 4038 * Solaris MSIX is not supported yet. use MSI for now. 

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs(p_nxge_t nxgep)
{
	int		intr_types;
	int		type = 0;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));

	nxgep->nxge_intr_type.intr_registered = B_FALSE;
	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
	nxgep->nxge_intr_type.msi_intx_cnt = 0;
	nxgep->nxge_intr_type.intr_added = 0;
	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
	nxgep->nxge_intr_type.intr_type = 0;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	} else if (nxge_msi_enable) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
	    != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}
	nxgep->nxge_intr_type.intr_types = intr_types;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * Solaris MSI-X is not supported yet; use MSI for now.
	 * nxge_msi_enable:
	 *	1 - MSI		2 - MSI-X	others - FIXED
	 */
	switch (nxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "use fixed (intx emulation) type %08x",
		    type));
		break;

	case 2:
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
		break;

	case 1:
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
	}

	nxgep->nxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    nxgep->nxge_intr_type.niu_msi_enable) {
		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_add_intrs: "
			    " nxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "interrupts registered : type %d", type));
			nxgep->nxge_intr_type.intr_registered = B_TRUE;

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "\nAdded advanced nxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!nxgep->nxge_intr_type.intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
		    "failed to register interrupts"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
	return (status);
}
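
/*
 * Illustrative sketch (hypothetical helper, compiled out): the
 * interrupt-type preference encoded in the switch above, expressed as
 * a pure function of the nxge_msi_enable tunable and the bitmask
 * returned by ddi_intr_get_supported_types().
 */
#ifdef	NXGE_INTR_TYPE_EXAMPLE
static int
nxge_pick_intr_type(uint32_t msi_enable, int supported)
{
	boolean_t msi = (msi_enable == 1 || msi_enable == 2);

	if (msi_enable == 2 && (supported & DDI_INTR_TYPE_MSIX))
		return (DDI_INTR_TYPE_MSIX);	/* 2: prefer MSI-X */
	if (msi && (supported & DDI_INTR_TYPE_MSI))
		return (DDI_INTR_TYPE_MSI);	/* then MSI */
	if (msi && (supported & DDI_INTR_TYPE_MSIX))
		return (DDI_INTR_TYPE_MSIX);	/* 1: MSI-X as fallback */
	return (DDI_INTR_TYPE_FIXED);		/* otherwise INTx */
}
#endif	/* NXGE_INTR_TYPE_EXAMPLE */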

/*ARGSUSED*/
static nxge_status_t
nxge_add_soft_intrs(p_nxge_t nxgep)
{
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));

	nxgep->resched_id = NULL;
	nxgep->resched_running = B_FALSE;
	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
	    &nxgep->resched_id,
	    NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
		    "ddi_add_softintr failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));

	return (status);
}
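
/*
 * Illustrative sketch (hypothetical, compiled out): the usual way a
 * soft interrupt registered as above gets used.  A transmit completion
 * path would trigger it so that nxge_reschedule() runs later at
 * DDI_SOFTINT_LOW priority.  Synchronization around resched_running is
 * deliberately elided here; a real caller must serialize this check.
 */
#ifdef	NXGE_SOFTINT_EXAMPLE
static void
nxge_softint_trigger_example(p_nxge_t nxgep)
{
	if (nxgep->resched_id != NULL && !nxgep->resched_running) {
		nxgep->resched_running = B_TRUE;
		ddi_trigger_softintr(nxgep->resched_id);
	}
}
#endif	/* NXGE_SOFTINT_EXAMPLE */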

static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)
{
	int		intr_type;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intr_type = intrp->intr_type;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		return (nxge_add_intrs_adv_type(nxgep, intr_type));

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));

	default:
		return (NXGE_ERROR);
	}
}


/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail, nrequest;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = nxge_create_msi_property(nxgep);
		if (nrequest < navail) {
			navail = nrequest;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* The MSI vector count must be a power of 2 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "nldvs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated interrupts */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual,
	    intrp->msi_intx_cnt,
	    intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	(void) nxge_intr_ldgv_init(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}
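
/*
 * Illustrative sketch (hypothetical helper, compiled out): the MSI
 * power-of-2 clamp above, written as a general routine.  It returns
 * the largest power of two that is <= n, which is what the if/else
 * cascade computes for the values MSI allocation can produce here
 * (non-power-of-2 counts are always below 32, since PCI MSI tops out
 * at 32 vectors).
 */
#ifdef	NXGE_MSI_P2_EXAMPLE
static int
nxge_msi_clamp_p2(int n)
{
	int p2 = 1;

	while ((p2 << 1) <= n)
		p2 <<= 1;
	return (p2);
}
#endif	/* NXGE_MSI_P2_EXAMPLE */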

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		if (nxgep->niu_type != N2_NIU) {
			ldgp->intdata = SID_DATA(ldgp->func, x);
		}

		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "1-1 int handler(%d) ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
			    "arg1 0x%016llx arg2 0x%016llx\n",
			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		}

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type_fix: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated interrupts */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	status = nxge_intr_ldgv_init(nxgep);
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));

	return (status);
}
"1-1 int handler(%d) ldg %d ldv %d " 4476 "arg1 $%p arg2 $%p\n", 4477 x, ldgp->ldg, ldgp->ldvp->ldv, 4478 arg1, arg2)); 4479 } else if (ldgp->nldvs > 1) { 4480 inthandler = (uint_t *)ldgp->sys_intr_handler; 4481 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4482 "nxge_add_intrs_adv_type_fix: " 4483 "shared ldv %d int handler(%d) ldv %d ldg %d" 4484 "arg1 0x%016llx arg2 0x%016llx\n", 4485 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 4486 arg1, arg2)); 4487 } 4488 4489 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 4490 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 4491 != DDI_SUCCESS) { 4492 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4493 "==> nxge_add_intrs_adv_type_fix: failed #%d " 4494 "status 0x%x", x, ddi_status)); 4495 for (y = 0; y < intrp->intr_added; y++) { 4496 (void) ddi_intr_remove_handler( 4497 intrp->htable[y]); 4498 } 4499 for (y = 0; y < nactual; y++) { 4500 (void) ddi_intr_free(intrp->htable[y]); 4501 } 4502 /* Free already allocated intr */ 4503 kmem_free(intrp->htable, intrp->intr_size); 4504 4505 (void) nxge_ldgv_uninit(nxgep); 4506 4507 return (NXGE_ERROR | NXGE_DDI_FAILED); 4508 } 4509 intrp->intr_added++; 4510 } 4511 4512 intrp->msi_intx_cnt = nactual; 4513 4514 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 4515 4516 status = nxge_intr_ldgv_init(nxgep); 4517 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 4518 4519 return (status); 4520 } 4521 4522 static void 4523 nxge_remove_intrs(p_nxge_t nxgep) 4524 { 4525 int i, inum; 4526 p_nxge_intr_t intrp; 4527 4528 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 4529 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4530 if (!intrp->intr_registered) { 4531 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4532 "<== nxge_remove_intrs: interrupts not registered")); 4533 return; 4534 } 4535 4536 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 4537 4538 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4539 (void) ddi_intr_block_disable(intrp->htable, 4540 intrp->intr_added); 4541 } else { 4542 for (i = 0; i < intrp->intr_added; i++) { 4543 (void) ddi_intr_disable(intrp->htable[i]); 4544 } 4545 } 4546 4547 for (inum = 0; inum < intrp->intr_added; inum++) { 4548 if (intrp->htable[inum]) { 4549 (void) ddi_intr_remove_handler(intrp->htable[inum]); 4550 } 4551 } 4552 4553 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 4554 if (intrp->htable[inum]) { 4555 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 4556 "nxge_remove_intrs: ddi_intr_free inum %d " 4557 "msi_intx_cnt %d intr_added %d", 4558 inum, 4559 intrp->msi_intx_cnt, 4560 intrp->intr_added)); 4561 4562 (void) ddi_intr_free(intrp->htable[inum]); 4563 } 4564 } 4565 4566 kmem_free(intrp->htable, intrp->intr_size); 4567 intrp->intr_registered = B_FALSE; 4568 intrp->intr_enabled = B_FALSE; 4569 intrp->msi_intx_cnt = 0; 4570 intrp->intr_added = 0; 4571 4572 (void) nxge_ldgv_uninit(nxgep); 4573 4574 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 4575 "#msix-request"); 4576 4577 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 4578 } 4579 4580 /*ARGSUSED*/ 4581 static void 4582 nxge_remove_soft_intrs(p_nxge_t nxgep) 4583 { 4584 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs")); 4585 if (nxgep->resched_id) { 4586 ddi_remove_softintr(nxgep->resched_id); 4587 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4588 "==> nxge_remove_soft_intrs: removed")); 4589 nxgep->resched_id = NULL; 4590 } 4591 4592 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs")); 4593 } 4594 4595 /*ARGSUSED*/ 4596 static void 4597 nxge_intrs_enable(p_nxge_t nxgep) 4598 { 

/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;
	int		status;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t *macp;
	int status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = nxgep->mac.maxframesize -
	    sizeof (struct ether_header) - ETHERFCSL - 4;

	status = mac_register(macp, &nxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}
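
/*
 * Illustrative sketch (hypothetical, compiled out): the m_max_sdu
 * arithmetic above in isolation.  The SDU reported to MAC excludes
 * the 14-byte Ethernet header, the 4-byte FCS, and 4 further bytes
 * that appear to reserve room for a VLAN tag; a 1522-byte maximum
 * frame would thus yield the familiar 1500-byte SDU.
 */
#ifdef	NXGE_SDU_EXAMPLE
static uint32_t
nxge_max_sdu_example(uint32_t maxframesize)
{
	/* e.g. 1522 - 14 (ether header) - 4 (FCS) - 4 (VLAN) = 1500 */
	return (maxframesize - sizeof (struct ether_header) -
	    ETHERFCSL - 4);
}
#endif	/* NXGE_SDU_EXAMPLE */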
0x%x\n", err_id); 4727 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 4728 switch (blk_id) { 4729 case MAC_BLK_ID: 4730 break; 4731 case TXMAC_BLK_ID: 4732 break; 4733 case RXMAC_BLK_ID: 4734 break; 4735 case MIF_BLK_ID: 4736 break; 4737 case IPP_BLK_ID: 4738 nxge_ipp_inject_err(nxgep, err_id); 4739 break; 4740 case TXC_BLK_ID: 4741 nxge_txc_inject_err(nxgep, err_id); 4742 break; 4743 case TXDMA_BLK_ID: 4744 nxge_txdma_inject_err(nxgep, err_id, chan); 4745 break; 4746 case RXDMA_BLK_ID: 4747 nxge_rxdma_inject_err(nxgep, err_id, chan); 4748 break; 4749 case ZCP_BLK_ID: 4750 nxge_zcp_inject_err(nxgep, err_id); 4751 break; 4752 case ESPC_BLK_ID: 4753 break; 4754 case FFLP_BLK_ID: 4755 break; 4756 case PHY_BLK_ID: 4757 break; 4758 case ETHER_SERDES_BLK_ID: 4759 break; 4760 case PCIE_SERDES_BLK_ID: 4761 break; 4762 case VIR_BLK_ID: 4763 break; 4764 } 4765 4766 nmp->b_wptr = nmp->b_rptr + size; 4767 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 4768 4769 miocack(wq, mp, (int)size, 0); 4770 } 4771 4772 static int 4773 nxge_init_common_dev(p_nxge_t nxgep) 4774 { 4775 p_nxge_hw_list_t hw_p; 4776 dev_info_t *p_dip; 4777 4778 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 4779 4780 p_dip = nxgep->p_dip; 4781 MUTEX_ENTER(&nxge_common_lock); 4782 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4783 "==> nxge_init_common_dev:func # %d", 4784 nxgep->function_num)); 4785 /* 4786 * Loop through existing per neptune hardware list. 4787 */ 4788 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 4789 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4790 "==> nxge_init_common_device:func # %d " 4791 "hw_p $%p parent dip $%p", 4792 nxgep->function_num, 4793 hw_p, 4794 p_dip)); 4795 if (hw_p->parent_devp == p_dip) { 4796 nxgep->nxge_hw_p = hw_p; 4797 hw_p->ndevs++; 4798 hw_p->nxge_p[nxgep->function_num] = nxgep; 4799 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4800 "==> nxge_init_common_device:func # %d " 4801 "hw_p $%p parent dip $%p " 4802 "ndevs %d (found)", 4803 nxgep->function_num, 4804 hw_p, 4805 p_dip, 4806 hw_p->ndevs)); 4807 break; 4808 } 4809 } 4810 4811 if (hw_p == NULL) { 4812 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4813 "==> nxge_init_common_device:func # %d " 4814 "parent dip $%p (new)", 4815 nxgep->function_num, 4816 p_dip)); 4817 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 4818 hw_p->parent_devp = p_dip; 4819 hw_p->magic = NXGE_NEPTUNE_MAGIC; 4820 nxgep->nxge_hw_p = hw_p; 4821 hw_p->ndevs++; 4822 hw_p->nxge_p[nxgep->function_num] = nxgep; 4823 hw_p->next = nxge_hw_list; 4824 if (nxgep->niu_type == N2_NIU) { 4825 hw_p->niu_type = N2_NIU; 4826 hw_p->platform_type = P_NEPTUNE_NIU; 4827 } else { 4828 hw_p->niu_type = NIU_TYPE_NONE; 4829 hw_p->platform_type = P_NEPTUNE_NONE; 4830 } 4831 4832 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 4833 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 4834 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 4835 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 4836 MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL); 4837 4838 nxge_hw_list = hw_p; 4839 4840 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list); 4841 } 4842 4843 MUTEX_EXIT(&nxge_common_lock); 4844 4845 nxgep->platform_type = hw_p->platform_type; 4846 if (nxgep->niu_type != N2_NIU) { 4847 nxgep->niu_type = hw_p->niu_type; 4848 } 4849 4850 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4851 "==> nxge_init_common_device (nxge_hw_list) $%p", 4852 nxge_hw_list)); 4853 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device")); 4854 4855 return (NXGE_OK); 4856 } 4857 4858 static 

static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p, h_hw_p;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));

			nxgep->nxge_hw_p = NULL;
			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				MUTEX_DESTROY(&hw_p->nxge_mii_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num,
				    hw_p,
				    p_dip,
				    hw_p->ndevs));

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}

/*
 * Determine the number of ports from the niu_type or the platform
 * type.  Returns the number of ports, or zero on failure.
 */
int
nxge_get_nports(p_nxge_t nxgep)
{
	int nports = 0;

	switch (nxgep->niu_type) {
	case N2_NIU:
	case NEPTUNE_2_10GF:
		nports = 2;
		break;
	case NEPTUNE_4_1GC:
	case NEPTUNE_2_10GF_2_1GC:
	case NEPTUNE_1_10GF_3_1GC:
	case NEPTUNE_1_1GC_1_10GF_2_1GC:
		nports = 4;
		break;
	default:
		switch (nxgep->platform_type) {
		case P_NEPTUNE_NIU:
		case P_NEPTUNE_ATLAS_2PORT:
			nports = 2;
			break;
		case P_NEPTUNE_ATLAS_4PORT:
		case P_NEPTUNE_MARAMBA_P0:
		case P_NEPTUNE_MARAMBA_P1:
		case P_NEPTUNE_ALONSO:
			nports = 4;
			break;
		default:
			break;
		}
		break;
	}

	return (nports);
}

/*
 * The following two functions are to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
	int	nmsi;
	extern	int ncpus;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));

	switch (nxgep->mac.portmode) {
	case PORT_10G_COPPER:
	case PORT_10G_FIBER:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		/*
		 * The maximum number of MSI-X vectors requested will
		 * be 8.  If the number of CPUs is less than 8, we will
		 * request that many MSI-X vectors instead.
		 */
		if (ncpus >= NXGE_MSIX_REQUEST_10G) {
			nmsi = NXGE_MSIX_REQUEST_10G;
		} else {
			nmsi = ncpus;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;

	default:
		nmsi = NXGE_MSIX_REQUEST_1G;
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
	return (nmsi);
}
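
/*
 * Illustrative sketch (hypothetical, compiled out): the
 * "#msix-request" property created above carries no value; its mere
 * existence signals the PSARC/2007/453 MSI-X limit override to the
 * platform.  A consumer-side check might look like this.
 */
#ifdef	NXGE_MSIX_PROP_EXAMPLE
static boolean_t
nxge_msix_override_requested(dev_info_t *dip)
{
	return (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "#msix-request") == 1);
}
#endif	/* NXGE_MSIX_PROP_EXAMPLE */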