/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use OBP published props */
uint32_t nxge_use_rdc_intr = 1;		/* debug: assign RDC intr */
/*
 * Until MSI-X is supported, assume MSI; set to 2 for MSI-X.
 */
uint32_t nxge_msi_enable = 1;		/* debug: turn MSI off */

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t nxge_jumbo_enable = B_FALSE;
uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/*
 * Debugging flags:
 *	nxge_no_tx_lb:		transmit load balancing
 *	nxge_tx_lb_policy:	0 - TCP port (default)
 *				3 - DEST MAC
 */
uint32_t nxge_no_tx_lb = 0;
uint32_t nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
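
/*
 * Illustrative example (not part of the original source): like other
 * driver globals, the tunables above can be overridden at boot time
 * from /etc/system with the standard "set module:variable" syntax,
 * for example:
 *
 *	set nxge:nxge_jumbo_enable = 1
 *	set nxge:nxge_rbr_size = 2048
 *
 * or patched on a live system with adb/mdb.  The specific values
 * shown here are examples only.
 */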

rtrace_t npi_rtracebuf;

#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *,
	size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *,
	size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);

/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_unicst(void *, const uint8_t *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static void nxge_m_resources(void *);
mblk_t *nxge_m_tx(void *arg, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	mac_addr_slot_t slot);
static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
	boolean_t factory);
static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)

static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	nxge_m_unicst,
	nxge_m_tx,
	nxge_m_resources,
	nxge_m_ioctl,
	nxge_m_getcapab
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level = 0;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system.  The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

nxge_os_mutex_t nxge_mii_lock;
static uint32_t nxge_mii_lock_init = 0;
nxge_os_mutex_t nxge_mdio_lock;
static uint32_t nxge_mdio_lock_init = 0;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t,
	ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *,
	ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes[] = {0x2000};
#else
size_t alloc_sizes[] = {0x1000, 0x2000, 0x4000, 0x8000,
		0x10000, 0x20000, 0x40000, 0x80000,
		0x100000, 0x200000, 0x400000, 0x800000, 0x1000000};
#endif
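
/*
 * Illustrative sketch (not part of the original source) of how these
 * chunk sizes are consumed by the buffer allocators further below
 * (see nxge_alloc_rx_buf_dma): the allocator picks the smallest entry
 * in alloc_sizes[] that covers the requested pool size (or the largest
 * entry if none does) and then carves the pool into chunks of that
 * size, up to NXGE_DMA_BLOCK chunks.  For example, a 0x300000 (3 MB)
 * request selects the 0x400000 entry and is satisfied by a single
 * 4 MB chunk.  In the remainder of the allocator (beyond this
 * excerpt), the driver is assumed to fall back to smaller entries
 * when a large allocation fails.
 */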

/*
 * nxge_attach - Device attach(9E) entry point.  Handles attach,
 * resume, and power-management resume for an instance.
 */
static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		goto nxge_attach_fail;
	}

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr,
	    &nxge_rx_dma_attr);

	status = nxge_map_regs(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail;
	}

	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail;
	}

	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
	nxgep->mac.portnum = portn;
	if ((portn == 0) || (portn == 1))
		nxgep->mac.porttype = PORT_TYPE_XMAC;
	else
		nxgep->mac.porttype = PORT_TYPE_BMAC;
	/*
	 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
	 * internally, while the other 2 ports use BMAC (1G "Big" MAC).
	 * The two types of MACs have different characteristics.
	 */
	mmac_info = &nxgep->nxge_mmac_info;
	if (nxgep->function_num < 2) {
		mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
		mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
	} else {
		mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
		mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
	}
	/*
	 * Setup the ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup the Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	if (nxgep->niu_type != N2_NIU) {
		/*
		 * Read the VPD info from the EEPROM into a local data
		 * structure and check the VPD info for validity.
		 */
		(void) nxge_vpd_info_get(nxgep);
	}

	status = nxge_get_xcvr_type(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
		    "Couldn't determine card type .... exit"));
exit ")); 509 goto nxge_attach_fail; 510 } 511 512 if ((nxgep->niu_type == NEPTUNE) && 513 (nxgep->mac.portmode == PORT_10G_FIBER)) { 514 nxgep->niu_type = NEPTUNE_2; 515 } 516 517 if ((nxgep->niu_type == NEPTUNE_2) && (nxgep->function_num > 1)) { 518 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Unsupported function %d." 519 "Only functions 0 and 1 are supported by this card", 520 nxgep->function_num)); 521 status = NXGE_ERROR; 522 goto nxge_attach_fail; 523 } 524 525 status = nxge_get_config_properties(nxgep); 526 527 if (status != NXGE_OK) { 528 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed")); 529 goto nxge_attach_fail; 530 } 531 532 /* 533 * Setup the Kstats for the driver. 534 */ 535 nxge_setup_kstats(nxgep); 536 537 nxge_setup_param(nxgep); 538 539 status = nxge_setup_system_dma_pages(nxgep); 540 if (status != NXGE_OK) { 541 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed")); 542 goto nxge_attach_fail; 543 } 544 545 #if defined(sun4v) 546 if (nxgep->niu_type == N2_NIU) { 547 nxgep->niu_hsvc_available = B_FALSE; 548 bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t)); 549 if ((status = 550 hsvc_register(&nxgep->niu_hsvc, 551 &nxgep->niu_min_ver)) != 0) { 552 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 553 "nxge_attach: " 554 "%s: cannot negotiate " 555 "hypervisor services " 556 "revision %d " 557 "group: 0x%lx " 558 "major: 0x%lx minor: 0x%lx " 559 "errno: %d", 560 niu_hsvc.hsvc_modname, 561 niu_hsvc.hsvc_rev, 562 niu_hsvc.hsvc_group, 563 niu_hsvc.hsvc_major, 564 niu_hsvc.hsvc_minor, 565 status)); 566 status = DDI_FAILURE; 567 goto nxge_attach_fail; 568 } 569 570 nxgep->niu_hsvc_available = B_TRUE; 571 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 572 "NIU Hypervisor service enabled")); 573 } 574 #endif 575 576 nxge_hw_id_init(nxgep); 577 nxge_hw_init_niu_common(nxgep); 578 579 status = nxge_setup_mutexes(nxgep); 580 if (status != NXGE_OK) { 581 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed")); 582 goto nxge_attach_fail; 583 } 584 585 status = nxge_setup_dev(nxgep); 586 if (status != DDI_SUCCESS) { 587 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed")); 588 goto nxge_attach_fail; 589 } 590 591 status = nxge_add_intrs(nxgep); 592 if (status != DDI_SUCCESS) { 593 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed")); 594 goto nxge_attach_fail; 595 } 596 status = nxge_add_soft_intrs(nxgep); 597 if (status != DDI_SUCCESS) { 598 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed")); 599 goto nxge_attach_fail; 600 } 601 602 /* 603 * Enable interrupts. 
	nxge_intrs_enable(nxgep);

	if ((status = nxge_mac_register(nxgep)) != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
	    instance));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int status = DDI_SUCCESS;
	int instance;
	p_nxge_t nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/* remove soft interrupts */
	nxge_remove_soft_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	nxge_destroy_dev(nxgep);

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	p_dev_regs_t dev_regs;
	char buf[MAXPATHLEN + 1];
	char *devname;
#ifdef NXGE_DEBUG
	char *sysname;
#endif
	off_t regsize;
	nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NEPTUNE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int *prop_val;
		uint_t prop_len;
		uint8_t func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	case NEPTUNE:
	case NEPTUNE_2:
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMPORTANT: workaround for a bit-swapping bug in HW
		 * that leaves no-snoop enabled, which results in DMA
		 * not being synched properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
	 */
	ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
	    &nxgep->interrupt_cookie);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_setup_mutexes: failed 0x%x", ddi_status));
		goto nxge_setup_mutexes_exit;
	}

	/* Initialize the global mutexes. */

	if (nxge_mdio_lock_init == 0) {
		MUTEX_INIT(&nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
	}
	atomic_add_32(&nxge_mdio_lock_init, 1);

	if (nxge_mii_lock_init == 0) {
		MUTEX_INIT(&nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);
	}
	atomic_add_32(&nxge_mii_lock_init, 1);

	nxgep->drv_state |= STATE_MDIO_LOCK_INIT;
	nxgep->drv_state |= STATE_MII_LOCK_INIT;

	/*
	 * Initialize the mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context
	 * as an FFLP operation can take a very long time to
	 * complete and is hence not suitable to invoke from
	 * interrupt handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (nxgep->niu_type == NEPTUNE) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* free data structures, based on HW type */
	if (nxgep->niu_type == NEPTUNE) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}
	if (nxgep->drv_state & STATE_MDIO_LOCK_INIT) {
		if (nxge_mdio_lock_init == 1) {
			MUTEX_DESTROY(&nxge_mdio_lock);
		}
		atomic_add_32(&nxge_mdio_lock_init, -1);
	}
	if (nxgep->drv_state & STATE_MII_LOCK_INIT) {
		if (nxge_mii_lock_init == 1) {
			MUTEX_DESTROY(&nxge_mii_lock);
		}
		atomic_add_32(&nxge_mii_lock_init, -1);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	/*
	 * Initialize and enable the TXC registers
	 * (globally enable the TX controller,
	 * enable the port, configure the dma channel bitmap,
	 * configure the max burst size).
	 */
	status = nxge_txc_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
		goto nxge_init_fail2;
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep);

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	(void) nxge_txc_uninit(nxgep);
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}

timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) ||
	    (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}
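
/*
 * Illustrative usage (not part of the original source): callers are
 * expected to pair these wrappers around timeout(9F)/untimeout(9F),
 * for example (the callback name and interval here are hypothetical):
 *
 *	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
 *	    1000);
 *	...
 *	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
 *	nxgep->nxge_timerid = 0;
 *
 * nxge_start_timer deliberately refuses to arm a timer while the
 * instance is suspended.
 */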

void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t regdata;
	int i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
	reg = buf[0];

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}

nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);
	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}

#ifdef NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char *dev_ptr;
	ddi_acc_handle_t pci_config_handle;
	uint32_t regval;
	int i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (nxgep->niu_type == NEPTUNE) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first 0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}
}
#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

	nxge_intrs_disable(nxgep);
	nxge_destroy_dev(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

	nxgep->suspended = DDI_RESUME;
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);
	(void) nxge_tx_mac_enable(nxgep);
	nxge_intrs_enable(nxgep);
	nxgep->suspended = 0;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_resume status = 0x%x", status));
	return (status);
}

static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
	    nxgep->mac.portnum));

	status = nxge_xcvr_find(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_setup_dev status "
		    " (xcvr find 0x%08x)", status));
		goto nxge_setup_dev_exit;
	}

	status = nxge_link_init(nxgep);

	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "port%d Bad register acc handle", nxgep->mac.portnum));
		status = NXGE_ERROR;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_setup_dev status "
		    "(xcvr init 0x%08x)", status));
		goto nxge_setup_dev_exit;
	}

nxge_setup_dev_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_dev port %d status = 0x%08x",
	    nxgep->mac.portnum, status));

	return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	(void) nxge_hw_stop(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}

static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	uint_t count;
	ddi_dma_cookie_t cookie;
	uint_t iommu_pagesize;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
	if (nxgep->niu_type != N2_NIU) {
		iommu_pagesize = dvma_pagesize(nxgep->dip);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
		    " default_block_size %d iommu_pagesize %d",
		    nxgep->sys_page_sz,
		    ddi_ptob(nxgep->dip, (ulong_t)1),
		    nxgep->rx_default_block_size,
		    iommu_pagesize));

		if (iommu_pagesize != 0) {
			if (nxgep->sys_page_sz == iommu_pagesize) {
				if (iommu_pagesize > 0x4000)
					nxgep->sys_page_sz = 0x4000;
			} else {
				if (nxgep->sys_page_sz > iommu_pagesize)
					nxgep->sys_page_sz = iommu_pagesize;
			}
		}
	}
	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    nxgep->sys_page_sz,
	    ddi_ptob(nxgep->dip, (ulong_t)1),
	    nxgep->rx_default_block_size,
	    nxgep->sys_page_mask));

	switch (nxgep->sys_page_sz) {
	default:
		nxgep->sys_page_sz = 0x1000;
		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		nxgep->rx_default_block_size = 0x2000;
		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	case 0x4000:
		nxgep->rx_default_block_size = 0x4000;
		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
		break;
	case 0x8000:
		nxgep->rx_default_block_size = 0x8000;
		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
		break;
	}

#ifndef USE_RX_BIG_BUF
	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
	nxgep->rx_default_block_size = 0x2000;
	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
	/*
	 * Get the system DMA burst size.
	 */
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &nxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed "
		    " status 0x%x", ddi_status));
		goto nxge_get_soft_properties_exit;
	}

	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
	    (caddr_t)nxgep->dmasparehandle,
	    sizeof (nxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "Binding spare handle to find system"
		    " burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto nxge_get_soft_properties_fail1;
	}

	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

nxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_system_dma_pages status = 0x%08x", status));
	return (status);
}

static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));

	status = nxge_alloc_rx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		return (NXGE_ERROR);
	}

	status = nxge_alloc_tx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		nxge_free_rx_mem_pool(nxgep);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
	return (NXGE_OK);
}

static void
nxge_free_mem_pool(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));

	nxge_free_rx_mem_pool(nxgep);
	nxge_free_tx_mem_pool(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}

static nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
{
	int i, j;
	uint32_t ndmas, st_rdc;
	p_nxge_dma_pt_cfg_t p_all_cfgp;
	p_nxge_hw_pt_cfg_t p_cfgp;
	p_nxge_dma_pool_t dma_poolp;
	p_nxge_dma_common_t *dma_buf_p;
	p_nxge_dma_pool_t dma_cntl_poolp;
	p_nxge_dma_common_t *dma_cntl_p;
	size_t rx_buf_alloc_size;
	size_t rx_cntl_alloc_size;
	uint32_t *num_chunks;	/* per dma */
	nxge_status_t status = NXGE_OK;

	uint32_t nxge_port_rbr_size;
	uint32_t nxge_port_rbr_spare_size;
	uint32_t nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	st_rdc = p_cfgp->start_rdc;
	ndmas = p_cfgp->max_rdcs;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    " nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));

	/*
	 * Allocate memory for each receive DMA channel.
	 */
	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_cntl_poolp = (p_nxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(
	    sizeof (uint32_t) * ndmas, KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with the
	 * default block size.
	 * RBR block counts must be a multiple of the post batch
	 * count (16).
	 */
	nxge_port_rbr_size = p_all_cfgp->rbr_size;
	nxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!nxge_port_rbr_size) {
		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
	}
	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	p_all_cfgp->rbr_size = nxge_port_rbr_size;
	nxge_port_rbr_spare_size = nxge_rbr_spare_size;

	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	/*
	 * N2/NIU has limitations on the descriptor sizes: contiguous
	 * memory allocation for data buffers is limited to 4M
	 * (contig_mem_alloc), and control buffers must be little
	 * endian (so the ddi/dki mem alloc functions must be used).
	 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		nxge_port_rbr_spare_size = 0;
		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
		    (!ISP2(nxge_port_rbr_size))) {
			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
		}
		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
		    (!ISP2(nxge_port_rcr_size))) {
			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
		}
	}
#endif

	rx_buf_alloc_size = (nxgep->rx_default_block_size *
	    (nxge_port_rbr_size + nxge_port_rbr_spare_size));

	/*
	 * Addresses of the receive block ring, receive completion ring
	 * and the mailbox must all be cache-aligned (64 bytes).
	 */
	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
	    "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
	    "nxge_port_rcr_size = %d "
	    "rx_cntl_alloc_size = %d",
	    nxge_port_rbr_size, nxge_port_rbr_spare_size,
	    nxge_port_rcr_size,
	    rx_cntl_alloc_size));

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		if (!ISP2(rx_buf_alloc_size)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_alloc_rx_mem_pool: "
			    " must be power of 2"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			goto nxge_alloc_rx_mem_pool_exit;
		}

		if (rx_buf_alloc_size > (1 << 22)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_alloc_rx_mem_pool: "
			    " limit size to 4M"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			goto nxge_alloc_rx_mem_pool_exit;
		}

		if (rx_cntl_alloc_size < 0x2000) {
			rx_cntl_alloc_size = 0x2000;
		}
	}
#endif
	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;

	/*
	 * Allocate memory for receive buffers and descriptor rings.
	 * Replace allocation functions with interface functions provided
	 * by the partition manager when it is available.
	 */
	/*
	 * Allocate memory for the receive buffer blocks.
	 */
	for (i = 0; i < ndmas; i++) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    " nxge_alloc_rx_mem_pool to alloc mem: "
		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
		    i, dma_buf_p[i], &dma_buf_p[i]));
		num_chunks[i] = 0;
		status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i],
		    rx_buf_alloc_size,
		    nxgep->rx_default_block_size, &num_chunks[i]);
		if (status != NXGE_OK) {
			break;
		}
		st_rdc++;
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    " nxge_alloc_rx_mem_pool DONE alloc mem: "
		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
		    dma_buf_p[i], &dma_buf_p[i]));
	}
	if (i < ndmas) {
		goto nxge_alloc_rx_mem_fail1;
	}
	/*
	 * Allocate memory for descriptor rings and mailbox.
	 */
 */
	st_rdc = p_cfgp->start_rdc;
	for (j = 0; j < ndmas; j++) {
		status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j],
		    rx_cntl_alloc_size);
		if (status != NXGE_OK) {
			break;
		}
		st_rdc++;
	}
	if (j < ndmas) {
		goto nxge_alloc_rx_mem_fail2;
	}

	dma_poolp->ndmas = ndmas;
	dma_poolp->num_chunks = num_chunks;
	dma_poolp->buf_allocated = B_TRUE;
	nxgep->rx_buf_pool_p = dma_poolp;
	dma_poolp->dma_buf_pool_p = dma_buf_p;

	dma_cntl_poolp->ndmas = ndmas;
	dma_cntl_poolp->buf_allocated = B_TRUE;
	nxgep->rx_cntl_pool_p = dma_cntl_poolp;
	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;

	goto nxge_alloc_rx_mem_pool_exit;

nxge_alloc_rx_mem_fail2:
	/* Free control buffers */
	j--;
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
	for (; j >= 0; j--) {
		nxge_free_rx_cntl_dma(nxgep,
		    (p_nxge_dma_common_t)dma_cntl_p[j]);
		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		    "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)",
		    j));
	}
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_alloc_rx_mem_pool: all control bufs freed"));

nxge_alloc_rx_mem_fail1:
	/* Free data buffers */
	i--;
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
	for (; i >= 0; i--) {
		nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
		    num_chunks[i]);
	}
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i));

	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));

nxge_alloc_rx_mem_pool_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rx_mem_pool:status 0x%08x", status));

	return (status);
}

static void
nxge_free_rx_mem_pool(p_nxge_t nxgep)
{
	uint32_t i, ndmas;
	p_nxge_dma_pool_t dma_poolp;
	p_nxge_dma_common_t *dma_buf_p;
	p_nxge_dma_pool_t dma_cntl_poolp;
	p_nxge_dma_common_t *dma_cntl_p;
	uint32_t *num_chunks;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));

	dma_poolp = nxgep->rx_buf_pool_p;
	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_rx_mem_pool "
		    "(null rx buf pool or buf not allocated)"));
		return;
	}

	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_rx_mem_pool "
		    "(null rx cntl buf pool or cntl buf not allocated)"));
		return;
	}

	dma_buf_p = dma_poolp->dma_buf_pool_p;
	num_chunks = dma_poolp->num_chunks;

	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
	ndmas = dma_cntl_poolp->ndmas;

	for (i = 0; i < ndmas; i++) {
		nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
	}

	for (i = 0; i < ndmas; i++) {
		nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]);
	}

	for (i = 0; i < ndmas; i++) {
		KMEM_FREE(dma_buf_p[i],
		    sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
	}

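	/*
	 * The per-channel chunks are gone; now release the bookkeeping:
	 * the chunk-count array, the two pointer arrays, and finally the
	 * pool structures themselves.
	 */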
KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2103 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2104 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2105 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2106 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2107 2108 nxgep->rx_buf_pool_p = NULL; 2109 nxgep->rx_cntl_pool_p = NULL; 2110 2111 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2112 } 2113 2114 2115 static nxge_status_t 2116 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2117 p_nxge_dma_common_t *dmap, 2118 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2119 { 2120 p_nxge_dma_common_t rx_dmap; 2121 nxge_status_t status = NXGE_OK; 2122 size_t total_alloc_size; 2123 size_t allocated = 0; 2124 int i, size_index, array_size; 2125 2126 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2127 2128 rx_dmap = (p_nxge_dma_common_t) 2129 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2130 KM_SLEEP); 2131 2132 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2133 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2134 dma_channel, alloc_size, block_size, dmap)); 2135 2136 total_alloc_size = alloc_size; 2137 2138 #if defined(RX_USE_RECLAIM_POST) 2139 total_alloc_size = alloc_size + alloc_size/4; 2140 #endif 2141 2142 i = 0; 2143 size_index = 0; 2144 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2145 while ((alloc_sizes[size_index] < alloc_size) && 2146 (size_index < array_size)) 2147 size_index++; 2148 if (size_index >= array_size) { 2149 size_index = array_size - 1; 2150 } 2151 2152 while ((allocated < total_alloc_size) && 2153 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2154 rx_dmap[i].dma_chunk_index = i; 2155 rx_dmap[i].block_size = block_size; 2156 rx_dmap[i].alength = alloc_sizes[size_index]; 2157 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2158 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2159 rx_dmap[i].dma_channel = dma_channel; 2160 rx_dmap[i].contig_alloc_type = B_FALSE; 2161 2162 /* 2163 * N2/NIU: data buffers must be contiguous as the driver 2164 * needs to call Hypervisor api to set up 2165 * logical pages. 
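		 * (This shortcut only applies when NXGE_DMA_BLOCK is 1,
		 * i.e. the whole buffer area is a single chunk that one
		 * contig_mem_alloc() region can back; with multiple chunks
		 * per channel, ordinary DDI DMA memory is used instead.)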
2166 */ 2167 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2168 rx_dmap[i].contig_alloc_type = B_TRUE; 2169 } 2170 2171 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2172 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2173 "i %d nblocks %d alength %d", 2174 dma_channel, i, &rx_dmap[i], block_size, 2175 i, rx_dmap[i].nblocks, 2176 rx_dmap[i].alength)); 2177 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2178 &nxge_rx_dma_attr, 2179 rx_dmap[i].alength, 2180 &nxge_dev_buf_dma_acc_attr, 2181 DDI_DMA_READ | DDI_DMA_STREAMING, 2182 (p_nxge_dma_common_t)(&rx_dmap[i])); 2183 if (status != NXGE_OK) { 2184 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2185 " nxge_alloc_rx_buf_dma: Alloc Failed ")); 2186 size_index--; 2187 } else { 2188 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2189 " alloc_rx_buf_dma allocated rdc %d " 2190 "chunk %d size %x dvma %x bufp %llx ", 2191 dma_channel, i, rx_dmap[i].alength, 2192 rx_dmap[i].ioaddr_pp, &rx_dmap[i])); 2193 i++; 2194 allocated += alloc_sizes[size_index]; 2195 } 2196 } 2197 2198 2199 if (allocated < total_alloc_size) { 2200 goto nxge_alloc_rx_mem_fail1; 2201 } 2202 2203 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2204 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2205 dma_channel, i)); 2206 *num_chunks = i; 2207 *dmap = rx_dmap; 2208 2209 goto nxge_alloc_rx_mem_exit; 2210 2211 nxge_alloc_rx_mem_fail1: 2212 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2213 2214 nxge_alloc_rx_mem_exit: 2215 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2216 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2217 2218 return (status); 2219 } 2220 2221 /*ARGSUSED*/ 2222 static void 2223 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2224 uint32_t num_chunks) 2225 { 2226 int i; 2227 2228 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2229 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2230 2231 for (i = 0; i < num_chunks; i++) { 2232 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2233 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2234 i, dmap)); 2235 nxge_dma_mem_free(dmap++); 2236 } 2237 2238 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2239 } 2240 2241 /*ARGSUSED*/ 2242 static nxge_status_t 2243 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2244 p_nxge_dma_common_t *dmap, size_t size) 2245 { 2246 p_nxge_dma_common_t rx_dmap; 2247 nxge_status_t status = NXGE_OK; 2248 2249 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2250 2251 rx_dmap = (p_nxge_dma_common_t) 2252 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2253 2254 rx_dmap->contig_alloc_type = B_FALSE; 2255 2256 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2257 &nxge_desc_dma_attr, 2258 size, 2259 &nxge_dev_desc_dma_acc_attr, 2260 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2261 rx_dmap); 2262 if (status != NXGE_OK) { 2263 goto nxge_alloc_rx_cntl_dma_fail1; 2264 } 2265 2266 *dmap = rx_dmap; 2267 goto nxge_alloc_rx_cntl_dma_exit; 2268 2269 nxge_alloc_rx_cntl_dma_fail1: 2270 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2271 2272 nxge_alloc_rx_cntl_dma_exit: 2273 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2274 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2275 2276 return (status); 2277 } 2278 2279 /*ARGSUSED*/ 2280 static void 2281 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2282 { 2283 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2284 2285 nxge_dma_mem_free(dmap); 2286 2287 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2288 } 2289 2290 static nxge_status_t 2291 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2292 { 2293 nxge_status_t 
status = NXGE_OK; 2294 int i, j; 2295 uint32_t ndmas, st_tdc; 2296 p_nxge_dma_pt_cfg_t p_all_cfgp; 2297 p_nxge_hw_pt_cfg_t p_cfgp; 2298 p_nxge_dma_pool_t dma_poolp; 2299 p_nxge_dma_common_t *dma_buf_p; 2300 p_nxge_dma_pool_t dma_cntl_poolp; 2301 p_nxge_dma_common_t *dma_cntl_p; 2302 size_t tx_buf_alloc_size; 2303 size_t tx_cntl_alloc_size; 2304 uint32_t *num_chunks; /* per dma */ 2305 uint32_t bcopy_thresh; 2306 2307 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2308 2309 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2310 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2311 st_tdc = p_cfgp->start_tdc; 2312 ndmas = p_cfgp->max_tdcs; 2313 2314 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: " 2315 "p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d", 2316 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs)); 2317 /* 2318 * Allocate memory for each transmit DMA channel. 2319 */ 2320 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2321 KM_SLEEP); 2322 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2323 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2324 2325 dma_cntl_poolp = (p_nxge_dma_pool_t) 2326 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2327 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2328 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2329 2330 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2331 /* 2332 * N2/NIU has limitation on the descriptor sizes (contiguous 2333 * memory allocation on data buffers to 4M (contig_mem_alloc) 2334 * and little endian for control buffers (must use the ddi/dki mem alloc 2335 * function). The transmit ring is limited to 8K (includes the 2336 * mailbox). 2337 */ 2338 if (nxgep->niu_type == N2_NIU) { 2339 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2340 (!ISP2(nxge_tx_ring_size))) { 2341 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2342 } 2343 } 2344 #endif 2345 2346 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2347 2348 /* 2349 * Assume that each DMA channel will be configured with default 2350 * transmit bufer size for copying transmit data. 2351 * (For packet payload over this limit, packets will not be 2352 * copied.) 2353 */ 2354 if (nxgep->niu_type == N2_NIU) { 2355 bcopy_thresh = TX_BCOPY_SIZE; 2356 } else { 2357 bcopy_thresh = nxge_bcopy_thresh; 2358 } 2359 tx_buf_alloc_size = (bcopy_thresh * nxge_tx_ring_size); 2360 2361 /* 2362 * Addresses of transmit descriptor ring and the 2363 * mailbox must be all cache-aligned (64 bytes). 2364 */ 2365 tx_cntl_alloc_size = nxge_tx_ring_size; 2366 tx_cntl_alloc_size *= (sizeof (tx_desc_t)); 2367 tx_cntl_alloc_size += sizeof (txdma_mailbox_t); 2368 2369 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2370 if (nxgep->niu_type == N2_NIU) { 2371 if (!ISP2(tx_buf_alloc_size)) { 2372 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2373 "==> nxge_alloc_tx_mem_pool: " 2374 " must be power of 2")); 2375 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2376 goto nxge_alloc_tx_mem_pool_exit; 2377 } 2378 2379 if (tx_buf_alloc_size > (1 << 22)) { 2380 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2381 "==> nxge_alloc_tx_mem_pool: " 2382 " limit size to 4M")); 2383 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2384 goto nxge_alloc_tx_mem_pool_exit; 2385 } 2386 2387 if (tx_cntl_alloc_size < 0x2000) { 2388 tx_cntl_alloc_size = 0x2000; 2389 } 2390 } 2391 #endif 2392 2393 num_chunks = (uint32_t *)KMEM_ZALLOC( 2394 sizeof (uint32_t) * ndmas, KM_SLEEP); 2395 2396 /* 2397 * Allocate memory for transmit buffers and descriptor rings. 
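	 * As a sizing sketch (hypothetical numbers, not the defaults): a
	 * 2048-byte bcopy threshold and a 1024-entry transmit ring give
	 * tx_buf_alloc_size = 2048 * 1024 = 2M per channel, safely under
	 * the 4M N2/NIU contig_mem_alloc limit checked above.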
2398 * Replace allocation functions with interface functions provided 2399 * by the partition manager when it is available. 2400 * 2401 * Allocate memory for the transmit buffer pool. 2402 */ 2403 for (i = 0; i < ndmas; i++) { 2404 num_chunks[i] = 0; 2405 status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i], 2406 tx_buf_alloc_size, 2407 bcopy_thresh, &num_chunks[i]); 2408 if (status != NXGE_OK) { 2409 break; 2410 } 2411 st_tdc++; 2412 } 2413 if (i < ndmas) { 2414 goto nxge_alloc_tx_mem_pool_fail1; 2415 } 2416 2417 st_tdc = p_cfgp->start_tdc; 2418 /* 2419 * Allocate memory for descriptor rings and mailbox. 2420 */ 2421 for (j = 0; j < ndmas; j++) { 2422 status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j], 2423 tx_cntl_alloc_size); 2424 if (status != NXGE_OK) { 2425 break; 2426 } 2427 st_tdc++; 2428 } 2429 if (j < ndmas) { 2430 goto nxge_alloc_tx_mem_pool_fail2; 2431 } 2432 2433 dma_poolp->ndmas = ndmas; 2434 dma_poolp->num_chunks = num_chunks; 2435 dma_poolp->buf_allocated = B_TRUE; 2436 dma_poolp->dma_buf_pool_p = dma_buf_p; 2437 nxgep->tx_buf_pool_p = dma_poolp; 2438 2439 dma_cntl_poolp->ndmas = ndmas; 2440 dma_cntl_poolp->buf_allocated = B_TRUE; 2441 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2442 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2443 2444 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2445 "==> nxge_alloc_tx_mem_pool: start_tdc %d " 2446 "ndmas %d poolp->ndmas %d", 2447 st_tdc, ndmas, dma_poolp->ndmas)); 2448 2449 goto nxge_alloc_tx_mem_pool_exit; 2450 2451 nxge_alloc_tx_mem_pool_fail2: 2452 /* Free control buffers */ 2453 j--; 2454 for (; j >= 0; j--) { 2455 nxge_free_tx_cntl_dma(nxgep, 2456 (p_nxge_dma_common_t)dma_cntl_p[j]); 2457 } 2458 2459 nxge_alloc_tx_mem_pool_fail1: 2460 /* Free data buffers */ 2461 i--; 2462 for (; i >= 0; i--) { 2463 nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2464 num_chunks[i]); 2465 } 2466 2467 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2468 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2469 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2470 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2471 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2472 2473 nxge_alloc_tx_mem_pool_exit: 2474 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2475 "<== nxge_alloc_tx_mem_pool:status 0x%08x", status)); 2476 2477 return (status); 2478 } 2479 2480 static nxge_status_t 2481 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2482 p_nxge_dma_common_t *dmap, size_t alloc_size, 2483 size_t block_size, uint32_t *num_chunks) 2484 { 2485 p_nxge_dma_common_t tx_dmap; 2486 nxge_status_t status = NXGE_OK; 2487 size_t total_alloc_size; 2488 size_t allocated = 0; 2489 int i, size_index, array_size; 2490 2491 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 2492 2493 tx_dmap = (p_nxge_dma_common_t) 2494 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2495 KM_SLEEP); 2496 2497 total_alloc_size = alloc_size; 2498 i = 0; 2499 size_index = 0; 2500 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2501 while ((alloc_sizes[size_index] < alloc_size) && 2502 (size_index < array_size)) 2503 size_index++; 2504 if (size_index >= array_size) { 2505 size_index = array_size - 1; 2506 } 2507 2508 while ((allocated < total_alloc_size) && 2509 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2510 2511 tx_dmap[i].dma_chunk_index = i; 2512 tx_dmap[i].block_size = block_size; 2513 tx_dmap[i].alength = alloc_sizes[size_index]; 2514 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2515 tx_dmap[i].nblocks = 
alloc_sizes[size_index] / block_size; 2516 tx_dmap[i].dma_channel = dma_channel; 2517 tx_dmap[i].contig_alloc_type = B_FALSE; 2518 2519 /* 2520 * N2/NIU: data buffers must be contiguous as the driver 2521 * needs to call Hypervisor api to set up 2522 * logical pages. 2523 */ 2524 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2525 tx_dmap[i].contig_alloc_type = B_TRUE; 2526 } 2527 2528 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2529 &nxge_tx_dma_attr, 2530 tx_dmap[i].alength, 2531 &nxge_dev_buf_dma_acc_attr, 2532 DDI_DMA_WRITE | DDI_DMA_STREAMING, 2533 (p_nxge_dma_common_t)(&tx_dmap[i])); 2534 if (status != NXGE_OK) { 2535 size_index--; 2536 } else { 2537 i++; 2538 allocated += alloc_sizes[size_index]; 2539 } 2540 } 2541 2542 if (allocated < total_alloc_size) { 2543 goto nxge_alloc_tx_mem_fail1; 2544 } 2545 2546 *num_chunks = i; 2547 *dmap = tx_dmap; 2548 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2549 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2550 *dmap, i)); 2551 goto nxge_alloc_tx_mem_exit; 2552 2553 nxge_alloc_tx_mem_fail1: 2554 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2555 2556 nxge_alloc_tx_mem_exit: 2557 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2558 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 2559 2560 return (status); 2561 } 2562 2563 /*ARGSUSED*/ 2564 static void 2565 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2566 uint32_t num_chunks) 2567 { 2568 int i; 2569 2570 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 2571 2572 for (i = 0; i < num_chunks; i++) { 2573 nxge_dma_mem_free(dmap++); 2574 } 2575 2576 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 2577 } 2578 2579 /*ARGSUSED*/ 2580 static nxge_status_t 2581 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2582 p_nxge_dma_common_t *dmap, size_t size) 2583 { 2584 p_nxge_dma_common_t tx_dmap; 2585 nxge_status_t status = NXGE_OK; 2586 2587 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 2588 tx_dmap = (p_nxge_dma_common_t) 2589 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2590 2591 tx_dmap->contig_alloc_type = B_FALSE; 2592 2593 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2594 &nxge_desc_dma_attr, 2595 size, 2596 &nxge_dev_desc_dma_acc_attr, 2597 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2598 tx_dmap); 2599 if (status != NXGE_OK) { 2600 goto nxge_alloc_tx_cntl_dma_fail1; 2601 } 2602 2603 *dmap = tx_dmap; 2604 goto nxge_alloc_tx_cntl_dma_exit; 2605 2606 nxge_alloc_tx_cntl_dma_fail1: 2607 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 2608 2609 nxge_alloc_tx_cntl_dma_exit: 2610 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2611 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 2612 2613 return (status); 2614 } 2615 2616 /*ARGSUSED*/ 2617 static void 2618 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2619 { 2620 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 2621 2622 nxge_dma_mem_free(dmap); 2623 2624 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 2625 } 2626 2627 static void 2628 nxge_free_tx_mem_pool(p_nxge_t nxgep) 2629 { 2630 uint32_t i, ndmas; 2631 p_nxge_dma_pool_t dma_poolp; 2632 p_nxge_dma_common_t *dma_buf_p; 2633 p_nxge_dma_pool_t dma_cntl_poolp; 2634 p_nxge_dma_common_t *dma_cntl_p; 2635 uint32_t *num_chunks; 2636 2637 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool")); 2638 2639 dma_poolp = nxgep->tx_buf_pool_p; 2640 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2641 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2642 "<== nxge_free_tx_mem_pool 
" 2643 "(null rx buf pool or buf not allocated")); 2644 return; 2645 } 2646 2647 dma_cntl_poolp = nxgep->tx_cntl_pool_p; 2648 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2649 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2650 "<== nxge_free_tx_mem_pool " 2651 "(null tx cntl buf pool or cntl buf not allocated")); 2652 return; 2653 } 2654 2655 dma_buf_p = dma_poolp->dma_buf_pool_p; 2656 num_chunks = dma_poolp->num_chunks; 2657 2658 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2659 ndmas = dma_cntl_poolp->ndmas; 2660 2661 for (i = 0; i < ndmas; i++) { 2662 nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2663 } 2664 2665 for (i = 0; i < ndmas; i++) { 2666 nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]); 2667 } 2668 2669 for (i = 0; i < ndmas; i++) { 2670 KMEM_FREE(dma_buf_p[i], 2671 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2672 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2673 } 2674 2675 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2676 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2677 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2678 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2679 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2680 2681 nxgep->tx_buf_pool_p = NULL; 2682 nxgep->tx_cntl_pool_p = NULL; 2683 2684 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool")); 2685 } 2686 2687 /*ARGSUSED*/ 2688 static nxge_status_t 2689 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 2690 struct ddi_dma_attr *dma_attrp, 2691 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 2692 p_nxge_dma_common_t dma_p) 2693 { 2694 caddr_t kaddrp; 2695 int ddi_status = DDI_SUCCESS; 2696 boolean_t contig_alloc_type; 2697 2698 contig_alloc_type = dma_p->contig_alloc_type; 2699 2700 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 2701 /* 2702 * contig_alloc_type for contiguous memory only allowed 2703 * for N2/NIU. 
		 */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_dma_mem_alloc: alloc type not allowed (%d)",
		    dma_p->contig_alloc_type));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	dma_p->dma_handle = NULL;
	dma_p->acc_handle = NULL;
	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	switch (contig_alloc_type) {
	case B_FALSE:
		ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length,
		    acc_attr_p,
		    xfer_flags,
		    DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
		    &dma_p->acc_handle);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
			ddi_dma_free_handle(&dma_p->dma_handle);
			dma_p->dma_handle = NULL;
			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		if (dma_p->alength < length) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
			    "< length."));
			ddi_dma_mem_free(&dma_p->acc_handle);
			ddi_dma_free_handle(&dma_p->dma_handle);
			dma_p->acc_handle = NULL;
			dma_p->dma_handle = NULL;
			return (NXGE_ERROR);
		}

		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
		    &dma_p->dma_cookie, &dma_p->ncookies);
		if (ddi_status != DDI_DMA_MAPPED) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
			    "(status 0x%x ncookies %d.)", ddi_status,
			    dma_p->ncookies));
			if (dma_p->acc_handle) {
				ddi_dma_mem_free(&dma_p->acc_handle);
				dma_p->acc_handle = NULL;
			}
			ddi_dma_free_handle(&dma_p->dma_handle);
			dma_p->dma_handle = NULL;
			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		if (dma_p->ncookies != 1) {
			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
			    "> 1 cookie "
			    "(status 0x%x ncookies %d.)", ddi_status,
			    dma_p->ncookies));
			if (dma_p->acc_handle) {
				ddi_dma_mem_free(&dma_p->acc_handle);
				dma_p->acc_handle = NULL;
			}
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			ddi_dma_free_handle(&dma_p->dma_handle);
			dma_p->dma_handle = NULL;
			return (NXGE_ERROR);
		}
		break;

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	case B_TRUE:
		kaddrp = (caddr_t)contig_mem_alloc(length);
		if (kaddrp == NULL) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:contig_mem_alloc failed."));
			ddi_dma_free_handle(&dma_p->dma_handle);
			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		dma_p->alength = length;
		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
		    &dma_p->dma_cookie, &dma_p->ncookies);
		if (ddi_status != DDI_DMA_MAPPED) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
			    "(status 0x%x ncookies %d.)", ddi_status,
			    dma_p->ncookies));

			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
			    "==> nxge_dma_mem_alloc: (not mapped) "
			    "length %lu (0x%x) "
			    "free contig kaddrp $%p "
			    "va_to_pa $%p",
			    length, length,
			    kaddrp,
			    va_to_pa(kaddrp)));

			contig_mem_free((void *)kaddrp, length);
			ddi_dma_free_handle(&dma_p->dma_handle);

			dma_p->dma_handle = NULL;
			dma_p->acc_handle = NULL;
			dma_p->alength = 0;
			dma_p->kaddrp = NULL;

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		if (dma_p->ncookies != 1 ||
		    (dma_p->dma_cookie.dmac_laddress == 0)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
			    "cookie or "
			    "dmac_laddress is NULL $%p size %d "
			    "(status 0x%x ncookies %d.)",
			    dma_p->dma_cookie.dmac_laddress,
			    dma_p->dma_cookie.dmac_size,
			    ddi_status,
			    dma_p->ncookies));

			contig_mem_free((void *)kaddrp, length);
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			ddi_dma_free_handle(&dma_p->dma_handle);

			dma_p->alength = 0;
			dma_p->dma_handle = NULL;
			dma_p->acc_handle = NULL;
			dma_p->kaddrp = NULL;

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		break;

#else
	case B_TRUE:
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
#endif
	}

	dma_p->kaddrp = kaddrp;
	dma_p->last_kaddrp = (unsigned char *)kaddrp +
	    dma_p->alength - RXBUF_64B_ALIGNED;
	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
	dma_p->last_ioaddr_pp =
	    (unsigned char *)dma_p->dma_cookie.dmac_laddress +
	    dma_p->alength - RXBUF_64B_ALIGNED;

	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	dma_p->orig_ioaddr_pp =
	    (unsigned char *)dma_p->dma_cookie.dmac_laddress;
	dma_p->orig_alength = length;
	dma_p->orig_kaddrp = kaddrp;
	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
#endif

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
	    "dma buffer allocated: dma_p $%p "
	    "return dmac_laddress from cookie $%p cookie dmac_size %d "
	    "dma_p->ioaddr_p $%p "
	    "dma_p->orig_ioaddr_p $%p "
	    "orig_vatopa $%p "
	    "alength %d (0x%x) "
	    "kaddrp $%p "
	    "length %d (0x%x)",
	    dma_p,
	    dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
	    dma_p->ioaddr_pp,
	    dma_p->orig_ioaddr_pp,
	    dma_p->orig_vatopa,
	    dma_p->alength, dma_p->alength,
	    kaddrp,
	    length, length));

	return (NXGE_OK);
}

static void
nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
{
	if (dma_p->dma_handle != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
	}

	if (dma_p->acc_handle != NULL) {
		ddi_dma_mem_free(&dma_p->acc_handle);
		dma_p->acc_handle = NULL;
		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (dma_p->contig_alloc_type &&
	    dma_p->orig_kaddrp && dma_p->orig_alength) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
		    "kaddrp $%p (orig_kaddrp $%p) "
		    "mem type %d "
		    "orig_alength %d "
		    "alength 0x%x (%d)",
		    dma_p->kaddrp,
		    dma_p->orig_kaddrp,
		    dma_p->contig_alloc_type,
		    dma_p->orig_alength,
		    dma_p->alength, dma_p->alength));

		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
		dma_p->orig_alength = 0;
		dma_p->orig_kaddrp = NULL;
		dma_p->contig_alloc_type = B_FALSE;
	}
#endif
	dma_p->kaddrp = NULL;
	dma_p->alength = 0;
}

/*
 * nxge_m_start() -- start transmitting and receiving.
 *
 * This function is called by the MAC layer when the first
 * stream is opened, to get the hardware ready for sending
 * and receiving packets.
 */
static int
nxge_m_start(void *arg)
{
	p_nxge_t nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));

	MUTEX_ENTER(nxgep->genlock);
	if (nxge_init(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_start: initialization failed"));
		MUTEX_EXIT(nxgep->genlock);
		return (EIO);
	}

	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
		goto nxge_m_start_exit;
	/*
	 * Start timer to check the system error and tx hangs
	 */
	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
	    NXGE_CHECK_TIMER);

	nxgep->link_notify = B_TRUE;

	nxgep->nxge_mac_state = NXGE_MAC_STARTED;

nxge_m_start_exit:
	MUTEX_EXIT(nxgep->genlock);
	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
	return (0);
}

/*
 * nxge_m_stop(): stop transmitting and receiving.
 */
static void
nxge_m_stop(void *arg)
{
	p_nxge_t nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	MUTEX_ENTER(nxgep->genlock);
	nxge_uninit(nxgep);

	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;

	MUTEX_EXIT(nxgep->genlock);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
}

static int
nxge_m_unicst(void *arg, const uint8_t *macaddr)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	struct ether_addr addrp;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));

	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
	if (nxge_set_mac_addr(nxgep, &addrp)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_unicst: set unicast failed"));
		return (EINVAL);
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));

	return (0);
}

static int
nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	struct ether_addr addrp;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_m_multicst: add %d", add));

	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
	if (add) {
		if (nxge_add_mcast_addr(nxgep, &addrp)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_multicst: add multicast failed"));
			return (EINVAL);
		}
	} else {
		if (nxge_del_mcast_addr(nxgep, &addrp)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_multicst: del multicast failed"));
			return (EINVAL);
		}
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));

	return (0);
}

static int
nxge_m_promisc(void *arg, boolean_t on)
{
	p_nxge_t nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_m_promisc: on %d", on));

	if (nxge_set_promisc(nxgep, on)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_promisc: set promisc failed"));
		return (EINVAL);
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "<== 
nxge_m_promisc: on %d", on)); 3067 3068 return (0); 3069 } 3070 3071 static void 3072 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3073 { 3074 p_nxge_t nxgep = (p_nxge_t)arg; 3075 struct iocblk *iocp; 3076 boolean_t need_privilege; 3077 int err; 3078 int cmd; 3079 3080 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3081 3082 iocp = (struct iocblk *)mp->b_rptr; 3083 iocp->ioc_error = 0; 3084 need_privilege = B_TRUE; 3085 cmd = iocp->ioc_cmd; 3086 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3087 switch (cmd) { 3088 default: 3089 miocnak(wq, mp, 0, EINVAL); 3090 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3091 return; 3092 3093 case LB_GET_INFO_SIZE: 3094 case LB_GET_INFO: 3095 case LB_GET_MODE: 3096 need_privilege = B_FALSE; 3097 break; 3098 case LB_SET_MODE: 3099 break; 3100 3101 case ND_GET: 3102 need_privilege = B_FALSE; 3103 break; 3104 case ND_SET: 3105 break; 3106 3107 case NXGE_GET_MII: 3108 case NXGE_PUT_MII: 3109 case NXGE_GET64: 3110 case NXGE_PUT64: 3111 case NXGE_GET_TX_RING_SZ: 3112 case NXGE_GET_TX_DESC: 3113 case NXGE_TX_SIDE_RESET: 3114 case NXGE_RX_SIDE_RESET: 3115 case NXGE_GLOBAL_RESET: 3116 case NXGE_RESET_MAC: 3117 case NXGE_TX_REGS_DUMP: 3118 case NXGE_RX_REGS_DUMP: 3119 case NXGE_INT_REGS_DUMP: 3120 case NXGE_VIR_INT_REGS_DUMP: 3121 case NXGE_PUT_TCAM: 3122 case NXGE_GET_TCAM: 3123 case NXGE_RTRACE: 3124 case NXGE_RDUMP: 3125 3126 need_privilege = B_FALSE; 3127 break; 3128 case NXGE_INJECT_ERR: 3129 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3130 nxge_err_inject(nxgep, wq, mp); 3131 break; 3132 } 3133 3134 if (need_privilege) { 3135 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3136 if (err != 0) { 3137 miocnak(wq, mp, 0, err); 3138 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3139 "<== nxge_m_ioctl: no priv")); 3140 return; 3141 } 3142 } 3143 3144 switch (cmd) { 3145 case ND_GET: 3146 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command")); 3147 case ND_SET: 3148 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command")); 3149 nxge_param_ioctl(nxgep, wq, mp, iocp); 3150 break; 3151 3152 case LB_GET_MODE: 3153 case LB_SET_MODE: 3154 case LB_GET_INFO_SIZE: 3155 case LB_GET_INFO: 3156 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3157 break; 3158 3159 case NXGE_GET_MII: 3160 case NXGE_PUT_MII: 3161 case NXGE_PUT_TCAM: 3162 case NXGE_GET_TCAM: 3163 case NXGE_GET64: 3164 case NXGE_PUT64: 3165 case NXGE_GET_TX_RING_SZ: 3166 case NXGE_GET_TX_DESC: 3167 case NXGE_TX_SIDE_RESET: 3168 case NXGE_RX_SIDE_RESET: 3169 case NXGE_GLOBAL_RESET: 3170 case NXGE_RESET_MAC: 3171 case NXGE_TX_REGS_DUMP: 3172 case NXGE_RX_REGS_DUMP: 3173 case NXGE_INT_REGS_DUMP: 3174 case NXGE_VIR_INT_REGS_DUMP: 3175 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3176 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3177 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3178 break; 3179 } 3180 3181 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3182 } 3183 3184 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3185 3186 static void 3187 nxge_m_resources(void *arg) 3188 { 3189 p_nxge_t nxgep = arg; 3190 mac_rx_fifo_t mrf; 3191 p_rx_rcr_rings_t rcr_rings; 3192 p_rx_rcr_ring_t *rcr_p; 3193 uint32_t i, ndmas; 3194 nxge_status_t status; 3195 3196 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 3197 3198 MUTEX_ENTER(nxgep->genlock); 3199 3200 /* 3201 * CR 6492541 Check to see if the drv_state has been initialized, 3202 * if not * call nxge_init(). 
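	 * (The MAC layer may invoke this entry point before nxge_m_start()
	 * has been called, so the receive rings may not exist yet when we
	 * get here.)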
3203 */ 3204 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3205 status = nxge_init(nxgep); 3206 if (status != NXGE_OK) 3207 goto nxge_m_resources_exit; 3208 } 3209 3210 mrf.mrf_type = MAC_RX_FIFO; 3211 mrf.mrf_blank = nxge_rx_hw_blank; 3212 mrf.mrf_arg = (void *)nxgep; 3213 3214 mrf.mrf_normal_blank_time = 128; 3215 mrf.mrf_normal_pkt_count = 8; 3216 rcr_rings = nxgep->rx_rcr_rings; 3217 rcr_p = rcr_rings->rcr_rings; 3218 ndmas = rcr_rings->ndmas; 3219 3220 /* 3221 * Export our receive resources to the MAC layer. 3222 */ 3223 for (i = 0; i < ndmas; i++) { 3224 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle = 3225 mac_resource_add(nxgep->mach, 3226 (mac_resource_t *)&mrf); 3227 3228 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3229 "==> nxge_m_resources: vdma %d dma %d " 3230 "rcrptr 0x%016llx mac_handle 0x%016llx", 3231 i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc, 3232 rcr_p[i], 3233 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle)); 3234 } 3235 3236 nxge_m_resources_exit: 3237 MUTEX_EXIT(nxgep->genlock); 3238 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources")); 3239 } 3240 3241 static void 3242 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory) 3243 { 3244 p_nxge_mmac_stats_t mmac_stats; 3245 int i; 3246 nxge_mmac_t *mmac_info; 3247 3248 mmac_info = &nxgep->nxge_mmac_info; 3249 3250 mmac_stats = &nxgep->statsp->mmac_stats; 3251 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 3252 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 3253 3254 for (i = 0; i < ETHERADDRL; i++) { 3255 if (factory) { 3256 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3257 = mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i]; 3258 } else { 3259 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3260 = mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i]; 3261 } 3262 } 3263 } 3264 3265 /* 3266 * nxge_altmac_set() -- Set an alternate MAC address 3267 */ 3268 static int 3269 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot) 3270 { 3271 uint8_t addrn; 3272 uint8_t portn; 3273 npi_mac_addr_t altmac; 3274 hostinfo_t mac_rdc; 3275 p_nxge_class_pt_cfg_t clscfgp; 3276 3277 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 3278 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 3279 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 3280 3281 portn = nxgep->mac.portnum; 3282 addrn = (uint8_t)slot - 1; 3283 3284 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn, 3285 addrn, &altmac) != NPI_SUCCESS) 3286 return (EIO); 3287 3288 /* 3289 * Set the rdc table number for the host info entry 3290 * for this mac address slot. 3291 */ 3292 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 3293 mac_rdc.value = 0; 3294 mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl; 3295 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 3296 3297 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 3298 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 3299 return (EIO); 3300 } 3301 3302 /* 3303 * Enable comparison with the alternate MAC address. 3304 * While the first alternate addr is enabled by bit 1 of register 3305 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 3306 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 3307 * accordingly before calling npi_mac_altaddr_entry. 
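	 * For example, alternate-address slot 1 is programmed as addrn 0
	 * on an XMAC port (XMAC_PORT_0/XMAC_PORT_1) but as addrn 1 on a
	 * BMAC port, which is exactly the adjustment made below.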
3308 */ 3309 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 3310 addrn = (uint8_t)slot - 1; 3311 else 3312 addrn = (uint8_t)slot; 3313 3314 if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn) 3315 != NPI_SUCCESS) 3316 return (EIO); 3317 3318 return (0); 3319 } 3320 3321 /* 3322 * nxeg_m_mmac_add() - find an unused address slot, set the address 3323 * value to the one specified, enable the port to start filtering on 3324 * the new MAC address. Returns 0 on success. 3325 */ 3326 static int 3327 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 3328 { 3329 p_nxge_t nxgep = arg; 3330 mac_addr_slot_t slot; 3331 nxge_mmac_t *mmac_info; 3332 int err; 3333 nxge_status_t status; 3334 3335 mutex_enter(nxgep->genlock); 3336 3337 /* 3338 * Make sure that nxge is initialized, if _start() has 3339 * not been called. 3340 */ 3341 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3342 status = nxge_init(nxgep); 3343 if (status != NXGE_OK) { 3344 mutex_exit(nxgep->genlock); 3345 return (ENXIO); 3346 } 3347 } 3348 3349 mmac_info = &nxgep->nxge_mmac_info; 3350 if (mmac_info->naddrfree == 0) { 3351 mutex_exit(nxgep->genlock); 3352 return (ENOSPC); 3353 } 3354 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3355 maddr->mma_addrlen)) { 3356 mutex_exit(nxgep->genlock); 3357 return (EINVAL); 3358 } 3359 /* 3360 * Search for the first available slot. Because naddrfree 3361 * is not zero, we are guaranteed to find one. 3362 * Slot 0 is for unique (primary) MAC. The first alternate 3363 * MAC slot is slot 1. 3364 * Each of the first two ports of Neptune has 16 alternate 3365 * MAC slots but only the first 7 (or 15) slots have assigned factory 3366 * MAC addresses. We first search among the slots without bundled 3367 * factory MACs. If we fail to find one in that range, then we 3368 * search the slots with bundled factory MACs. A factory MAC 3369 * will be wasted while the slot is used with a user MAC address. 3370 * But the slot could be used by factory MAC again after calling 3371 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 3372 */ 3373 if (mmac_info->num_factory_mmac < mmac_info->num_mmac) { 3374 for (slot = mmac_info->num_factory_mmac + 1; 3375 slot <= mmac_info->num_mmac; slot++) { 3376 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3377 break; 3378 } 3379 if (slot > mmac_info->num_mmac) { 3380 for (slot = 1; slot <= mmac_info->num_factory_mmac; 3381 slot++) { 3382 if (!(mmac_info->mac_pool[slot].flags 3383 & MMAC_SLOT_USED)) 3384 break; 3385 } 3386 } 3387 } else { 3388 for (slot = 1; slot <= mmac_info->num_mmac; slot++) { 3389 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3390 break; 3391 } 3392 } 3393 ASSERT(slot <= mmac_info->num_mmac); 3394 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) { 3395 mutex_exit(nxgep->genlock); 3396 return (err); 3397 } 3398 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 3399 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 3400 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 3401 mmac_info->naddrfree--; 3402 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3403 3404 maddr->mma_slot = slot; 3405 3406 mutex_exit(nxgep->genlock); 3407 return (0); 3408 } 3409 3410 /* 3411 * This function reserves an unused slot and programs the slot and the HW 3412 * with a factory mac address. 
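 * A caller that has overwritten a factory-capable slot with a user
 * address and wants the factory MAC back would typically call
 * nxge_m_mmac_remove() on the slot and then reserve it again through
 * this entry point.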
3413 */ 3414 static int 3415 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 3416 { 3417 p_nxge_t nxgep = arg; 3418 mac_addr_slot_t slot; 3419 nxge_mmac_t *mmac_info; 3420 int err; 3421 nxge_status_t status; 3422 3423 mutex_enter(nxgep->genlock); 3424 3425 /* 3426 * Make sure that nxge is initialized, if _start() has 3427 * not been called. 3428 */ 3429 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3430 status = nxge_init(nxgep); 3431 if (status != NXGE_OK) { 3432 mutex_exit(nxgep->genlock); 3433 return (ENXIO); 3434 } 3435 } 3436 3437 mmac_info = &nxgep->nxge_mmac_info; 3438 if (mmac_info->naddrfree == 0) { 3439 mutex_exit(nxgep->genlock); 3440 return (ENOSPC); 3441 } 3442 3443 slot = maddr->mma_slot; 3444 if (slot == -1) { /* -1: Take the first available slot */ 3445 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 3446 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3447 break; 3448 } 3449 if (slot > mmac_info->num_factory_mmac) { 3450 mutex_exit(nxgep->genlock); 3451 return (ENOSPC); 3452 } 3453 } 3454 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 3455 /* 3456 * Do not support factory MAC at a slot greater than 3457 * num_factory_mmac even when there are available factory 3458 * MAC addresses because the alternate MACs are bundled with 3459 * slot[1] through slot[num_factory_mmac] 3460 */ 3461 mutex_exit(nxgep->genlock); 3462 return (EINVAL); 3463 } 3464 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3465 mutex_exit(nxgep->genlock); 3466 return (EBUSY); 3467 } 3468 /* Verify the address to be reserved */ 3469 if (!mac_unicst_verify(nxgep->mach, 3470 mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 3471 mutex_exit(nxgep->genlock); 3472 return (EINVAL); 3473 } 3474 if (err = nxge_altmac_set(nxgep, 3475 mmac_info->factory_mac_pool[slot], slot)) { 3476 mutex_exit(nxgep->genlock); 3477 return (err); 3478 } 3479 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 3480 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3481 mmac_info->naddrfree--; 3482 3483 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 3484 mutex_exit(nxgep->genlock); 3485 3486 /* Pass info back to the caller */ 3487 maddr->mma_slot = slot; 3488 maddr->mma_addrlen = ETHERADDRL; 3489 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3490 3491 return (0); 3492 } 3493 3494 /* 3495 * Remove the specified mac address and update the HW not to filter 3496 * the mac address anymore. 3497 */ 3498 static int 3499 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 3500 { 3501 p_nxge_t nxgep = arg; 3502 nxge_mmac_t *mmac_info; 3503 uint8_t addrn; 3504 uint8_t portn; 3505 int err = 0; 3506 nxge_status_t status; 3507 3508 mutex_enter(nxgep->genlock); 3509 3510 /* 3511 * Make sure that nxge is initialized, if _start() has 3512 * not been called. 
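	 * Removing an address still requires live hardware, since the
	 * alternate-address compare enable must be cleared in the MAC.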
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}

	portn = nxgep->mac.portnum;
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
		    == NPI_SUCCESS) {
			mmac_info->naddrfree++;
			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
			/*
			 * Regardless if the MAC we just stopped filtering
			 * is a user addr or a factory addr, we must set
			 * the MMAC_VENDOR_ADDR flag if this slot has an
			 * associated factory MAC to indicate that a factory
			 * MAC is available.
			 */
			if (slot <= mmac_info->num_factory_mmac) {
				mmac_info->mac_pool[slot].flags
				    |= MMAC_VENDOR_ADDR;
			}
			/*
			 * Clear mac_pool[slot].addr so that kstat shows 0
			 * alternate MAC address if the slot is not used.
			 * (But nxge_m_mmac_get returns the factory MAC even
			 * when the slot is not used!)
			 */
			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
		} else {
			err = EIO;
		}
	} else {
		err = EINVAL;
	}

	mutex_exit(nxgep->genlock);
	return (err);
}


/*
 * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve().
 */
static int
nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
{
	p_nxge_t nxgep = arg;
	mac_addr_slot_t slot;
	nxge_mmac_t *mmac_info;
	int err = 0;
	nxge_status_t status;

	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
	    maddr->mma_addrlen))
		return (EINVAL);

	slot = maddr->mma_slot;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
		    == 0) {
			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
			    ETHERADDRL);
			/*
			 * Assume that the MAC passed down from the caller
			 * is not a factory MAC address (The user should
			 * call mmac_remove followed by mmac_reserve if
			 * he wants to use the factory MAC for this slot).
			 */
			mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
		}
	} else {
		err = EINVAL;
	}
	mutex_exit(nxgep->genlock);
	return (err);
}

/*
 * nxge_m_mmac_get() - Get the MAC address and other information
 * related to the slot. mma_flags should be set to 0 in the call.
3631 * Note: although kstat shows MAC address as zero when a slot is 3632 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC 3633 * to the caller as long as the slot is not using a user MAC address. 3634 * The following table shows the rules, 3635 * 3636 * USED VENDOR mma_addr 3637 * ------------------------------------------------------------ 3638 * (1) Slot uses a user MAC: yes no user MAC 3639 * (2) Slot uses a factory MAC: yes yes factory MAC 3640 * (3) Slot is not used but is 3641 * factory MAC capable: no yes factory MAC 3642 * (4) Slot is not used and is 3643 * not factory MAC capable: no no 0 3644 * ------------------------------------------------------------ 3645 */ 3646 static int 3647 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr) 3648 { 3649 nxge_t *nxgep = arg; 3650 mac_addr_slot_t slot; 3651 nxge_mmac_t *mmac_info; 3652 nxge_status_t status; 3653 3654 slot = maddr->mma_slot; 3655 3656 mutex_enter(nxgep->genlock); 3657 3658 /* 3659 * Make sure that nxge is initialized, if _start() has 3660 * not been called. 3661 */ 3662 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3663 status = nxge_init(nxgep); 3664 if (status != NXGE_OK) { 3665 mutex_exit(nxgep->genlock); 3666 return (ENXIO); 3667 } 3668 } 3669 3670 mmac_info = &nxgep->nxge_mmac_info; 3671 3672 if (slot < 1 || slot > mmac_info->num_mmac) { 3673 mutex_exit(nxgep->genlock); 3674 return (EINVAL); 3675 } 3676 maddr->mma_flags = 0; 3677 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) 3678 maddr->mma_flags |= MMAC_SLOT_USED; 3679 3680 if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) { 3681 maddr->mma_flags |= MMAC_VENDOR_ADDR; 3682 bcopy(mmac_info->factory_mac_pool[slot], 3683 maddr->mma_addr, ETHERADDRL); 3684 maddr->mma_addrlen = ETHERADDRL; 3685 } else { 3686 if (maddr->mma_flags & MMAC_SLOT_USED) { 3687 bcopy(mmac_info->mac_pool[slot].addr, 3688 maddr->mma_addr, ETHERADDRL); 3689 maddr->mma_addrlen = ETHERADDRL; 3690 } else { 3691 bzero(maddr->mma_addr, ETHERADDRL); 3692 maddr->mma_addrlen = 0; 3693 } 3694 } 3695 mutex_exit(nxgep->genlock); 3696 return (0); 3697 } 3698 3699 3700 static boolean_t 3701 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3702 { 3703 nxge_t *nxgep = arg; 3704 uint32_t *txflags = cap_data; 3705 multiaddress_capab_t *mmacp = cap_data; 3706 3707 switch (cap) { 3708 case MAC_CAPAB_HCKSUM: 3709 *txflags = HCKSUM_INET_PARTIAL; 3710 break; 3711 case MAC_CAPAB_POLL: 3712 /* 3713 * There's nothing for us to fill in, simply returning 3714 * B_TRUE stating that we support polling is sufficient. 3715 */ 3716 break; 3717 3718 case MAC_CAPAB_MULTIADDRESS: 3719 mutex_enter(nxgep->genlock); 3720 3721 mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac; 3722 mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree; 3723 mmacp->maddr_flag = 0; /* 0 is requried by PSARC2006/265 */ 3724 /* 3725 * maddr_handle is driver's private data, passed back to 3726 * entry point functions as arg. 3727 */ 3728 mmacp->maddr_handle = nxgep; 3729 mmacp->maddr_add = nxge_m_mmac_add; 3730 mmacp->maddr_remove = nxge_m_mmac_remove; 3731 mmacp->maddr_modify = nxge_m_mmac_modify; 3732 mmacp->maddr_get = nxge_m_mmac_get; 3733 mmacp->maddr_reserve = nxge_m_mmac_reserve; 3734 3735 mutex_exit(nxgep->genlock); 3736 break; 3737 default: 3738 return (B_FALSE); 3739 } 3740 return (B_TRUE); 3741 } 3742 3743 /* 3744 * Module loading and removing entry points. 
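 * _init(), _fini() and _info() below register and unregister the
 * driver through the dev_ops/cb_ops tables that follow.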
3745 */ 3746 3747 static struct cb_ops nxge_cb_ops = { 3748 nodev, /* cb_open */ 3749 nodev, /* cb_close */ 3750 nodev, /* cb_strategy */ 3751 nodev, /* cb_print */ 3752 nodev, /* cb_dump */ 3753 nodev, /* cb_read */ 3754 nodev, /* cb_write */ 3755 nodev, /* cb_ioctl */ 3756 nodev, /* cb_devmap */ 3757 nodev, /* cb_mmap */ 3758 nodev, /* cb_segmap */ 3759 nochpoll, /* cb_chpoll */ 3760 ddi_prop_op, /* cb_prop_op */ 3761 NULL, 3762 D_MP, /* cb_flag */ 3763 CB_REV, /* rev */ 3764 nodev, /* int (*cb_aread)() */ 3765 nodev /* int (*cb_awrite)() */ 3766 }; 3767 3768 static struct dev_ops nxge_dev_ops = { 3769 DEVO_REV, /* devo_rev */ 3770 0, /* devo_refcnt */ 3771 nulldev, 3772 nulldev, /* devo_identify */ 3773 nulldev, /* devo_probe */ 3774 nxge_attach, /* devo_attach */ 3775 nxge_detach, /* devo_detach */ 3776 nodev, /* devo_reset */ 3777 &nxge_cb_ops, /* devo_cb_ops */ 3778 (struct bus_ops *)NULL, /* devo_bus_ops */ 3779 ddi_power /* devo_power */ 3780 }; 3781 3782 extern struct mod_ops mod_driverops; 3783 3784 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet %I%" 3785 3786 /* 3787 * Module linkage information for the kernel. 3788 */ 3789 static struct modldrv nxge_modldrv = { 3790 &mod_driverops, 3791 NXGE_DESC_VER, 3792 &nxge_dev_ops 3793 }; 3794 3795 static struct modlinkage modlinkage = { 3796 MODREV_1, (void *) &nxge_modldrv, NULL 3797 }; 3798 3799 int 3800 _init(void) 3801 { 3802 int status; 3803 3804 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 3805 mac_init_ops(&nxge_dev_ops, "nxge"); 3806 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 3807 if (status != 0) { 3808 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 3809 "failed to init device soft state")); 3810 goto _init_exit; 3811 } 3812 3813 status = mod_install(&modlinkage); 3814 if (status != 0) { 3815 ddi_soft_state_fini(&nxge_list); 3816 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 3817 goto _init_exit; 3818 } 3819 3820 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 3821 3822 _init_exit: 3823 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 3824 3825 return (status); 3826 } 3827 3828 int 3829 _fini(void) 3830 { 3831 int status; 3832 3833 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 3834 3835 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 3836 3837 if (nxge_mblks_pending) 3838 return (EBUSY); 3839 3840 status = mod_remove(&modlinkage); 3841 if (status != DDI_SUCCESS) { 3842 NXGE_DEBUG_MSG((NULL, MOD_CTL, 3843 "Module removal failed 0x%08x", 3844 status)); 3845 goto _fini_exit; 3846 } 3847 3848 mac_fini_ops(&nxge_dev_ops); 3849 3850 ddi_soft_state_fini(&nxge_list); 3851 3852 MUTEX_DESTROY(&nxge_common_lock); 3853 _fini_exit: 3854 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 3855 3856 return (status); 3857 } 3858 3859 int 3860 _info(struct modinfo *modinfop) 3861 { 3862 int status; 3863 3864 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 3865 status = mod_info(&modlinkage, modinfop); 3866 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 3867 3868 return (status); 3869 } 3870 3871 /*ARGSUSED*/ 3872 static nxge_status_t 3873 nxge_add_intrs(p_nxge_t nxgep) 3874 { 3875 3876 int intr_types; 3877 int type = 0; 3878 int ddi_status = DDI_SUCCESS; 3879 nxge_status_t status = NXGE_OK; 3880 3881 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 3882 3883 nxgep->nxge_intr_type.intr_registered = B_FALSE; 3884 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 3885 nxgep->nxge_intr_type.msi_intx_cnt = 0; 3886 nxgep->nxge_intr_type.intr_added = 0; 3887 
nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 3888 nxgep->nxge_intr_type.intr_type = 0; 3889 3890 if (nxgep->niu_type == N2_NIU) { 3891 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 3892 } else if (nxge_msi_enable) { 3893 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 3894 } 3895 3896 /* Get the supported interrupt types */ 3897 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 3898 != DDI_SUCCESS) { 3899 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 3900 "ddi_intr_get_supported_types failed: status 0x%08x", 3901 ddi_status)); 3902 return (NXGE_ERROR | NXGE_DDI_FAILED); 3903 } 3904 nxgep->nxge_intr_type.intr_types = intr_types; 3905 3906 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3907 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3908 3909 /* 3910 * Solaris MSIX is not supported yet. use MSI for now. 3911 * nxge_msi_enable (1): 3912 * 1 - MSI 2 - MSI-X others - FIXED 3913 */ 3914 switch (nxge_msi_enable) { 3915 default: 3916 type = DDI_INTR_TYPE_FIXED; 3917 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3918 "use fixed (intx emulation) type %08x", 3919 type)); 3920 break; 3921 3922 case 2: 3923 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3924 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3925 if (intr_types & DDI_INTR_TYPE_MSIX) { 3926 type = DDI_INTR_TYPE_MSIX; 3927 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3928 "ddi_intr_get_supported_types: MSIX 0x%08x", 3929 type)); 3930 } else if (intr_types & DDI_INTR_TYPE_MSI) { 3931 type = DDI_INTR_TYPE_MSI; 3932 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3933 "ddi_intr_get_supported_types: MSI 0x%08x", 3934 type)); 3935 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3936 type = DDI_INTR_TYPE_FIXED; 3937 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3938 "ddi_intr_get_supported_types: MSXED0x%08x", 3939 type)); 3940 } 3941 break; 3942 3943 case 1: 3944 if (intr_types & DDI_INTR_TYPE_MSI) { 3945 type = DDI_INTR_TYPE_MSI; 3946 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3947 "ddi_intr_get_supported_types: MSI 0x%08x", 3948 type)); 3949 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 3950 type = DDI_INTR_TYPE_MSIX; 3951 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3952 "ddi_intr_get_supported_types: MSIX 0x%08x", 3953 type)); 3954 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3955 type = DDI_INTR_TYPE_FIXED; 3956 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3957 "ddi_intr_get_supported_types: MSXED0x%08x", 3958 type)); 3959 } 3960 } 3961 3962 nxgep->nxge_intr_type.intr_type = type; 3963 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 3964 type == DDI_INTR_TYPE_FIXED) && 3965 nxgep->nxge_intr_type.niu_msi_enable) { 3966 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 3967 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3968 " nxge_add_intrs: " 3969 " nxge_add_intrs_adv failed: status 0x%08x", 3970 status)); 3971 return (status); 3972 } else { 3973 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3974 "interrupts registered : type %d", type)); 3975 nxgep->nxge_intr_type.intr_registered = B_TRUE; 3976 3977 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 3978 "\nAdded advanced nxge add_intr_adv " 3979 "intr type 0x%x\n", type)); 3980 3981 return (status); 3982 } 3983 } 3984 3985 if (!nxgep->nxge_intr_type.intr_registered) { 3986 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 3987 "failed to register interrupts")); 3988 return (NXGE_ERROR | NXGE_DDI_FAILED); 3989 } 3990 3991 
/*ARGSUSED*/
static nxge_status_t
nxge_add_soft_intrs(p_nxge_t nxgep)
{
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));

	nxgep->resched_id = NULL;
	nxgep->resched_running = B_FALSE;
	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
	    &nxgep->resched_id, NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
		    "ddi_add_softintr failed: status 0x%08x", ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));

	return (status);
}

static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)
{
	int		intr_type;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intr_type = intrp->intr_type;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		return (nxge_add_intrs_adv_type(nxgep, intr_type));

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));

	default:
		return (NXGE_ERROR);
	}
}


/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler = NULL;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/*
		 * MSI requires the vector count to be a power of 2;
		 * round navail down to the largest power of 2 it
		 * contains (e.g. 7 becomes 4, 12 becomes 8).
		 */
		int npow2 = 1;

		while ((npow2 << 1) <= navail)
			npow2 <<= 1;
		navail = npow2;
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}
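	/*
	 * DDI note: with DDI_INTR_ALLOC_STRICT, ddi_intr_alloc() fails
	 * outright unless all of the requested vectors can be granted,
	 * whereas DDI_INTR_ALLOC_NORMAL permits a partial grant
	 * (nactual <= navail). Fixed interrupts are therefore requested
	 * strictly, while MSI/MSI-X tolerate getting fewer vectors and
	 * let the logical-device-group setup below size itself to
	 * whatever was actually allocated.
	 */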
	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d", ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d", ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	case NEPTUNE:
	case NEPTUNE_2:
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 $%p arg2 $%p: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2, x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 $%p arg2 $%p: "
			    "nldvs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2, ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	(void) nxge_intr_ldgv_init(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}
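/*
 * nxge_add_intrs_adv_type_fix() is the fixed-interrupt (INTx) twin of
 * nxge_add_intrs_adv_type() above. The notable differences: allocation
 * is strict (see the behavior flag), and ldgp->intdata is programmed
 * via SID_DATA() only on Neptune; on N2/NIU it is simply not set here.
 */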
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler = NULL;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d", ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d", ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	case NEPTUNE:
	case NEPTUNE_2:
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}
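	/*
	 * Register a handler for each required logical device group
	 * (LDG): a group with a single logical device gets that
	 * device's own 1-1 handler, while a group shared by several
	 * devices gets the common group handler, which is expected to
	 * fan out to its members.
	 */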
	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		if (nxgep->niu_type != N2_NIU) {
			ldgp->intdata = SID_DATA(ldgp->func, x);
		}

		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "1-1 int handler(%d) ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		}

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type_fix: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	status = nxge_intr_ldgv_init(nxgep);
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));

	return (status);
}

static void
nxge_remove_intrs(p_nxge_t nxgep)
{
	int		i, inum;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_remove_intrs: interrupts not registered"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs: advanced"));

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum, intrp->msi_intx_cnt, intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) nxge_ldgv_uninit(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
}

/*ARGSUSED*/
static void
nxge_remove_soft_intrs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
	if (nxgep->resched_id) {
		ddi_remove_softintr(nxgep->resched_id);
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_remove_soft_intrs: removed"));
		nxgep->resched_id = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
}
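/*
 * The enable/disable paths below honor DDI_INTR_FLAG_BLOCK: when the
 * allocated interrupts are block-capable (typical of MSI/MSI-X), all
 * vectors are toggled with a single ddi_intr_block_enable() or
 * ddi_intr_block_disable() call; otherwise each handle is enabled or
 * disabled individually.
 */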
/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;
	int		status;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
		if (status == DDI_SUCCESS) {
			intrp->intr_enabled = B_TRUE;
		}
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable: enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t	*macp;
	int		status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = nxgep->mac.maxframesize -
	    sizeof (struct ether_header) - ETHERFCSL - 4;

	status = mac_register(macp, &nxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}
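/*
 * A worked example of the m_max_sdu arithmetic above, with illustrative
 * numbers: for a non-jumbo maximum frame size of 1522 bytes,
 * 1522 - 14 (Ethernet header) - 4 (FCS, ETHERFCSL) - 4 (VLAN tag
 * allowance) = 1500, so the MAC layer is advertised the standard
 * Ethernet MTU.
 */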
void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t		size;
	mblk_t		*nmp;
	uint8_t		blk_id;
	uint8_t		chan;
	uint32_t	err_id;
	err_inject_t	*eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;
	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
	switch (blk_id) {
	case MAC_BLK_ID:
		break;
	case TXMAC_BLK_ID:
		break;
	case RXMAC_BLK_ID:
		break;
	case MIF_BLK_ID:
		break;
	case IPP_BLK_ID:
		nxge_ipp_inject_err(nxgep, err_id);
		break;
	case TXC_BLK_ID:
		nxge_txc_inject_err(nxgep, err_id);
		break;
	case TXDMA_BLK_ID:
		nxge_txdma_inject_err(nxgep, err_id, chan);
		break;
	case RXDMA_BLK_ID:
		nxge_rxdma_inject_err(nxgep, err_id, chan);
		break;
	case ZCP_BLK_ID:
		nxge_zcp_inject_err(nxgep, err_id);
		break;
	case ESPC_BLK_ID:
		break;
	case FFLP_BLK_ID:
		break;
	case PHY_BLK_ID:
		break;
	case ETHER_SERDES_BLK_ID:
		break;
	case PCIE_SERDES_BLK_ID:
		break;
	case VIR_BLK_ID:
		break;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

	miocack(wq, mp, (int)size, 0);
}

static int
nxge_init_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

	p_dip = nxgep->p_dip;
	MUTEX_ENTER(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device: func # %d",
	    nxgep->function_num));
	/*
	 * Loop through the existing per-Neptune hardware list.
	 */
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device: func # %d "
		    "hw_p $%p parent dip $%p",
		    nxgep->function_num, hw_p, p_dip));
		if (hw_p->parent_devp == p_dip) {
			nxgep->nxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->nxge_p[nxgep->function_num] = nxgep;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_init_common_device: func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num, hw_p, p_dip, hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device: func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num, p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}
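/*
 * All nxge instances that sit behind the same parent dev_info node
 * (i.e. the functions of one Neptune/NIU chip) share a single
 * nxge_hw_list_t entry, and with it the cfg/tcam/vlan/mdio/mii locks
 * that serialize access to chip-global resources. The teardown below
 * unlinks an instance from that entry and frees the entry itself once
 * the last sharing instance detaches.
 */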
static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p, h_hw_p;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device: func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num, hw_p, p_dip, hw_p->ndevs));

			nxgep->nxge_hw_p = NULL;
			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				MUTEX_DESTROY(&hw_p->nxge_mii_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num, hw_p, p_dip,
				    hw_p->ndevs));

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device: "
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num, hw_p, p_dip,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device: "
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num, hw_p, p_dip,
					    hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}