1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * SunOs MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver. 30 */ 31 #include <sys/nxge/nxge_impl.h> 32 #include <sys/pcie.h> 33 34 uint32_t nxge_use_partition = 0; /* debug partition flag */ 35 uint32_t nxge_dma_obp_props_only = 1; /* use obp published props */ 36 uint32_t nxge_use_rdc_intr = 1; /* debug to assign rdc intr */ 37 /* 38 * until MSIX supported, assume msi, use 2 for msix 39 */ 40 uint32_t nxge_msi_enable = 1; /* debug: turn msi off */ 41 42 /* 43 * Globals: tunable parameters (/etc/system or adb) 44 * 45 */ 46 uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT; 47 uint32_t nxge_rbr_spare_size = 0; 48 uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT; 49 uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT; 50 uint32_t nxge_no_msg = 0; /* control message display */ 51 uint32_t nxge_no_link_notify = 0; /* control DL_NOTIFY */ 52 uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX; 53 uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN; 54 uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN; 55 uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU; 56 boolean_t nxge_jumbo_enable = B_FALSE; 57 uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT; 58 uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD; 59 60 /* 61 * Debugging flags: 62 * nxge_no_tx_lb : transmit load balancing 63 * nxge_tx_lb_policy: 0 - TCP port (default) 64 * 3 - DEST MAC 65 */ 66 uint32_t nxge_no_tx_lb = 0; 67 uint32_t nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP; 68 69 /* 70 * Add tunable to reduce the amount of time spent in the 71 * ISR doing Rx Processing. 72 */ 73 uint32_t nxge_max_rx_pkts = 1024; 74 75 /* 76 * Tunables to manage the receive buffer blocks. 77 * 78 * nxge_rx_threshold_hi: copy all buffers. 79 * nxge_rx_bcopy_size_type: receive buffer block size type. 80 * nxge_rx_threshold_lo: copy only up to tunable block size type. 81 */ 82 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6; 83 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0; 84 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3; 85 86 rtrace_t npi_rtracebuf; 87 88 #if defined(sun4v) 89 /* 90 * Hypervisor N2/NIU services information. 91 */ 92 static hsvc_info_t niu_hsvc = { 93 HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER, 94 NIU_MINOR_VER, "nxge" 95 }; 96 #endif 97 98 /* 99 * Function Prototypes 100 */ 101 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t); 102 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t); 103 static void nxge_unattach(p_nxge_t); 104 105 #if NXGE_PROPERTY 106 static void nxge_remove_hard_properties(p_nxge_t); 107 #endif 108 109 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t); 110 111 static nxge_status_t nxge_setup_mutexes(p_nxge_t); 112 static void nxge_destroy_mutexes(p_nxge_t); 113 114 static nxge_status_t nxge_map_regs(p_nxge_t nxgep); 115 static void nxge_unmap_regs(p_nxge_t nxgep); 116 #ifdef NXGE_DEBUG 117 static void nxge_test_map_regs(p_nxge_t nxgep); 118 #endif 119 120 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep); 121 static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep); 122 static void nxge_remove_intrs(p_nxge_t nxgep); 123 static void nxge_remove_soft_intrs(p_nxge_t nxgep); 124 125 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep); 126 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t); 127 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t); 128 static void nxge_intrs_enable(p_nxge_t nxgep); 129 static void nxge_intrs_disable(p_nxge_t nxgep); 130 131 static void nxge_suspend(p_nxge_t); 132 static nxge_status_t nxge_resume(p_nxge_t); 133 134 static nxge_status_t nxge_setup_dev(p_nxge_t); 135 static void nxge_destroy_dev(p_nxge_t); 136 137 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t); 138 static void nxge_free_mem_pool(p_nxge_t); 139 140 static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t); 141 static void nxge_free_rx_mem_pool(p_nxge_t); 142 143 static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t); 144 static void nxge_free_tx_mem_pool(p_nxge_t); 145 146 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t, 147 struct ddi_dma_attr *, 148 size_t, ddi_device_acc_attr_t *, uint_t, 149 p_nxge_dma_common_t); 150 151 static void nxge_dma_mem_free(p_nxge_dma_common_t); 152 153 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t, 154 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 155 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 156 157 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t, 158 p_nxge_dma_common_t *, size_t); 159 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 160 161 static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t, 162 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 163 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 164 165 static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t, 166 p_nxge_dma_common_t *, 167 size_t); 168 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 169 170 static int nxge_init_common_dev(p_nxge_t); 171 static void nxge_uninit_common_dev(p_nxge_t); 172 173 /* 174 * The next declarations are for the GLDv3 interface. 175 */ 176 static int nxge_m_start(void *); 177 static void nxge_m_stop(void *); 178 static int nxge_m_unicst(void *, const uint8_t *); 179 static int nxge_m_multicst(void *, boolean_t, const uint8_t *); 180 static int nxge_m_promisc(void *, boolean_t); 181 static void nxge_m_ioctl(void *, queue_t *, mblk_t *); 182 static void nxge_m_resources(void *); 183 mblk_t *nxge_m_tx(void *arg, mblk_t *); 184 static nxge_status_t nxge_mac_register(p_nxge_t); 185 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr, 186 mac_addr_slot_t slot); 187 static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, 188 boolean_t factory); 189 static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr); 190 static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr); 191 static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot); 192 static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr); 193 static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr); 194 195 #define NXGE_NEPTUNE_MAGIC 0x4E584745UL 196 #define MAX_DUMP_SZ 256 197 198 #define NXGE_M_CALLBACK_FLAGS (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB) 199 200 static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *); 201 static mac_callbacks_t nxge_m_callbacks = { 202 NXGE_M_CALLBACK_FLAGS, 203 nxge_m_stat, 204 nxge_m_start, 205 nxge_m_stop, 206 nxge_m_promisc, 207 nxge_m_multicst, 208 nxge_m_unicst, 209 nxge_m_tx, 210 nxge_m_resources, 211 nxge_m_ioctl, 212 nxge_m_getcapab 213 }; 214 215 void 216 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *); 217 218 /* 219 * These global variables control the message 220 * output. 221 */ 222 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG; 223 uint64_t nxge_debug_level = 0; 224 225 /* 226 * This list contains the instance structures for the Neptune 227 * devices present in the system. The lock exists to guarantee 228 * mutually exclusive access to the list. 229 */ 230 void *nxge_list = NULL; 231 232 void *nxge_hw_list = NULL; 233 nxge_os_mutex_t nxge_common_lock; 234 235 nxge_os_mutex_t nxge_mii_lock; 236 static uint32_t nxge_mii_lock_init = 0; 237 nxge_os_mutex_t nxge_mdio_lock; 238 static uint32_t nxge_mdio_lock_init = 0; 239 240 extern uint64_t npi_debug_level; 241 242 extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *); 243 extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *); 244 extern nxge_status_t nxge_ldgv_uninit(p_nxge_t); 245 extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t); 246 extern void nxge_fm_init(p_nxge_t, 247 ddi_device_acc_attr_t *, 248 ddi_device_acc_attr_t *, 249 ddi_dma_attr_t *); 250 extern void nxge_fm_fini(p_nxge_t); 251 extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t); 252 253 /* 254 * Count used to maintain the number of buffers being used 255 * by Neptune instances and loaned up to the upper layers. 256 */ 257 uint32_t nxge_mblks_pending = 0; 258 259 /* 260 * Device register access attributes for PIO. 261 */ 262 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = { 263 DDI_DEVICE_ATTR_V0, 264 DDI_STRUCTURE_LE_ACC, 265 DDI_STRICTORDER_ACC, 266 }; 267 268 /* 269 * Device descriptor access attributes for DMA. 270 */ 271 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = { 272 DDI_DEVICE_ATTR_V0, 273 DDI_STRUCTURE_LE_ACC, 274 DDI_STRICTORDER_ACC 275 }; 276 277 /* 278 * Device buffer access attributes for DMA. 279 */ 280 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = { 281 DDI_DEVICE_ATTR_V0, 282 DDI_STRUCTURE_BE_ACC, 283 DDI_STRICTORDER_ACC 284 }; 285 286 ddi_dma_attr_t nxge_desc_dma_attr = { 287 DMA_ATTR_V0, /* version number. */ 288 0, /* low address */ 289 0xffffffffffffffff, /* high address */ 290 0xffffffffffffffff, /* address counter max */ 291 #ifndef NIU_PA_WORKAROUND 292 0x100000, /* alignment */ 293 #else 294 0x2000, 295 #endif 296 0xfc00fc, /* dlim_burstsizes */ 297 0x1, /* minimum transfer size */ 298 0xffffffffffffffff, /* maximum transfer size */ 299 0xffffffffffffffff, /* maximum segment size */ 300 1, /* scatter/gather list length */ 301 (unsigned int) 1, /* granularity */ 302 0 /* attribute flags */ 303 }; 304 305 ddi_dma_attr_t nxge_tx_dma_attr = { 306 DMA_ATTR_V0, /* version number. */ 307 0, /* low address */ 308 0xffffffffffffffff, /* high address */ 309 0xffffffffffffffff, /* address counter max */ 310 #if defined(_BIG_ENDIAN) 311 0x2000, /* alignment */ 312 #else 313 0x1000, /* alignment */ 314 #endif 315 0xfc00fc, /* dlim_burstsizes */ 316 0x1, /* minimum transfer size */ 317 0xffffffffffffffff, /* maximum transfer size */ 318 0xffffffffffffffff, /* maximum segment size */ 319 5, /* scatter/gather list length */ 320 (unsigned int) 1, /* granularity */ 321 0 /* attribute flags */ 322 }; 323 324 ddi_dma_attr_t nxge_rx_dma_attr = { 325 DMA_ATTR_V0, /* version number. */ 326 0, /* low address */ 327 0xffffffffffffffff, /* high address */ 328 0xffffffffffffffff, /* address counter max */ 329 0x2000, /* alignment */ 330 0xfc00fc, /* dlim_burstsizes */ 331 0x1, /* minimum transfer size */ 332 0xffffffffffffffff, /* maximum transfer size */ 333 0xffffffffffffffff, /* maximum segment size */ 334 1, /* scatter/gather list length */ 335 (unsigned int) 1, /* granularity */ 336 0 /* attribute flags */ 337 }; 338 339 ddi_dma_lim_t nxge_dma_limits = { 340 (uint_t)0, /* dlim_addr_lo */ 341 (uint_t)0xffffffff, /* dlim_addr_hi */ 342 (uint_t)0xffffffff, /* dlim_cntr_max */ 343 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */ 344 0x1, /* dlim_minxfer */ 345 1024 /* dlim_speed */ 346 }; 347 348 dma_method_t nxge_force_dma = DVMA; 349 350 /* 351 * dma chunk sizes. 352 * 353 * Try to allocate the largest possible size 354 * so that fewer number of dma chunks would be managed 355 */ 356 #ifdef NIU_PA_WORKAROUND 357 size_t alloc_sizes [] = {0x2000}; 358 #else 359 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000, 360 0x10000, 0x20000, 0x40000, 0x80000, 361 0x100000, 0x200000, 0x400000, 0x800000, 0x1000000}; 362 #endif 363 364 /* 365 * Translate "dev_t" to a pointer to the associated "dev_info_t". 366 */ 367 368 static int 369 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 370 { 371 p_nxge_t nxgep = NULL; 372 int instance; 373 int status = DDI_SUCCESS; 374 nxge_status_t nxge_status = NXGE_OK; 375 uint8_t portn; 376 nxge_mmac_t *mmac_info; 377 378 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach")); 379 380 /* 381 * Get the device instance since we'll need to setup 382 * or retrieve a soft state for this instance. 383 */ 384 instance = ddi_get_instance(dip); 385 386 switch (cmd) { 387 case DDI_ATTACH: 388 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH")); 389 break; 390 391 case DDI_RESUME: 392 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME")); 393 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 394 if (nxgep == NULL) { 395 status = DDI_FAILURE; 396 break; 397 } 398 if (nxgep->dip != dip) { 399 status = DDI_FAILURE; 400 break; 401 } 402 if (nxgep->suspended == DDI_PM_SUSPEND) { 403 status = ddi_dev_is_needed(nxgep->dip, 0, 1); 404 } else { 405 nxge_status = nxge_resume(nxgep); 406 } 407 goto nxge_attach_exit; 408 409 case DDI_PM_RESUME: 410 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME")); 411 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 412 if (nxgep == NULL) { 413 status = DDI_FAILURE; 414 break; 415 } 416 if (nxgep->dip != dip) { 417 status = DDI_FAILURE; 418 break; 419 } 420 nxge_status = nxge_resume(nxgep); 421 goto nxge_attach_exit; 422 423 default: 424 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown")); 425 status = DDI_FAILURE; 426 goto nxge_attach_exit; 427 } 428 429 430 if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) { 431 status = DDI_FAILURE; 432 goto nxge_attach_exit; 433 } 434 435 nxgep = ddi_get_soft_state(nxge_list, instance); 436 if (nxgep == NULL) { 437 goto nxge_attach_fail; 438 } 439 440 nxgep->drv_state = 0; 441 nxgep->dip = dip; 442 nxgep->instance = instance; 443 nxgep->p_dip = ddi_get_parent(dip); 444 nxgep->nxge_debug_level = nxge_debug_level; 445 npi_debug_level = nxge_debug_level; 446 447 nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr, 448 &nxge_rx_dma_attr); 449 450 status = nxge_map_regs(nxgep); 451 if (status != NXGE_OK) { 452 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed")); 453 goto nxge_attach_fail; 454 } 455 456 status = nxge_init_common_dev(nxgep); 457 if (status != NXGE_OK) { 458 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 459 "nxge_init_common_dev failed")); 460 goto nxge_attach_fail; 461 } 462 463 portn = NXGE_GET_PORT_NUM(nxgep->function_num); 464 nxgep->mac.portnum = portn; 465 if ((portn == 0) || (portn == 1)) 466 nxgep->mac.porttype = PORT_TYPE_XMAC; 467 else 468 nxgep->mac.porttype = PORT_TYPE_BMAC; 469 /* 470 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC) 471 * internally, the rest 2 ports use BMAC (1G "Big" MAC). 472 * The two types of MACs have different characterizations. 473 */ 474 mmac_info = &nxgep->nxge_mmac_info; 475 if (nxgep->function_num < 2) { 476 mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY; 477 mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY; 478 } else { 479 mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY; 480 mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY; 481 } 482 /* 483 * Setup the Ndd parameters for the this instance. 484 */ 485 nxge_init_param(nxgep); 486 487 /* 488 * Setup Register Tracing Buffer. 489 */ 490 npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf); 491 492 /* init stats ptr */ 493 nxge_init_statsp(nxgep); 494 status = nxge_get_xcvr_type(nxgep); 495 496 if (status != NXGE_OK) { 497 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_attach: " 498 " Couldn't determine card type" 499 " .... exit ")); 500 goto nxge_attach_fail; 501 } 502 503 if ((nxgep->niu_type == NEPTUNE) && 504 (nxgep->mac.portmode == PORT_10G_FIBER)) { 505 nxgep->niu_type = NEPTUNE_2; 506 } 507 508 status = nxge_get_config_properties(nxgep); 509 510 if (status != NXGE_OK) { 511 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed")); 512 goto nxge_attach_fail; 513 } 514 515 nxge_get_xcvr_properties(nxgep); 516 517 /* 518 * Setup the Kstats for the driver. 519 */ 520 nxge_setup_kstats(nxgep); 521 522 nxge_setup_param(nxgep); 523 524 status = nxge_setup_system_dma_pages(nxgep); 525 if (status != NXGE_OK) { 526 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed")); 527 goto nxge_attach_fail; 528 } 529 530 #if defined(sun4v) 531 if (nxgep->niu_type == N2_NIU) { 532 nxgep->niu_hsvc_available = B_FALSE; 533 bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t)); 534 if ((status = 535 hsvc_register(&nxgep->niu_hsvc, 536 &nxgep->niu_min_ver)) != 0) { 537 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 538 "nxge_attach: " 539 "%s: cannot negotiate " 540 "hypervisor services " 541 "revision %d " 542 "group: 0x%lx " 543 "major: 0x%lx minor: 0x%lx " 544 "errno: %d", 545 niu_hsvc.hsvc_modname, 546 niu_hsvc.hsvc_rev, 547 niu_hsvc.hsvc_group, 548 niu_hsvc.hsvc_major, 549 niu_hsvc.hsvc_minor, 550 status)); 551 status = DDI_FAILURE; 552 goto nxge_attach_fail; 553 } 554 555 nxgep->niu_hsvc_available = B_TRUE; 556 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 557 "NIU Hypervisor service enabled")); 558 } 559 #endif 560 561 nxge_hw_id_init(nxgep); 562 nxge_hw_init_niu_common(nxgep); 563 564 status = nxge_setup_mutexes(nxgep); 565 if (status != NXGE_OK) { 566 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed")); 567 goto nxge_attach_fail; 568 } 569 570 status = nxge_setup_dev(nxgep); 571 if (status != DDI_SUCCESS) { 572 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed")); 573 goto nxge_attach_fail; 574 } 575 576 status = nxge_add_intrs(nxgep); 577 if (status != DDI_SUCCESS) { 578 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed")); 579 goto nxge_attach_fail; 580 } 581 status = nxge_add_soft_intrs(nxgep); 582 if (status != DDI_SUCCESS) { 583 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed")); 584 goto nxge_attach_fail; 585 } 586 587 /* 588 * Enable interrupts. 589 */ 590 nxge_intrs_enable(nxgep); 591 592 if ((status = nxge_mac_register(nxgep)) != DDI_SUCCESS) { 593 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 594 "unable to register to mac layer (%d)", status)); 595 goto nxge_attach_fail; 596 } 597 598 mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN); 599 600 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)", 601 instance)); 602 603 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 604 605 goto nxge_attach_exit; 606 607 nxge_attach_fail: 608 nxge_unattach(nxgep); 609 if (nxge_status != NXGE_OK) 610 nxge_status = (NXGE_ERROR | NXGE_DDI_FAILED); 611 nxgep = NULL; 612 613 nxge_attach_exit: 614 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x", 615 status)); 616 617 return (status); 618 } 619 620 static int 621 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 622 { 623 int status = DDI_SUCCESS; 624 int instance; 625 p_nxge_t nxgep = NULL; 626 627 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach")); 628 instance = ddi_get_instance(dip); 629 nxgep = ddi_get_soft_state(nxge_list, instance); 630 if (nxgep == NULL) { 631 status = DDI_FAILURE; 632 goto nxge_detach_exit; 633 } 634 635 switch (cmd) { 636 case DDI_DETACH: 637 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH")); 638 break; 639 640 case DDI_PM_SUSPEND: 641 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND")); 642 nxgep->suspended = DDI_PM_SUSPEND; 643 nxge_suspend(nxgep); 644 break; 645 646 case DDI_SUSPEND: 647 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND")); 648 if (nxgep->suspended != DDI_PM_SUSPEND) { 649 nxgep->suspended = DDI_SUSPEND; 650 nxge_suspend(nxgep); 651 } 652 break; 653 654 default: 655 status = DDI_FAILURE; 656 } 657 658 if (cmd != DDI_DETACH) 659 goto nxge_detach_exit; 660 661 /* 662 * Stop the xcvr polling. 663 */ 664 nxgep->suspended = cmd; 665 666 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 667 668 if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) { 669 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 670 "<== nxge_detach status = 0x%08X", status)); 671 return (DDI_FAILURE); 672 } 673 674 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 675 "<== nxge_detach (mac_unregister) status = 0x%08X", status)); 676 677 nxge_unattach(nxgep); 678 nxgep = NULL; 679 680 nxge_detach_exit: 681 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X", 682 status)); 683 684 return (status); 685 } 686 687 static void 688 nxge_unattach(p_nxge_t nxgep) 689 { 690 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach")); 691 692 if (nxgep == NULL || nxgep->dev_regs == NULL) { 693 return; 694 } 695 696 if (nxgep->nxge_hw_p) { 697 nxge_uninit_common_dev(nxgep); 698 nxgep->nxge_hw_p = NULL; 699 } 700 701 if (nxgep->nxge_timerid) { 702 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 703 nxgep->nxge_timerid = 0; 704 } 705 706 #if defined(sun4v) 707 if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) { 708 (void) hsvc_unregister(&nxgep->niu_hsvc); 709 nxgep->niu_hsvc_available = B_FALSE; 710 } 711 #endif 712 /* 713 * Stop any further interrupts. 714 */ 715 nxge_remove_intrs(nxgep); 716 717 /* remove soft interrups */ 718 nxge_remove_soft_intrs(nxgep); 719 720 /* 721 * Stop the device and free resources. 722 */ 723 nxge_destroy_dev(nxgep); 724 725 /* 726 * Tear down the ndd parameters setup. 727 */ 728 nxge_destroy_param(nxgep); 729 730 /* 731 * Tear down the kstat setup. 732 */ 733 nxge_destroy_kstats(nxgep); 734 735 /* 736 * Destroy all mutexes. 737 */ 738 nxge_destroy_mutexes(nxgep); 739 740 /* 741 * Remove the list of ndd parameters which 742 * were setup during attach. 743 */ 744 if (nxgep->dip) { 745 NXGE_DEBUG_MSG((nxgep, OBP_CTL, 746 " nxge_unattach: remove all properties")); 747 748 (void) ddi_prop_remove_all(nxgep->dip); 749 } 750 751 #if NXGE_PROPERTY 752 nxge_remove_hard_properties(nxgep); 753 #endif 754 755 /* 756 * Unmap the register setup. 757 */ 758 nxge_unmap_regs(nxgep); 759 760 nxge_fm_fini(nxgep); 761 762 ddi_soft_state_free(nxge_list, nxgep->instance); 763 764 NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach")); 765 } 766 767 static char n2_siu_name[] = "niu"; 768 769 static nxge_status_t 770 nxge_map_regs(p_nxge_t nxgep) 771 { 772 int ddi_status = DDI_SUCCESS; 773 p_dev_regs_t dev_regs; 774 char buf[MAXPATHLEN + 1]; 775 char *devname; 776 #ifdef NXGE_DEBUG 777 char *sysname; 778 #endif 779 off_t regsize; 780 nxge_status_t status = NXGE_OK; 781 #if !defined(_BIG_ENDIAN) 782 off_t pci_offset; 783 uint16_t pcie_devctl; 784 #endif 785 786 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs")); 787 nxgep->dev_regs = NULL; 788 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP); 789 dev_regs->nxge_regh = NULL; 790 dev_regs->nxge_pciregh = NULL; 791 dev_regs->nxge_msix_regh = NULL; 792 dev_regs->nxge_vir_regh = NULL; 793 dev_regs->nxge_vir2_regh = NULL; 794 nxgep->niu_type = NEPTUNE; 795 796 devname = ddi_pathname(nxgep->dip, buf); 797 ASSERT(strlen(devname) > 0); 798 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 799 "nxge_map_regs: pathname devname %s", devname)); 800 801 if (strstr(devname, n2_siu_name)) { 802 /* N2/NIU */ 803 nxgep->niu_type = N2_NIU; 804 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 805 "nxge_map_regs: N2/NIU devname %s", devname)); 806 /* get function number */ 807 nxgep->function_num = 808 (devname[strlen(devname) -1] == '1' ? 1 : 0); 809 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 810 "nxge_map_regs: N2/NIU function number %d", 811 nxgep->function_num)); 812 } else { 813 int *prop_val; 814 uint_t prop_len; 815 uint8_t func_num; 816 817 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 818 0, "reg", 819 &prop_val, &prop_len) != DDI_PROP_SUCCESS) { 820 NXGE_DEBUG_MSG((nxgep, VPD_CTL, 821 "Reg property not found")); 822 ddi_status = DDI_FAILURE; 823 goto nxge_map_regs_fail0; 824 825 } else { 826 func_num = (prop_val[0] >> 8) & 0x7; 827 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 828 "Reg property found: fun # %d", 829 func_num)); 830 nxgep->function_num = func_num; 831 ddi_prop_free(prop_val); 832 } 833 } 834 835 switch (nxgep->niu_type) { 836 case NEPTUNE: 837 case NEPTUNE_2: 838 default: 839 (void) ddi_dev_regsize(nxgep->dip, 0, ®size); 840 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 841 "nxge_map_regs: pci config size 0x%x", regsize)); 842 843 ddi_status = ddi_regs_map_setup(nxgep->dip, 0, 844 (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0, 845 &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh); 846 if (ddi_status != DDI_SUCCESS) { 847 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 848 "ddi_map_regs, nxge bus config regs failed")); 849 goto nxge_map_regs_fail0; 850 } 851 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 852 "nxge_map_reg: PCI config addr 0x%0llx " 853 " handle 0x%0llx", dev_regs->nxge_pciregp, 854 dev_regs->nxge_pciregh)); 855 /* 856 * IMP IMP 857 * workaround for bit swapping bug in HW 858 * which ends up in no-snoop = yes 859 * resulting, in DMA not synched properly 860 */ 861 #if !defined(_BIG_ENDIAN) 862 /* workarounds for x86 systems */ 863 pci_offset = 0x80 + PCIE_DEVCTL; 864 pcie_devctl = 0x0; 865 pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP; 866 pcie_devctl |= PCIE_DEVCTL_RO_EN; 867 pci_config_put16(dev_regs->nxge_pciregh, pci_offset, 868 pcie_devctl); 869 #endif 870 871 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 872 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 873 "nxge_map_regs: pio size 0x%x", regsize)); 874 /* set up the device mapped register */ 875 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 876 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 877 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 878 if (ddi_status != DDI_SUCCESS) { 879 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 880 "ddi_map_regs for Neptune global reg failed")); 881 goto nxge_map_regs_fail1; 882 } 883 884 /* set up the msi/msi-x mapped register */ 885 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 886 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 887 "nxge_map_regs: msix size 0x%x", regsize)); 888 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 889 (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0, 890 &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh); 891 if (ddi_status != DDI_SUCCESS) { 892 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 893 "ddi_map_regs for msi reg failed")); 894 goto nxge_map_regs_fail2; 895 } 896 897 /* set up the vio region mapped register */ 898 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 899 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 900 "nxge_map_regs: vio size 0x%x", regsize)); 901 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 902 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 903 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 904 905 if (ddi_status != DDI_SUCCESS) { 906 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 907 "ddi_map_regs for nxge vio reg failed")); 908 goto nxge_map_regs_fail3; 909 } 910 nxgep->dev_regs = dev_regs; 911 912 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh); 913 NPI_PCI_ADD_HANDLE_SET(nxgep, 914 (npi_reg_ptr_t)dev_regs->nxge_pciregp); 915 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh); 916 NPI_MSI_ADD_HANDLE_SET(nxgep, 917 (npi_reg_ptr_t)dev_regs->nxge_msix_regp); 918 919 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 920 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 921 922 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 923 NPI_REG_ADD_HANDLE_SET(nxgep, 924 (npi_reg_ptr_t)dev_regs->nxge_regp); 925 926 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 927 NPI_VREG_ADD_HANDLE_SET(nxgep, 928 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 929 930 break; 931 932 case N2_NIU: 933 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU")); 934 /* 935 * Set up the device mapped register (FWARC 2006/556) 936 * (changed back to 1: reg starts at 1!) 937 */ 938 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 939 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 940 "nxge_map_regs: dev size 0x%x", regsize)); 941 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 942 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 943 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 944 945 if (ddi_status != DDI_SUCCESS) { 946 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 947 "ddi_map_regs for N2/NIU, global reg failed ")); 948 goto nxge_map_regs_fail1; 949 } 950 951 /* set up the vio region mapped register */ 952 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 953 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 954 "nxge_map_regs: vio (1) size 0x%x", regsize)); 955 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 956 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 957 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 958 959 if (ddi_status != DDI_SUCCESS) { 960 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 961 "ddi_map_regs for nxge vio reg failed")); 962 goto nxge_map_regs_fail2; 963 } 964 /* set up the vio region mapped register */ 965 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 966 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 967 "nxge_map_regs: vio (3) size 0x%x", regsize)); 968 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 969 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0, 970 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh); 971 972 if (ddi_status != DDI_SUCCESS) { 973 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 974 "ddi_map_regs for nxge vio2 reg failed")); 975 goto nxge_map_regs_fail3; 976 } 977 nxgep->dev_regs = dev_regs; 978 979 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 980 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 981 982 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 983 NPI_REG_ADD_HANDLE_SET(nxgep, 984 (npi_reg_ptr_t)dev_regs->nxge_regp); 985 986 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 987 NPI_VREG_ADD_HANDLE_SET(nxgep, 988 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 989 990 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh); 991 NPI_V2REG_ADD_HANDLE_SET(nxgep, 992 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp); 993 994 break; 995 } 996 997 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx " 998 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh)); 999 1000 goto nxge_map_regs_exit; 1001 nxge_map_regs_fail3: 1002 if (dev_regs->nxge_msix_regh) { 1003 ddi_regs_map_free(&dev_regs->nxge_msix_regh); 1004 } 1005 if (dev_regs->nxge_vir_regh) { 1006 ddi_regs_map_free(&dev_regs->nxge_regh); 1007 } 1008 nxge_map_regs_fail2: 1009 if (dev_regs->nxge_regh) { 1010 ddi_regs_map_free(&dev_regs->nxge_regh); 1011 } 1012 nxge_map_regs_fail1: 1013 if (dev_regs->nxge_pciregh) { 1014 ddi_regs_map_free(&dev_regs->nxge_pciregh); 1015 } 1016 nxge_map_regs_fail0: 1017 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory")); 1018 kmem_free(dev_regs, sizeof (dev_regs_t)); 1019 1020 nxge_map_regs_exit: 1021 if (ddi_status != DDI_SUCCESS) 1022 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1023 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs")); 1024 return (status); 1025 } 1026 1027 static void 1028 nxge_unmap_regs(p_nxge_t nxgep) 1029 { 1030 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs")); 1031 if (nxgep->dev_regs) { 1032 if (nxgep->dev_regs->nxge_pciregh) { 1033 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1034 "==> nxge_unmap_regs: bus")); 1035 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh); 1036 nxgep->dev_regs->nxge_pciregh = NULL; 1037 } 1038 if (nxgep->dev_regs->nxge_regh) { 1039 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1040 "==> nxge_unmap_regs: device registers")); 1041 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1042 nxgep->dev_regs->nxge_regh = NULL; 1043 } 1044 if (nxgep->dev_regs->nxge_msix_regh) { 1045 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1046 "==> nxge_unmap_regs: device interrupts")); 1047 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); 1048 nxgep->dev_regs->nxge_msix_regh = NULL; 1049 } 1050 if (nxgep->dev_regs->nxge_vir_regh) { 1051 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1052 "==> nxge_unmap_regs: vio region")); 1053 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); 1054 nxgep->dev_regs->nxge_vir_regh = NULL; 1055 } 1056 if (nxgep->dev_regs->nxge_vir2_regh) { 1057 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1058 "==> nxge_unmap_regs: vio2 region")); 1059 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); 1060 nxgep->dev_regs->nxge_vir2_regh = NULL; 1061 } 1062 1063 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); 1064 nxgep->dev_regs = NULL; 1065 } 1066 1067 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); 1068 } 1069 1070 static nxge_status_t 1071 nxge_setup_mutexes(p_nxge_t nxgep) 1072 { 1073 int ddi_status = DDI_SUCCESS; 1074 nxge_status_t status = NXGE_OK; 1075 nxge_classify_t *classify_ptr; 1076 int partition; 1077 1078 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); 1079 1080 /* 1081 * Get the interrupt cookie so the mutexes can be 1082 * Initialized. 1083 */ 1084 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, 1085 &nxgep->interrupt_cookie); 1086 if (ddi_status != DDI_SUCCESS) { 1087 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1088 "<== nxge_setup_mutexes: failed 0x%x", ddi_status)); 1089 goto nxge_setup_mutexes_exit; 1090 } 1091 1092 /* Initialize global mutex */ 1093 1094 if (nxge_mdio_lock_init == 0) { 1095 MUTEX_INIT(&nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 1096 } 1097 atomic_add_32(&nxge_mdio_lock_init, 1); 1098 1099 if (nxge_mii_lock_init == 0) { 1100 MUTEX_INIT(&nxge_mii_lock, NULL, MUTEX_DRIVER, NULL); 1101 } 1102 atomic_add_32(&nxge_mii_lock_init, 1); 1103 1104 nxgep->drv_state |= STATE_MDIO_LOCK_INIT; 1105 nxgep->drv_state |= STATE_MII_LOCK_INIT; 1106 1107 /* 1108 * Initialize mutex's for this device. 1109 */ 1110 MUTEX_INIT(nxgep->genlock, NULL, 1111 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1112 MUTEX_INIT(&nxgep->ouraddr_lock, NULL, 1113 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1114 MUTEX_INIT(&nxgep->mif_lock, NULL, 1115 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1116 RW_INIT(&nxgep->filter_lock, NULL, 1117 RW_DRIVER, (void *)nxgep->interrupt_cookie); 1118 1119 classify_ptr = &nxgep->classifier; 1120 /* 1121 * FFLP Mutexes are never used in interrupt context 1122 * as fflp operation can take very long time to 1123 * complete and hence not suitable to invoke from interrupt 1124 * handlers. 1125 */ 1126 MUTEX_INIT(&classify_ptr->tcam_lock, NULL, 1127 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1128 if (nxgep->niu_type == NEPTUNE) { 1129 MUTEX_INIT(&classify_ptr->fcram_lock, NULL, 1130 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1131 for (partition = 0; partition < MAX_PARTITION; partition++) { 1132 MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL, 1133 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1134 } 1135 } 1136 1137 nxge_setup_mutexes_exit: 1138 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1139 "<== nxge_setup_mutexes status = %x", status)); 1140 1141 if (ddi_status != DDI_SUCCESS) 1142 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1143 1144 return (status); 1145 } 1146 1147 static void 1148 nxge_destroy_mutexes(p_nxge_t nxgep) 1149 { 1150 int partition; 1151 nxge_classify_t *classify_ptr; 1152 1153 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes")); 1154 RW_DESTROY(&nxgep->filter_lock); 1155 MUTEX_DESTROY(&nxgep->mif_lock); 1156 MUTEX_DESTROY(&nxgep->ouraddr_lock); 1157 MUTEX_DESTROY(nxgep->genlock); 1158 1159 classify_ptr = &nxgep->classifier; 1160 MUTEX_DESTROY(&classify_ptr->tcam_lock); 1161 1162 /* free data structures, based on HW type */ 1163 if (nxgep->niu_type == NEPTUNE) { 1164 MUTEX_DESTROY(&classify_ptr->fcram_lock); 1165 for (partition = 0; partition < MAX_PARTITION; partition++) { 1166 MUTEX_DESTROY(&classify_ptr->hash_lock[partition]); 1167 } 1168 } 1169 if (nxgep->drv_state & STATE_MDIO_LOCK_INIT) { 1170 if (nxge_mdio_lock_init == 1) { 1171 MUTEX_DESTROY(&nxge_mdio_lock); 1172 } 1173 atomic_add_32(&nxge_mdio_lock_init, -1); 1174 } 1175 if (nxgep->drv_state & STATE_MII_LOCK_INIT) { 1176 if (nxge_mii_lock_init == 1) { 1177 MUTEX_DESTROY(&nxge_mii_lock); 1178 } 1179 atomic_add_32(&nxge_mii_lock_init, -1); 1180 } 1181 1182 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes")); 1183 } 1184 1185 nxge_status_t 1186 nxge_init(p_nxge_t nxgep) 1187 { 1188 nxge_status_t status = NXGE_OK; 1189 1190 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init")); 1191 1192 if (nxgep->drv_state & STATE_HW_INITIALIZED) { 1193 return (status); 1194 } 1195 1196 /* 1197 * Allocate system memory for the receive/transmit buffer blocks 1198 * and receive/transmit descriptor rings. 1199 */ 1200 status = nxge_alloc_mem_pool(nxgep); 1201 if (status != NXGE_OK) { 1202 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n")); 1203 goto nxge_init_fail1; 1204 } 1205 1206 /* 1207 * Initialize and enable TXC registers 1208 * (Globally enable TX controller, 1209 * enable a port, configure dma channel bitmap, 1210 * configure the max burst size). 1211 */ 1212 status = nxge_txc_init(nxgep); 1213 if (status != NXGE_OK) { 1214 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n")); 1215 goto nxge_init_fail2; 1216 } 1217 1218 /* 1219 * Initialize and enable TXDMA channels. 1220 */ 1221 status = nxge_init_txdma_channels(nxgep); 1222 if (status != NXGE_OK) { 1223 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n")); 1224 goto nxge_init_fail3; 1225 } 1226 1227 /* 1228 * Initialize and enable RXDMA channels. 1229 */ 1230 status = nxge_init_rxdma_channels(nxgep); 1231 if (status != NXGE_OK) { 1232 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n")); 1233 goto nxge_init_fail4; 1234 } 1235 1236 /* 1237 * Initialize TCAM and FCRAM (Neptune). 1238 */ 1239 status = nxge_classify_init(nxgep); 1240 if (status != NXGE_OK) { 1241 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n")); 1242 goto nxge_init_fail5; 1243 } 1244 1245 /* 1246 * Initialize ZCP 1247 */ 1248 status = nxge_zcp_init(nxgep); 1249 if (status != NXGE_OK) { 1250 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n")); 1251 goto nxge_init_fail5; 1252 } 1253 1254 /* 1255 * Initialize IPP. 1256 */ 1257 status = nxge_ipp_init(nxgep); 1258 if (status != NXGE_OK) { 1259 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n")); 1260 goto nxge_init_fail5; 1261 } 1262 1263 /* 1264 * Initialize the MAC block. 1265 */ 1266 status = nxge_mac_init(nxgep); 1267 if (status != NXGE_OK) { 1268 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n")); 1269 goto nxge_init_fail5; 1270 } 1271 1272 nxge_intrs_enable(nxgep); 1273 1274 /* 1275 * Enable hardware interrupts. 1276 */ 1277 nxge_intr_hw_enable(nxgep); 1278 nxgep->drv_state |= STATE_HW_INITIALIZED; 1279 1280 goto nxge_init_exit; 1281 1282 nxge_init_fail5: 1283 nxge_uninit_rxdma_channels(nxgep); 1284 nxge_init_fail4: 1285 nxge_uninit_txdma_channels(nxgep); 1286 nxge_init_fail3: 1287 (void) nxge_txc_uninit(nxgep); 1288 nxge_init_fail2: 1289 nxge_free_mem_pool(nxgep); 1290 nxge_init_fail1: 1291 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1292 "<== nxge_init status (failed) = 0x%08x", status)); 1293 return (status); 1294 1295 nxge_init_exit: 1296 1297 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x", 1298 status)); 1299 return (status); 1300 } 1301 1302 1303 timeout_id_t 1304 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec) 1305 { 1306 if ((nxgep->suspended == 0) || 1307 (nxgep->suspended == DDI_RESUME)) { 1308 return (timeout(func, (caddr_t)nxgep, 1309 drv_usectohz(1000 * msec))); 1310 } 1311 return (NULL); 1312 } 1313 1314 /*ARGSUSED*/ 1315 void 1316 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid) 1317 { 1318 if (timerid) { 1319 (void) untimeout(timerid); 1320 } 1321 } 1322 1323 void 1324 nxge_uninit(p_nxge_t nxgep) 1325 { 1326 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit")); 1327 1328 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1329 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1330 "==> nxge_uninit: not initialized")); 1331 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1332 "<== nxge_uninit")); 1333 return; 1334 } 1335 1336 /* stop timer */ 1337 if (nxgep->nxge_timerid) { 1338 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 1339 nxgep->nxge_timerid = 0; 1340 } 1341 1342 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1343 (void) nxge_intr_hw_disable(nxgep); 1344 1345 /* 1346 * Reset the receive MAC side. 1347 */ 1348 (void) nxge_rx_mac_disable(nxgep); 1349 1350 /* Disable and soft reset the IPP */ 1351 (void) nxge_ipp_disable(nxgep); 1352 1353 /* Free classification resources */ 1354 (void) nxge_classify_uninit(nxgep); 1355 1356 /* 1357 * Reset the transmit/receive DMA side. 1358 */ 1359 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 1360 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1361 1362 nxge_uninit_txdma_channels(nxgep); 1363 nxge_uninit_rxdma_channels(nxgep); 1364 1365 /* 1366 * Reset the transmit MAC side. 1367 */ 1368 (void) nxge_tx_mac_disable(nxgep); 1369 1370 nxge_free_mem_pool(nxgep); 1371 1372 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1373 1374 nxgep->drv_state &= ~STATE_HW_INITIALIZED; 1375 1376 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: " 1377 "nxge_mblks_pending %d", nxge_mblks_pending)); 1378 } 1379 1380 void 1381 nxge_get64(p_nxge_t nxgep, p_mblk_t mp) 1382 { 1383 uint64_t reg; 1384 uint64_t regdata; 1385 int i, retry; 1386 1387 bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t)); 1388 regdata = 0; 1389 retry = 1; 1390 1391 for (i = 0; i < retry; i++) { 1392 NXGE_REG_RD64(nxgep->npi_handle, reg, ®data); 1393 } 1394 bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t)); 1395 } 1396 1397 void 1398 nxge_put64(p_nxge_t nxgep, p_mblk_t mp) 1399 { 1400 uint64_t reg; 1401 uint64_t buf[2]; 1402 1403 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t)); 1404 reg = buf[0]; 1405 1406 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]); 1407 } 1408 1409 1410 nxge_os_mutex_t nxgedebuglock; 1411 int nxge_debug_init = 0; 1412 1413 /*ARGSUSED*/ 1414 /*VARARGS*/ 1415 void 1416 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...) 1417 { 1418 char msg_buffer[1048]; 1419 char prefix_buffer[32]; 1420 int instance; 1421 uint64_t debug_level; 1422 int cmn_level = CE_CONT; 1423 va_list ap; 1424 1425 debug_level = (nxgep == NULL) ? nxge_debug_level : 1426 nxgep->nxge_debug_level; 1427 1428 if ((level & debug_level) || 1429 (level == NXGE_NOTE) || 1430 (level == NXGE_ERR_CTL)) { 1431 /* do the msg processing */ 1432 if (nxge_debug_init == 0) { 1433 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 1434 nxge_debug_init = 1; 1435 } 1436 1437 MUTEX_ENTER(&nxgedebuglock); 1438 1439 if ((level & NXGE_NOTE)) { 1440 cmn_level = CE_NOTE; 1441 } 1442 1443 if (level & NXGE_ERR_CTL) { 1444 cmn_level = CE_WARN; 1445 } 1446 1447 va_start(ap, fmt); 1448 (void) vsprintf(msg_buffer, fmt, ap); 1449 va_end(ap); 1450 if (nxgep == NULL) { 1451 instance = -1; 1452 (void) sprintf(prefix_buffer, "%s :", "nxge"); 1453 } else { 1454 instance = nxgep->instance; 1455 (void) sprintf(prefix_buffer, 1456 "%s%d :", "nxge", instance); 1457 } 1458 1459 MUTEX_EXIT(&nxgedebuglock); 1460 cmn_err(cmn_level, "!%s %s\n", 1461 prefix_buffer, msg_buffer); 1462 1463 } 1464 } 1465 1466 char * 1467 nxge_dump_packet(char *addr, int size) 1468 { 1469 uchar_t *ap = (uchar_t *)addr; 1470 int i; 1471 static char etherbuf[1024]; 1472 char *cp = etherbuf; 1473 char digits[] = "0123456789abcdef"; 1474 1475 if (!size) 1476 size = 60; 1477 1478 if (size > MAX_DUMP_SZ) { 1479 /* Dump the leading bytes */ 1480 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1481 if (*ap > 0x0f) 1482 *cp++ = digits[*ap >> 4]; 1483 *cp++ = digits[*ap++ & 0xf]; 1484 *cp++ = ':'; 1485 } 1486 for (i = 0; i < 20; i++) 1487 *cp++ = '.'; 1488 /* Dump the last MAX_DUMP_SZ/2 bytes */ 1489 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2)); 1490 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1491 if (*ap > 0x0f) 1492 *cp++ = digits[*ap >> 4]; 1493 *cp++ = digits[*ap++ & 0xf]; 1494 *cp++ = ':'; 1495 } 1496 } else { 1497 for (i = 0; i < size; i++) { 1498 if (*ap > 0x0f) 1499 *cp++ = digits[*ap >> 4]; 1500 *cp++ = digits[*ap++ & 0xf]; 1501 *cp++ = ':'; 1502 } 1503 } 1504 *--cp = 0; 1505 return (etherbuf); 1506 } 1507 1508 #ifdef NXGE_DEBUG 1509 static void 1510 nxge_test_map_regs(p_nxge_t nxgep) 1511 { 1512 ddi_acc_handle_t cfg_handle; 1513 p_pci_cfg_t cfg_ptr; 1514 ddi_acc_handle_t dev_handle; 1515 char *dev_ptr; 1516 ddi_acc_handle_t pci_config_handle; 1517 uint32_t regval; 1518 int i; 1519 1520 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs")); 1521 1522 dev_handle = nxgep->dev_regs->nxge_regh; 1523 dev_ptr = (char *)nxgep->dev_regs->nxge_regp; 1524 1525 if (nxgep->niu_type == NEPTUNE) { 1526 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1527 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1528 1529 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1530 "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr)); 1531 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1532 "Neptune PCI cfg_ptr vendor id ptr 0x%llx", 1533 &cfg_ptr->vendorid)); 1534 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1535 "\tvendorid 0x%x devid 0x%x", 1536 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0), 1537 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0))); 1538 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1539 "PCI BAR: base 0x%x base14 0x%x base 18 0x%x " 1540 "bar1c 0x%x", 1541 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0), 1542 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0), 1543 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0), 1544 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0))); 1545 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1546 "\nNeptune PCI BAR: base20 0x%x base24 0x%x " 1547 "base 28 0x%x bar2c 0x%x\n", 1548 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0), 1549 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0), 1550 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0), 1551 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0))); 1552 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1553 "\nNeptune PCI BAR: base30 0x%x\n", 1554 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); 1555 1556 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1557 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1558 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1559 "first 0x%llx second 0x%llx third 0x%llx " 1560 "last 0x%llx ", 1561 NXGE_PIO_READ64(dev_handle, 1562 (uint64_t *)(dev_ptr + 0), 0), 1563 NXGE_PIO_READ64(dev_handle, 1564 (uint64_t *)(dev_ptr + 8), 0), 1565 NXGE_PIO_READ64(dev_handle, 1566 (uint64_t *)(dev_ptr + 16), 0), 1567 NXGE_PIO_READ64(cfg_handle, 1568 (uint64_t *)(dev_ptr + 24), 0))); 1569 } 1570 } 1571 1572 #endif 1573 1574 static void 1575 nxge_suspend(p_nxge_t nxgep) 1576 { 1577 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); 1578 1579 nxge_intrs_disable(nxgep); 1580 nxge_destroy_dev(nxgep); 1581 1582 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); 1583 } 1584 1585 static nxge_status_t 1586 nxge_resume(p_nxge_t nxgep) 1587 { 1588 nxge_status_t status = NXGE_OK; 1589 1590 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); 1591 nxgep->suspended = DDI_RESUME; 1592 1593 nxge_global_reset(nxgep); 1594 nxgep->suspended = 0; 1595 1596 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1597 "<== nxge_resume status = 0x%x", status)); 1598 return (status); 1599 } 1600 1601 static nxge_status_t 1602 nxge_setup_dev(p_nxge_t nxgep) 1603 { 1604 nxge_status_t status = NXGE_OK; 1605 1606 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 1607 nxgep->mac.portnum)); 1608 1609 status = nxge_xcvr_find(nxgep); 1610 if (status != NXGE_OK) { 1611 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1612 " nxge_setup_dev status " 1613 " (xcvr find 0x%08x)", status)); 1614 goto nxge_setup_dev_exit; 1615 } 1616 1617 status = nxge_link_init(nxgep); 1618 1619 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 1620 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1621 "port%d Bad register acc handle", nxgep->mac.portnum)); 1622 status = NXGE_ERROR; 1623 } 1624 1625 if (status != NXGE_OK) { 1626 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1627 " nxge_setup_dev status " 1628 "(xcvr init 0x%08x)", status)); 1629 goto nxge_setup_dev_exit; 1630 } 1631 1632 nxge_setup_dev_exit: 1633 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1634 "<== nxge_setup_dev port %d status = 0x%08x", 1635 nxgep->mac.portnum, status)); 1636 1637 return (status); 1638 } 1639 1640 static void 1641 nxge_destroy_dev(p_nxge_t nxgep) 1642 { 1643 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 1644 1645 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1646 1647 (void) nxge_hw_stop(nxgep); 1648 1649 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 1650 } 1651 1652 static nxge_status_t 1653 nxge_setup_system_dma_pages(p_nxge_t nxgep) 1654 { 1655 int ddi_status = DDI_SUCCESS; 1656 uint_t count; 1657 ddi_dma_cookie_t cookie; 1658 uint_t iommu_pagesize; 1659 nxge_status_t status = NXGE_OK; 1660 1661 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 1662 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 1663 if (nxgep->niu_type != N2_NIU) { 1664 iommu_pagesize = dvma_pagesize(nxgep->dip); 1665 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1666 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1667 " default_block_size %d iommu_pagesize %d", 1668 nxgep->sys_page_sz, 1669 ddi_ptob(nxgep->dip, (ulong_t)1), 1670 nxgep->rx_default_block_size, 1671 iommu_pagesize)); 1672 1673 if (iommu_pagesize != 0) { 1674 if (nxgep->sys_page_sz == iommu_pagesize) { 1675 if (iommu_pagesize > 0x4000) 1676 nxgep->sys_page_sz = 0x4000; 1677 } else { 1678 if (nxgep->sys_page_sz > iommu_pagesize) 1679 nxgep->sys_page_sz = iommu_pagesize; 1680 } 1681 } 1682 } 1683 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 1684 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1685 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1686 "default_block_size %d page mask %d", 1687 nxgep->sys_page_sz, 1688 ddi_ptob(nxgep->dip, (ulong_t)1), 1689 nxgep->rx_default_block_size, 1690 nxgep->sys_page_mask)); 1691 1692 1693 switch (nxgep->sys_page_sz) { 1694 default: 1695 nxgep->sys_page_sz = 0x1000; 1696 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 1697 nxgep->rx_default_block_size = 0x1000; 1698 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 1699 break; 1700 case 0x1000: 1701 nxgep->rx_default_block_size = 0x1000; 1702 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 1703 break; 1704 case 0x2000: 1705 nxgep->rx_default_block_size = 0x2000; 1706 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 1707 break; 1708 case 0x4000: 1709 nxgep->rx_default_block_size = 0x4000; 1710 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 1711 break; 1712 case 0x8000: 1713 nxgep->rx_default_block_size = 0x8000; 1714 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 1715 break; 1716 } 1717 1718 #ifndef USE_RX_BIG_BUF 1719 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 1720 #else 1721 nxgep->rx_default_block_size = 0x2000; 1722 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 1723 #endif 1724 /* 1725 * Get the system DMA burst size. 1726 */ 1727 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 1728 DDI_DMA_DONTWAIT, 0, 1729 &nxgep->dmasparehandle); 1730 if (ddi_status != DDI_SUCCESS) { 1731 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1732 "ddi_dma_alloc_handle: failed " 1733 " status 0x%x", ddi_status)); 1734 goto nxge_get_soft_properties_exit; 1735 } 1736 1737 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 1738 (caddr_t)nxgep->dmasparehandle, 1739 sizeof (nxgep->dmasparehandle), 1740 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1741 DDI_DMA_DONTWAIT, 0, 1742 &cookie, &count); 1743 if (ddi_status != DDI_DMA_MAPPED) { 1744 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1745 "Binding spare handle to find system" 1746 " burstsize failed.")); 1747 ddi_status = DDI_FAILURE; 1748 goto nxge_get_soft_properties_fail1; 1749 } 1750 1751 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 1752 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 1753 1754 nxge_get_soft_properties_fail1: 1755 ddi_dma_free_handle(&nxgep->dmasparehandle); 1756 1757 nxge_get_soft_properties_exit: 1758 1759 if (ddi_status != DDI_SUCCESS) 1760 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1761 1762 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1763 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 1764 return (status); 1765 } 1766 1767 static nxge_status_t 1768 nxge_alloc_mem_pool(p_nxge_t nxgep) 1769 { 1770 nxge_status_t status = NXGE_OK; 1771 1772 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 1773 1774 status = nxge_alloc_rx_mem_pool(nxgep); 1775 if (status != NXGE_OK) { 1776 return (NXGE_ERROR); 1777 } 1778 1779 status = nxge_alloc_tx_mem_pool(nxgep); 1780 if (status != NXGE_OK) { 1781 nxge_free_rx_mem_pool(nxgep); 1782 return (NXGE_ERROR); 1783 } 1784 1785 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 1786 return (NXGE_OK); 1787 } 1788 1789 static void 1790 nxge_free_mem_pool(p_nxge_t nxgep) 1791 { 1792 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 1793 1794 nxge_free_rx_mem_pool(nxgep); 1795 nxge_free_tx_mem_pool(nxgep); 1796 1797 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 1798 } 1799 1800 static nxge_status_t 1801 nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 1802 { 1803 int i, j; 1804 uint32_t ndmas, st_rdc; 1805 p_nxge_dma_pt_cfg_t p_all_cfgp; 1806 p_nxge_hw_pt_cfg_t p_cfgp; 1807 p_nxge_dma_pool_t dma_poolp; 1808 p_nxge_dma_common_t *dma_buf_p; 1809 p_nxge_dma_pool_t dma_cntl_poolp; 1810 p_nxge_dma_common_t *dma_cntl_p; 1811 size_t rx_buf_alloc_size; 1812 size_t rx_cntl_alloc_size; 1813 uint32_t *num_chunks; /* per dma */ 1814 nxge_status_t status = NXGE_OK; 1815 1816 uint32_t nxge_port_rbr_size; 1817 uint32_t nxge_port_rbr_spare_size; 1818 uint32_t nxge_port_rcr_size; 1819 1820 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 1821 1822 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 1823 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 1824 st_rdc = p_cfgp->start_rdc; 1825 ndmas = p_cfgp->max_rdcs; 1826 1827 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1828 " nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas)); 1829 1830 /* 1831 * Allocate memory for each receive DMA channel. 1832 */ 1833 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 1834 KM_SLEEP); 1835 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 1836 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 1837 1838 dma_cntl_poolp = (p_nxge_dma_pool_t) 1839 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 1840 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 1841 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 1842 1843 num_chunks = (uint32_t *)KMEM_ZALLOC( 1844 sizeof (uint32_t) * ndmas, KM_SLEEP); 1845 1846 /* 1847 * Assume that each DMA channel will be configured with default 1848 * block size. 1849 * rbr block counts are mod of batch count (16). 1850 */ 1851 nxge_port_rbr_size = p_all_cfgp->rbr_size; 1852 nxge_port_rcr_size = p_all_cfgp->rcr_size; 1853 1854 if (!nxge_port_rbr_size) { 1855 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 1856 } 1857 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 1858 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 1859 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 1860 } 1861 1862 p_all_cfgp->rbr_size = nxge_port_rbr_size; 1863 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 1864 1865 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 1866 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 1867 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 1868 } 1869 1870 /* 1871 * N2/NIU has limitation on the descriptor sizes (contiguous 1872 * memory allocation on data buffers to 4M (contig_mem_alloc) 1873 * and little endian for control buffers (must use the ddi/dki mem alloc 1874 * function). 1875 */ 1876 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1877 if (nxgep->niu_type == N2_NIU) { 1878 nxge_port_rbr_spare_size = 0; 1879 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 1880 (!ISP2(nxge_port_rbr_size))) { 1881 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 1882 } 1883 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 1884 (!ISP2(nxge_port_rcr_size))) { 1885 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 1886 } 1887 } 1888 #endif 1889 1890 rx_buf_alloc_size = (nxgep->rx_default_block_size * 1891 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 1892 1893 /* 1894 * Addresses of receive block ring, receive completion ring and the 1895 * mailbox must be all cache-aligned (64 bytes). 1896 */ 1897 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 1898 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 1899 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 1900 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 1901 1902 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 1903 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 1904 "nxge_port_rcr_size = %d " 1905 "rx_cntl_alloc_size = %d", 1906 nxge_port_rbr_size, nxge_port_rbr_spare_size, 1907 nxge_port_rcr_size, 1908 rx_cntl_alloc_size)); 1909 1910 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1911 if (nxgep->niu_type == N2_NIU) { 1912 if (!ISP2(rx_buf_alloc_size)) { 1913 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1914 "==> nxge_alloc_rx_mem_pool: " 1915 " must be power of 2")); 1916 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1917 goto nxge_alloc_rx_mem_pool_exit; 1918 } 1919 1920 if (rx_buf_alloc_size > (1 << 22)) { 1921 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1922 "==> nxge_alloc_rx_mem_pool: " 1923 " limit size to 4M")); 1924 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1925 goto nxge_alloc_rx_mem_pool_exit; 1926 } 1927 1928 if (rx_cntl_alloc_size < 0x2000) { 1929 rx_cntl_alloc_size = 0x2000; 1930 } 1931 } 1932 #endif 1933 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 1934 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 1935 1936 /* 1937 * Allocate memory for receive buffers and descriptor rings. 1938 * Replace allocation functions with interface functions provided 1939 * by the partition manager when it is available. 1940 */ 1941 /* 1942 * Allocate memory for the receive buffer blocks. 1943 */ 1944 for (i = 0; i < ndmas; i++) { 1945 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1946 " nxge_alloc_rx_mem_pool to alloc mem: " 1947 " dma %d dma_buf_p %llx &dma_buf_p %llx", 1948 i, dma_buf_p[i], &dma_buf_p[i])); 1949 num_chunks[i] = 0; 1950 status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i], 1951 rx_buf_alloc_size, 1952 nxgep->rx_default_block_size, &num_chunks[i]); 1953 if (status != NXGE_OK) { 1954 break; 1955 } 1956 st_rdc++; 1957 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1958 " nxge_alloc_rx_mem_pool DONE alloc mem: " 1959 "dma %d dma_buf_p %llx &dma_buf_p %llx", i, 1960 dma_buf_p[i], &dma_buf_p[i])); 1961 } 1962 if (i < ndmas) { 1963 goto nxge_alloc_rx_mem_fail1; 1964 } 1965 /* 1966 * Allocate memory for descriptor rings and mailbox. 1967 */ 1968 st_rdc = p_cfgp->start_rdc; 1969 for (j = 0; j < ndmas; j++) { 1970 status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j], 1971 rx_cntl_alloc_size); 1972 if (status != NXGE_OK) { 1973 break; 1974 } 1975 st_rdc++; 1976 } 1977 if (j < ndmas) { 1978 goto nxge_alloc_rx_mem_fail2; 1979 } 1980 1981 dma_poolp->ndmas = ndmas; 1982 dma_poolp->num_chunks = num_chunks; 1983 dma_poolp->buf_allocated = B_TRUE; 1984 nxgep->rx_buf_pool_p = dma_poolp; 1985 dma_poolp->dma_buf_pool_p = dma_buf_p; 1986 1987 dma_cntl_poolp->ndmas = ndmas; 1988 dma_cntl_poolp->buf_allocated = B_TRUE; 1989 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 1990 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 1991 1992 goto nxge_alloc_rx_mem_pool_exit; 1993 1994 nxge_alloc_rx_mem_fail2: 1995 /* Free control buffers */ 1996 j--; 1997 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1998 "==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j)); 1999 for (; j >= 0; j--) { 2000 nxge_free_rx_cntl_dma(nxgep, 2001 (p_nxge_dma_common_t)dma_cntl_p[i]); 2002 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2003 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", 2004 j)); 2005 } 2006 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2007 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j)); 2008 2009 nxge_alloc_rx_mem_fail1: 2010 /* Free data buffers */ 2011 i--; 2012 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2013 "==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i)); 2014 for (; i >= 0; i--) { 2015 nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2016 num_chunks[i]); 2017 } 2018 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2019 "==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i)); 2020 2021 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2022 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2023 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2024 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2025 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2026 2027 nxge_alloc_rx_mem_pool_exit: 2028 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2029 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2030 2031 return (status); 2032 } 2033 2034 static void 2035 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2036 { 2037 uint32_t i, ndmas; 2038 p_nxge_dma_pool_t dma_poolp; 2039 p_nxge_dma_common_t *dma_buf_p; 2040 p_nxge_dma_pool_t dma_cntl_poolp; 2041 p_nxge_dma_common_t *dma_cntl_p; 2042 uint32_t *num_chunks; 2043 2044 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2045 2046 dma_poolp = nxgep->rx_buf_pool_p; 2047 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2048 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2049 "<== nxge_free_rx_mem_pool " 2050 "(null rx buf pool or buf not allocated")); 2051 return; 2052 } 2053 2054 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2055 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2056 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2057 "<== nxge_free_rx_mem_pool " 2058 "(null rx cntl buf pool or cntl buf not allocated")); 2059 return; 2060 } 2061 2062 dma_buf_p = dma_poolp->dma_buf_pool_p; 2063 num_chunks = dma_poolp->num_chunks; 2064 2065 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2066 ndmas = dma_cntl_poolp->ndmas; 2067 2068 for (i = 0; i < ndmas; i++) { 2069 nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2070 } 2071 2072 for (i = 0; i < ndmas; i++) { 2073 nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]); 2074 } 2075 2076 for (i = 0; i < ndmas; i++) { 2077 KMEM_FREE(dma_buf_p[i], 2078 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2079 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2080 } 2081 2082 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2083 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2084 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2085 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2086 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2087 2088 nxgep->rx_buf_pool_p = NULL; 2089 nxgep->rx_cntl_pool_p = NULL; 2090 2091 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2092 } 2093 2094 2095 static nxge_status_t 2096 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2097 p_nxge_dma_common_t *dmap, 2098 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2099 { 2100 p_nxge_dma_common_t rx_dmap; 2101 nxge_status_t status = NXGE_OK; 2102 size_t total_alloc_size; 2103 size_t allocated = 0; 2104 int i, size_index, array_size; 2105 2106 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2107 2108 rx_dmap = (p_nxge_dma_common_t) 2109 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2110 KM_SLEEP); 2111 2112 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2113 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2114 dma_channel, alloc_size, block_size, dmap)); 2115 2116 total_alloc_size = alloc_size; 2117 2118 #if defined(RX_USE_RECLAIM_POST) 2119 total_alloc_size = alloc_size + alloc_size/4; 2120 #endif 2121 2122 i = 0; 2123 size_index = 0; 2124 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2125 while ((alloc_sizes[size_index] < alloc_size) && 2126 (size_index < array_size)) 2127 size_index++; 2128 if (size_index >= array_size) { 2129 size_index = array_size - 1; 2130 } 2131 2132 while ((allocated < total_alloc_size) && 2133 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2134 rx_dmap[i].dma_chunk_index = i; 2135 rx_dmap[i].block_size = block_size; 2136 rx_dmap[i].alength = alloc_sizes[size_index]; 2137 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2138 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2139 rx_dmap[i].dma_channel = dma_channel; 2140 rx_dmap[i].contig_alloc_type = B_FALSE; 2141 2142 /* 2143 * N2/NIU: data buffers must be contiguous as the driver 2144 * needs to call Hypervisor api to set up 2145 * logical pages. 2146 */ 2147 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2148 rx_dmap[i].contig_alloc_type = B_TRUE; 2149 } 2150 2151 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2152 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2153 "i %d nblocks %d alength %d", 2154 dma_channel, i, &rx_dmap[i], block_size, 2155 i, rx_dmap[i].nblocks, 2156 rx_dmap[i].alength)); 2157 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2158 &nxge_rx_dma_attr, 2159 rx_dmap[i].alength, 2160 &nxge_dev_buf_dma_acc_attr, 2161 DDI_DMA_READ | DDI_DMA_STREAMING, 2162 (p_nxge_dma_common_t)(&rx_dmap[i])); 2163 if (status != NXGE_OK) { 2164 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2165 " nxge_alloc_rx_buf_dma: Alloc Failed ")); 2166 size_index--; 2167 } else { 2168 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2169 " alloc_rx_buf_dma allocated rdc %d " 2170 "chunk %d size %x dvma %x bufp %llx ", 2171 dma_channel, i, rx_dmap[i].alength, 2172 rx_dmap[i].ioaddr_pp, &rx_dmap[i])); 2173 i++; 2174 allocated += alloc_sizes[size_index]; 2175 } 2176 } 2177 2178 2179 if (allocated < total_alloc_size) { 2180 goto nxge_alloc_rx_mem_fail1; 2181 } 2182 2183 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2184 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2185 dma_channel, i)); 2186 *num_chunks = i; 2187 *dmap = rx_dmap; 2188 2189 goto nxge_alloc_rx_mem_exit; 2190 2191 nxge_alloc_rx_mem_fail1: 2192 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2193 2194 nxge_alloc_rx_mem_exit: 2195 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2196 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2197 2198 return (status); 2199 } 2200 2201 /*ARGSUSED*/ 2202 static void 2203 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2204 uint32_t num_chunks) 2205 { 2206 int i; 2207 2208 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2209 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2210 2211 for (i = 0; i < num_chunks; i++) { 2212 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2213 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2214 i, dmap)); 2215 nxge_dma_mem_free(dmap++); 2216 } 2217 2218 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2219 } 2220 2221 /*ARGSUSED*/ 2222 static nxge_status_t 2223 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2224 p_nxge_dma_common_t *dmap, size_t size) 2225 { 2226 p_nxge_dma_common_t rx_dmap; 2227 nxge_status_t status = NXGE_OK; 2228 2229 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2230 2231 rx_dmap = (p_nxge_dma_common_t) 2232 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2233 2234 rx_dmap->contig_alloc_type = B_FALSE; 2235 2236 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2237 &nxge_desc_dma_attr, 2238 size, 2239 &nxge_dev_desc_dma_acc_attr, 2240 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2241 rx_dmap); 2242 if (status != NXGE_OK) { 2243 goto nxge_alloc_rx_cntl_dma_fail1; 2244 } 2245 2246 *dmap = rx_dmap; 2247 goto nxge_alloc_rx_cntl_dma_exit; 2248 2249 nxge_alloc_rx_cntl_dma_fail1: 2250 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2251 2252 nxge_alloc_rx_cntl_dma_exit: 2253 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2254 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2255 2256 return (status); 2257 } 2258 2259 /*ARGSUSED*/ 2260 static void 2261 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2262 { 2263 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2264 2265 nxge_dma_mem_free(dmap); 2266 2267 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2268 } 2269 2270 static nxge_status_t 2271 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2272 { 2273 nxge_status_t status = NXGE_OK; 2274 int i, j; 2275 uint32_t ndmas, st_tdc; 2276 p_nxge_dma_pt_cfg_t p_all_cfgp; 2277 p_nxge_hw_pt_cfg_t p_cfgp; 2278 p_nxge_dma_pool_t dma_poolp; 2279 p_nxge_dma_common_t *dma_buf_p; 2280 p_nxge_dma_pool_t dma_cntl_poolp; 2281 p_nxge_dma_common_t *dma_cntl_p; 2282 size_t tx_buf_alloc_size; 2283 size_t tx_cntl_alloc_size; 2284 uint32_t *num_chunks; /* per dma */ 2285 2286 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2287 2288 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2289 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2290 st_tdc = p_cfgp->start_tdc; 2291 ndmas = p_cfgp->max_tdcs; 2292 2293 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: " 2294 "p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d", 2295 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs)); 2296 /* 2297 * Allocate memory for each transmit DMA channel. 2298 */ 2299 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2300 KM_SLEEP); 2301 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2302 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2303 2304 dma_cntl_poolp = (p_nxge_dma_pool_t) 2305 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2306 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2307 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2308 2309 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2310 /* 2311 * N2/NIU has limitation on the descriptor sizes (contiguous 2312 * memory allocation on data buffers to 4M (contig_mem_alloc) 2313 * and little endian for control buffers (must use the ddi/dki mem alloc 2314 * function). The transmit ring is limited to 8K (includes the 2315 * mailbox). 2316 */ 2317 if (nxgep->niu_type == N2_NIU) { 2318 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2319 (!ISP2(nxge_tx_ring_size))) { 2320 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2321 } 2322 } 2323 #endif 2324 2325 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2326 2327 /* 2328 * Assume that each DMA channel will be configured with default 2329 * transmit bufer size for copying transmit data. 2330 * (For packet payload over this limit, packets will not be 2331 * copied.) 2332 */ 2333 tx_buf_alloc_size = (nxge_bcopy_thresh * nxge_tx_ring_size); 2334 2335 /* 2336 * Addresses of transmit descriptor ring and the 2337 * mailbox must be all cache-aligned (64 bytes). 2338 */ 2339 tx_cntl_alloc_size = nxge_tx_ring_size; 2340 tx_cntl_alloc_size *= (sizeof (tx_desc_t)); 2341 tx_cntl_alloc_size += sizeof (txdma_mailbox_t); 2342 2343 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2344 if (nxgep->niu_type == N2_NIU) { 2345 if (!ISP2(tx_buf_alloc_size)) { 2346 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2347 "==> nxge_alloc_tx_mem_pool: " 2348 " must be power of 2")); 2349 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2350 goto nxge_alloc_tx_mem_pool_exit; 2351 } 2352 2353 if (tx_buf_alloc_size > (1 << 22)) { 2354 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2355 "==> nxge_alloc_tx_mem_pool: " 2356 " limit size to 4M")); 2357 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2358 goto nxge_alloc_tx_mem_pool_exit; 2359 } 2360 2361 if (tx_cntl_alloc_size < 0x2000) { 2362 tx_cntl_alloc_size = 0x2000; 2363 } 2364 } 2365 #endif 2366 2367 num_chunks = (uint32_t *)KMEM_ZALLOC( 2368 sizeof (uint32_t) * ndmas, KM_SLEEP); 2369 2370 /* 2371 * Allocate memory for transmit buffers and descriptor rings. 2372 * Replace allocation functions with interface functions provided 2373 * by the partition manager when it is available. 2374 * 2375 * Allocate memory for the transmit buffer pool. 2376 */ 2377 for (i = 0; i < ndmas; i++) { 2378 num_chunks[i] = 0; 2379 status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i], 2380 tx_buf_alloc_size, 2381 nxge_bcopy_thresh, &num_chunks[i]); 2382 if (status != NXGE_OK) { 2383 break; 2384 } 2385 st_tdc++; 2386 } 2387 if (i < ndmas) { 2388 goto nxge_alloc_tx_mem_pool_fail1; 2389 } 2390 2391 st_tdc = p_cfgp->start_tdc; 2392 /* 2393 * Allocate memory for descriptor rings and mailbox. 2394 */ 2395 for (j = 0; j < ndmas; j++) { 2396 status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j], 2397 tx_cntl_alloc_size); 2398 if (status != NXGE_OK) { 2399 break; 2400 } 2401 st_tdc++; 2402 } 2403 if (j < ndmas) { 2404 goto nxge_alloc_tx_mem_pool_fail2; 2405 } 2406 2407 dma_poolp->ndmas = ndmas; 2408 dma_poolp->num_chunks = num_chunks; 2409 dma_poolp->buf_allocated = B_TRUE; 2410 dma_poolp->dma_buf_pool_p = dma_buf_p; 2411 nxgep->tx_buf_pool_p = dma_poolp; 2412 2413 dma_cntl_poolp->ndmas = ndmas; 2414 dma_cntl_poolp->buf_allocated = B_TRUE; 2415 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2416 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2417 2418 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2419 "==> nxge_alloc_tx_mem_pool: start_tdc %d " 2420 "ndmas %d poolp->ndmas %d", 2421 st_tdc, ndmas, dma_poolp->ndmas)); 2422 2423 goto nxge_alloc_tx_mem_pool_exit; 2424 2425 nxge_alloc_tx_mem_pool_fail2: 2426 /* Free control buffers */ 2427 j--; 2428 for (; j >= 0; j--) { 2429 nxge_free_tx_cntl_dma(nxgep, 2430 (p_nxge_dma_common_t)dma_cntl_p[i]); 2431 } 2432 2433 nxge_alloc_tx_mem_pool_fail1: 2434 /* Free data buffers */ 2435 i--; 2436 for (; i >= 0; i--) { 2437 nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2438 num_chunks[i]); 2439 } 2440 2441 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2442 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2443 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2444 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2445 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2446 2447 nxge_alloc_tx_mem_pool_exit: 2448 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2449 "<== nxge_alloc_tx_mem_pool:status 0x%08x", status)); 2450 2451 return (status); 2452 } 2453 2454 static nxge_status_t 2455 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2456 p_nxge_dma_common_t *dmap, size_t alloc_size, 2457 size_t block_size, uint32_t *num_chunks) 2458 { 2459 p_nxge_dma_common_t tx_dmap; 2460 nxge_status_t status = NXGE_OK; 2461 size_t total_alloc_size; 2462 size_t allocated = 0; 2463 int i, size_index, array_size; 2464 2465 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 2466 2467 tx_dmap = (p_nxge_dma_common_t) 2468 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2469 KM_SLEEP); 2470 2471 total_alloc_size = alloc_size; 2472 i = 0; 2473 size_index = 0; 2474 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2475 while ((alloc_sizes[size_index] < alloc_size) && 2476 (size_index < array_size)) 2477 size_index++; 2478 if (size_index >= array_size) { 2479 size_index = array_size - 1; 2480 } 2481 2482 while ((allocated < total_alloc_size) && 2483 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2484 2485 tx_dmap[i].dma_chunk_index = i; 2486 tx_dmap[i].block_size = block_size; 2487 tx_dmap[i].alength = alloc_sizes[size_index]; 2488 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2489 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2490 tx_dmap[i].dma_channel = dma_channel; 2491 tx_dmap[i].contig_alloc_type = B_FALSE; 2492 2493 /* 2494 * N2/NIU: data buffers must be contiguous as the driver 2495 * needs to call Hypervisor api to set up 2496 * logical pages. 2497 */ 2498 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2499 tx_dmap[i].contig_alloc_type = B_TRUE; 2500 } 2501 2502 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2503 &nxge_tx_dma_attr, 2504 tx_dmap[i].alength, 2505 &nxge_dev_buf_dma_acc_attr, 2506 DDI_DMA_WRITE | DDI_DMA_STREAMING, 2507 (p_nxge_dma_common_t)(&tx_dmap[i])); 2508 if (status != NXGE_OK) { 2509 size_index--; 2510 } else { 2511 i++; 2512 allocated += alloc_sizes[size_index]; 2513 } 2514 } 2515 2516 if (allocated < total_alloc_size) { 2517 goto nxge_alloc_tx_mem_fail1; 2518 } 2519 2520 *num_chunks = i; 2521 *dmap = tx_dmap; 2522 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2523 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2524 *dmap, i)); 2525 goto nxge_alloc_tx_mem_exit; 2526 2527 nxge_alloc_tx_mem_fail1: 2528 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2529 2530 nxge_alloc_tx_mem_exit: 2531 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2532 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 2533 2534 return (status); 2535 } 2536 2537 /*ARGSUSED*/ 2538 static void 2539 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2540 uint32_t num_chunks) 2541 { 2542 int i; 2543 2544 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 2545 2546 for (i = 0; i < num_chunks; i++) { 2547 nxge_dma_mem_free(dmap++); 2548 } 2549 2550 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 2551 } 2552 2553 /*ARGSUSED*/ 2554 static nxge_status_t 2555 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2556 p_nxge_dma_common_t *dmap, size_t size) 2557 { 2558 p_nxge_dma_common_t tx_dmap; 2559 nxge_status_t status = NXGE_OK; 2560 2561 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 2562 tx_dmap = (p_nxge_dma_common_t) 2563 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2564 2565 tx_dmap->contig_alloc_type = B_FALSE; 2566 2567 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2568 &nxge_desc_dma_attr, 2569 size, 2570 &nxge_dev_desc_dma_acc_attr, 2571 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2572 tx_dmap); 2573 if (status != NXGE_OK) { 2574 goto nxge_alloc_tx_cntl_dma_fail1; 2575 } 2576 2577 *dmap = tx_dmap; 2578 goto nxge_alloc_tx_cntl_dma_exit; 2579 2580 nxge_alloc_tx_cntl_dma_fail1: 2581 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 2582 2583 nxge_alloc_tx_cntl_dma_exit: 2584 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2585 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 2586 2587 return (status); 2588 } 2589 2590 /*ARGSUSED*/ 2591 static void 2592 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2593 { 2594 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 2595 2596 nxge_dma_mem_free(dmap); 2597 2598 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 2599 } 2600 2601 static void 2602 nxge_free_tx_mem_pool(p_nxge_t nxgep) 2603 { 2604 uint32_t i, ndmas; 2605 p_nxge_dma_pool_t dma_poolp; 2606 p_nxge_dma_common_t *dma_buf_p; 2607 p_nxge_dma_pool_t dma_cntl_poolp; 2608 p_nxge_dma_common_t *dma_cntl_p; 2609 uint32_t *num_chunks; 2610 2611 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool")); 2612 2613 dma_poolp = nxgep->tx_buf_pool_p; 2614 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2615 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2616 "<== nxge_free_tx_mem_pool " 2617 "(null rx buf pool or buf not allocated")); 2618 return; 2619 } 2620 2621 dma_cntl_poolp = nxgep->tx_cntl_pool_p; 2622 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2624 "<== nxge_free_tx_mem_pool " 2625 "(null tx cntl buf pool or cntl buf not allocated")); 2626 return; 2627 } 2628 2629 dma_buf_p = dma_poolp->dma_buf_pool_p; 2630 num_chunks = dma_poolp->num_chunks; 2631 2632 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2633 ndmas = dma_cntl_poolp->ndmas; 2634 2635 for (i = 0; i < ndmas; i++) { 2636 nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2637 } 2638 2639 for (i = 0; i < ndmas; i++) { 2640 nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]); 2641 } 2642 2643 for (i = 0; i < ndmas; i++) { 2644 KMEM_FREE(dma_buf_p[i], 2645 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2646 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2647 } 2648 2649 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2650 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2651 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2652 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2653 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2654 2655 nxgep->tx_buf_pool_p = NULL; 2656 nxgep->tx_cntl_pool_p = NULL; 2657 2658 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool")); 2659 } 2660 2661 /*ARGSUSED*/ 2662 static nxge_status_t 2663 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 2664 struct ddi_dma_attr *dma_attrp, 2665 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 2666 p_nxge_dma_common_t dma_p) 2667 { 2668 caddr_t kaddrp; 2669 int ddi_status = DDI_SUCCESS; 2670 boolean_t contig_alloc_type; 2671 2672 contig_alloc_type = dma_p->contig_alloc_type; 2673 2674 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 2675 /* 2676 * contig_alloc_type for contiguous memory only allowed 2677 * for N2/NIU. 2678 */ 2679 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2680 "nxge_dma_mem_alloc: alloc type not allows (%d)", 2681 dma_p->contig_alloc_type)); 2682 return (NXGE_ERROR | NXGE_DDI_FAILED); 2683 } 2684 2685 dma_p->dma_handle = NULL; 2686 dma_p->acc_handle = NULL; 2687 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 2688 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 2689 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 2690 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 2691 if (ddi_status != DDI_SUCCESS) { 2692 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2693 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 2694 return (NXGE_ERROR | NXGE_DDI_FAILED); 2695 } 2696 2697 switch (contig_alloc_type) { 2698 case B_FALSE: 2699 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, 2700 acc_attr_p, 2701 xfer_flags, 2702 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 2703 &dma_p->acc_handle); 2704 if (ddi_status != DDI_SUCCESS) { 2705 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2706 "nxge_dma_mem_alloc:ddi_dma_mem_alloc failed")); 2707 ddi_dma_free_handle(&dma_p->dma_handle); 2708 dma_p->dma_handle = NULL; 2709 return (NXGE_ERROR | NXGE_DDI_FAILED); 2710 } 2711 if (dma_p->alength < length) { 2712 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2713 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 2714 "< length.")); 2715 ddi_dma_mem_free(&dma_p->acc_handle); 2716 ddi_dma_free_handle(&dma_p->dma_handle); 2717 dma_p->acc_handle = NULL; 2718 dma_p->dma_handle = NULL; 2719 return (NXGE_ERROR); 2720 } 2721 2722 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2723 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2724 &dma_p->dma_cookie, &dma_p->ncookies); 2725 if (ddi_status != DDI_DMA_MAPPED) { 2726 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2727 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 2728 "(staus 0x%x ncookies %d.)", ddi_status, 2729 dma_p->ncookies)); 2730 if (dma_p->acc_handle) { 2731 ddi_dma_mem_free(&dma_p->acc_handle); 2732 dma_p->acc_handle = NULL; 2733 } 2734 ddi_dma_free_handle(&dma_p->dma_handle); 2735 dma_p->dma_handle = NULL; 2736 return (NXGE_ERROR | NXGE_DDI_FAILED); 2737 } 2738 2739 if (dma_p->ncookies != 1) { 2740 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2741 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 2742 "> 1 cookie" 2743 "(staus 0x%x ncookies %d.)", ddi_status, 2744 dma_p->ncookies)); 2745 if (dma_p->acc_handle) { 2746 ddi_dma_mem_free(&dma_p->acc_handle); 2747 dma_p->acc_handle = NULL; 2748 } 2749 ddi_dma_free_handle(&dma_p->dma_handle); 2750 dma_p->dma_handle = NULL; 2751 return (NXGE_ERROR); 2752 } 2753 break; 2754 2755 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2756 case B_TRUE: 2757 kaddrp = (caddr_t)contig_mem_alloc(length); 2758 if (kaddrp == NULL) { 2759 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2760 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 2761 ddi_dma_free_handle(&dma_p->dma_handle); 2762 return (NXGE_ERROR | NXGE_DDI_FAILED); 2763 } 2764 2765 dma_p->alength = length; 2766 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2767 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2768 &dma_p->dma_cookie, &dma_p->ncookies); 2769 if (ddi_status != DDI_DMA_MAPPED) { 2770 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2771 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 2772 "(status 0x%x ncookies %d.)", ddi_status, 2773 dma_p->ncookies)); 2774 2775 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2776 "==> nxge_dma_mem_alloc: (not mapped)" 2777 "length %lu (0x%x) " 2778 "free contig kaddrp $%p " 2779 "va_to_pa $%p", 2780 length, length, 2781 kaddrp, 2782 va_to_pa(kaddrp))); 2783 2784 2785 contig_mem_free((void *)kaddrp, length); 2786 ddi_dma_free_handle(&dma_p->dma_handle); 2787 2788 dma_p->dma_handle = NULL; 2789 dma_p->acc_handle = NULL; 2790 dma_p->alength = NULL; 2791 dma_p->kaddrp = NULL; 2792 2793 return (NXGE_ERROR | NXGE_DDI_FAILED); 2794 } 2795 2796 if (dma_p->ncookies != 1 || 2797 (dma_p->dma_cookie.dmac_laddress == NULL)) { 2798 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2799 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 2800 "cookie or " 2801 "dmac_laddress is NULL $%p size %d " 2802 " (status 0x%x ncookies %d.)", 2803 ddi_status, 2804 dma_p->dma_cookie.dmac_laddress, 2805 dma_p->dma_cookie.dmac_size, 2806 dma_p->ncookies)); 2807 2808 contig_mem_free((void *)kaddrp, length); 2809 ddi_dma_free_handle(&dma_p->dma_handle); 2810 2811 dma_p->alength = 0; 2812 dma_p->dma_handle = NULL; 2813 dma_p->acc_handle = NULL; 2814 dma_p->kaddrp = NULL; 2815 2816 return (NXGE_ERROR | NXGE_DDI_FAILED); 2817 } 2818 break; 2819 2820 #else 2821 case B_TRUE: 2822 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2823 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 2824 return (NXGE_ERROR | NXGE_DDI_FAILED); 2825 #endif 2826 } 2827 2828 dma_p->kaddrp = kaddrp; 2829 dma_p->last_kaddrp = (unsigned char *)kaddrp + 2830 dma_p->alength - RXBUF_64B_ALIGNED; 2831 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2832 dma_p->last_ioaddr_pp = 2833 (unsigned char *)dma_p->dma_cookie.dmac_laddress + 2834 dma_p->alength - RXBUF_64B_ALIGNED; 2835 2836 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 2837 2838 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2839 dma_p->orig_ioaddr_pp = 2840 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2841 dma_p->orig_alength = length; 2842 dma_p->orig_kaddrp = kaddrp; 2843 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 2844 #endif 2845 2846 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 2847 "dma buffer allocated: dma_p $%p " 2848 "return dmac_ladress from cookie $%p cookie dmac_size %d " 2849 "dma_p->ioaddr_p $%p " 2850 "dma_p->orig_ioaddr_p $%p " 2851 "orig_vatopa $%p " 2852 "alength %d (0x%x) " 2853 "kaddrp $%p " 2854 "length %d (0x%x)", 2855 dma_p, 2856 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 2857 dma_p->ioaddr_pp, 2858 dma_p->orig_ioaddr_pp, 2859 dma_p->orig_vatopa, 2860 dma_p->alength, dma_p->alength, 2861 kaddrp, 2862 length, length)); 2863 2864 return (NXGE_OK); 2865 } 2866 2867 static void 2868 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 2869 { 2870 if (dma_p->dma_handle != NULL) { 2871 if (dma_p->ncookies) { 2872 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2873 dma_p->ncookies = 0; 2874 } 2875 ddi_dma_free_handle(&dma_p->dma_handle); 2876 dma_p->dma_handle = NULL; 2877 } 2878 2879 if (dma_p->acc_handle != NULL) { 2880 ddi_dma_mem_free(&dma_p->acc_handle); 2881 dma_p->acc_handle = NULL; 2882 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 2883 } 2884 2885 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2886 if (dma_p->contig_alloc_type && 2887 dma_p->orig_kaddrp && dma_p->orig_alength) { 2888 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 2889 "kaddrp $%p (orig_kaddrp $%p)" 2890 "mem type %d ", 2891 "orig_alength %d " 2892 "alength 0x%x (%d)", 2893 dma_p->kaddrp, 2894 dma_p->orig_kaddrp, 2895 dma_p->contig_alloc_type, 2896 dma_p->orig_alength, 2897 dma_p->alength, dma_p->alength)); 2898 2899 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 2900 dma_p->orig_alength = NULL; 2901 dma_p->orig_kaddrp = NULL; 2902 dma_p->contig_alloc_type = B_FALSE; 2903 } 2904 #endif 2905 dma_p->kaddrp = NULL; 2906 dma_p->alength = NULL; 2907 } 2908 2909 /* 2910 * nxge_m_start() -- start transmitting and receiving. 2911 * 2912 * This function is called by the MAC layer when the first 2913 * stream is open to prepare the hardware ready for sending 2914 * and transmitting packets. 2915 */ 2916 static int 2917 nxge_m_start(void *arg) 2918 { 2919 p_nxge_t nxgep = (p_nxge_t)arg; 2920 2921 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 2922 2923 MUTEX_ENTER(nxgep->genlock); 2924 if (nxge_init(nxgep) != NXGE_OK) { 2925 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2926 "<== nxge_m_start: initialization failed")); 2927 MUTEX_EXIT(nxgep->genlock); 2928 return (EIO); 2929 } 2930 2931 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) 2932 goto nxge_m_start_exit; 2933 /* 2934 * Start timer to check the system error and tx hangs 2935 */ 2936 nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state, 2937 NXGE_CHECK_TIMER); 2938 2939 nxgep->link_notify = B_TRUE; 2940 2941 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 2942 2943 nxge_m_start_exit: 2944 MUTEX_EXIT(nxgep->genlock); 2945 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 2946 return (0); 2947 } 2948 2949 /* 2950 * nxge_m_stop(): stop transmitting and receiving. 2951 */ 2952 static void 2953 nxge_m_stop(void *arg) 2954 { 2955 p_nxge_t nxgep = (p_nxge_t)arg; 2956 2957 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 2958 2959 if (nxgep->nxge_timerid) { 2960 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 2961 nxgep->nxge_timerid = 0; 2962 } 2963 2964 MUTEX_ENTER(nxgep->genlock); 2965 nxge_uninit(nxgep); 2966 2967 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 2968 2969 MUTEX_EXIT(nxgep->genlock); 2970 2971 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 2972 } 2973 2974 static int 2975 nxge_m_unicst(void *arg, const uint8_t *macaddr) 2976 { 2977 p_nxge_t nxgep = (p_nxge_t)arg; 2978 struct ether_addr addrp; 2979 2980 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst")); 2981 2982 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 2983 if (nxge_set_mac_addr(nxgep, &addrp)) { 2984 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2985 "<== nxge_m_unicst: set unitcast failed")); 2986 return (EINVAL); 2987 } 2988 2989 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst")); 2990 2991 return (0); 2992 } 2993 2994 static int 2995 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 2996 { 2997 p_nxge_t nxgep = (p_nxge_t)arg; 2998 struct ether_addr addrp; 2999 3000 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3001 "==> nxge_m_multicst: add %d", add)); 3002 3003 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3004 if (add) { 3005 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3006 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3007 "<== nxge_m_multicst: add multicast failed")); 3008 return (EINVAL); 3009 } 3010 } else { 3011 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3012 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3013 "<== nxge_m_multicst: del multicast failed")); 3014 return (EINVAL); 3015 } 3016 } 3017 3018 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3019 3020 return (0); 3021 } 3022 3023 static int 3024 nxge_m_promisc(void *arg, boolean_t on) 3025 { 3026 p_nxge_t nxgep = (p_nxge_t)arg; 3027 3028 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3029 "==> nxge_m_promisc: on %d", on)); 3030 3031 if (nxge_set_promisc(nxgep, on)) { 3032 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3033 "<== nxge_m_promisc: set promisc failed")); 3034 return (EINVAL); 3035 } 3036 3037 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3038 "<== nxge_m_promisc: on %d", on)); 3039 3040 return (0); 3041 } 3042 3043 static void 3044 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3045 { 3046 p_nxge_t nxgep = (p_nxge_t)arg; 3047 struct iocblk *iocp = (struct iocblk *)mp->b_rptr; 3048 boolean_t need_privilege; 3049 int err; 3050 int cmd; 3051 3052 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3053 3054 iocp = (struct iocblk *)mp->b_rptr; 3055 iocp->ioc_error = 0; 3056 need_privilege = B_TRUE; 3057 cmd = iocp->ioc_cmd; 3058 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3059 switch (cmd) { 3060 default: 3061 miocnak(wq, mp, 0, EINVAL); 3062 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3063 return; 3064 3065 case LB_GET_INFO_SIZE: 3066 case LB_GET_INFO: 3067 case LB_GET_MODE: 3068 need_privilege = B_FALSE; 3069 break; 3070 case LB_SET_MODE: 3071 break; 3072 3073 case ND_GET: 3074 need_privilege = B_FALSE; 3075 break; 3076 case ND_SET: 3077 break; 3078 3079 case NXGE_GET_MII: 3080 case NXGE_PUT_MII: 3081 case NXGE_GET64: 3082 case NXGE_PUT64: 3083 case NXGE_GET_TX_RING_SZ: 3084 case NXGE_GET_TX_DESC: 3085 case NXGE_TX_SIDE_RESET: 3086 case NXGE_RX_SIDE_RESET: 3087 case NXGE_GLOBAL_RESET: 3088 case NXGE_RESET_MAC: 3089 case NXGE_TX_REGS_DUMP: 3090 case NXGE_RX_REGS_DUMP: 3091 case NXGE_INT_REGS_DUMP: 3092 case NXGE_VIR_INT_REGS_DUMP: 3093 case NXGE_PUT_TCAM: 3094 case NXGE_GET_TCAM: 3095 case NXGE_RTRACE: 3096 case NXGE_RDUMP: 3097 3098 need_privilege = B_FALSE; 3099 break; 3100 case NXGE_INJECT_ERR: 3101 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3102 nxge_err_inject(nxgep, wq, mp); 3103 break; 3104 } 3105 3106 if (need_privilege) { 3107 if (secpolicy_net_config != NULL) 3108 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3109 else 3110 err = drv_priv(iocp->ioc_cr); 3111 if (err != 0) { 3112 miocnak(wq, mp, 0, err); 3113 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3114 "<== nxge_m_ioctl: no priv")); 3115 return; 3116 } 3117 } 3118 3119 switch (cmd) { 3120 case ND_GET: 3121 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command")); 3122 case ND_SET: 3123 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command")); 3124 nxge_param_ioctl(nxgep, wq, mp, iocp); 3125 break; 3126 3127 case LB_GET_MODE: 3128 case LB_SET_MODE: 3129 case LB_GET_INFO_SIZE: 3130 case LB_GET_INFO: 3131 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3132 break; 3133 3134 case NXGE_GET_MII: 3135 case NXGE_PUT_MII: 3136 case NXGE_PUT_TCAM: 3137 case NXGE_GET_TCAM: 3138 case NXGE_GET64: 3139 case NXGE_PUT64: 3140 case NXGE_GET_TX_RING_SZ: 3141 case NXGE_GET_TX_DESC: 3142 case NXGE_TX_SIDE_RESET: 3143 case NXGE_RX_SIDE_RESET: 3144 case NXGE_GLOBAL_RESET: 3145 case NXGE_RESET_MAC: 3146 case NXGE_TX_REGS_DUMP: 3147 case NXGE_RX_REGS_DUMP: 3148 case NXGE_INT_REGS_DUMP: 3149 case NXGE_VIR_INT_REGS_DUMP: 3150 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3151 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3152 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3153 break; 3154 } 3155 3156 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3157 } 3158 3159 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3160 3161 static void 3162 nxge_m_resources(void *arg) 3163 { 3164 p_nxge_t nxgep = arg; 3165 mac_rx_fifo_t mrf; 3166 p_rx_rcr_rings_t rcr_rings; 3167 p_rx_rcr_ring_t *rcr_p; 3168 uint32_t i, ndmas; 3169 nxge_status_t status; 3170 3171 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 3172 3173 MUTEX_ENTER(nxgep->genlock); 3174 3175 /* 3176 * CR 6492541 Check to see if the drv_state has been initialized, 3177 * if not * call nxge_init(). 3178 */ 3179 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3180 status = nxge_init(nxgep); 3181 if (status != NXGE_OK) 3182 goto nxge_m_resources_exit; 3183 } 3184 3185 mrf.mrf_type = MAC_RX_FIFO; 3186 mrf.mrf_blank = nxge_rx_hw_blank; 3187 mrf.mrf_arg = (void *)nxgep; 3188 3189 mrf.mrf_normal_blank_time = 128; 3190 mrf.mrf_normal_pkt_count = 8; 3191 rcr_rings = nxgep->rx_rcr_rings; 3192 rcr_p = rcr_rings->rcr_rings; 3193 ndmas = rcr_rings->ndmas; 3194 3195 /* 3196 * Export our receive resources to the MAC layer. 3197 */ 3198 for (i = 0; i < ndmas; i++) { 3199 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle = 3200 mac_resource_add(nxgep->mach, 3201 (mac_resource_t *)&mrf); 3202 3203 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3204 "==> nxge_m_resources: vdma %d dma %d " 3205 "rcrptr 0x%016llx mac_handle 0x%016llx", 3206 i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc, 3207 rcr_p[i], 3208 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle)); 3209 } 3210 3211 nxge_m_resources_exit: 3212 MUTEX_EXIT(nxgep->genlock); 3213 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources")); 3214 } 3215 3216 static void 3217 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory) 3218 { 3219 p_nxge_mmac_stats_t mmac_stats; 3220 int i; 3221 nxge_mmac_t *mmac_info; 3222 3223 mmac_info = &nxgep->nxge_mmac_info; 3224 3225 mmac_stats = &nxgep->statsp->mmac_stats; 3226 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 3227 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 3228 3229 for (i = 0; i < ETHERADDRL; i++) { 3230 if (factory) { 3231 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3232 = mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i]; 3233 } else { 3234 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3235 = mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i]; 3236 } 3237 } 3238 } 3239 3240 /* 3241 * nxge_altmac_set() -- Set an alternate MAC address 3242 */ 3243 static int 3244 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot) 3245 { 3246 uint8_t addrn; 3247 uint8_t portn; 3248 npi_mac_addr_t altmac; 3249 3250 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 3251 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 3252 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 3253 3254 portn = nxgep->mac.portnum; 3255 addrn = (uint8_t)slot - 1; 3256 3257 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn, 3258 addrn, &altmac) != NPI_SUCCESS) 3259 return (EIO); 3260 /* 3261 * Enable comparison with the alternate MAC address. 3262 * While the first alternate addr is enabled by bit 1 of register 3263 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 3264 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 3265 * accordingly before calling npi_mac_altaddr_entry. 3266 */ 3267 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 3268 addrn = (uint8_t)slot - 1; 3269 else 3270 addrn = (uint8_t)slot; 3271 3272 if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn) 3273 != NPI_SUCCESS) 3274 return (EIO); 3275 3276 return (0); 3277 } 3278 3279 /* 3280 * nxeg_m_mmac_add() - find an unused address slot, set the address 3281 * value to the one specified, enable the port to start filtering on 3282 * the new MAC address. Returns 0 on success. 3283 */ 3284 static int 3285 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 3286 { 3287 p_nxge_t nxgep = arg; 3288 mac_addr_slot_t slot; 3289 nxge_mmac_t *mmac_info; 3290 int err; 3291 nxge_status_t status; 3292 3293 mutex_enter(nxgep->genlock); 3294 3295 /* 3296 * Make sure that nxge is initialized, if _start() has 3297 * not been called. 3298 */ 3299 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3300 status = nxge_init(nxgep); 3301 if (status != NXGE_OK) { 3302 mutex_exit(nxgep->genlock); 3303 return (ENXIO); 3304 } 3305 } 3306 3307 mmac_info = &nxgep->nxge_mmac_info; 3308 if (mmac_info->naddrfree == 0) { 3309 mutex_exit(nxgep->genlock); 3310 return (ENOSPC); 3311 } 3312 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3313 maddr->mma_addrlen)) { 3314 mutex_exit(nxgep->genlock); 3315 return (EINVAL); 3316 } 3317 /* 3318 * Search for the first available slot. Because naddrfree 3319 * is not zero, we are guaranteed to find one. 3320 * Slot 0 is for unique (primary) MAC. The first alternate 3321 * MAC slot is slot 1. 3322 * Each of the first two ports of Neptune has 16 alternate 3323 * MAC slots but only the first 7 (of 15) slots have assigned factory 3324 * MAC addresses. We first search among the slots without bundled 3325 * factory MACs. If we fail to find one in that range, then we 3326 * search the slots with bundled factory MACs. A factory MAC 3327 * will be wasted while the slot is used with a user MAC address. 3328 * But the slot could be used by factory MAC again after calling 3329 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 3330 */ 3331 if (mmac_info->num_factory_mmac < mmac_info->num_mmac) { 3332 for (slot = mmac_info->num_factory_mmac + 1; 3333 slot <= mmac_info->num_mmac; slot++) { 3334 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3335 break; 3336 } 3337 if (slot > mmac_info->num_mmac) { 3338 for (slot = 1; slot <= mmac_info->num_factory_mmac; 3339 slot++) { 3340 if (!(mmac_info->mac_pool[slot].flags 3341 & MMAC_SLOT_USED)) 3342 break; 3343 } 3344 } 3345 } else { 3346 for (slot = 1; slot <= mmac_info->num_mmac; slot++) { 3347 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3348 break; 3349 } 3350 } 3351 ASSERT(slot <= mmac_info->num_mmac); 3352 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) { 3353 mutex_exit(nxgep->genlock); 3354 return (err); 3355 } 3356 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 3357 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 3358 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 3359 mmac_info->naddrfree--; 3360 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3361 3362 maddr->mma_slot = slot; 3363 3364 mutex_exit(nxgep->genlock); 3365 return (0); 3366 } 3367 3368 /* 3369 * This function reserves an unused slot and programs the slot and the HW 3370 * with a factory mac address. 3371 */ 3372 static int 3373 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 3374 { 3375 p_nxge_t nxgep = arg; 3376 mac_addr_slot_t slot; 3377 nxge_mmac_t *mmac_info; 3378 int err; 3379 nxge_status_t status; 3380 3381 mutex_enter(nxgep->genlock); 3382 3383 /* 3384 * Make sure that nxge is initialized, if _start() has 3385 * not been called. 3386 */ 3387 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3388 status = nxge_init(nxgep); 3389 if (status != NXGE_OK) { 3390 mutex_exit(nxgep->genlock); 3391 return (ENXIO); 3392 } 3393 } 3394 3395 mmac_info = &nxgep->nxge_mmac_info; 3396 if (mmac_info->naddrfree == 0) { 3397 mutex_exit(nxgep->genlock); 3398 return (ENOSPC); 3399 } 3400 3401 slot = maddr->mma_slot; 3402 if (slot == -1) { /* -1: Take the first available slot */ 3403 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 3404 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3405 break; 3406 } 3407 if (slot > mmac_info->num_factory_mmac) { 3408 mutex_exit(nxgep->genlock); 3409 return (ENOSPC); 3410 } 3411 } 3412 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 3413 /* 3414 * Do not support factory MAC at a slot greater than 3415 * num_factory_mmac even when there are available factory 3416 * MAC addresses because the alternate MACs are bundled with 3417 * slot[1] through slot[num_factory_mmac] 3418 */ 3419 mutex_exit(nxgep->genlock); 3420 return (EINVAL); 3421 } 3422 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3423 mutex_exit(nxgep->genlock); 3424 return (EBUSY); 3425 } 3426 /* Verify the address to be reserved */ 3427 if (!mac_unicst_verify(nxgep->mach, 3428 mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 3429 mutex_exit(nxgep->genlock); 3430 return (EINVAL); 3431 } 3432 if (err = nxge_altmac_set(nxgep, 3433 mmac_info->factory_mac_pool[slot], slot)) { 3434 mutex_exit(nxgep->genlock); 3435 return (err); 3436 } 3437 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 3438 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3439 mmac_info->naddrfree--; 3440 3441 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 3442 mutex_exit(nxgep->genlock); 3443 3444 /* Pass info back to the caller */ 3445 maddr->mma_slot = slot; 3446 maddr->mma_addrlen = ETHERADDRL; 3447 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3448 3449 return (0); 3450 } 3451 3452 /* 3453 * Remove the specified mac address and update the HW not to filter 3454 * the mac address anymore. 3455 */ 3456 static int 3457 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 3458 { 3459 p_nxge_t nxgep = arg; 3460 nxge_mmac_t *mmac_info; 3461 uint8_t addrn; 3462 uint8_t portn; 3463 int err = 0; 3464 nxge_status_t status; 3465 3466 mutex_enter(nxgep->genlock); 3467 3468 /* 3469 * Make sure that nxge is initialized, if _start() has 3470 * not been called. 3471 */ 3472 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3473 status = nxge_init(nxgep); 3474 if (status != NXGE_OK) { 3475 mutex_exit(nxgep->genlock); 3476 return (ENXIO); 3477 } 3478 } 3479 3480 mmac_info = &nxgep->nxge_mmac_info; 3481 if (slot < 1 || slot > mmac_info->num_mmac) { 3482 mutex_exit(nxgep->genlock); 3483 return (EINVAL); 3484 } 3485 3486 portn = nxgep->mac.portnum; 3487 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 3488 addrn = (uint8_t)slot - 1; 3489 else 3490 addrn = (uint8_t)slot; 3491 3492 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3493 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 3494 == NPI_SUCCESS) { 3495 mmac_info->naddrfree++; 3496 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 3497 /* 3498 * Regardless if the MAC we just stopped filtering 3499 * is a user addr or a facory addr, we must set 3500 * the MMAC_VENDOR_ADDR flag if this slot has an 3501 * associated factory MAC to indicate that a factory 3502 * MAC is available. 3503 */ 3504 if (slot <= mmac_info->num_factory_mmac) { 3505 mmac_info->mac_pool[slot].flags 3506 |= MMAC_VENDOR_ADDR; 3507 } 3508 /* 3509 * Clear mac_pool[slot].addr so that kstat shows 0 3510 * alternate MAC address if the slot is not used. 3511 * (But nxge_m_mmac_get returns the factory MAC even 3512 * when the slot is not used!) 3513 */ 3514 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 3515 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3516 } else { 3517 err = EIO; 3518 } 3519 } else { 3520 err = EINVAL; 3521 } 3522 3523 mutex_exit(nxgep->genlock); 3524 return (err); 3525 } 3526 3527 3528 /* 3529 * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve(). 3530 */ 3531 static int 3532 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr) 3533 { 3534 p_nxge_t nxgep = arg; 3535 mac_addr_slot_t slot; 3536 nxge_mmac_t *mmac_info; 3537 int err = 0; 3538 nxge_status_t status; 3539 3540 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3541 maddr->mma_addrlen)) 3542 return (EINVAL); 3543 3544 slot = maddr->mma_slot; 3545 3546 mutex_enter(nxgep->genlock); 3547 3548 /* 3549 * Make sure that nxge is initialized, if _start() has 3550 * not been called. 3551 */ 3552 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3553 status = nxge_init(nxgep); 3554 if (status != NXGE_OK) { 3555 mutex_exit(nxgep->genlock); 3556 return (ENXIO); 3557 } 3558 } 3559 3560 mmac_info = &nxgep->nxge_mmac_info; 3561 if (slot < 1 || slot > mmac_info->num_mmac) { 3562 mutex_exit(nxgep->genlock); 3563 return (EINVAL); 3564 } 3565 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3566 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) 3567 != 0) { 3568 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, 3569 ETHERADDRL); 3570 /* 3571 * Assume that the MAC passed down from the caller 3572 * is not a factory MAC address (The user should 3573 * call mmac_remove followed by mmac_reserve if 3574 * he wants to use the factory MAC for this slot). 3575 */ 3576 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 3577 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3578 } 3579 } else { 3580 err = EINVAL; 3581 } 3582 mutex_exit(nxgep->genlock); 3583 return (err); 3584 } 3585 3586 /* 3587 * nxge_m_mmac_get() - Get the MAC address and other information 3588 * related to the slot. mma_flags should be set to 0 in the call. 3589 * Note: although kstat shows MAC address as zero when a slot is 3590 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC 3591 * to the caller as long as the slot is not using a user MAC address. 3592 * The following table shows the rules, 3593 * 3594 * USED VENDOR mma_addr 3595 * ------------------------------------------------------------ 3596 * (1) Slot uses a user MAC: yes no user MAC 3597 * (2) Slot uses a factory MAC: yes yes factory MAC 3598 * (3) Slot is not used but is 3599 * factory MAC capable: no yes factory MAC 3600 * (4) Slot is not used and is 3601 * not factory MAC capable: no no 0 3602 * ------------------------------------------------------------ 3603 */ 3604 static int 3605 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr) 3606 { 3607 nxge_t *nxgep = arg; 3608 mac_addr_slot_t slot; 3609 nxge_mmac_t *mmac_info; 3610 nxge_status_t status; 3611 3612 slot = maddr->mma_slot; 3613 3614 mutex_enter(nxgep->genlock); 3615 3616 /* 3617 * Make sure that nxge is initialized, if _start() has 3618 * not been called. 3619 */ 3620 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3621 status = nxge_init(nxgep); 3622 if (status != NXGE_OK) { 3623 mutex_exit(nxgep->genlock); 3624 return (ENXIO); 3625 } 3626 } 3627 3628 mmac_info = &nxgep->nxge_mmac_info; 3629 3630 if (slot < 1 || slot > mmac_info->num_mmac) { 3631 mutex_exit(nxgep->genlock); 3632 return (EINVAL); 3633 } 3634 maddr->mma_flags = 0; 3635 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) 3636 maddr->mma_flags |= MMAC_SLOT_USED; 3637 3638 if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) { 3639 maddr->mma_flags |= MMAC_VENDOR_ADDR; 3640 bcopy(mmac_info->factory_mac_pool[slot], 3641 maddr->mma_addr, ETHERADDRL); 3642 maddr->mma_addrlen = ETHERADDRL; 3643 } else { 3644 if (maddr->mma_flags & MMAC_SLOT_USED) { 3645 bcopy(mmac_info->mac_pool[slot].addr, 3646 maddr->mma_addr, ETHERADDRL); 3647 maddr->mma_addrlen = ETHERADDRL; 3648 } else { 3649 bzero(maddr->mma_addr, ETHERADDRL); 3650 maddr->mma_addrlen = 0; 3651 } 3652 } 3653 mutex_exit(nxgep->genlock); 3654 return (0); 3655 } 3656 3657 3658 static boolean_t 3659 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3660 { 3661 nxge_t *nxgep = arg; 3662 uint32_t *txflags = cap_data; 3663 multiaddress_capab_t *mmacp = cap_data; 3664 3665 switch (cap) { 3666 case MAC_CAPAB_HCKSUM: 3667 *txflags = HCKSUM_INET_PARTIAL; 3668 break; 3669 case MAC_CAPAB_POLL: 3670 /* 3671 * There's nothing for us to fill in, simply returning 3672 * B_TRUE stating that we support polling is sufficient. 3673 */ 3674 break; 3675 3676 case MAC_CAPAB_MULTIADDRESS: 3677 mutex_enter(nxgep->genlock); 3678 3679 mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac; 3680 mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree; 3681 mmacp->maddr_flag = 0; /* 0 is requried by PSARC2006/265 */ 3682 /* 3683 * maddr_handle is driver's private data, passed back to 3684 * entry point functions as arg. 3685 */ 3686 mmacp->maddr_handle = nxgep; 3687 mmacp->maddr_add = nxge_m_mmac_add; 3688 mmacp->maddr_remove = nxge_m_mmac_remove; 3689 mmacp->maddr_modify = nxge_m_mmac_modify; 3690 mmacp->maddr_get = nxge_m_mmac_get; 3691 mmacp->maddr_reserve = nxge_m_mmac_reserve; 3692 3693 mutex_exit(nxgep->genlock); 3694 break; 3695 default: 3696 return (B_FALSE); 3697 } 3698 return (B_TRUE); 3699 } 3700 3701 /* 3702 * Module loading and removing entry points. 3703 */ 3704 3705 static struct cb_ops nxge_cb_ops = { 3706 nodev, /* cb_open */ 3707 nodev, /* cb_close */ 3708 nodev, /* cb_strategy */ 3709 nodev, /* cb_print */ 3710 nodev, /* cb_dump */ 3711 nodev, /* cb_read */ 3712 nodev, /* cb_write */ 3713 nodev, /* cb_ioctl */ 3714 nodev, /* cb_devmap */ 3715 nodev, /* cb_mmap */ 3716 nodev, /* cb_segmap */ 3717 nochpoll, /* cb_chpoll */ 3718 ddi_prop_op, /* cb_prop_op */ 3719 NULL, 3720 D_MP, /* cb_flag */ 3721 CB_REV, /* rev */ 3722 nodev, /* int (*cb_aread)() */ 3723 nodev /* int (*cb_awrite)() */ 3724 }; 3725 3726 static struct dev_ops nxge_dev_ops = { 3727 DEVO_REV, /* devo_rev */ 3728 0, /* devo_refcnt */ 3729 nulldev, 3730 nulldev, /* devo_identify */ 3731 nulldev, /* devo_probe */ 3732 nxge_attach, /* devo_attach */ 3733 nxge_detach, /* devo_detach */ 3734 nodev, /* devo_reset */ 3735 &nxge_cb_ops, /* devo_cb_ops */ 3736 (struct bus_ops *)NULL, /* devo_bus_ops */ 3737 ddi_power /* devo_power */ 3738 }; 3739 3740 extern struct mod_ops mod_driverops; 3741 3742 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet %I%" 3743 3744 /* 3745 * Module linkage information for the kernel. 3746 */ 3747 static struct modldrv nxge_modldrv = { 3748 &mod_driverops, 3749 NXGE_DESC_VER, 3750 &nxge_dev_ops 3751 }; 3752 3753 static struct modlinkage modlinkage = { 3754 MODREV_1, (void *) &nxge_modldrv, NULL 3755 }; 3756 3757 int 3758 _init(void) 3759 { 3760 int status; 3761 3762 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 3763 mac_init_ops(&nxge_dev_ops, "nxge"); 3764 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 3765 if (status != 0) { 3766 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 3767 "failed to init device soft state")); 3768 goto _init_exit; 3769 } 3770 3771 status = mod_install(&modlinkage); 3772 if (status != 0) { 3773 ddi_soft_state_fini(&nxge_list); 3774 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 3775 goto _init_exit; 3776 } 3777 3778 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 3779 3780 _init_exit: 3781 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 3782 3783 return (status); 3784 } 3785 3786 int 3787 _fini(void) 3788 { 3789 int status; 3790 3791 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 3792 3793 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 3794 3795 if (nxge_mblks_pending) 3796 return (EBUSY); 3797 3798 status = mod_remove(&modlinkage); 3799 if (status != DDI_SUCCESS) { 3800 NXGE_DEBUG_MSG((NULL, MOD_CTL, 3801 "Module removal failed 0x%08x", 3802 status)); 3803 goto _fini_exit; 3804 } 3805 3806 mac_fini_ops(&nxge_dev_ops); 3807 3808 ddi_soft_state_fini(&nxge_list); 3809 3810 MUTEX_DESTROY(&nxge_common_lock); 3811 _fini_exit: 3812 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 3813 3814 return (status); 3815 } 3816 3817 int 3818 _info(struct modinfo *modinfop) 3819 { 3820 int status; 3821 3822 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 3823 status = mod_info(&modlinkage, modinfop); 3824 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 3825 3826 return (status); 3827 } 3828 3829 /*ARGSUSED*/ 3830 static nxge_status_t 3831 nxge_add_intrs(p_nxge_t nxgep) 3832 { 3833 3834 int intr_types; 3835 int type = 0; 3836 int ddi_status = DDI_SUCCESS; 3837 nxge_status_t status = NXGE_OK; 3838 3839 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 3840 3841 nxgep->nxge_intr_type.intr_registered = B_FALSE; 3842 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 3843 nxgep->nxge_intr_type.msi_intx_cnt = 0; 3844 nxgep->nxge_intr_type.intr_added = 0; 3845 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 3846 nxgep->nxge_intr_type.intr_type = 0; 3847 3848 if (nxgep->niu_type == N2_NIU) { 3849 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 3850 } else if (nxge_msi_enable) { 3851 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 3852 } 3853 3854 /* Get the supported interrupt types */ 3855 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 3856 != DDI_SUCCESS) { 3857 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 3858 "ddi_intr_get_supported_types failed: status 0x%08x", 3859 ddi_status)); 3860 return (NXGE_ERROR | NXGE_DDI_FAILED); 3861 } 3862 nxgep->nxge_intr_type.intr_types = intr_types; 3863 3864 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3865 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3866 3867 /* 3868 * Solaris MSIX is not supported yet. use MSI for now. 3869 * nxge_msi_enable (1): 3870 * 1 - MSI 2 - MSI-X others - FIXED 3871 */ 3872 switch (nxge_msi_enable) { 3873 default: 3874 type = DDI_INTR_TYPE_FIXED; 3875 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3876 "use fixed (intx emulation) type %08x", 3877 type)); 3878 break; 3879 3880 case 2: 3881 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3882 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3883 if (intr_types & DDI_INTR_TYPE_MSIX) { 3884 type = DDI_INTR_TYPE_MSIX; 3885 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3886 "ddi_intr_get_supported_types: MSIX 0x%08x", 3887 type)); 3888 } else if (intr_types & DDI_INTR_TYPE_MSI) { 3889 type = DDI_INTR_TYPE_MSI; 3890 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3891 "ddi_intr_get_supported_types: MSI 0x%08x", 3892 type)); 3893 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3894 type = DDI_INTR_TYPE_FIXED; 3895 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3896 "ddi_intr_get_supported_types: MSXED0x%08x", 3897 type)); 3898 } 3899 break; 3900 3901 case 1: 3902 if (intr_types & DDI_INTR_TYPE_MSI) { 3903 type = DDI_INTR_TYPE_MSI; 3904 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3905 "ddi_intr_get_supported_types: MSI 0x%08x", 3906 type)); 3907 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 3908 type = DDI_INTR_TYPE_MSIX; 3909 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3910 "ddi_intr_get_supported_types: MSIX 0x%08x", 3911 type)); 3912 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3913 type = DDI_INTR_TYPE_FIXED; 3914 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3915 "ddi_intr_get_supported_types: MSXED0x%08x", 3916 type)); 3917 } 3918 } 3919 3920 nxgep->nxge_intr_type.intr_type = type; 3921 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 3922 type == DDI_INTR_TYPE_FIXED) && 3923 nxgep->nxge_intr_type.niu_msi_enable) { 3924 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 3925 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3926 " nxge_add_intrs: " 3927 " nxge_add_intrs_adv failed: status 0x%08x", 3928 status)); 3929 return (status); 3930 } else { 3931 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3932 "interrupts registered : type %d", type)); 3933 nxgep->nxge_intr_type.intr_registered = B_TRUE; 3934 3935 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 3936 "\nAdded advanced nxge add_intr_adv " 3937 "intr type 0x%x\n", type)); 3938 3939 return (status); 3940 } 3941 } 3942 3943 if (!nxgep->nxge_intr_type.intr_registered) { 3944 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 3945 "failed to register interrupts")); 3946 return (NXGE_ERROR | NXGE_DDI_FAILED); 3947 } 3948 3949 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 3950 return (status); 3951 } 3952 3953 /*ARGSUSED*/ 3954 static nxge_status_t 3955 nxge_add_soft_intrs(p_nxge_t nxgep) 3956 { 3957 3958 int ddi_status = DDI_SUCCESS; 3959 nxge_status_t status = NXGE_OK; 3960 3961 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs")); 3962 3963 nxgep->resched_id = NULL; 3964 nxgep->resched_running = B_FALSE; 3965 ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW, 3966 &nxgep->resched_id, 3967 NULL, NULL, nxge_reschedule, (caddr_t)nxgep); 3968 if (ddi_status != DDI_SUCCESS) { 3969 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: " 3970 "ddi_add_softintrs failed: status 0x%08x", 3971 ddi_status)); 3972 return (NXGE_ERROR | NXGE_DDI_FAILED); 3973 } 3974 3975 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_ddi_add_soft_intrs")); 3976 3977 return (status); 3978 } 3979 3980 static nxge_status_t 3981 nxge_add_intrs_adv(p_nxge_t nxgep) 3982 { 3983 int intr_type; 3984 p_nxge_intr_t intrp; 3985 3986 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 3987 3988 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 3989 intr_type = intrp->intr_type; 3990 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 3991 intr_type)); 3992 3993 switch (intr_type) { 3994 case DDI_INTR_TYPE_MSI: /* 0x2 */ 3995 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 3996 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 3997 3998 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 3999 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 4000 4001 default: 4002 return (NXGE_ERROR); 4003 } 4004 } 4005 4006 4007 /*ARGSUSED*/ 4008 static nxge_status_t 4009 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 4010 { 4011 dev_info_t *dip = nxgep->dip; 4012 p_nxge_ldg_t ldgp; 4013 p_nxge_intr_t intrp; 4014 uint_t *inthandler; 4015 void *arg1, *arg2; 4016 int behavior; 4017 int nintrs, navail; 4018 int nactual, nrequired; 4019 int inum = 0; 4020 int x, y; 4021 int ddi_status = DDI_SUCCESS; 4022 nxge_status_t status = NXGE_OK; 4023 4024 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 4025 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4026 intrp->start_inum = 0; 4027 4028 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 4029 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 4030 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4031 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 4032 "nintrs: %d", ddi_status, nintrs)); 4033 return (NXGE_ERROR | NXGE_DDI_FAILED); 4034 } 4035 4036 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 4037 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 4038 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4039 "ddi_intr_get_navail() failed, status: 0x%x%, " 4040 "nintrs: %d", ddi_status, navail)); 4041 return (NXGE_ERROR | NXGE_DDI_FAILED); 4042 } 4043 4044 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4045 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 4046 nintrs, navail)); 4047 4048 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 4049 /* MSI must be power of 2 */ 4050 if ((navail & 16) == 16) { 4051 navail = 16; 4052 } else if ((navail & 8) == 8) { 4053 navail = 8; 4054 } else if ((navail & 4) == 4) { 4055 navail = 4; 4056 } else if ((navail & 2) == 2) { 4057 navail = 2; 4058 } else { 4059 navail = 1; 4060 } 4061 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4062 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 4063 "navail %d", nintrs, navail)); 4064 } 4065 4066 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 4067 DDI_INTR_ALLOC_NORMAL); 4068 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 4069 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 4070 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 4071 navail, &nactual, behavior); 4072 if (ddi_status != DDI_SUCCESS || nactual == 0) { 4073 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4074 " ddi_intr_alloc() failed: %d", 4075 ddi_status)); 4076 kmem_free(intrp->htable, intrp->intr_size); 4077 return (NXGE_ERROR | NXGE_DDI_FAILED); 4078 } 4079 4080 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 4081 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 4082 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4083 " ddi_intr_get_pri() failed: %d", 4084 ddi_status)); 4085 /* Free already allocated interrupts */ 4086 for (y = 0; y < nactual; y++) { 4087 (void) ddi_intr_free(intrp->htable[y]); 4088 } 4089 4090 kmem_free(intrp->htable, intrp->intr_size); 4091 return (NXGE_ERROR | NXGE_DDI_FAILED); 4092 } 4093 4094 nrequired = 0; 4095 switch (nxgep->niu_type) { 4096 case NEPTUNE: 4097 case NEPTUNE_2: 4098 default: 4099 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 4100 break; 4101 4102 case N2_NIU: 4103 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 4104 break; 4105 } 4106 4107 if (status != NXGE_OK) { 4108 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4109 "nxge_add_intrs_adv_typ:nxge_ldgv_init " 4110 "failed: 0x%x", status)); 4111 /* Free already allocated interrupts */ 4112 for (y = 0; y < nactual; y++) { 4113 (void) ddi_intr_free(intrp->htable[y]); 4114 } 4115 4116 kmem_free(intrp->htable, intrp->intr_size); 4117 return (status); 4118 } 4119 4120 ldgp = nxgep->ldgvp->ldgp; 4121 for (x = 0; x < nrequired; x++, ldgp++) { 4122 ldgp->vector = (uint8_t)x; 4123 ldgp->intdata = SID_DATA(ldgp->func, x); 4124 arg1 = ldgp->ldvp; 4125 arg2 = nxgep; 4126 if (ldgp->nldvs == 1) { 4127 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 4128 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4129 "nxge_add_intrs_adv_type: " 4130 "arg1 0x%x arg2 0x%x: " 4131 "1-1 int handler (entry %d intdata 0x%x)\n", 4132 arg1, arg2, 4133 x, ldgp->intdata)); 4134 } else if (ldgp->nldvs > 1) { 4135 inthandler = (uint_t *)ldgp->sys_intr_handler; 4136 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4137 "nxge_add_intrs_adv_type: " 4138 "arg1 0x%x arg2 0x%x: " 4139 "nldevs %d int handler " 4140 "(entry %d intdata 0x%x)\n", 4141 arg1, arg2, 4142 ldgp->nldvs, x, ldgp->intdata)); 4143 } 4144 4145 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4146 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 4147 "htable 0x%llx", x, intrp->htable[x])); 4148 4149 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 4150 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 4151 != DDI_SUCCESS) { 4152 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4153 "==> nxge_add_intrs_adv_type: failed #%d " 4154 "status 0x%x", x, ddi_status)); 4155 for (y = 0; y < intrp->intr_added; y++) { 4156 (void) ddi_intr_remove_handler( 4157 intrp->htable[y]); 4158 } 4159 /* Free already allocated intr */ 4160 for (y = 0; y < nactual; y++) { 4161 (void) ddi_intr_free(intrp->htable[y]); 4162 } 4163 kmem_free(intrp->htable, intrp->intr_size); 4164 4165 (void) nxge_ldgv_uninit(nxgep); 4166 4167 return (NXGE_ERROR | NXGE_DDI_FAILED); 4168 } 4169 intrp->intr_added++; 4170 } 4171 4172 intrp->msi_intx_cnt = nactual; 4173 4174 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 4175 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 4176 navail, nactual, 4177 intrp->msi_intx_cnt, 4178 intrp->intr_added)); 4179 4180 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 4181 4182 (void) nxge_intr_ldgv_init(nxgep); 4183 4184 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type")); 4185 4186 return (status); 4187 } 4188 4189 /*ARGSUSED*/ 4190 static nxge_status_t 4191 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type) 4192 { 4193 dev_info_t *dip = nxgep->dip; 4194 p_nxge_ldg_t ldgp; 4195 p_nxge_intr_t intrp; 4196 uint_t *inthandler; 4197 void *arg1, *arg2; 4198 int behavior; 4199 int nintrs, navail; 4200 int nactual, nrequired; 4201 int inum = 0; 4202 int x, y; 4203 int ddi_status = DDI_SUCCESS; 4204 nxge_status_t status = NXGE_OK; 4205 4206 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix")); 4207 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4208 intrp->start_inum = 0; 4209 4210 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 4211 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 4212 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4213 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 4214 "nintrs: %d", status, nintrs)); 4215 return (NXGE_ERROR | NXGE_DDI_FAILED); 4216 } 4217 4218 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 4219 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 4220 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4221 "ddi_intr_get_navail() failed, status: 0x%x%, " 4222 "nintrs: %d", ddi_status, navail)); 4223 return (NXGE_ERROR | NXGE_DDI_FAILED); 4224 } 4225 4226 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4227 "ddi_intr_get_navail() returned: nintrs %d, naavail %d", 4228 nintrs, navail)); 4229 4230 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 4231 DDI_INTR_ALLOC_NORMAL); 4232 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 4233 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 4234 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 4235 navail, &nactual, behavior); 4236 if (ddi_status != DDI_SUCCESS || nactual == 0) { 4237 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4238 " ddi_intr_alloc() failed: %d", 4239 ddi_status)); 4240 kmem_free(intrp->htable, intrp->intr_size); 4241 return (NXGE_ERROR | NXGE_DDI_FAILED); 4242 } 4243 4244 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 4245 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 4246 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4247 " ddi_intr_get_pri() failed: %d", 4248 ddi_status)); 4249 /* Free already allocated interrupts */ 4250 for (y = 0; y < nactual; y++) { 4251 (void) ddi_intr_free(intrp->htable[y]); 4252 } 4253 4254 kmem_free(intrp->htable, intrp->intr_size); 4255 return (NXGE_ERROR | NXGE_DDI_FAILED); 4256 } 4257 4258 nrequired = 0; 4259 switch (nxgep->niu_type) { 4260 case NEPTUNE: 4261 case NEPTUNE_2: 4262 default: 4263 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 4264 break; 4265 4266 case N2_NIU: 4267 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 4268 break; 4269 } 4270 4271 if (status != NXGE_OK) { 4272 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4273 "nxge_add_intrs_adv_type_fix:nxge_ldgv_init " 4274 "failed: 0x%x", status)); 4275 /* Free already allocated interrupts */ 4276 for (y = 0; y < nactual; y++) { 4277 (void) ddi_intr_free(intrp->htable[y]); 4278 } 4279 4280 kmem_free(intrp->htable, intrp->intr_size); 4281 return (status); 4282 } 4283 4284 ldgp = nxgep->ldgvp->ldgp; 4285 for (x = 0; x < nrequired; x++, ldgp++) { 4286 ldgp->vector = (uint8_t)x; 4287 if (nxgep->niu_type != N2_NIU) { 4288 ldgp->intdata = SID_DATA(ldgp->func, x); 4289 } 4290 4291 arg1 = ldgp->ldvp; 4292 arg2 = nxgep; 4293 if (ldgp->nldvs == 1) { 4294 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 4295 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4296 "nxge_add_intrs_adv_type_fix: " 4297 "1-1 int handler(%d) ldg %d ldv %d " 4298 "arg1 $%p arg2 $%p\n", 4299 x, ldgp->ldg, ldgp->ldvp->ldv, 4300 arg1, arg2)); 4301 } else if (ldgp->nldvs > 1) { 4302 inthandler = (uint_t *)ldgp->sys_intr_handler; 4303 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4304 "nxge_add_intrs_adv_type_fix: " 4305 "shared ldv %d int handler(%d) ldv %d ldg %d" 4306 "arg1 0x%016llx arg2 0x%016llx\n", 4307 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 4308 arg1, arg2)); 4309 } 4310 4311 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 4312 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 4313 != DDI_SUCCESS) { 4314 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4315 "==> nxge_add_intrs_adv_type_fix: failed #%d " 4316 "status 0x%x", x, ddi_status)); 4317 for (y = 0; y < intrp->intr_added; y++) { 4318 (void) ddi_intr_remove_handler( 4319 intrp->htable[y]); 4320 } 4321 for (y = 0; y < nactual; y++) { 4322 (void) ddi_intr_free(intrp->htable[y]); 4323 } 4324 /* Free already allocated intr */ 4325 kmem_free(intrp->htable, intrp->intr_size); 4326 4327 (void) nxge_ldgv_uninit(nxgep); 4328 4329 return (NXGE_ERROR | NXGE_DDI_FAILED); 4330 } 4331 intrp->intr_added++; 4332 } 4333 4334 intrp->msi_intx_cnt = nactual; 4335 4336 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 4337 4338 status = nxge_intr_ldgv_init(nxgep); 4339 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 4340 4341 return (status); 4342 } 4343 4344 static void 4345 nxge_remove_intrs(p_nxge_t nxgep) 4346 { 4347 int i, inum; 4348 p_nxge_intr_t intrp; 4349 4350 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 4351 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4352 if (!intrp->intr_registered) { 4353 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4354 "<== nxge_remove_intrs: interrupts not registered")); 4355 return; 4356 } 4357 4358 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 4359 4360 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4361 (void) ddi_intr_block_disable(intrp->htable, 4362 intrp->intr_added); 4363 } else { 4364 for (i = 0; i < intrp->intr_added; i++) { 4365 (void) ddi_intr_disable(intrp->htable[i]); 4366 } 4367 } 4368 4369 for (inum = 0; inum < intrp->intr_added; inum++) { 4370 if (intrp->htable[inum]) { 4371 (void) ddi_intr_remove_handler(intrp->htable[inum]); 4372 } 4373 } 4374 4375 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 4376 if (intrp->htable[inum]) { 4377 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 4378 "nxge_remove_intrs: ddi_intr_free inum %d " 4379 "msi_intx_cnt %d intr_added %d", 4380 inum, 4381 intrp->msi_intx_cnt, 4382 intrp->intr_added)); 4383 4384 (void) ddi_intr_free(intrp->htable[inum]); 4385 } 4386 } 4387 4388 kmem_free(intrp->htable, intrp->intr_size); 4389 intrp->intr_registered = B_FALSE; 4390 intrp->intr_enabled = B_FALSE; 4391 intrp->msi_intx_cnt = 0; 4392 intrp->intr_added = 0; 4393 4394 (void) nxge_ldgv_uninit(nxgep); 4395 4396 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 4397 } 4398 4399 /*ARGSUSED*/ 4400 static void 4401 nxge_remove_soft_intrs(p_nxge_t nxgep) 4402 { 4403 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs")); 4404 if (nxgep->resched_id) { 4405 ddi_remove_softintr(nxgep->resched_id); 4406 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4407 "==> nxge_remove_soft_intrs: removed")); 4408 nxgep->resched_id = NULL; 4409 } 4410 4411 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs")); 4412 } 4413 4414 /*ARGSUSED*/ 4415 static void 4416 nxge_intrs_enable(p_nxge_t nxgep) 4417 { 4418 p_nxge_intr_t intrp; 4419 int i; 4420 int status; 4421 4422 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 4423 4424 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4425 4426 if (!intrp->intr_registered) { 4427 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 4428 "interrupts are not registered")); 4429 return; 4430 } 4431 4432 if (intrp->intr_enabled) { 4433 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4434 "<== nxge_intrs_enable: already enabled")); 4435 return; 4436 } 4437 4438 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4439 status = ddi_intr_block_enable(intrp->htable, 4440 intrp->intr_added); 4441 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 4442 "block enable - status 0x%x total inums #%d\n", 4443 status, intrp->intr_added)); 4444 } else { 4445 for (i = 0; i < intrp->intr_added; i++) { 4446 status = ddi_intr_enable(intrp->htable[i]); 4447 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 4448 "ddi_intr_enable:enable - status 0x%x " 4449 "total inums %d enable inum #%d\n", 4450 status, intrp->intr_added, i)); 4451 if (status == DDI_SUCCESS) { 4452 intrp->intr_enabled = B_TRUE; 4453 } 4454 } 4455 } 4456 4457 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable")); 4458 } 4459 4460 /*ARGSUSED*/ 4461 static void 4462 nxge_intrs_disable(p_nxge_t nxgep) 4463 { 4464 p_nxge_intr_t intrp; 4465 int i; 4466 4467 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable")); 4468 4469 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4470 4471 if (!intrp->intr_registered) { 4472 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: " 4473 "interrupts are not registered")); 4474 return; 4475 } 4476 4477 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4478 (void) ddi_intr_block_disable(intrp->htable, 4479 intrp->intr_added); 4480 } else { 4481 for (i = 0; i < intrp->intr_added; i++) { 4482 (void) ddi_intr_disable(intrp->htable[i]); 4483 } 4484 } 4485 4486 intrp->intr_enabled = B_FALSE; 4487 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable")); 4488 } 4489 4490 static nxge_status_t 4491 nxge_mac_register(p_nxge_t nxgep) 4492 { 4493 mac_register_t *macp; 4494 int status; 4495 4496 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register")); 4497 4498 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 4499 return (NXGE_ERROR); 4500 4501 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 4502 macp->m_driver = nxgep; 4503 macp->m_dip = nxgep->dip; 4504 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; 4505 macp->m_callbacks = &nxge_m_callbacks; 4506 macp->m_min_sdu = 0; 4507 macp->m_max_sdu = nxgep->mac.maxframesize - 4508 sizeof (struct ether_header) - ETHERFCSL - 4; 4509 4510 status = mac_register(macp, &nxgep->mach); 4511 mac_free(macp); 4512 4513 if (status != 0) { 4514 cmn_err(CE_WARN, 4515 "!nxge_mac_register failed (status %d instance %d)", 4516 status, nxgep->instance); 4517 return (NXGE_ERROR); 4518 } 4519 4520 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success " 4521 "(instance %d)", nxgep->instance)); 4522 4523 return (NXGE_OK); 4524 } 4525 4526 void 4527 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp) 4528 { 4529 ssize_t size; 4530 mblk_t *nmp; 4531 uint8_t blk_id; 4532 uint8_t chan; 4533 uint32_t err_id; 4534 err_inject_t *eip; 4535 4536 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject")); 4537 4538 size = 1024; 4539 nmp = mp->b_cont; 4540 eip = (err_inject_t *)nmp->b_rptr; 4541 blk_id = eip->blk_id; 4542 err_id = eip->err_id; 4543 chan = eip->chan; 4544 cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id); 4545 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id); 4546 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 4547 switch (blk_id) { 4548 case MAC_BLK_ID: 4549 break; 4550 case TXMAC_BLK_ID: 4551 break; 4552 case RXMAC_BLK_ID: 4553 break; 4554 case MIF_BLK_ID: 4555 break; 4556 case IPP_BLK_ID: 4557 nxge_ipp_inject_err(nxgep, err_id); 4558 break; 4559 case TXC_BLK_ID: 4560 nxge_txc_inject_err(nxgep, err_id); 4561 break; 4562 case TXDMA_BLK_ID: 4563 nxge_txdma_inject_err(nxgep, err_id, chan); 4564 break; 4565 case RXDMA_BLK_ID: 4566 nxge_rxdma_inject_err(nxgep, err_id, chan); 4567 break; 4568 case ZCP_BLK_ID: 4569 nxge_zcp_inject_err(nxgep, err_id); 4570 break; 4571 case ESPC_BLK_ID: 4572 break; 4573 case FFLP_BLK_ID: 4574 break; 4575 case PHY_BLK_ID: 4576 break; 4577 case ETHER_SERDES_BLK_ID: 4578 break; 4579 case PCIE_SERDES_BLK_ID: 4580 break; 4581 case VIR_BLK_ID: 4582 break; 4583 } 4584 4585 nmp->b_wptr = nmp->b_rptr + size; 4586 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 4587 4588 miocack(wq, mp, (int)size, 0); 4589 } 4590 4591 static int 4592 nxge_init_common_dev(p_nxge_t nxgep) 4593 { 4594 p_nxge_hw_list_t hw_p; 4595 dev_info_t *p_dip; 4596 4597 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 4598 4599 p_dip = nxgep->p_dip; 4600 MUTEX_ENTER(&nxge_common_lock); 4601 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4602 "==> nxge_init_common_dev:func # %d", 4603 nxgep->function_num)); 4604 /* 4605 * Loop through existing per neptune hardware list. 4606 */ 4607 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 4608 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4609 "==> nxge_init_common_device:func # %d " 4610 "hw_p $%p parent dip $%p", 4611 nxgep->function_num, 4612 hw_p, 4613 p_dip)); 4614 if (hw_p->parent_devp == p_dip) { 4615 nxgep->nxge_hw_p = hw_p; 4616 hw_p->ndevs++; 4617 hw_p->nxge_p[nxgep->function_num] = nxgep; 4618 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4619 "==> nxge_init_common_device:func # %d " 4620 "hw_p $%p parent dip $%p " 4621 "ndevs %d (found)", 4622 nxgep->function_num, 4623 hw_p, 4624 p_dip, 4625 hw_p->ndevs)); 4626 break; 4627 } 4628 } 4629 4630 if (hw_p == NULL) { 4631 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4632 "==> nxge_init_common_device:func # %d " 4633 "parent dip $%p (new)", 4634 nxgep->function_num, 4635 p_dip)); 4636 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 4637 hw_p->parent_devp = p_dip; 4638 hw_p->magic = NXGE_NEPTUNE_MAGIC; 4639 nxgep->nxge_hw_p = hw_p; 4640 hw_p->ndevs++; 4641 hw_p->nxge_p[nxgep->function_num] = nxgep; 4642 hw_p->next = nxge_hw_list; 4643 4644 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 4645 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 4646 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 4647 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 4648 MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL); 4649 4650 nxge_hw_list = hw_p; 4651 } 4652 4653 MUTEX_EXIT(&nxge_common_lock); 4654 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4655 "==> nxge_init_common_device (nxge_hw_list) $%p", 4656 nxge_hw_list)); 4657 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device")); 4658 4659 return (NXGE_OK); 4660 } 4661 4662 static void 4663 nxge_uninit_common_dev(p_nxge_t nxgep) 4664 { 4665 p_nxge_hw_list_t hw_p, h_hw_p; 4666 dev_info_t *p_dip; 4667 4668 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device")); 4669 if (nxgep->nxge_hw_p == NULL) { 4670 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4671 "<== nxge_uninit_common_device (no common)")); 4672 return; 4673 } 4674 4675 MUTEX_ENTER(&nxge_common_lock); 4676 h_hw_p = nxge_hw_list; 4677 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 4678 p_dip = hw_p->parent_devp; 4679 if (nxgep->nxge_hw_p == hw_p && 4680 p_dip == nxgep->p_dip && 4681 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC && 4682 hw_p->magic == NXGE_NEPTUNE_MAGIC) { 4683 4684 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4685 "==> nxge_uninit_common_device:func # %d " 4686 "hw_p $%p parent dip $%p " 4687 "ndevs %d (found)", 4688 nxgep->function_num, 4689 hw_p, 4690 p_dip, 4691 hw_p->ndevs)); 4692 4693 nxgep->nxge_hw_p = NULL; 4694 if (hw_p->ndevs) { 4695 hw_p->ndevs--; 4696 } 4697 hw_p->nxge_p[nxgep->function_num] = NULL; 4698 if (!hw_p->ndevs) { 4699 MUTEX_DESTROY(&hw_p->nxge_vlan_lock); 4700 MUTEX_DESTROY(&hw_p->nxge_tcam_lock); 4701 MUTEX_DESTROY(&hw_p->nxge_cfg_lock); 4702 MUTEX_DESTROY(&hw_p->nxge_mdio_lock); 4703 MUTEX_DESTROY(&hw_p->nxge_mii_lock); 4704 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4705 "==> nxge_uninit_common_device: " 4706 "func # %d " 4707 "hw_p $%p parent dip $%p " 4708 "ndevs %d (last)", 4709 nxgep->function_num, 4710 hw_p, 4711 p_dip, 4712 hw_p->ndevs)); 4713 4714 if (hw_p == nxge_hw_list) { 4715 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4716 "==> nxge_uninit_common_device:" 4717 "remove head func # %d " 4718 "hw_p $%p parent dip $%p " 4719 "ndevs %d (head)", 4720 nxgep->function_num, 4721 hw_p, 4722 p_dip, 4723 hw_p->ndevs)); 4724 nxge_hw_list = hw_p->next; 4725 } else { 4726 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4727 "==> nxge_uninit_common_device:" 4728 "remove middle func # %d " 4729 "hw_p $%p parent dip $%p " 4730 "ndevs %d (middle)", 4731 nxgep->function_num, 4732 hw_p, 4733 p_dip, 4734 hw_p->ndevs)); 4735 h_hw_p->next = hw_p->next; 4736 } 4737 4738 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t)); 4739 } 4740 break; 4741 } else { 4742 h_hw_p = hw_p; 4743 } 4744 } 4745 4746 MUTEX_EXIT(&nxge_common_lock); 4747 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4748 "==> nxge_uninit_common_device (nxge_hw_list) $%p", 4749 nxge_hw_list)); 4750 4751 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<= nxge_uninit_common_device")); 4752 } 4753