1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * SunOs MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver. 30 */ 31 #include <sys/nxge/nxge_impl.h> 32 #include <sys/pcie.h> 33 34 uint32_t nxge_use_partition = 0; /* debug partition flag */ 35 uint32_t nxge_dma_obp_props_only = 1; /* use obp published props */ 36 uint32_t nxge_use_rdc_intr = 1; /* debug to assign rdc intr */ 37 /* 38 * until MSIX supported, assume msi, use 2 for msix 39 */ 40 uint32_t nxge_msi_enable = 1; /* debug: turn msi off */ 41 42 /* 43 * Globals: tunable parameters (/etc/system or adb) 44 * 45 */ 46 uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT; 47 uint32_t nxge_rbr_spare_size = 0; 48 uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT; 49 uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT; 50 boolean_t nxge_no_msg = B_TRUE; /* control message display */ 51 uint32_t nxge_no_link_notify = 0; /* control DL_NOTIFY */ 52 uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX; 53 uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN; 54 uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN; 55 uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU; 56 boolean_t nxge_jumbo_enable = B_FALSE; 57 uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT; 58 uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD; 59 nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL; 60 61 /* 62 * Debugging flags: 63 * nxge_no_tx_lb : transmit load balancing 64 * nxge_tx_lb_policy: 0 - TCP port (default) 65 * 3 - DEST MAC 66 */ 67 uint32_t nxge_no_tx_lb = 0; 68 uint32_t nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP; 69 70 /* 71 * Add tunable to reduce the amount of time spent in the 72 * ISR doing Rx Processing. 73 */ 74 uint32_t nxge_max_rx_pkts = 1024; 75 76 /* 77 * Tunables to manage the receive buffer blocks. 78 * 79 * nxge_rx_threshold_hi: copy all buffers. 80 * nxge_rx_bcopy_size_type: receive buffer block size type. 81 * nxge_rx_threshold_lo: copy only up to tunable block size type. 82 */ 83 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6; 84 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0; 85 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3; 86 87 rtrace_t npi_rtracebuf; 88 89 #if defined(sun4v) 90 /* 91 * Hypervisor N2/NIU services information. 92 */ 93 static hsvc_info_t niu_hsvc = { 94 HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER, 95 NIU_MINOR_VER, "nxge" 96 }; 97 #endif 98 99 /* 100 * Function Prototypes 101 */ 102 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t); 103 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t); 104 static void nxge_unattach(p_nxge_t); 105 106 #if NXGE_PROPERTY 107 static void nxge_remove_hard_properties(p_nxge_t); 108 #endif 109 110 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t); 111 112 static nxge_status_t nxge_setup_mutexes(p_nxge_t); 113 static void nxge_destroy_mutexes(p_nxge_t); 114 115 static nxge_status_t nxge_map_regs(p_nxge_t nxgep); 116 static void nxge_unmap_regs(p_nxge_t nxgep); 117 #ifdef NXGE_DEBUG 118 static void nxge_test_map_regs(p_nxge_t nxgep); 119 #endif 120 121 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep); 122 static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep); 123 static void nxge_remove_intrs(p_nxge_t nxgep); 124 static void nxge_remove_soft_intrs(p_nxge_t nxgep); 125 126 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep); 127 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t); 128 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t); 129 static void nxge_intrs_enable(p_nxge_t nxgep); 130 static void nxge_intrs_disable(p_nxge_t nxgep); 131 132 static void nxge_suspend(p_nxge_t); 133 static nxge_status_t nxge_resume(p_nxge_t); 134 135 static nxge_status_t nxge_setup_dev(p_nxge_t); 136 static void nxge_destroy_dev(p_nxge_t); 137 138 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t); 139 static void nxge_free_mem_pool(p_nxge_t); 140 141 static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t); 142 static void nxge_free_rx_mem_pool(p_nxge_t); 143 144 static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t); 145 static void nxge_free_tx_mem_pool(p_nxge_t); 146 147 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t, 148 struct ddi_dma_attr *, 149 size_t, ddi_device_acc_attr_t *, uint_t, 150 p_nxge_dma_common_t); 151 152 static void nxge_dma_mem_free(p_nxge_dma_common_t); 153 154 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t, 155 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 156 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 157 158 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t, 159 p_nxge_dma_common_t *, size_t); 160 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 161 162 static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t, 163 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 164 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 165 166 static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t, 167 p_nxge_dma_common_t *, 168 size_t); 169 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 170 171 static int nxge_init_common_dev(p_nxge_t); 172 static void nxge_uninit_common_dev(p_nxge_t); 173 174 /* 175 * The next declarations are for the GLDv3 interface. 176 */ 177 static int nxge_m_start(void *); 178 static void nxge_m_stop(void *); 179 static int nxge_m_unicst(void *, const uint8_t *); 180 static int nxge_m_multicst(void *, boolean_t, const uint8_t *); 181 static int nxge_m_promisc(void *, boolean_t); 182 static void nxge_m_ioctl(void *, queue_t *, mblk_t *); 183 static void nxge_m_resources(void *); 184 mblk_t *nxge_m_tx(void *arg, mblk_t *); 185 static nxge_status_t nxge_mac_register(p_nxge_t); 186 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr, 187 mac_addr_slot_t slot); 188 static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, 189 boolean_t factory); 190 static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr); 191 static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr); 192 static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot); 193 static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr); 194 static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr); 195 196 #define NXGE_NEPTUNE_MAGIC 0x4E584745UL 197 #define MAX_DUMP_SZ 256 198 199 #define NXGE_M_CALLBACK_FLAGS (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB) 200 201 static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *); 202 static mac_callbacks_t nxge_m_callbacks = { 203 NXGE_M_CALLBACK_FLAGS, 204 nxge_m_stat, 205 nxge_m_start, 206 nxge_m_stop, 207 nxge_m_promisc, 208 nxge_m_multicst, 209 nxge_m_unicst, 210 nxge_m_tx, 211 nxge_m_resources, 212 nxge_m_ioctl, 213 nxge_m_getcapab 214 }; 215 216 void 217 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *); 218 219 /* 220 * These global variables control the message 221 * output. 222 */ 223 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG; 224 uint64_t nxge_debug_level = 0; 225 226 /* 227 * This list contains the instance structures for the Neptune 228 * devices present in the system. The lock exists to guarantee 229 * mutually exclusive access to the list. 230 */ 231 void *nxge_list = NULL; 232 233 void *nxge_hw_list = NULL; 234 nxge_os_mutex_t nxge_common_lock; 235 236 nxge_os_mutex_t nxge_mii_lock; 237 static uint32_t nxge_mii_lock_init = 0; 238 nxge_os_mutex_t nxge_mdio_lock; 239 static uint32_t nxge_mdio_lock_init = 0; 240 241 extern uint64_t npi_debug_level; 242 243 extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *); 244 extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *); 245 extern nxge_status_t nxge_ldgv_uninit(p_nxge_t); 246 extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t); 247 extern void nxge_fm_init(p_nxge_t, 248 ddi_device_acc_attr_t *, 249 ddi_device_acc_attr_t *, 250 ddi_dma_attr_t *); 251 extern void nxge_fm_fini(p_nxge_t); 252 extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t); 253 254 /* 255 * Count used to maintain the number of buffers being used 256 * by Neptune instances and loaned up to the upper layers. 257 */ 258 uint32_t nxge_mblks_pending = 0; 259 260 /* 261 * Device register access attributes for PIO. 262 */ 263 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = { 264 DDI_DEVICE_ATTR_V0, 265 DDI_STRUCTURE_LE_ACC, 266 DDI_STRICTORDER_ACC, 267 }; 268 269 /* 270 * Device descriptor access attributes for DMA. 271 */ 272 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = { 273 DDI_DEVICE_ATTR_V0, 274 DDI_STRUCTURE_LE_ACC, 275 DDI_STRICTORDER_ACC 276 }; 277 278 /* 279 * Device buffer access attributes for DMA. 280 */ 281 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = { 282 DDI_DEVICE_ATTR_V0, 283 DDI_STRUCTURE_BE_ACC, 284 DDI_STRICTORDER_ACC 285 }; 286 287 ddi_dma_attr_t nxge_desc_dma_attr = { 288 DMA_ATTR_V0, /* version number. */ 289 0, /* low address */ 290 0xffffffffffffffff, /* high address */ 291 0xffffffffffffffff, /* address counter max */ 292 #ifndef NIU_PA_WORKAROUND 293 0x100000, /* alignment */ 294 #else 295 0x2000, 296 #endif 297 0xfc00fc, /* dlim_burstsizes */ 298 0x1, /* minimum transfer size */ 299 0xffffffffffffffff, /* maximum transfer size */ 300 0xffffffffffffffff, /* maximum segment size */ 301 1, /* scatter/gather list length */ 302 (unsigned int) 1, /* granularity */ 303 0 /* attribute flags */ 304 }; 305 306 ddi_dma_attr_t nxge_tx_dma_attr = { 307 DMA_ATTR_V0, /* version number. */ 308 0, /* low address */ 309 0xffffffffffffffff, /* high address */ 310 0xffffffffffffffff, /* address counter max */ 311 #if defined(_BIG_ENDIAN) 312 0x2000, /* alignment */ 313 #else 314 0x1000, /* alignment */ 315 #endif 316 0xfc00fc, /* dlim_burstsizes */ 317 0x1, /* minimum transfer size */ 318 0xffffffffffffffff, /* maximum transfer size */ 319 0xffffffffffffffff, /* maximum segment size */ 320 5, /* scatter/gather list length */ 321 (unsigned int) 1, /* granularity */ 322 0 /* attribute flags */ 323 }; 324 325 ddi_dma_attr_t nxge_rx_dma_attr = { 326 DMA_ATTR_V0, /* version number. */ 327 0, /* low address */ 328 0xffffffffffffffff, /* high address */ 329 0xffffffffffffffff, /* address counter max */ 330 0x2000, /* alignment */ 331 0xfc00fc, /* dlim_burstsizes */ 332 0x1, /* minimum transfer size */ 333 0xffffffffffffffff, /* maximum transfer size */ 334 0xffffffffffffffff, /* maximum segment size */ 335 1, /* scatter/gather list length */ 336 (unsigned int) 1, /* granularity */ 337 DDI_DMA_RELAXED_ORDERING /* attribute flags */ 338 }; 339 340 ddi_dma_lim_t nxge_dma_limits = { 341 (uint_t)0, /* dlim_addr_lo */ 342 (uint_t)0xffffffff, /* dlim_addr_hi */ 343 (uint_t)0xffffffff, /* dlim_cntr_max */ 344 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */ 345 0x1, /* dlim_minxfer */ 346 1024 /* dlim_speed */ 347 }; 348 349 dma_method_t nxge_force_dma = DVMA; 350 351 /* 352 * dma chunk sizes. 353 * 354 * Try to allocate the largest possible size 355 * so that fewer number of dma chunks would be managed 356 */ 357 #ifdef NIU_PA_WORKAROUND 358 size_t alloc_sizes [] = {0x2000}; 359 #else 360 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000, 361 0x10000, 0x20000, 0x40000, 0x80000, 362 0x100000, 0x200000, 0x400000, 0x800000, 0x1000000}; 363 #endif 364 365 /* 366 * Translate "dev_t" to a pointer to the associated "dev_info_t". 367 */ 368 369 static int 370 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 371 { 372 p_nxge_t nxgep = NULL; 373 int instance; 374 int status = DDI_SUCCESS; 375 uint8_t portn; 376 nxge_mmac_t *mmac_info; 377 378 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach")); 379 380 /* 381 * Get the device instance since we'll need to setup 382 * or retrieve a soft state for this instance. 383 */ 384 instance = ddi_get_instance(dip); 385 386 switch (cmd) { 387 case DDI_ATTACH: 388 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH")); 389 break; 390 391 case DDI_RESUME: 392 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME")); 393 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 394 if (nxgep == NULL) { 395 status = DDI_FAILURE; 396 break; 397 } 398 if (nxgep->dip != dip) { 399 status = DDI_FAILURE; 400 break; 401 } 402 if (nxgep->suspended == DDI_PM_SUSPEND) { 403 status = ddi_dev_is_needed(nxgep->dip, 0, 1); 404 } else { 405 status = nxge_resume(nxgep); 406 } 407 goto nxge_attach_exit; 408 409 case DDI_PM_RESUME: 410 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME")); 411 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 412 if (nxgep == NULL) { 413 status = DDI_FAILURE; 414 break; 415 } 416 if (nxgep->dip != dip) { 417 status = DDI_FAILURE; 418 break; 419 } 420 status = nxge_resume(nxgep); 421 goto nxge_attach_exit; 422 423 default: 424 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown")); 425 status = DDI_FAILURE; 426 goto nxge_attach_exit; 427 } 428 429 430 if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) { 431 status = DDI_FAILURE; 432 goto nxge_attach_exit; 433 } 434 435 nxgep = ddi_get_soft_state(nxge_list, instance); 436 if (nxgep == NULL) { 437 status = NXGE_ERROR; 438 goto nxge_attach_fail2; 439 } 440 441 nxgep->nxge_magic = NXGE_MAGIC; 442 443 nxgep->drv_state = 0; 444 nxgep->dip = dip; 445 nxgep->instance = instance; 446 nxgep->p_dip = ddi_get_parent(dip); 447 nxgep->nxge_debug_level = nxge_debug_level; 448 npi_debug_level = nxge_debug_level; 449 450 nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr, 451 &nxge_rx_dma_attr); 452 453 status = nxge_map_regs(nxgep); 454 if (status != NXGE_OK) { 455 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed")); 456 goto nxge_attach_fail3; 457 } 458 459 status = nxge_init_common_dev(nxgep); 460 if (status != NXGE_OK) { 461 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 462 "nxge_init_common_dev failed")); 463 goto nxge_attach_fail4; 464 } 465 466 if (nxgep->niu_type == NEPTUNE_2_10GF) { 467 if (nxgep->function_num > 1) { 468 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Unsupported" 469 " function %d. Only functions 0 and 1 are " 470 "supported for this card.", nxgep->function_num)); 471 status = NXGE_ERROR; 472 goto nxge_attach_fail4; 473 } 474 } 475 476 portn = NXGE_GET_PORT_NUM(nxgep->function_num); 477 nxgep->mac.portnum = portn; 478 if ((portn == 0) || (portn == 1)) 479 nxgep->mac.porttype = PORT_TYPE_XMAC; 480 else 481 nxgep->mac.porttype = PORT_TYPE_BMAC; 482 /* 483 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC) 484 * internally, the rest 2 ports use BMAC (1G "Big" MAC). 485 * The two types of MACs have different characterizations. 486 */ 487 mmac_info = &nxgep->nxge_mmac_info; 488 if (nxgep->function_num < 2) { 489 mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY; 490 mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY; 491 } else { 492 mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY; 493 mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY; 494 } 495 /* 496 * Setup the Ndd parameters for the this instance. 497 */ 498 nxge_init_param(nxgep); 499 500 /* 501 * Setup Register Tracing Buffer. 502 */ 503 npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf); 504 505 /* init stats ptr */ 506 nxge_init_statsp(nxgep); 507 508 /* 509 * read the vpd info from the eeprom into local data 510 * structure and check for the VPD info validity 511 */ 512 nxge_vpd_info_get(nxgep); 513 514 status = nxge_xcvr_find(nxgep); 515 516 if (status != NXGE_OK) { 517 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: " 518 " Couldn't determine card type" 519 " .... exit ")); 520 goto nxge_attach_fail5; 521 } 522 523 status = nxge_get_config_properties(nxgep); 524 525 if (status != NXGE_OK) { 526 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed")); 527 goto nxge_attach_fail; 528 } 529 530 /* 531 * Setup the Kstats for the driver. 532 */ 533 nxge_setup_kstats(nxgep); 534 535 nxge_setup_param(nxgep); 536 537 status = nxge_setup_system_dma_pages(nxgep); 538 if (status != NXGE_OK) { 539 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed")); 540 goto nxge_attach_fail; 541 } 542 543 #if defined(sun4v) 544 if (nxgep->niu_type == N2_NIU) { 545 nxgep->niu_hsvc_available = B_FALSE; 546 bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t)); 547 if ((status = 548 hsvc_register(&nxgep->niu_hsvc, 549 &nxgep->niu_min_ver)) != 0) { 550 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 551 "nxge_attach: " 552 "%s: cannot negotiate " 553 "hypervisor services " 554 "revision %d " 555 "group: 0x%lx " 556 "major: 0x%lx minor: 0x%lx " 557 "errno: %d", 558 niu_hsvc.hsvc_modname, 559 niu_hsvc.hsvc_rev, 560 niu_hsvc.hsvc_group, 561 niu_hsvc.hsvc_major, 562 niu_hsvc.hsvc_minor, 563 status)); 564 status = DDI_FAILURE; 565 goto nxge_attach_fail; 566 } 567 568 nxgep->niu_hsvc_available = B_TRUE; 569 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 570 "NIU Hypervisor service enabled")); 571 } 572 #endif 573 574 nxge_hw_id_init(nxgep); 575 nxge_hw_init_niu_common(nxgep); 576 577 status = nxge_setup_mutexes(nxgep); 578 if (status != NXGE_OK) { 579 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed")); 580 goto nxge_attach_fail; 581 } 582 583 status = nxge_setup_dev(nxgep); 584 if (status != DDI_SUCCESS) { 585 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed")); 586 goto nxge_attach_fail; 587 } 588 589 status = nxge_add_intrs(nxgep); 590 if (status != DDI_SUCCESS) { 591 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed")); 592 goto nxge_attach_fail; 593 } 594 status = nxge_add_soft_intrs(nxgep); 595 if (status != DDI_SUCCESS) { 596 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed")); 597 goto nxge_attach_fail; 598 } 599 600 /* 601 * Enable interrupts. 602 */ 603 nxge_intrs_enable(nxgep); 604 605 if ((status = nxge_mac_register(nxgep)) != NXGE_OK) { 606 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 607 "unable to register to mac layer (%d)", status)); 608 goto nxge_attach_fail; 609 } 610 611 mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN); 612 613 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)", 614 instance)); 615 616 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 617 618 goto nxge_attach_exit; 619 620 nxge_attach_fail: 621 nxge_unattach(nxgep); 622 goto nxge_attach_fail1; 623 624 nxge_attach_fail5: 625 /* 626 * Tear down the ndd parameters setup. 627 */ 628 nxge_destroy_param(nxgep); 629 630 /* 631 * Tear down the kstat setup. 632 */ 633 nxge_destroy_kstats(nxgep); 634 635 nxge_attach_fail4: 636 if (nxgep->nxge_hw_p) { 637 nxge_uninit_common_dev(nxgep); 638 nxgep->nxge_hw_p = NULL; 639 } 640 641 nxge_attach_fail3: 642 /* 643 * Unmap the register setup. 644 */ 645 nxge_unmap_regs(nxgep); 646 647 nxge_fm_fini(nxgep); 648 649 nxge_attach_fail2: 650 ddi_soft_state_free(nxge_list, nxgep->instance); 651 652 nxge_attach_fail1: 653 if (status != NXGE_OK) 654 status = (NXGE_ERROR | NXGE_DDI_FAILED); 655 nxgep = NULL; 656 657 nxge_attach_exit: 658 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x", 659 status)); 660 661 return (status); 662 } 663 664 static int 665 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 666 { 667 int status = DDI_SUCCESS; 668 int instance; 669 p_nxge_t nxgep = NULL; 670 671 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach")); 672 instance = ddi_get_instance(dip); 673 nxgep = ddi_get_soft_state(nxge_list, instance); 674 if (nxgep == NULL) { 675 status = DDI_FAILURE; 676 goto nxge_detach_exit; 677 } 678 679 switch (cmd) { 680 case DDI_DETACH: 681 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH")); 682 break; 683 684 case DDI_PM_SUSPEND: 685 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND")); 686 nxgep->suspended = DDI_PM_SUSPEND; 687 nxge_suspend(nxgep); 688 break; 689 690 case DDI_SUSPEND: 691 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND")); 692 if (nxgep->suspended != DDI_PM_SUSPEND) { 693 nxgep->suspended = DDI_SUSPEND; 694 nxge_suspend(nxgep); 695 } 696 break; 697 698 default: 699 status = DDI_FAILURE; 700 } 701 702 if (cmd != DDI_DETACH) 703 goto nxge_detach_exit; 704 705 /* 706 * Stop the xcvr polling. 707 */ 708 nxgep->suspended = cmd; 709 710 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 711 712 if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) { 713 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 714 "<== nxge_detach status = 0x%08X", status)); 715 return (DDI_FAILURE); 716 } 717 718 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 719 "<== nxge_detach (mac_unregister) status = 0x%08X", status)); 720 721 nxge_unattach(nxgep); 722 nxgep = NULL; 723 724 nxge_detach_exit: 725 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X", 726 status)); 727 728 return (status); 729 } 730 731 static void 732 nxge_unattach(p_nxge_t nxgep) 733 { 734 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach")); 735 736 if (nxgep == NULL || nxgep->dev_regs == NULL) { 737 return; 738 } 739 740 nxgep->nxge_magic = 0; 741 742 if (nxgep->nxge_hw_p) { 743 nxge_uninit_common_dev(nxgep); 744 nxgep->nxge_hw_p = NULL; 745 } 746 747 if (nxgep->nxge_timerid) { 748 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 749 nxgep->nxge_timerid = 0; 750 } 751 752 #if defined(sun4v) 753 if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) { 754 (void) hsvc_unregister(&nxgep->niu_hsvc); 755 nxgep->niu_hsvc_available = B_FALSE; 756 } 757 #endif 758 /* 759 * Stop any further interrupts. 760 */ 761 nxge_remove_intrs(nxgep); 762 763 /* remove soft interrups */ 764 nxge_remove_soft_intrs(nxgep); 765 766 /* 767 * Stop the device and free resources. 768 */ 769 nxge_destroy_dev(nxgep); 770 771 /* 772 * Tear down the ndd parameters setup. 773 */ 774 nxge_destroy_param(nxgep); 775 776 /* 777 * Tear down the kstat setup. 778 */ 779 nxge_destroy_kstats(nxgep); 780 781 /* 782 * Destroy all mutexes. 783 */ 784 nxge_destroy_mutexes(nxgep); 785 786 /* 787 * Remove the list of ndd parameters which 788 * were setup during attach. 789 */ 790 if (nxgep->dip) { 791 NXGE_DEBUG_MSG((nxgep, OBP_CTL, 792 " nxge_unattach: remove all properties")); 793 794 (void) ddi_prop_remove_all(nxgep->dip); 795 } 796 797 #if NXGE_PROPERTY 798 nxge_remove_hard_properties(nxgep); 799 #endif 800 801 /* 802 * Unmap the register setup. 803 */ 804 nxge_unmap_regs(nxgep); 805 806 nxge_fm_fini(nxgep); 807 808 ddi_soft_state_free(nxge_list, nxgep->instance); 809 810 NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach")); 811 } 812 813 static char n2_siu_name[] = "niu"; 814 815 static nxge_status_t 816 nxge_map_regs(p_nxge_t nxgep) 817 { 818 int ddi_status = DDI_SUCCESS; 819 p_dev_regs_t dev_regs; 820 char buf[MAXPATHLEN + 1]; 821 char *devname; 822 #ifdef NXGE_DEBUG 823 char *sysname; 824 #endif 825 off_t regsize; 826 nxge_status_t status = NXGE_OK; 827 #if !defined(_BIG_ENDIAN) 828 off_t pci_offset; 829 uint16_t pcie_devctl; 830 #endif 831 832 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs")); 833 nxgep->dev_regs = NULL; 834 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP); 835 dev_regs->nxge_regh = NULL; 836 dev_regs->nxge_pciregh = NULL; 837 dev_regs->nxge_msix_regh = NULL; 838 dev_regs->nxge_vir_regh = NULL; 839 dev_regs->nxge_vir2_regh = NULL; 840 nxgep->niu_type = NIU_TYPE_NONE; 841 842 devname = ddi_pathname(nxgep->dip, buf); 843 ASSERT(strlen(devname) > 0); 844 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 845 "nxge_map_regs: pathname devname %s", devname)); 846 847 if (strstr(devname, n2_siu_name)) { 848 /* N2/NIU */ 849 nxgep->niu_type = N2_NIU; 850 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 851 "nxge_map_regs: N2/NIU devname %s", devname)); 852 /* get function number */ 853 nxgep->function_num = 854 (devname[strlen(devname) -1] == '1' ? 1 : 0); 855 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 856 "nxge_map_regs: N2/NIU function number %d", 857 nxgep->function_num)); 858 } else { 859 int *prop_val; 860 uint_t prop_len; 861 uint8_t func_num; 862 863 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 864 0, "reg", 865 &prop_val, &prop_len) != DDI_PROP_SUCCESS) { 866 NXGE_DEBUG_MSG((nxgep, VPD_CTL, 867 "Reg property not found")); 868 ddi_status = DDI_FAILURE; 869 goto nxge_map_regs_fail0; 870 871 } else { 872 func_num = (prop_val[0] >> 8) & 0x7; 873 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 874 "Reg property found: fun # %d", 875 func_num)); 876 nxgep->function_num = func_num; 877 ddi_prop_free(prop_val); 878 } 879 } 880 881 switch (nxgep->niu_type) { 882 default: 883 (void) ddi_dev_regsize(nxgep->dip, 0, ®size); 884 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 885 "nxge_map_regs: pci config size 0x%x", regsize)); 886 887 ddi_status = ddi_regs_map_setup(nxgep->dip, 0, 888 (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0, 889 &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh); 890 if (ddi_status != DDI_SUCCESS) { 891 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 892 "ddi_map_regs, nxge bus config regs failed")); 893 goto nxge_map_regs_fail0; 894 } 895 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 896 "nxge_map_reg: PCI config addr 0x%0llx " 897 " handle 0x%0llx", dev_regs->nxge_pciregp, 898 dev_regs->nxge_pciregh)); 899 /* 900 * IMP IMP 901 * workaround for bit swapping bug in HW 902 * which ends up in no-snoop = yes 903 * resulting, in DMA not synched properly 904 */ 905 #if !defined(_BIG_ENDIAN) 906 /* workarounds for x86 systems */ 907 pci_offset = 0x80 + PCIE_DEVCTL; 908 pcie_devctl = 0x0; 909 pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP; 910 pcie_devctl |= PCIE_DEVCTL_RO_EN; 911 pci_config_put16(dev_regs->nxge_pciregh, pci_offset, 912 pcie_devctl); 913 #endif 914 915 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 916 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 917 "nxge_map_regs: pio size 0x%x", regsize)); 918 /* set up the device mapped register */ 919 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 920 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 921 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 922 if (ddi_status != DDI_SUCCESS) { 923 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 924 "ddi_map_regs for Neptune global reg failed")); 925 goto nxge_map_regs_fail1; 926 } 927 928 /* set up the msi/msi-x mapped register */ 929 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 930 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 931 "nxge_map_regs: msix size 0x%x", regsize)); 932 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 933 (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0, 934 &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh); 935 if (ddi_status != DDI_SUCCESS) { 936 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 937 "ddi_map_regs for msi reg failed")); 938 goto nxge_map_regs_fail2; 939 } 940 941 /* set up the vio region mapped register */ 942 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 943 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 944 "nxge_map_regs: vio size 0x%x", regsize)); 945 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 946 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 947 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 948 949 if (ddi_status != DDI_SUCCESS) { 950 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 951 "ddi_map_regs for nxge vio reg failed")); 952 goto nxge_map_regs_fail3; 953 } 954 nxgep->dev_regs = dev_regs; 955 956 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh); 957 NPI_PCI_ADD_HANDLE_SET(nxgep, 958 (npi_reg_ptr_t)dev_regs->nxge_pciregp); 959 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh); 960 NPI_MSI_ADD_HANDLE_SET(nxgep, 961 (npi_reg_ptr_t)dev_regs->nxge_msix_regp); 962 963 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 964 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 965 966 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 967 NPI_REG_ADD_HANDLE_SET(nxgep, 968 (npi_reg_ptr_t)dev_regs->nxge_regp); 969 970 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 971 NPI_VREG_ADD_HANDLE_SET(nxgep, 972 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 973 974 break; 975 976 case N2_NIU: 977 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU")); 978 /* 979 * Set up the device mapped register (FWARC 2006/556) 980 * (changed back to 1: reg starts at 1!) 981 */ 982 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 983 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 984 "nxge_map_regs: dev size 0x%x", regsize)); 985 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 986 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 987 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 988 989 if (ddi_status != DDI_SUCCESS) { 990 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 991 "ddi_map_regs for N2/NIU, global reg failed ")); 992 goto nxge_map_regs_fail1; 993 } 994 995 /* set up the vio region mapped register */ 996 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 997 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 998 "nxge_map_regs: vio (1) size 0x%x", regsize)); 999 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1000 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1001 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1002 1003 if (ddi_status != DDI_SUCCESS) { 1004 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1005 "ddi_map_regs for nxge vio reg failed")); 1006 goto nxge_map_regs_fail2; 1007 } 1008 /* set up the vio region mapped register */ 1009 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1010 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1011 "nxge_map_regs: vio (3) size 0x%x", regsize)); 1012 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1013 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0, 1014 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh); 1015 1016 if (ddi_status != DDI_SUCCESS) { 1017 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1018 "ddi_map_regs for nxge vio2 reg failed")); 1019 goto nxge_map_regs_fail3; 1020 } 1021 nxgep->dev_regs = dev_regs; 1022 1023 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1024 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1025 1026 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1027 NPI_REG_ADD_HANDLE_SET(nxgep, 1028 (npi_reg_ptr_t)dev_regs->nxge_regp); 1029 1030 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1031 NPI_VREG_ADD_HANDLE_SET(nxgep, 1032 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1033 1034 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh); 1035 NPI_V2REG_ADD_HANDLE_SET(nxgep, 1036 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp); 1037 1038 break; 1039 } 1040 1041 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx " 1042 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh)); 1043 1044 goto nxge_map_regs_exit; 1045 nxge_map_regs_fail3: 1046 if (dev_regs->nxge_msix_regh) { 1047 ddi_regs_map_free(&dev_regs->nxge_msix_regh); 1048 } 1049 if (dev_regs->nxge_vir_regh) { 1050 ddi_regs_map_free(&dev_regs->nxge_regh); 1051 } 1052 nxge_map_regs_fail2: 1053 if (dev_regs->nxge_regh) { 1054 ddi_regs_map_free(&dev_regs->nxge_regh); 1055 } 1056 nxge_map_regs_fail1: 1057 if (dev_regs->nxge_pciregh) { 1058 ddi_regs_map_free(&dev_regs->nxge_pciregh); 1059 } 1060 nxge_map_regs_fail0: 1061 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory")); 1062 kmem_free(dev_regs, sizeof (dev_regs_t)); 1063 1064 nxge_map_regs_exit: 1065 if (ddi_status != DDI_SUCCESS) 1066 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1067 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs")); 1068 return (status); 1069 } 1070 1071 static void 1072 nxge_unmap_regs(p_nxge_t nxgep) 1073 { 1074 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs")); 1075 if (nxgep->dev_regs) { 1076 if (nxgep->dev_regs->nxge_pciregh) { 1077 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1078 "==> nxge_unmap_regs: bus")); 1079 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh); 1080 nxgep->dev_regs->nxge_pciregh = NULL; 1081 } 1082 if (nxgep->dev_regs->nxge_regh) { 1083 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1084 "==> nxge_unmap_regs: device registers")); 1085 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1086 nxgep->dev_regs->nxge_regh = NULL; 1087 } 1088 if (nxgep->dev_regs->nxge_msix_regh) { 1089 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1090 "==> nxge_unmap_regs: device interrupts")); 1091 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); 1092 nxgep->dev_regs->nxge_msix_regh = NULL; 1093 } 1094 if (nxgep->dev_regs->nxge_vir_regh) { 1095 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1096 "==> nxge_unmap_regs: vio region")); 1097 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); 1098 nxgep->dev_regs->nxge_vir_regh = NULL; 1099 } 1100 if (nxgep->dev_regs->nxge_vir2_regh) { 1101 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1102 "==> nxge_unmap_regs: vio2 region")); 1103 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); 1104 nxgep->dev_regs->nxge_vir2_regh = NULL; 1105 } 1106 1107 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); 1108 nxgep->dev_regs = NULL; 1109 } 1110 1111 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); 1112 } 1113 1114 static nxge_status_t 1115 nxge_setup_mutexes(p_nxge_t nxgep) 1116 { 1117 int ddi_status = DDI_SUCCESS; 1118 nxge_status_t status = NXGE_OK; 1119 nxge_classify_t *classify_ptr; 1120 int partition; 1121 1122 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); 1123 1124 /* 1125 * Get the interrupt cookie so the mutexes can be 1126 * Initialized. 1127 */ 1128 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, 1129 &nxgep->interrupt_cookie); 1130 if (ddi_status != DDI_SUCCESS) { 1131 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1132 "<== nxge_setup_mutexes: failed 0x%x", ddi_status)); 1133 goto nxge_setup_mutexes_exit; 1134 } 1135 1136 /* Initialize global mutex */ 1137 1138 if (nxge_mdio_lock_init == 0) { 1139 MUTEX_INIT(&nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 1140 } 1141 atomic_add_32(&nxge_mdio_lock_init, 1); 1142 1143 if (nxge_mii_lock_init == 0) { 1144 MUTEX_INIT(&nxge_mii_lock, NULL, MUTEX_DRIVER, NULL); 1145 } 1146 atomic_add_32(&nxge_mii_lock_init, 1); 1147 1148 nxgep->drv_state |= STATE_MDIO_LOCK_INIT; 1149 nxgep->drv_state |= STATE_MII_LOCK_INIT; 1150 1151 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL); 1152 MUTEX_INIT(&nxgep->poll_lock, NULL, 1153 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1154 1155 /* 1156 * Initialize mutexes for this device. 1157 */ 1158 MUTEX_INIT(nxgep->genlock, NULL, 1159 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1160 MUTEX_INIT(&nxgep->ouraddr_lock, NULL, 1161 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1162 MUTEX_INIT(&nxgep->mif_lock, NULL, 1163 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1164 RW_INIT(&nxgep->filter_lock, NULL, 1165 RW_DRIVER, (void *)nxgep->interrupt_cookie); 1166 1167 classify_ptr = &nxgep->classifier; 1168 /* 1169 * FFLP Mutexes are never used in interrupt context 1170 * as fflp operation can take very long time to 1171 * complete and hence not suitable to invoke from interrupt 1172 * handlers. 1173 */ 1174 MUTEX_INIT(&classify_ptr->tcam_lock, NULL, 1175 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1176 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1177 MUTEX_INIT(&classify_ptr->fcram_lock, NULL, 1178 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1179 for (partition = 0; partition < MAX_PARTITION; partition++) { 1180 MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL, 1181 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1182 } 1183 } 1184 1185 nxge_setup_mutexes_exit: 1186 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1187 "<== nxge_setup_mutexes status = %x", status)); 1188 1189 if (ddi_status != DDI_SUCCESS) 1190 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1191 1192 return (status); 1193 } 1194 1195 static void 1196 nxge_destroy_mutexes(p_nxge_t nxgep) 1197 { 1198 int partition; 1199 nxge_classify_t *classify_ptr; 1200 1201 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes")); 1202 RW_DESTROY(&nxgep->filter_lock); 1203 MUTEX_DESTROY(&nxgep->mif_lock); 1204 MUTEX_DESTROY(&nxgep->ouraddr_lock); 1205 MUTEX_DESTROY(nxgep->genlock); 1206 1207 classify_ptr = &nxgep->classifier; 1208 MUTEX_DESTROY(&classify_ptr->tcam_lock); 1209 1210 /* Destroy all polling resources. */ 1211 MUTEX_DESTROY(&nxgep->poll_lock); 1212 cv_destroy(&nxgep->poll_cv); 1213 1214 /* free data structures, based on HW type */ 1215 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1216 MUTEX_DESTROY(&classify_ptr->fcram_lock); 1217 for (partition = 0; partition < MAX_PARTITION; partition++) { 1218 MUTEX_DESTROY(&classify_ptr->hash_lock[partition]); 1219 } 1220 } 1221 if (nxgep->drv_state & STATE_MDIO_LOCK_INIT) { 1222 if (nxge_mdio_lock_init == 1) { 1223 MUTEX_DESTROY(&nxge_mdio_lock); 1224 } 1225 atomic_add_32(&nxge_mdio_lock_init, -1); 1226 } 1227 if (nxgep->drv_state & STATE_MII_LOCK_INIT) { 1228 if (nxge_mii_lock_init == 1) { 1229 MUTEX_DESTROY(&nxge_mii_lock); 1230 } 1231 atomic_add_32(&nxge_mii_lock_init, -1); 1232 } 1233 1234 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes")); 1235 } 1236 1237 nxge_status_t 1238 nxge_init(p_nxge_t nxgep) 1239 { 1240 nxge_status_t status = NXGE_OK; 1241 1242 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init")); 1243 1244 if (nxgep->drv_state & STATE_HW_INITIALIZED) { 1245 return (status); 1246 } 1247 1248 /* 1249 * Allocate system memory for the receive/transmit buffer blocks 1250 * and receive/transmit descriptor rings. 1251 */ 1252 status = nxge_alloc_mem_pool(nxgep); 1253 if (status != NXGE_OK) { 1254 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n")); 1255 goto nxge_init_fail1; 1256 } 1257 1258 /* 1259 * Initialize and enable TXC registers 1260 * (Globally enable TX controller, 1261 * enable a port, configure dma channel bitmap, 1262 * configure the max burst size). 1263 */ 1264 status = nxge_txc_init(nxgep); 1265 if (status != NXGE_OK) { 1266 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n")); 1267 goto nxge_init_fail2; 1268 } 1269 1270 /* 1271 * Initialize and enable TXDMA channels. 1272 */ 1273 status = nxge_init_txdma_channels(nxgep); 1274 if (status != NXGE_OK) { 1275 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n")); 1276 goto nxge_init_fail3; 1277 } 1278 1279 /* 1280 * Initialize and enable RXDMA channels. 1281 */ 1282 status = nxge_init_rxdma_channels(nxgep); 1283 if (status != NXGE_OK) { 1284 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n")); 1285 goto nxge_init_fail4; 1286 } 1287 1288 /* 1289 * Initialize TCAM and FCRAM (Neptune). 1290 */ 1291 status = nxge_classify_init(nxgep); 1292 if (status != NXGE_OK) { 1293 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n")); 1294 goto nxge_init_fail5; 1295 } 1296 1297 /* 1298 * Initialize ZCP 1299 */ 1300 status = nxge_zcp_init(nxgep); 1301 if (status != NXGE_OK) { 1302 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n")); 1303 goto nxge_init_fail5; 1304 } 1305 1306 /* 1307 * Initialize IPP. 1308 */ 1309 status = nxge_ipp_init(nxgep); 1310 if (status != NXGE_OK) { 1311 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n")); 1312 goto nxge_init_fail5; 1313 } 1314 1315 /* 1316 * Initialize the MAC block. 1317 */ 1318 status = nxge_mac_init(nxgep); 1319 if (status != NXGE_OK) { 1320 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n")); 1321 goto nxge_init_fail5; 1322 } 1323 1324 nxge_intrs_enable(nxgep); 1325 1326 /* 1327 * Enable hardware interrupts. 1328 */ 1329 nxge_intr_hw_enable(nxgep); 1330 nxgep->drv_state |= STATE_HW_INITIALIZED; 1331 1332 goto nxge_init_exit; 1333 1334 nxge_init_fail5: 1335 nxge_uninit_rxdma_channels(nxgep); 1336 nxge_init_fail4: 1337 nxge_uninit_txdma_channels(nxgep); 1338 nxge_init_fail3: 1339 (void) nxge_txc_uninit(nxgep); 1340 nxge_init_fail2: 1341 nxge_free_mem_pool(nxgep); 1342 nxge_init_fail1: 1343 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1344 "<== nxge_init status (failed) = 0x%08x", status)); 1345 return (status); 1346 1347 nxge_init_exit: 1348 1349 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x", 1350 status)); 1351 return (status); 1352 } 1353 1354 1355 timeout_id_t 1356 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec) 1357 { 1358 if ((nxgep->suspended == 0) || 1359 (nxgep->suspended == DDI_RESUME)) { 1360 return (timeout(func, (caddr_t)nxgep, 1361 drv_usectohz(1000 * msec))); 1362 } 1363 return (NULL); 1364 } 1365 1366 /*ARGSUSED*/ 1367 void 1368 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid) 1369 { 1370 if (timerid) { 1371 (void) untimeout(timerid); 1372 } 1373 } 1374 1375 void 1376 nxge_uninit(p_nxge_t nxgep) 1377 { 1378 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit")); 1379 1380 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1381 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1382 "==> nxge_uninit: not initialized")); 1383 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1384 "<== nxge_uninit")); 1385 return; 1386 } 1387 1388 /* stop timer */ 1389 if (nxgep->nxge_timerid) { 1390 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 1391 nxgep->nxge_timerid = 0; 1392 } 1393 1394 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1395 (void) nxge_intr_hw_disable(nxgep); 1396 1397 /* 1398 * Reset the receive MAC side. 1399 */ 1400 (void) nxge_rx_mac_disable(nxgep); 1401 1402 /* Disable and soft reset the IPP */ 1403 (void) nxge_ipp_disable(nxgep); 1404 1405 /* Free classification resources */ 1406 (void) nxge_classify_uninit(nxgep); 1407 1408 /* 1409 * Reset the transmit/receive DMA side. 1410 */ 1411 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 1412 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1413 1414 nxge_uninit_txdma_channels(nxgep); 1415 nxge_uninit_rxdma_channels(nxgep); 1416 1417 /* 1418 * Reset the transmit MAC side. 1419 */ 1420 (void) nxge_tx_mac_disable(nxgep); 1421 1422 nxge_free_mem_pool(nxgep); 1423 1424 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1425 1426 nxgep->drv_state &= ~STATE_HW_INITIALIZED; 1427 1428 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: " 1429 "nxge_mblks_pending %d", nxge_mblks_pending)); 1430 } 1431 1432 void 1433 nxge_get64(p_nxge_t nxgep, p_mblk_t mp) 1434 { 1435 uint64_t reg; 1436 uint64_t regdata; 1437 int i, retry; 1438 1439 bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t)); 1440 regdata = 0; 1441 retry = 1; 1442 1443 for (i = 0; i < retry; i++) { 1444 NXGE_REG_RD64(nxgep->npi_handle, reg, ®data); 1445 } 1446 bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t)); 1447 } 1448 1449 void 1450 nxge_put64(p_nxge_t nxgep, p_mblk_t mp) 1451 { 1452 uint64_t reg; 1453 uint64_t buf[2]; 1454 1455 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t)); 1456 reg = buf[0]; 1457 1458 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]); 1459 } 1460 1461 1462 nxge_os_mutex_t nxgedebuglock; 1463 int nxge_debug_init = 0; 1464 1465 /*ARGSUSED*/ 1466 /*VARARGS*/ 1467 void 1468 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...) 1469 { 1470 char msg_buffer[1048]; 1471 char prefix_buffer[32]; 1472 int instance; 1473 uint64_t debug_level; 1474 int cmn_level = CE_CONT; 1475 va_list ap; 1476 1477 debug_level = (nxgep == NULL) ? nxge_debug_level : 1478 nxgep->nxge_debug_level; 1479 1480 if ((level & debug_level) || 1481 (level == NXGE_NOTE) || 1482 (level == NXGE_ERR_CTL)) { 1483 /* do the msg processing */ 1484 if (nxge_debug_init == 0) { 1485 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 1486 nxge_debug_init = 1; 1487 } 1488 1489 MUTEX_ENTER(&nxgedebuglock); 1490 1491 if ((level & NXGE_NOTE)) { 1492 cmn_level = CE_NOTE; 1493 } 1494 1495 if (level & NXGE_ERR_CTL) { 1496 cmn_level = CE_WARN; 1497 } 1498 1499 va_start(ap, fmt); 1500 (void) vsprintf(msg_buffer, fmt, ap); 1501 va_end(ap); 1502 if (nxgep == NULL) { 1503 instance = -1; 1504 (void) sprintf(prefix_buffer, "%s :", "nxge"); 1505 } else { 1506 instance = nxgep->instance; 1507 (void) sprintf(prefix_buffer, 1508 "%s%d :", "nxge", instance); 1509 } 1510 1511 MUTEX_EXIT(&nxgedebuglock); 1512 cmn_err(cmn_level, "!%s %s\n", 1513 prefix_buffer, msg_buffer); 1514 1515 } 1516 } 1517 1518 char * 1519 nxge_dump_packet(char *addr, int size) 1520 { 1521 uchar_t *ap = (uchar_t *)addr; 1522 int i; 1523 static char etherbuf[1024]; 1524 char *cp = etherbuf; 1525 char digits[] = "0123456789abcdef"; 1526 1527 if (!size) 1528 size = 60; 1529 1530 if (size > MAX_DUMP_SZ) { 1531 /* Dump the leading bytes */ 1532 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1533 if (*ap > 0x0f) 1534 *cp++ = digits[*ap >> 4]; 1535 *cp++ = digits[*ap++ & 0xf]; 1536 *cp++ = ':'; 1537 } 1538 for (i = 0; i < 20; i++) 1539 *cp++ = '.'; 1540 /* Dump the last MAX_DUMP_SZ/2 bytes */ 1541 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2)); 1542 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1543 if (*ap > 0x0f) 1544 *cp++ = digits[*ap >> 4]; 1545 *cp++ = digits[*ap++ & 0xf]; 1546 *cp++ = ':'; 1547 } 1548 } else { 1549 for (i = 0; i < size; i++) { 1550 if (*ap > 0x0f) 1551 *cp++ = digits[*ap >> 4]; 1552 *cp++ = digits[*ap++ & 0xf]; 1553 *cp++ = ':'; 1554 } 1555 } 1556 *--cp = 0; 1557 return (etherbuf); 1558 } 1559 1560 #ifdef NXGE_DEBUG 1561 static void 1562 nxge_test_map_regs(p_nxge_t nxgep) 1563 { 1564 ddi_acc_handle_t cfg_handle; 1565 p_pci_cfg_t cfg_ptr; 1566 ddi_acc_handle_t dev_handle; 1567 char *dev_ptr; 1568 ddi_acc_handle_t pci_config_handle; 1569 uint32_t regval; 1570 int i; 1571 1572 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs")); 1573 1574 dev_handle = nxgep->dev_regs->nxge_regh; 1575 dev_ptr = (char *)nxgep->dev_regs->nxge_regp; 1576 1577 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1578 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1579 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1580 1581 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1582 "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr)); 1583 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1584 "Neptune PCI cfg_ptr vendor id ptr 0x%llx", 1585 &cfg_ptr->vendorid)); 1586 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1587 "\tvendorid 0x%x devid 0x%x", 1588 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0), 1589 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0))); 1590 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1591 "PCI BAR: base 0x%x base14 0x%x base 18 0x%x " 1592 "bar1c 0x%x", 1593 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0), 1594 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0), 1595 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0), 1596 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0))); 1597 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1598 "\nNeptune PCI BAR: base20 0x%x base24 0x%x " 1599 "base 28 0x%x bar2c 0x%x\n", 1600 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0), 1601 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0), 1602 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0), 1603 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0))); 1604 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1605 "\nNeptune PCI BAR: base30 0x%x\n", 1606 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); 1607 1608 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1609 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1610 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1611 "first 0x%llx second 0x%llx third 0x%llx " 1612 "last 0x%llx ", 1613 NXGE_PIO_READ64(dev_handle, 1614 (uint64_t *)(dev_ptr + 0), 0), 1615 NXGE_PIO_READ64(dev_handle, 1616 (uint64_t *)(dev_ptr + 8), 0), 1617 NXGE_PIO_READ64(dev_handle, 1618 (uint64_t *)(dev_ptr + 16), 0), 1619 NXGE_PIO_READ64(cfg_handle, 1620 (uint64_t *)(dev_ptr + 24), 0))); 1621 } 1622 } 1623 1624 #endif 1625 1626 static void 1627 nxge_suspend(p_nxge_t nxgep) 1628 { 1629 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); 1630 1631 nxge_intrs_disable(nxgep); 1632 nxge_destroy_dev(nxgep); 1633 1634 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); 1635 } 1636 1637 static nxge_status_t 1638 nxge_resume(p_nxge_t nxgep) 1639 { 1640 nxge_status_t status = NXGE_OK; 1641 1642 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); 1643 1644 nxgep->suspended = DDI_RESUME; 1645 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1646 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1647 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 1648 (void) nxge_rx_mac_enable(nxgep); 1649 (void) nxge_tx_mac_enable(nxgep); 1650 nxge_intrs_enable(nxgep); 1651 nxgep->suspended = 0; 1652 1653 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1654 "<== nxge_resume status = 0x%x", status)); 1655 return (status); 1656 } 1657 1658 static nxge_status_t 1659 nxge_setup_dev(p_nxge_t nxgep) 1660 { 1661 nxge_status_t status = NXGE_OK; 1662 1663 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 1664 nxgep->mac.portnum)); 1665 1666 status = nxge_link_init(nxgep); 1667 1668 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 1669 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1670 "port%d Bad register acc handle", nxgep->mac.portnum)); 1671 status = NXGE_ERROR; 1672 } 1673 1674 if (status != NXGE_OK) { 1675 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1676 " nxge_setup_dev status " 1677 "(xcvr init 0x%08x)", status)); 1678 goto nxge_setup_dev_exit; 1679 } 1680 1681 nxge_setup_dev_exit: 1682 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1683 "<== nxge_setup_dev port %d status = 0x%08x", 1684 nxgep->mac.portnum, status)); 1685 1686 return (status); 1687 } 1688 1689 static void 1690 nxge_destroy_dev(p_nxge_t nxgep) 1691 { 1692 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 1693 1694 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1695 1696 (void) nxge_hw_stop(nxgep); 1697 1698 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 1699 } 1700 1701 static nxge_status_t 1702 nxge_setup_system_dma_pages(p_nxge_t nxgep) 1703 { 1704 int ddi_status = DDI_SUCCESS; 1705 uint_t count; 1706 ddi_dma_cookie_t cookie; 1707 uint_t iommu_pagesize; 1708 nxge_status_t status = NXGE_OK; 1709 1710 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 1711 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 1712 if (nxgep->niu_type != N2_NIU) { 1713 iommu_pagesize = dvma_pagesize(nxgep->dip); 1714 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1715 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1716 " default_block_size %d iommu_pagesize %d", 1717 nxgep->sys_page_sz, 1718 ddi_ptob(nxgep->dip, (ulong_t)1), 1719 nxgep->rx_default_block_size, 1720 iommu_pagesize)); 1721 1722 if (iommu_pagesize != 0) { 1723 if (nxgep->sys_page_sz == iommu_pagesize) { 1724 if (iommu_pagesize > 0x4000) 1725 nxgep->sys_page_sz = 0x4000; 1726 } else { 1727 if (nxgep->sys_page_sz > iommu_pagesize) 1728 nxgep->sys_page_sz = iommu_pagesize; 1729 } 1730 } 1731 } 1732 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 1733 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1734 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1735 "default_block_size %d page mask %d", 1736 nxgep->sys_page_sz, 1737 ddi_ptob(nxgep->dip, (ulong_t)1), 1738 nxgep->rx_default_block_size, 1739 nxgep->sys_page_mask)); 1740 1741 1742 switch (nxgep->sys_page_sz) { 1743 default: 1744 nxgep->sys_page_sz = 0x1000; 1745 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 1746 nxgep->rx_default_block_size = 0x1000; 1747 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 1748 break; 1749 case 0x1000: 1750 nxgep->rx_default_block_size = 0x1000; 1751 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 1752 break; 1753 case 0x2000: 1754 nxgep->rx_default_block_size = 0x2000; 1755 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 1756 break; 1757 case 0x4000: 1758 nxgep->rx_default_block_size = 0x4000; 1759 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 1760 break; 1761 case 0x8000: 1762 nxgep->rx_default_block_size = 0x8000; 1763 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 1764 break; 1765 } 1766 1767 #ifndef USE_RX_BIG_BUF 1768 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 1769 #else 1770 nxgep->rx_default_block_size = 0x2000; 1771 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 1772 #endif 1773 /* 1774 * Get the system DMA burst size. 1775 */ 1776 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 1777 DDI_DMA_DONTWAIT, 0, 1778 &nxgep->dmasparehandle); 1779 if (ddi_status != DDI_SUCCESS) { 1780 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1781 "ddi_dma_alloc_handle: failed " 1782 " status 0x%x", ddi_status)); 1783 goto nxge_get_soft_properties_exit; 1784 } 1785 1786 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 1787 (caddr_t)nxgep->dmasparehandle, 1788 sizeof (nxgep->dmasparehandle), 1789 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1790 DDI_DMA_DONTWAIT, 0, 1791 &cookie, &count); 1792 if (ddi_status != DDI_DMA_MAPPED) { 1793 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1794 "Binding spare handle to find system" 1795 " burstsize failed.")); 1796 ddi_status = DDI_FAILURE; 1797 goto nxge_get_soft_properties_fail1; 1798 } 1799 1800 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 1801 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 1802 1803 nxge_get_soft_properties_fail1: 1804 ddi_dma_free_handle(&nxgep->dmasparehandle); 1805 1806 nxge_get_soft_properties_exit: 1807 1808 if (ddi_status != DDI_SUCCESS) 1809 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1810 1811 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1812 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 1813 return (status); 1814 } 1815 1816 static nxge_status_t 1817 nxge_alloc_mem_pool(p_nxge_t nxgep) 1818 { 1819 nxge_status_t status = NXGE_OK; 1820 1821 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 1822 1823 status = nxge_alloc_rx_mem_pool(nxgep); 1824 if (status != NXGE_OK) { 1825 return (NXGE_ERROR); 1826 } 1827 1828 status = nxge_alloc_tx_mem_pool(nxgep); 1829 if (status != NXGE_OK) { 1830 nxge_free_rx_mem_pool(nxgep); 1831 return (NXGE_ERROR); 1832 } 1833 1834 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 1835 return (NXGE_OK); 1836 } 1837 1838 static void 1839 nxge_free_mem_pool(p_nxge_t nxgep) 1840 { 1841 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 1842 1843 nxge_free_rx_mem_pool(nxgep); 1844 nxge_free_tx_mem_pool(nxgep); 1845 1846 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 1847 } 1848 1849 static nxge_status_t 1850 nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 1851 { 1852 int i, j; 1853 uint32_t ndmas, st_rdc; 1854 p_nxge_dma_pt_cfg_t p_all_cfgp; 1855 p_nxge_hw_pt_cfg_t p_cfgp; 1856 p_nxge_dma_pool_t dma_poolp; 1857 p_nxge_dma_common_t *dma_buf_p; 1858 p_nxge_dma_pool_t dma_cntl_poolp; 1859 p_nxge_dma_common_t *dma_cntl_p; 1860 size_t rx_buf_alloc_size; 1861 size_t rx_cntl_alloc_size; 1862 uint32_t *num_chunks; /* per dma */ 1863 nxge_status_t status = NXGE_OK; 1864 1865 uint32_t nxge_port_rbr_size; 1866 uint32_t nxge_port_rbr_spare_size; 1867 uint32_t nxge_port_rcr_size; 1868 1869 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 1870 1871 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 1872 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 1873 st_rdc = p_cfgp->start_rdc; 1874 ndmas = p_cfgp->max_rdcs; 1875 1876 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1877 " nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas)); 1878 1879 /* 1880 * Allocate memory for each receive DMA channel. 1881 */ 1882 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 1883 KM_SLEEP); 1884 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 1885 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 1886 1887 dma_cntl_poolp = (p_nxge_dma_pool_t) 1888 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 1889 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 1890 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 1891 1892 num_chunks = (uint32_t *)KMEM_ZALLOC( 1893 sizeof (uint32_t) * ndmas, KM_SLEEP); 1894 1895 /* 1896 * Assume that each DMA channel will be configured with default 1897 * block size. 1898 * rbr block counts are mod of batch count (16). 1899 */ 1900 nxge_port_rbr_size = p_all_cfgp->rbr_size; 1901 nxge_port_rcr_size = p_all_cfgp->rcr_size; 1902 1903 if (!nxge_port_rbr_size) { 1904 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 1905 } 1906 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 1907 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 1908 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 1909 } 1910 1911 p_all_cfgp->rbr_size = nxge_port_rbr_size; 1912 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 1913 1914 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 1915 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 1916 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 1917 } 1918 1919 /* 1920 * N2/NIU has limitation on the descriptor sizes (contiguous 1921 * memory allocation on data buffers to 4M (contig_mem_alloc) 1922 * and little endian for control buffers (must use the ddi/dki mem alloc 1923 * function). 1924 */ 1925 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1926 if (nxgep->niu_type == N2_NIU) { 1927 nxge_port_rbr_spare_size = 0; 1928 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 1929 (!ISP2(nxge_port_rbr_size))) { 1930 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 1931 } 1932 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 1933 (!ISP2(nxge_port_rcr_size))) { 1934 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 1935 } 1936 } 1937 #endif 1938 1939 rx_buf_alloc_size = (nxgep->rx_default_block_size * 1940 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 1941 1942 /* 1943 * Addresses of receive block ring, receive completion ring and the 1944 * mailbox must be all cache-aligned (64 bytes). 1945 */ 1946 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 1947 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 1948 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 1949 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 1950 1951 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 1952 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 1953 "nxge_port_rcr_size = %d " 1954 "rx_cntl_alloc_size = %d", 1955 nxge_port_rbr_size, nxge_port_rbr_spare_size, 1956 nxge_port_rcr_size, 1957 rx_cntl_alloc_size)); 1958 1959 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1960 if (nxgep->niu_type == N2_NIU) { 1961 if (!ISP2(rx_buf_alloc_size)) { 1962 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1963 "==> nxge_alloc_rx_mem_pool: " 1964 " must be power of 2")); 1965 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1966 goto nxge_alloc_rx_mem_pool_exit; 1967 } 1968 1969 if (rx_buf_alloc_size > (1 << 22)) { 1970 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1971 "==> nxge_alloc_rx_mem_pool: " 1972 " limit size to 4M")); 1973 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1974 goto nxge_alloc_rx_mem_pool_exit; 1975 } 1976 1977 if (rx_cntl_alloc_size < 0x2000) { 1978 rx_cntl_alloc_size = 0x2000; 1979 } 1980 } 1981 #endif 1982 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 1983 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 1984 1985 /* 1986 * Allocate memory for receive buffers and descriptor rings. 1987 * Replace allocation functions with interface functions provided 1988 * by the partition manager when it is available. 1989 */ 1990 /* 1991 * Allocate memory for the receive buffer blocks. 1992 */ 1993 for (i = 0; i < ndmas; i++) { 1994 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1995 " nxge_alloc_rx_mem_pool to alloc mem: " 1996 " dma %d dma_buf_p %llx &dma_buf_p %llx", 1997 i, dma_buf_p[i], &dma_buf_p[i])); 1998 num_chunks[i] = 0; 1999 status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i], 2000 rx_buf_alloc_size, 2001 nxgep->rx_default_block_size, &num_chunks[i]); 2002 if (status != NXGE_OK) { 2003 break; 2004 } 2005 st_rdc++; 2006 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2007 " nxge_alloc_rx_mem_pool DONE alloc mem: " 2008 "dma %d dma_buf_p %llx &dma_buf_p %llx", i, 2009 dma_buf_p[i], &dma_buf_p[i])); 2010 } 2011 if (i < ndmas) { 2012 goto nxge_alloc_rx_mem_fail1; 2013 } 2014 /* 2015 * Allocate memory for descriptor rings and mailbox. 2016 */ 2017 st_rdc = p_cfgp->start_rdc; 2018 for (j = 0; j < ndmas; j++) { 2019 status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j], 2020 rx_cntl_alloc_size); 2021 if (status != NXGE_OK) { 2022 break; 2023 } 2024 st_rdc++; 2025 } 2026 if (j < ndmas) { 2027 goto nxge_alloc_rx_mem_fail2; 2028 } 2029 2030 dma_poolp->ndmas = ndmas; 2031 dma_poolp->num_chunks = num_chunks; 2032 dma_poolp->buf_allocated = B_TRUE; 2033 nxgep->rx_buf_pool_p = dma_poolp; 2034 dma_poolp->dma_buf_pool_p = dma_buf_p; 2035 2036 dma_cntl_poolp->ndmas = ndmas; 2037 dma_cntl_poolp->buf_allocated = B_TRUE; 2038 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2039 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2040 2041 goto nxge_alloc_rx_mem_pool_exit; 2042 2043 nxge_alloc_rx_mem_fail2: 2044 /* Free control buffers */ 2045 j--; 2046 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2047 "==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j)); 2048 for (; j >= 0; j--) { 2049 nxge_free_rx_cntl_dma(nxgep, 2050 (p_nxge_dma_common_t)dma_cntl_p[j]); 2051 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2052 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", 2053 j)); 2054 } 2055 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2056 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j)); 2057 2058 nxge_alloc_rx_mem_fail1: 2059 /* Free data buffers */ 2060 i--; 2061 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2062 "==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i)); 2063 for (; i >= 0; i--) { 2064 nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2065 num_chunks[i]); 2066 } 2067 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2068 "==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i)); 2069 2070 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2071 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2072 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2073 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2074 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2075 2076 nxge_alloc_rx_mem_pool_exit: 2077 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2078 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2079 2080 return (status); 2081 } 2082 2083 static void 2084 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2085 { 2086 uint32_t i, ndmas; 2087 p_nxge_dma_pool_t dma_poolp; 2088 p_nxge_dma_common_t *dma_buf_p; 2089 p_nxge_dma_pool_t dma_cntl_poolp; 2090 p_nxge_dma_common_t *dma_cntl_p; 2091 uint32_t *num_chunks; 2092 2093 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2094 2095 dma_poolp = nxgep->rx_buf_pool_p; 2096 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2097 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2098 "<== nxge_free_rx_mem_pool " 2099 "(null rx buf pool or buf not allocated")); 2100 return; 2101 } 2102 2103 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2104 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2105 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2106 "<== nxge_free_rx_mem_pool " 2107 "(null rx cntl buf pool or cntl buf not allocated")); 2108 return; 2109 } 2110 2111 dma_buf_p = dma_poolp->dma_buf_pool_p; 2112 num_chunks = dma_poolp->num_chunks; 2113 2114 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2115 ndmas = dma_cntl_poolp->ndmas; 2116 2117 for (i = 0; i < ndmas; i++) { 2118 nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2119 } 2120 2121 for (i = 0; i < ndmas; i++) { 2122 nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]); 2123 } 2124 2125 for (i = 0; i < ndmas; i++) { 2126 KMEM_FREE(dma_buf_p[i], 2127 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2128 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2129 } 2130 2131 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2132 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2133 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2134 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2135 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2136 2137 nxgep->rx_buf_pool_p = NULL; 2138 nxgep->rx_cntl_pool_p = NULL; 2139 2140 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2141 } 2142 2143 2144 static nxge_status_t 2145 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2146 p_nxge_dma_common_t *dmap, 2147 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2148 { 2149 p_nxge_dma_common_t rx_dmap; 2150 nxge_status_t status = NXGE_OK; 2151 size_t total_alloc_size; 2152 size_t allocated = 0; 2153 int i, size_index, array_size; 2154 2155 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2156 2157 rx_dmap = (p_nxge_dma_common_t) 2158 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2159 KM_SLEEP); 2160 2161 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2162 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2163 dma_channel, alloc_size, block_size, dmap)); 2164 2165 total_alloc_size = alloc_size; 2166 2167 #if defined(RX_USE_RECLAIM_POST) 2168 total_alloc_size = alloc_size + alloc_size/4; 2169 #endif 2170 2171 i = 0; 2172 size_index = 0; 2173 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2174 while ((alloc_sizes[size_index] < alloc_size) && 2175 (size_index < array_size)) 2176 size_index++; 2177 if (size_index >= array_size) { 2178 size_index = array_size - 1; 2179 } 2180 2181 while ((allocated < total_alloc_size) && 2182 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2183 rx_dmap[i].dma_chunk_index = i; 2184 rx_dmap[i].block_size = block_size; 2185 rx_dmap[i].alength = alloc_sizes[size_index]; 2186 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2187 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2188 rx_dmap[i].dma_channel = dma_channel; 2189 rx_dmap[i].contig_alloc_type = B_FALSE; 2190 2191 /* 2192 * N2/NIU: data buffers must be contiguous as the driver 2193 * needs to call Hypervisor api to set up 2194 * logical pages. 2195 */ 2196 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2197 rx_dmap[i].contig_alloc_type = B_TRUE; 2198 } 2199 2200 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2201 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2202 "i %d nblocks %d alength %d", 2203 dma_channel, i, &rx_dmap[i], block_size, 2204 i, rx_dmap[i].nblocks, 2205 rx_dmap[i].alength)); 2206 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2207 &nxge_rx_dma_attr, 2208 rx_dmap[i].alength, 2209 &nxge_dev_buf_dma_acc_attr, 2210 DDI_DMA_READ | DDI_DMA_STREAMING, 2211 (p_nxge_dma_common_t)(&rx_dmap[i])); 2212 if (status != NXGE_OK) { 2213 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2214 " nxge_alloc_rx_buf_dma: Alloc Failed ")); 2215 size_index--; 2216 } else { 2217 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2218 " alloc_rx_buf_dma allocated rdc %d " 2219 "chunk %d size %x dvma %x bufp %llx ", 2220 dma_channel, i, rx_dmap[i].alength, 2221 rx_dmap[i].ioaddr_pp, &rx_dmap[i])); 2222 i++; 2223 allocated += alloc_sizes[size_index]; 2224 } 2225 } 2226 2227 2228 if (allocated < total_alloc_size) { 2229 goto nxge_alloc_rx_mem_fail1; 2230 } 2231 2232 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2233 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2234 dma_channel, i)); 2235 *num_chunks = i; 2236 *dmap = rx_dmap; 2237 2238 goto nxge_alloc_rx_mem_exit; 2239 2240 nxge_alloc_rx_mem_fail1: 2241 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2242 2243 nxge_alloc_rx_mem_exit: 2244 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2245 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2246 2247 return (status); 2248 } 2249 2250 /*ARGSUSED*/ 2251 static void 2252 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2253 uint32_t num_chunks) 2254 { 2255 int i; 2256 2257 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2258 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2259 2260 for (i = 0; i < num_chunks; i++) { 2261 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2262 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2263 i, dmap)); 2264 nxge_dma_mem_free(dmap++); 2265 } 2266 2267 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2268 } 2269 2270 /*ARGSUSED*/ 2271 static nxge_status_t 2272 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2273 p_nxge_dma_common_t *dmap, size_t size) 2274 { 2275 p_nxge_dma_common_t rx_dmap; 2276 nxge_status_t status = NXGE_OK; 2277 2278 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2279 2280 rx_dmap = (p_nxge_dma_common_t) 2281 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2282 2283 rx_dmap->contig_alloc_type = B_FALSE; 2284 2285 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2286 &nxge_desc_dma_attr, 2287 size, 2288 &nxge_dev_desc_dma_acc_attr, 2289 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2290 rx_dmap); 2291 if (status != NXGE_OK) { 2292 goto nxge_alloc_rx_cntl_dma_fail1; 2293 } 2294 2295 *dmap = rx_dmap; 2296 goto nxge_alloc_rx_cntl_dma_exit; 2297 2298 nxge_alloc_rx_cntl_dma_fail1: 2299 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2300 2301 nxge_alloc_rx_cntl_dma_exit: 2302 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2303 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2304 2305 return (status); 2306 } 2307 2308 /*ARGSUSED*/ 2309 static void 2310 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2311 { 2312 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2313 2314 nxge_dma_mem_free(dmap); 2315 2316 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2317 } 2318 2319 static nxge_status_t 2320 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2321 { 2322 nxge_status_t status = NXGE_OK; 2323 int i, j; 2324 uint32_t ndmas, st_tdc; 2325 p_nxge_dma_pt_cfg_t p_all_cfgp; 2326 p_nxge_hw_pt_cfg_t p_cfgp; 2327 p_nxge_dma_pool_t dma_poolp; 2328 p_nxge_dma_common_t *dma_buf_p; 2329 p_nxge_dma_pool_t dma_cntl_poolp; 2330 p_nxge_dma_common_t *dma_cntl_p; 2331 size_t tx_buf_alloc_size; 2332 size_t tx_cntl_alloc_size; 2333 uint32_t *num_chunks; /* per dma */ 2334 uint32_t bcopy_thresh; 2335 2336 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2337 2338 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2339 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2340 st_tdc = p_cfgp->start_tdc; 2341 ndmas = p_cfgp->max_tdcs; 2342 2343 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: " 2344 "p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d", 2345 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs)); 2346 /* 2347 * Allocate memory for each transmit DMA channel. 2348 */ 2349 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2350 KM_SLEEP); 2351 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2352 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2353 2354 dma_cntl_poolp = (p_nxge_dma_pool_t) 2355 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2356 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2357 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2358 2359 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2360 /* 2361 * N2/NIU has limitation on the descriptor sizes (contiguous 2362 * memory allocation on data buffers to 4M (contig_mem_alloc) 2363 * and little endian for control buffers (must use the ddi/dki mem alloc 2364 * function). The transmit ring is limited to 8K (includes the 2365 * mailbox). 2366 */ 2367 if (nxgep->niu_type == N2_NIU) { 2368 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2369 (!ISP2(nxge_tx_ring_size))) { 2370 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2371 } 2372 } 2373 #endif 2374 2375 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2376 2377 /* 2378 * Assume that each DMA channel will be configured with default 2379 * transmit bufer size for copying transmit data. 2380 * (For packet payload over this limit, packets will not be 2381 * copied.) 2382 */ 2383 if (nxgep->niu_type == N2_NIU) { 2384 bcopy_thresh = TX_BCOPY_SIZE; 2385 } else { 2386 bcopy_thresh = nxge_bcopy_thresh; 2387 } 2388 tx_buf_alloc_size = (bcopy_thresh * nxge_tx_ring_size); 2389 2390 /* 2391 * Addresses of transmit descriptor ring and the 2392 * mailbox must be all cache-aligned (64 bytes). 2393 */ 2394 tx_cntl_alloc_size = nxge_tx_ring_size; 2395 tx_cntl_alloc_size *= (sizeof (tx_desc_t)); 2396 tx_cntl_alloc_size += sizeof (txdma_mailbox_t); 2397 2398 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2399 if (nxgep->niu_type == N2_NIU) { 2400 if (!ISP2(tx_buf_alloc_size)) { 2401 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2402 "==> nxge_alloc_tx_mem_pool: " 2403 " must be power of 2")); 2404 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2405 goto nxge_alloc_tx_mem_pool_exit; 2406 } 2407 2408 if (tx_buf_alloc_size > (1 << 22)) { 2409 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2410 "==> nxge_alloc_tx_mem_pool: " 2411 " limit size to 4M")); 2412 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2413 goto nxge_alloc_tx_mem_pool_exit; 2414 } 2415 2416 if (tx_cntl_alloc_size < 0x2000) { 2417 tx_cntl_alloc_size = 0x2000; 2418 } 2419 } 2420 #endif 2421 2422 num_chunks = (uint32_t *)KMEM_ZALLOC( 2423 sizeof (uint32_t) * ndmas, KM_SLEEP); 2424 2425 /* 2426 * Allocate memory for transmit buffers and descriptor rings. 2427 * Replace allocation functions with interface functions provided 2428 * by the partition manager when it is available. 2429 * 2430 * Allocate memory for the transmit buffer pool. 2431 */ 2432 for (i = 0; i < ndmas; i++) { 2433 num_chunks[i] = 0; 2434 status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i], 2435 tx_buf_alloc_size, 2436 bcopy_thresh, &num_chunks[i]); 2437 if (status != NXGE_OK) { 2438 break; 2439 } 2440 st_tdc++; 2441 } 2442 if (i < ndmas) { 2443 goto nxge_alloc_tx_mem_pool_fail1; 2444 } 2445 2446 st_tdc = p_cfgp->start_tdc; 2447 /* 2448 * Allocate memory for descriptor rings and mailbox. 2449 */ 2450 for (j = 0; j < ndmas; j++) { 2451 status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j], 2452 tx_cntl_alloc_size); 2453 if (status != NXGE_OK) { 2454 break; 2455 } 2456 st_tdc++; 2457 } 2458 if (j < ndmas) { 2459 goto nxge_alloc_tx_mem_pool_fail2; 2460 } 2461 2462 dma_poolp->ndmas = ndmas; 2463 dma_poolp->num_chunks = num_chunks; 2464 dma_poolp->buf_allocated = B_TRUE; 2465 dma_poolp->dma_buf_pool_p = dma_buf_p; 2466 nxgep->tx_buf_pool_p = dma_poolp; 2467 2468 dma_cntl_poolp->ndmas = ndmas; 2469 dma_cntl_poolp->buf_allocated = B_TRUE; 2470 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2471 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2472 2473 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2474 "==> nxge_alloc_tx_mem_pool: start_tdc %d " 2475 "ndmas %d poolp->ndmas %d", 2476 st_tdc, ndmas, dma_poolp->ndmas)); 2477 2478 goto nxge_alloc_tx_mem_pool_exit; 2479 2480 nxge_alloc_tx_mem_pool_fail2: 2481 /* Free control buffers */ 2482 j--; 2483 for (; j >= 0; j--) { 2484 nxge_free_tx_cntl_dma(nxgep, 2485 (p_nxge_dma_common_t)dma_cntl_p[j]); 2486 } 2487 2488 nxge_alloc_tx_mem_pool_fail1: 2489 /* Free data buffers */ 2490 i--; 2491 for (; i >= 0; i--) { 2492 nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2493 num_chunks[i]); 2494 } 2495 2496 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2497 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2498 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2499 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2500 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2501 2502 nxge_alloc_tx_mem_pool_exit: 2503 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2504 "<== nxge_alloc_tx_mem_pool:status 0x%08x", status)); 2505 2506 return (status); 2507 } 2508 2509 static nxge_status_t 2510 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2511 p_nxge_dma_common_t *dmap, size_t alloc_size, 2512 size_t block_size, uint32_t *num_chunks) 2513 { 2514 p_nxge_dma_common_t tx_dmap; 2515 nxge_status_t status = NXGE_OK; 2516 size_t total_alloc_size; 2517 size_t allocated = 0; 2518 int i, size_index, array_size; 2519 2520 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 2521 2522 tx_dmap = (p_nxge_dma_common_t) 2523 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2524 KM_SLEEP); 2525 2526 total_alloc_size = alloc_size; 2527 i = 0; 2528 size_index = 0; 2529 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2530 while ((alloc_sizes[size_index] < alloc_size) && 2531 (size_index < array_size)) 2532 size_index++; 2533 if (size_index >= array_size) { 2534 size_index = array_size - 1; 2535 } 2536 2537 while ((allocated < total_alloc_size) && 2538 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2539 2540 tx_dmap[i].dma_chunk_index = i; 2541 tx_dmap[i].block_size = block_size; 2542 tx_dmap[i].alength = alloc_sizes[size_index]; 2543 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2544 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2545 tx_dmap[i].dma_channel = dma_channel; 2546 tx_dmap[i].contig_alloc_type = B_FALSE; 2547 2548 /* 2549 * N2/NIU: data buffers must be contiguous as the driver 2550 * needs to call Hypervisor api to set up 2551 * logical pages. 2552 */ 2553 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2554 tx_dmap[i].contig_alloc_type = B_TRUE; 2555 } 2556 2557 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2558 &nxge_tx_dma_attr, 2559 tx_dmap[i].alength, 2560 &nxge_dev_buf_dma_acc_attr, 2561 DDI_DMA_WRITE | DDI_DMA_STREAMING, 2562 (p_nxge_dma_common_t)(&tx_dmap[i])); 2563 if (status != NXGE_OK) { 2564 size_index--; 2565 } else { 2566 i++; 2567 allocated += alloc_sizes[size_index]; 2568 } 2569 } 2570 2571 if (allocated < total_alloc_size) { 2572 goto nxge_alloc_tx_mem_fail1; 2573 } 2574 2575 *num_chunks = i; 2576 *dmap = tx_dmap; 2577 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2578 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2579 *dmap, i)); 2580 goto nxge_alloc_tx_mem_exit; 2581 2582 nxge_alloc_tx_mem_fail1: 2583 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2584 2585 nxge_alloc_tx_mem_exit: 2586 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2587 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 2588 2589 return (status); 2590 } 2591 2592 /*ARGSUSED*/ 2593 static void 2594 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2595 uint32_t num_chunks) 2596 { 2597 int i; 2598 2599 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 2600 2601 for (i = 0; i < num_chunks; i++) { 2602 nxge_dma_mem_free(dmap++); 2603 } 2604 2605 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 2606 } 2607 2608 /*ARGSUSED*/ 2609 static nxge_status_t 2610 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2611 p_nxge_dma_common_t *dmap, size_t size) 2612 { 2613 p_nxge_dma_common_t tx_dmap; 2614 nxge_status_t status = NXGE_OK; 2615 2616 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 2617 tx_dmap = (p_nxge_dma_common_t) 2618 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2619 2620 tx_dmap->contig_alloc_type = B_FALSE; 2621 2622 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2623 &nxge_desc_dma_attr, 2624 size, 2625 &nxge_dev_desc_dma_acc_attr, 2626 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2627 tx_dmap); 2628 if (status != NXGE_OK) { 2629 goto nxge_alloc_tx_cntl_dma_fail1; 2630 } 2631 2632 *dmap = tx_dmap; 2633 goto nxge_alloc_tx_cntl_dma_exit; 2634 2635 nxge_alloc_tx_cntl_dma_fail1: 2636 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 2637 2638 nxge_alloc_tx_cntl_dma_exit: 2639 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2640 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 2641 2642 return (status); 2643 } 2644 2645 /*ARGSUSED*/ 2646 static void 2647 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2648 { 2649 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 2650 2651 nxge_dma_mem_free(dmap); 2652 2653 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 2654 } 2655 2656 static void 2657 nxge_free_tx_mem_pool(p_nxge_t nxgep) 2658 { 2659 uint32_t i, ndmas; 2660 p_nxge_dma_pool_t dma_poolp; 2661 p_nxge_dma_common_t *dma_buf_p; 2662 p_nxge_dma_pool_t dma_cntl_poolp; 2663 p_nxge_dma_common_t *dma_cntl_p; 2664 uint32_t *num_chunks; 2665 2666 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool")); 2667 2668 dma_poolp = nxgep->tx_buf_pool_p; 2669 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2670 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2671 "<== nxge_free_tx_mem_pool " 2672 "(null rx buf pool or buf not allocated")); 2673 return; 2674 } 2675 2676 dma_cntl_poolp = nxgep->tx_cntl_pool_p; 2677 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2678 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2679 "<== nxge_free_tx_mem_pool " 2680 "(null tx cntl buf pool or cntl buf not allocated")); 2681 return; 2682 } 2683 2684 dma_buf_p = dma_poolp->dma_buf_pool_p; 2685 num_chunks = dma_poolp->num_chunks; 2686 2687 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2688 ndmas = dma_cntl_poolp->ndmas; 2689 2690 for (i = 0; i < ndmas; i++) { 2691 nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2692 } 2693 2694 for (i = 0; i < ndmas; i++) { 2695 nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]); 2696 } 2697 2698 for (i = 0; i < ndmas; i++) { 2699 KMEM_FREE(dma_buf_p[i], 2700 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2701 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2702 } 2703 2704 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2705 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2706 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2707 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2708 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2709 2710 nxgep->tx_buf_pool_p = NULL; 2711 nxgep->tx_cntl_pool_p = NULL; 2712 2713 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool")); 2714 } 2715 2716 /*ARGSUSED*/ 2717 static nxge_status_t 2718 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 2719 struct ddi_dma_attr *dma_attrp, 2720 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 2721 p_nxge_dma_common_t dma_p) 2722 { 2723 caddr_t kaddrp; 2724 int ddi_status = DDI_SUCCESS; 2725 boolean_t contig_alloc_type; 2726 2727 contig_alloc_type = dma_p->contig_alloc_type; 2728 2729 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 2730 /* 2731 * contig_alloc_type for contiguous memory only allowed 2732 * for N2/NIU. 2733 */ 2734 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2735 "nxge_dma_mem_alloc: alloc type not allows (%d)", 2736 dma_p->contig_alloc_type)); 2737 return (NXGE_ERROR | NXGE_DDI_FAILED); 2738 } 2739 2740 dma_p->dma_handle = NULL; 2741 dma_p->acc_handle = NULL; 2742 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 2743 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 2744 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 2745 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 2746 if (ddi_status != DDI_SUCCESS) { 2747 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2748 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 2749 return (NXGE_ERROR | NXGE_DDI_FAILED); 2750 } 2751 2752 switch (contig_alloc_type) { 2753 case B_FALSE: 2754 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, 2755 acc_attr_p, 2756 xfer_flags, 2757 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 2758 &dma_p->acc_handle); 2759 if (ddi_status != DDI_SUCCESS) { 2760 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2761 "nxge_dma_mem_alloc:ddi_dma_mem_alloc failed")); 2762 ddi_dma_free_handle(&dma_p->dma_handle); 2763 dma_p->dma_handle = NULL; 2764 return (NXGE_ERROR | NXGE_DDI_FAILED); 2765 } 2766 if (dma_p->alength < length) { 2767 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2768 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 2769 "< length.")); 2770 ddi_dma_mem_free(&dma_p->acc_handle); 2771 ddi_dma_free_handle(&dma_p->dma_handle); 2772 dma_p->acc_handle = NULL; 2773 dma_p->dma_handle = NULL; 2774 return (NXGE_ERROR); 2775 } 2776 2777 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2778 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2779 &dma_p->dma_cookie, &dma_p->ncookies); 2780 if (ddi_status != DDI_DMA_MAPPED) { 2781 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2782 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 2783 "(staus 0x%x ncookies %d.)", ddi_status, 2784 dma_p->ncookies)); 2785 if (dma_p->acc_handle) { 2786 ddi_dma_mem_free(&dma_p->acc_handle); 2787 dma_p->acc_handle = NULL; 2788 } 2789 ddi_dma_free_handle(&dma_p->dma_handle); 2790 dma_p->dma_handle = NULL; 2791 return (NXGE_ERROR | NXGE_DDI_FAILED); 2792 } 2793 2794 if (dma_p->ncookies != 1) { 2795 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2796 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 2797 "> 1 cookie" 2798 "(staus 0x%x ncookies %d.)", ddi_status, 2799 dma_p->ncookies)); 2800 if (dma_p->acc_handle) { 2801 ddi_dma_mem_free(&dma_p->acc_handle); 2802 dma_p->acc_handle = NULL; 2803 } 2804 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2805 ddi_dma_free_handle(&dma_p->dma_handle); 2806 dma_p->dma_handle = NULL; 2807 return (NXGE_ERROR); 2808 } 2809 break; 2810 2811 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2812 case B_TRUE: 2813 kaddrp = (caddr_t)contig_mem_alloc(length); 2814 if (kaddrp == NULL) { 2815 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2816 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 2817 ddi_dma_free_handle(&dma_p->dma_handle); 2818 return (NXGE_ERROR | NXGE_DDI_FAILED); 2819 } 2820 2821 dma_p->alength = length; 2822 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2823 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2824 &dma_p->dma_cookie, &dma_p->ncookies); 2825 if (ddi_status != DDI_DMA_MAPPED) { 2826 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2827 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 2828 "(status 0x%x ncookies %d.)", ddi_status, 2829 dma_p->ncookies)); 2830 2831 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2832 "==> nxge_dma_mem_alloc: (not mapped)" 2833 "length %lu (0x%x) " 2834 "free contig kaddrp $%p " 2835 "va_to_pa $%p", 2836 length, length, 2837 kaddrp, 2838 va_to_pa(kaddrp))); 2839 2840 2841 contig_mem_free((void *)kaddrp, length); 2842 ddi_dma_free_handle(&dma_p->dma_handle); 2843 2844 dma_p->dma_handle = NULL; 2845 dma_p->acc_handle = NULL; 2846 dma_p->alength = NULL; 2847 dma_p->kaddrp = NULL; 2848 2849 return (NXGE_ERROR | NXGE_DDI_FAILED); 2850 } 2851 2852 if (dma_p->ncookies != 1 || 2853 (dma_p->dma_cookie.dmac_laddress == NULL)) { 2854 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2855 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 2856 "cookie or " 2857 "dmac_laddress is NULL $%p size %d " 2858 " (status 0x%x ncookies %d.)", 2859 ddi_status, 2860 dma_p->dma_cookie.dmac_laddress, 2861 dma_p->dma_cookie.dmac_size, 2862 dma_p->ncookies)); 2863 2864 contig_mem_free((void *)kaddrp, length); 2865 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2866 ddi_dma_free_handle(&dma_p->dma_handle); 2867 2868 dma_p->alength = 0; 2869 dma_p->dma_handle = NULL; 2870 dma_p->acc_handle = NULL; 2871 dma_p->kaddrp = NULL; 2872 2873 return (NXGE_ERROR | NXGE_DDI_FAILED); 2874 } 2875 break; 2876 2877 #else 2878 case B_TRUE: 2879 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2880 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 2881 return (NXGE_ERROR | NXGE_DDI_FAILED); 2882 #endif 2883 } 2884 2885 dma_p->kaddrp = kaddrp; 2886 dma_p->last_kaddrp = (unsigned char *)kaddrp + 2887 dma_p->alength - RXBUF_64B_ALIGNED; 2888 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2889 dma_p->last_ioaddr_pp = 2890 (unsigned char *)dma_p->dma_cookie.dmac_laddress + 2891 dma_p->alength - RXBUF_64B_ALIGNED; 2892 2893 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 2894 2895 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2896 dma_p->orig_ioaddr_pp = 2897 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2898 dma_p->orig_alength = length; 2899 dma_p->orig_kaddrp = kaddrp; 2900 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 2901 #endif 2902 2903 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 2904 "dma buffer allocated: dma_p $%p " 2905 "return dmac_ladress from cookie $%p cookie dmac_size %d " 2906 "dma_p->ioaddr_p $%p " 2907 "dma_p->orig_ioaddr_p $%p " 2908 "orig_vatopa $%p " 2909 "alength %d (0x%x) " 2910 "kaddrp $%p " 2911 "length %d (0x%x)", 2912 dma_p, 2913 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 2914 dma_p->ioaddr_pp, 2915 dma_p->orig_ioaddr_pp, 2916 dma_p->orig_vatopa, 2917 dma_p->alength, dma_p->alength, 2918 kaddrp, 2919 length, length)); 2920 2921 return (NXGE_OK); 2922 } 2923 2924 static void 2925 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 2926 { 2927 if (dma_p->dma_handle != NULL) { 2928 if (dma_p->ncookies) { 2929 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2930 dma_p->ncookies = 0; 2931 } 2932 ddi_dma_free_handle(&dma_p->dma_handle); 2933 dma_p->dma_handle = NULL; 2934 } 2935 2936 if (dma_p->acc_handle != NULL) { 2937 ddi_dma_mem_free(&dma_p->acc_handle); 2938 dma_p->acc_handle = NULL; 2939 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 2940 } 2941 2942 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2943 if (dma_p->contig_alloc_type && 2944 dma_p->orig_kaddrp && dma_p->orig_alength) { 2945 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 2946 "kaddrp $%p (orig_kaddrp $%p)" 2947 "mem type %d ", 2948 "orig_alength %d " 2949 "alength 0x%x (%d)", 2950 dma_p->kaddrp, 2951 dma_p->orig_kaddrp, 2952 dma_p->contig_alloc_type, 2953 dma_p->orig_alength, 2954 dma_p->alength, dma_p->alength)); 2955 2956 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 2957 dma_p->orig_alength = NULL; 2958 dma_p->orig_kaddrp = NULL; 2959 dma_p->contig_alloc_type = B_FALSE; 2960 } 2961 #endif 2962 dma_p->kaddrp = NULL; 2963 dma_p->alength = NULL; 2964 } 2965 2966 /* 2967 * nxge_m_start() -- start transmitting and receiving. 2968 * 2969 * This function is called by the MAC layer when the first 2970 * stream is open to prepare the hardware ready for sending 2971 * and transmitting packets. 2972 */ 2973 static int 2974 nxge_m_start(void *arg) 2975 { 2976 p_nxge_t nxgep = (p_nxge_t)arg; 2977 2978 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 2979 2980 MUTEX_ENTER(nxgep->genlock); 2981 if (nxge_init(nxgep) != NXGE_OK) { 2982 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2983 "<== nxge_m_start: initialization failed")); 2984 MUTEX_EXIT(nxgep->genlock); 2985 return (EIO); 2986 } 2987 2988 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) 2989 goto nxge_m_start_exit; 2990 /* 2991 * Start timer to check the system error and tx hangs 2992 */ 2993 nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state, 2994 NXGE_CHECK_TIMER); 2995 2996 nxgep->link_notify = B_TRUE; 2997 2998 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 2999 3000 nxge_m_start_exit: 3001 MUTEX_EXIT(nxgep->genlock); 3002 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3003 return (0); 3004 } 3005 3006 /* 3007 * nxge_m_stop(): stop transmitting and receiving. 3008 */ 3009 static void 3010 nxge_m_stop(void *arg) 3011 { 3012 p_nxge_t nxgep = (p_nxge_t)arg; 3013 3014 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3015 3016 if (nxgep->nxge_timerid) { 3017 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3018 nxgep->nxge_timerid = 0; 3019 } 3020 3021 MUTEX_ENTER(nxgep->genlock); 3022 nxge_uninit(nxgep); 3023 3024 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3025 3026 MUTEX_EXIT(nxgep->genlock); 3027 3028 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3029 } 3030 3031 static int 3032 nxge_m_unicst(void *arg, const uint8_t *macaddr) 3033 { 3034 p_nxge_t nxgep = (p_nxge_t)arg; 3035 struct ether_addr addrp; 3036 3037 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst")); 3038 3039 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 3040 if (nxge_set_mac_addr(nxgep, &addrp)) { 3041 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3042 "<== nxge_m_unicst: set unitcast failed")); 3043 return (EINVAL); 3044 } 3045 3046 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst")); 3047 3048 return (0); 3049 } 3050 3051 static int 3052 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3053 { 3054 p_nxge_t nxgep = (p_nxge_t)arg; 3055 struct ether_addr addrp; 3056 3057 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3058 "==> nxge_m_multicst: add %d", add)); 3059 3060 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3061 if (add) { 3062 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3063 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3064 "<== nxge_m_multicst: add multicast failed")); 3065 return (EINVAL); 3066 } 3067 } else { 3068 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3069 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3070 "<== nxge_m_multicst: del multicast failed")); 3071 return (EINVAL); 3072 } 3073 } 3074 3075 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3076 3077 return (0); 3078 } 3079 3080 static int 3081 nxge_m_promisc(void *arg, boolean_t on) 3082 { 3083 p_nxge_t nxgep = (p_nxge_t)arg; 3084 3085 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3086 "==> nxge_m_promisc: on %d", on)); 3087 3088 if (nxge_set_promisc(nxgep, on)) { 3089 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3090 "<== nxge_m_promisc: set promisc failed")); 3091 return (EINVAL); 3092 } 3093 3094 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3095 "<== nxge_m_promisc: on %d", on)); 3096 3097 return (0); 3098 } 3099 3100 static void 3101 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3102 { 3103 p_nxge_t nxgep = (p_nxge_t)arg; 3104 struct iocblk *iocp; 3105 boolean_t need_privilege; 3106 int err; 3107 int cmd; 3108 3109 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3110 3111 iocp = (struct iocblk *)mp->b_rptr; 3112 iocp->ioc_error = 0; 3113 need_privilege = B_TRUE; 3114 cmd = iocp->ioc_cmd; 3115 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3116 switch (cmd) { 3117 default: 3118 miocnak(wq, mp, 0, EINVAL); 3119 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3120 return; 3121 3122 case LB_GET_INFO_SIZE: 3123 case LB_GET_INFO: 3124 case LB_GET_MODE: 3125 need_privilege = B_FALSE; 3126 break; 3127 case LB_SET_MODE: 3128 break; 3129 3130 case ND_GET: 3131 need_privilege = B_FALSE; 3132 break; 3133 case ND_SET: 3134 break; 3135 3136 case NXGE_GET_MII: 3137 case NXGE_PUT_MII: 3138 case NXGE_GET64: 3139 case NXGE_PUT64: 3140 case NXGE_GET_TX_RING_SZ: 3141 case NXGE_GET_TX_DESC: 3142 case NXGE_TX_SIDE_RESET: 3143 case NXGE_RX_SIDE_RESET: 3144 case NXGE_GLOBAL_RESET: 3145 case NXGE_RESET_MAC: 3146 case NXGE_TX_REGS_DUMP: 3147 case NXGE_RX_REGS_DUMP: 3148 case NXGE_INT_REGS_DUMP: 3149 case NXGE_VIR_INT_REGS_DUMP: 3150 case NXGE_PUT_TCAM: 3151 case NXGE_GET_TCAM: 3152 case NXGE_RTRACE: 3153 case NXGE_RDUMP: 3154 3155 need_privilege = B_FALSE; 3156 break; 3157 case NXGE_INJECT_ERR: 3158 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3159 nxge_err_inject(nxgep, wq, mp); 3160 break; 3161 } 3162 3163 if (need_privilege) { 3164 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3165 if (err != 0) { 3166 miocnak(wq, mp, 0, err); 3167 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3168 "<== nxge_m_ioctl: no priv")); 3169 return; 3170 } 3171 } 3172 3173 switch (cmd) { 3174 case ND_GET: 3175 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command")); 3176 case ND_SET: 3177 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command")); 3178 nxge_param_ioctl(nxgep, wq, mp, iocp); 3179 break; 3180 3181 case LB_GET_MODE: 3182 case LB_SET_MODE: 3183 case LB_GET_INFO_SIZE: 3184 case LB_GET_INFO: 3185 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3186 break; 3187 3188 case NXGE_GET_MII: 3189 case NXGE_PUT_MII: 3190 case NXGE_PUT_TCAM: 3191 case NXGE_GET_TCAM: 3192 case NXGE_GET64: 3193 case NXGE_PUT64: 3194 case NXGE_GET_TX_RING_SZ: 3195 case NXGE_GET_TX_DESC: 3196 case NXGE_TX_SIDE_RESET: 3197 case NXGE_RX_SIDE_RESET: 3198 case NXGE_GLOBAL_RESET: 3199 case NXGE_RESET_MAC: 3200 case NXGE_TX_REGS_DUMP: 3201 case NXGE_RX_REGS_DUMP: 3202 case NXGE_INT_REGS_DUMP: 3203 case NXGE_VIR_INT_REGS_DUMP: 3204 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3205 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3206 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3207 break; 3208 } 3209 3210 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3211 } 3212 3213 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3214 3215 static void 3216 nxge_m_resources(void *arg) 3217 { 3218 p_nxge_t nxgep = arg; 3219 mac_rx_fifo_t mrf; 3220 p_rx_rcr_rings_t rcr_rings; 3221 p_rx_rcr_ring_t *rcr_p; 3222 uint32_t i, ndmas; 3223 nxge_status_t status; 3224 3225 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 3226 3227 MUTEX_ENTER(nxgep->genlock); 3228 3229 /* 3230 * CR 6492541 Check to see if the drv_state has been initialized, 3231 * if not * call nxge_init(). 3232 */ 3233 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3234 status = nxge_init(nxgep); 3235 if (status != NXGE_OK) 3236 goto nxge_m_resources_exit; 3237 } 3238 3239 mrf.mrf_type = MAC_RX_FIFO; 3240 mrf.mrf_blank = nxge_rx_hw_blank; 3241 mrf.mrf_arg = (void *)nxgep; 3242 3243 mrf.mrf_normal_blank_time = 128; 3244 mrf.mrf_normal_pkt_count = 8; 3245 rcr_rings = nxgep->rx_rcr_rings; 3246 rcr_p = rcr_rings->rcr_rings; 3247 ndmas = rcr_rings->ndmas; 3248 3249 /* 3250 * Export our receive resources to the MAC layer. 3251 */ 3252 for (i = 0; i < ndmas; i++) { 3253 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle = 3254 mac_resource_add(nxgep->mach, 3255 (mac_resource_t *)&mrf); 3256 3257 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3258 "==> nxge_m_resources: vdma %d dma %d " 3259 "rcrptr 0x%016llx mac_handle 0x%016llx", 3260 i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc, 3261 rcr_p[i], 3262 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle)); 3263 } 3264 3265 nxge_m_resources_exit: 3266 MUTEX_EXIT(nxgep->genlock); 3267 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources")); 3268 } 3269 3270 static void 3271 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory) 3272 { 3273 p_nxge_mmac_stats_t mmac_stats; 3274 int i; 3275 nxge_mmac_t *mmac_info; 3276 3277 mmac_info = &nxgep->nxge_mmac_info; 3278 3279 mmac_stats = &nxgep->statsp->mmac_stats; 3280 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 3281 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 3282 3283 for (i = 0; i < ETHERADDRL; i++) { 3284 if (factory) { 3285 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3286 = mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i]; 3287 } else { 3288 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3289 = mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i]; 3290 } 3291 } 3292 } 3293 3294 /* 3295 * nxge_altmac_set() -- Set an alternate MAC address 3296 */ 3297 static int 3298 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot) 3299 { 3300 uint8_t addrn; 3301 uint8_t portn; 3302 npi_mac_addr_t altmac; 3303 hostinfo_t mac_rdc; 3304 p_nxge_class_pt_cfg_t clscfgp; 3305 3306 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 3307 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 3308 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 3309 3310 portn = nxgep->mac.portnum; 3311 addrn = (uint8_t)slot - 1; 3312 3313 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn, 3314 addrn, &altmac) != NPI_SUCCESS) 3315 return (EIO); 3316 3317 /* 3318 * Set the rdc table number for the host info entry 3319 * for this mac address slot. 3320 */ 3321 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 3322 mac_rdc.value = 0; 3323 mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl; 3324 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 3325 3326 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 3327 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 3328 return (EIO); 3329 } 3330 3331 /* 3332 * Enable comparison with the alternate MAC address. 3333 * While the first alternate addr is enabled by bit 1 of register 3334 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 3335 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 3336 * accordingly before calling npi_mac_altaddr_entry. 3337 */ 3338 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 3339 addrn = (uint8_t)slot - 1; 3340 else 3341 addrn = (uint8_t)slot; 3342 3343 if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn) 3344 != NPI_SUCCESS) 3345 return (EIO); 3346 3347 return (0); 3348 } 3349 3350 /* 3351 * nxeg_m_mmac_add() - find an unused address slot, set the address 3352 * value to the one specified, enable the port to start filtering on 3353 * the new MAC address. Returns 0 on success. 3354 */ 3355 static int 3356 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 3357 { 3358 p_nxge_t nxgep = arg; 3359 mac_addr_slot_t slot; 3360 nxge_mmac_t *mmac_info; 3361 int err; 3362 nxge_status_t status; 3363 3364 mutex_enter(nxgep->genlock); 3365 3366 /* 3367 * Make sure that nxge is initialized, if _start() has 3368 * not been called. 3369 */ 3370 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3371 status = nxge_init(nxgep); 3372 if (status != NXGE_OK) { 3373 mutex_exit(nxgep->genlock); 3374 return (ENXIO); 3375 } 3376 } 3377 3378 mmac_info = &nxgep->nxge_mmac_info; 3379 if (mmac_info->naddrfree == 0) { 3380 mutex_exit(nxgep->genlock); 3381 return (ENOSPC); 3382 } 3383 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3384 maddr->mma_addrlen)) { 3385 mutex_exit(nxgep->genlock); 3386 return (EINVAL); 3387 } 3388 /* 3389 * Search for the first available slot. Because naddrfree 3390 * is not zero, we are guaranteed to find one. 3391 * Slot 0 is for unique (primary) MAC. The first alternate 3392 * MAC slot is slot 1. 3393 * Each of the first two ports of Neptune has 16 alternate 3394 * MAC slots but only the first 7 (or 15) slots have assigned factory 3395 * MAC addresses. We first search among the slots without bundled 3396 * factory MACs. If we fail to find one in that range, then we 3397 * search the slots with bundled factory MACs. A factory MAC 3398 * will be wasted while the slot is used with a user MAC address. 3399 * But the slot could be used by factory MAC again after calling 3400 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 3401 */ 3402 if (mmac_info->num_factory_mmac < mmac_info->num_mmac) { 3403 for (slot = mmac_info->num_factory_mmac + 1; 3404 slot <= mmac_info->num_mmac; slot++) { 3405 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3406 break; 3407 } 3408 if (slot > mmac_info->num_mmac) { 3409 for (slot = 1; slot <= mmac_info->num_factory_mmac; 3410 slot++) { 3411 if (!(mmac_info->mac_pool[slot].flags 3412 & MMAC_SLOT_USED)) 3413 break; 3414 } 3415 } 3416 } else { 3417 for (slot = 1; slot <= mmac_info->num_mmac; slot++) { 3418 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3419 break; 3420 } 3421 } 3422 ASSERT(slot <= mmac_info->num_mmac); 3423 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) { 3424 mutex_exit(nxgep->genlock); 3425 return (err); 3426 } 3427 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 3428 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 3429 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 3430 mmac_info->naddrfree--; 3431 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3432 3433 maddr->mma_slot = slot; 3434 3435 mutex_exit(nxgep->genlock); 3436 return (0); 3437 } 3438 3439 /* 3440 * This function reserves an unused slot and programs the slot and the HW 3441 * with a factory mac address. 3442 */ 3443 static int 3444 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 3445 { 3446 p_nxge_t nxgep = arg; 3447 mac_addr_slot_t slot; 3448 nxge_mmac_t *mmac_info; 3449 int err; 3450 nxge_status_t status; 3451 3452 mutex_enter(nxgep->genlock); 3453 3454 /* 3455 * Make sure that nxge is initialized, if _start() has 3456 * not been called. 3457 */ 3458 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3459 status = nxge_init(nxgep); 3460 if (status != NXGE_OK) { 3461 mutex_exit(nxgep->genlock); 3462 return (ENXIO); 3463 } 3464 } 3465 3466 mmac_info = &nxgep->nxge_mmac_info; 3467 if (mmac_info->naddrfree == 0) { 3468 mutex_exit(nxgep->genlock); 3469 return (ENOSPC); 3470 } 3471 3472 slot = maddr->mma_slot; 3473 if (slot == -1) { /* -1: Take the first available slot */ 3474 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 3475 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3476 break; 3477 } 3478 if (slot > mmac_info->num_factory_mmac) { 3479 mutex_exit(nxgep->genlock); 3480 return (ENOSPC); 3481 } 3482 } 3483 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 3484 /* 3485 * Do not support factory MAC at a slot greater than 3486 * num_factory_mmac even when there are available factory 3487 * MAC addresses because the alternate MACs are bundled with 3488 * slot[1] through slot[num_factory_mmac] 3489 */ 3490 mutex_exit(nxgep->genlock); 3491 return (EINVAL); 3492 } 3493 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3494 mutex_exit(nxgep->genlock); 3495 return (EBUSY); 3496 } 3497 /* Verify the address to be reserved */ 3498 if (!mac_unicst_verify(nxgep->mach, 3499 mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 3500 mutex_exit(nxgep->genlock); 3501 return (EINVAL); 3502 } 3503 if (err = nxge_altmac_set(nxgep, 3504 mmac_info->factory_mac_pool[slot], slot)) { 3505 mutex_exit(nxgep->genlock); 3506 return (err); 3507 } 3508 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 3509 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3510 mmac_info->naddrfree--; 3511 3512 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 3513 mutex_exit(nxgep->genlock); 3514 3515 /* Pass info back to the caller */ 3516 maddr->mma_slot = slot; 3517 maddr->mma_addrlen = ETHERADDRL; 3518 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3519 3520 return (0); 3521 } 3522 3523 /* 3524 * Remove the specified mac address and update the HW not to filter 3525 * the mac address anymore. 3526 */ 3527 static int 3528 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 3529 { 3530 p_nxge_t nxgep = arg; 3531 nxge_mmac_t *mmac_info; 3532 uint8_t addrn; 3533 uint8_t portn; 3534 int err = 0; 3535 nxge_status_t status; 3536 3537 mutex_enter(nxgep->genlock); 3538 3539 /* 3540 * Make sure that nxge is initialized, if _start() has 3541 * not been called. 3542 */ 3543 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3544 status = nxge_init(nxgep); 3545 if (status != NXGE_OK) { 3546 mutex_exit(nxgep->genlock); 3547 return (ENXIO); 3548 } 3549 } 3550 3551 mmac_info = &nxgep->nxge_mmac_info; 3552 if (slot < 1 || slot > mmac_info->num_mmac) { 3553 mutex_exit(nxgep->genlock); 3554 return (EINVAL); 3555 } 3556 3557 portn = nxgep->mac.portnum; 3558 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 3559 addrn = (uint8_t)slot - 1; 3560 else 3561 addrn = (uint8_t)slot; 3562 3563 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3564 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 3565 == NPI_SUCCESS) { 3566 mmac_info->naddrfree++; 3567 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 3568 /* 3569 * Regardless if the MAC we just stopped filtering 3570 * is a user addr or a facory addr, we must set 3571 * the MMAC_VENDOR_ADDR flag if this slot has an 3572 * associated factory MAC to indicate that a factory 3573 * MAC is available. 3574 */ 3575 if (slot <= mmac_info->num_factory_mmac) { 3576 mmac_info->mac_pool[slot].flags 3577 |= MMAC_VENDOR_ADDR; 3578 } 3579 /* 3580 * Clear mac_pool[slot].addr so that kstat shows 0 3581 * alternate MAC address if the slot is not used. 3582 * (But nxge_m_mmac_get returns the factory MAC even 3583 * when the slot is not used!) 3584 */ 3585 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 3586 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3587 } else { 3588 err = EIO; 3589 } 3590 } else { 3591 err = EINVAL; 3592 } 3593 3594 mutex_exit(nxgep->genlock); 3595 return (err); 3596 } 3597 3598 3599 /* 3600 * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve(). 3601 */ 3602 static int 3603 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr) 3604 { 3605 p_nxge_t nxgep = arg; 3606 mac_addr_slot_t slot; 3607 nxge_mmac_t *mmac_info; 3608 int err = 0; 3609 nxge_status_t status; 3610 3611 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3612 maddr->mma_addrlen)) 3613 return (EINVAL); 3614 3615 slot = maddr->mma_slot; 3616 3617 mutex_enter(nxgep->genlock); 3618 3619 /* 3620 * Make sure that nxge is initialized, if _start() has 3621 * not been called. 3622 */ 3623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3624 status = nxge_init(nxgep); 3625 if (status != NXGE_OK) { 3626 mutex_exit(nxgep->genlock); 3627 return (ENXIO); 3628 } 3629 } 3630 3631 mmac_info = &nxgep->nxge_mmac_info; 3632 if (slot < 1 || slot > mmac_info->num_mmac) { 3633 mutex_exit(nxgep->genlock); 3634 return (EINVAL); 3635 } 3636 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3637 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) 3638 != 0) { 3639 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, 3640 ETHERADDRL); 3641 /* 3642 * Assume that the MAC passed down from the caller 3643 * is not a factory MAC address (The user should 3644 * call mmac_remove followed by mmac_reserve if 3645 * he wants to use the factory MAC for this slot). 3646 */ 3647 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 3648 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3649 } 3650 } else { 3651 err = EINVAL; 3652 } 3653 mutex_exit(nxgep->genlock); 3654 return (err); 3655 } 3656 3657 /* 3658 * nxge_m_mmac_get() - Get the MAC address and other information 3659 * related to the slot. mma_flags should be set to 0 in the call. 3660 * Note: although kstat shows MAC address as zero when a slot is 3661 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC 3662 * to the caller as long as the slot is not using a user MAC address. 3663 * The following table shows the rules, 3664 * 3665 * USED VENDOR mma_addr 3666 * ------------------------------------------------------------ 3667 * (1) Slot uses a user MAC: yes no user MAC 3668 * (2) Slot uses a factory MAC: yes yes factory MAC 3669 * (3) Slot is not used but is 3670 * factory MAC capable: no yes factory MAC 3671 * (4) Slot is not used and is 3672 * not factory MAC capable: no no 0 3673 * ------------------------------------------------------------ 3674 */ 3675 static int 3676 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr) 3677 { 3678 nxge_t *nxgep = arg; 3679 mac_addr_slot_t slot; 3680 nxge_mmac_t *mmac_info; 3681 nxge_status_t status; 3682 3683 slot = maddr->mma_slot; 3684 3685 mutex_enter(nxgep->genlock); 3686 3687 /* 3688 * Make sure that nxge is initialized, if _start() has 3689 * not been called. 3690 */ 3691 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3692 status = nxge_init(nxgep); 3693 if (status != NXGE_OK) { 3694 mutex_exit(nxgep->genlock); 3695 return (ENXIO); 3696 } 3697 } 3698 3699 mmac_info = &nxgep->nxge_mmac_info; 3700 3701 if (slot < 1 || slot > mmac_info->num_mmac) { 3702 mutex_exit(nxgep->genlock); 3703 return (EINVAL); 3704 } 3705 maddr->mma_flags = 0; 3706 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) 3707 maddr->mma_flags |= MMAC_SLOT_USED; 3708 3709 if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) { 3710 maddr->mma_flags |= MMAC_VENDOR_ADDR; 3711 bcopy(mmac_info->factory_mac_pool[slot], 3712 maddr->mma_addr, ETHERADDRL); 3713 maddr->mma_addrlen = ETHERADDRL; 3714 } else { 3715 if (maddr->mma_flags & MMAC_SLOT_USED) { 3716 bcopy(mmac_info->mac_pool[slot].addr, 3717 maddr->mma_addr, ETHERADDRL); 3718 maddr->mma_addrlen = ETHERADDRL; 3719 } else { 3720 bzero(maddr->mma_addr, ETHERADDRL); 3721 maddr->mma_addrlen = 0; 3722 } 3723 } 3724 mutex_exit(nxgep->genlock); 3725 return (0); 3726 } 3727 3728 3729 static boolean_t 3730 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3731 { 3732 nxge_t *nxgep = arg; 3733 uint32_t *txflags = cap_data; 3734 multiaddress_capab_t *mmacp = cap_data; 3735 3736 switch (cap) { 3737 case MAC_CAPAB_HCKSUM: 3738 *txflags = HCKSUM_INET_PARTIAL; 3739 break; 3740 case MAC_CAPAB_POLL: 3741 /* 3742 * There's nothing for us to fill in, simply returning 3743 * B_TRUE stating that we support polling is sufficient. 3744 */ 3745 break; 3746 3747 case MAC_CAPAB_MULTIADDRESS: 3748 mutex_enter(nxgep->genlock); 3749 3750 mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac; 3751 mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree; 3752 mmacp->maddr_flag = 0; /* 0 is requried by PSARC2006/265 */ 3753 /* 3754 * maddr_handle is driver's private data, passed back to 3755 * entry point functions as arg. 3756 */ 3757 mmacp->maddr_handle = nxgep; 3758 mmacp->maddr_add = nxge_m_mmac_add; 3759 mmacp->maddr_remove = nxge_m_mmac_remove; 3760 mmacp->maddr_modify = nxge_m_mmac_modify; 3761 mmacp->maddr_get = nxge_m_mmac_get; 3762 mmacp->maddr_reserve = nxge_m_mmac_reserve; 3763 3764 mutex_exit(nxgep->genlock); 3765 break; 3766 default: 3767 return (B_FALSE); 3768 } 3769 return (B_TRUE); 3770 } 3771 3772 /* 3773 * Module loading and removing entry points. 3774 */ 3775 3776 static struct cb_ops nxge_cb_ops = { 3777 nodev, /* cb_open */ 3778 nodev, /* cb_close */ 3779 nodev, /* cb_strategy */ 3780 nodev, /* cb_print */ 3781 nodev, /* cb_dump */ 3782 nodev, /* cb_read */ 3783 nodev, /* cb_write */ 3784 nodev, /* cb_ioctl */ 3785 nodev, /* cb_devmap */ 3786 nodev, /* cb_mmap */ 3787 nodev, /* cb_segmap */ 3788 nochpoll, /* cb_chpoll */ 3789 ddi_prop_op, /* cb_prop_op */ 3790 NULL, 3791 D_MP, /* cb_flag */ 3792 CB_REV, /* rev */ 3793 nodev, /* int (*cb_aread)() */ 3794 nodev /* int (*cb_awrite)() */ 3795 }; 3796 3797 static struct dev_ops nxge_dev_ops = { 3798 DEVO_REV, /* devo_rev */ 3799 0, /* devo_refcnt */ 3800 nulldev, 3801 nulldev, /* devo_identify */ 3802 nulldev, /* devo_probe */ 3803 nxge_attach, /* devo_attach */ 3804 nxge_detach, /* devo_detach */ 3805 nodev, /* devo_reset */ 3806 &nxge_cb_ops, /* devo_cb_ops */ 3807 (struct bus_ops *)NULL, /* devo_bus_ops */ 3808 ddi_power /* devo_power */ 3809 }; 3810 3811 extern struct mod_ops mod_driverops; 3812 3813 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 3814 3815 /* 3816 * Module linkage information for the kernel. 3817 */ 3818 static struct modldrv nxge_modldrv = { 3819 &mod_driverops, 3820 NXGE_DESC_VER, 3821 &nxge_dev_ops 3822 }; 3823 3824 static struct modlinkage modlinkage = { 3825 MODREV_1, (void *) &nxge_modldrv, NULL 3826 }; 3827 3828 int 3829 _init(void) 3830 { 3831 int status; 3832 3833 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 3834 mac_init_ops(&nxge_dev_ops, "nxge"); 3835 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 3836 if (status != 0) { 3837 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 3838 "failed to init device soft state")); 3839 goto _init_exit; 3840 } 3841 3842 status = mod_install(&modlinkage); 3843 if (status != 0) { 3844 ddi_soft_state_fini(&nxge_list); 3845 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 3846 goto _init_exit; 3847 } 3848 3849 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 3850 3851 _init_exit: 3852 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 3853 3854 return (status); 3855 } 3856 3857 int 3858 _fini(void) 3859 { 3860 int status; 3861 3862 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 3863 3864 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 3865 3866 if (nxge_mblks_pending) 3867 return (EBUSY); 3868 3869 status = mod_remove(&modlinkage); 3870 if (status != DDI_SUCCESS) { 3871 NXGE_DEBUG_MSG((NULL, MOD_CTL, 3872 "Module removal failed 0x%08x", 3873 status)); 3874 goto _fini_exit; 3875 } 3876 3877 mac_fini_ops(&nxge_dev_ops); 3878 3879 ddi_soft_state_fini(&nxge_list); 3880 3881 MUTEX_DESTROY(&nxge_common_lock); 3882 _fini_exit: 3883 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 3884 3885 return (status); 3886 } 3887 3888 int 3889 _info(struct modinfo *modinfop) 3890 { 3891 int status; 3892 3893 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 3894 status = mod_info(&modlinkage, modinfop); 3895 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 3896 3897 return (status); 3898 } 3899 3900 /*ARGSUSED*/ 3901 static nxge_status_t 3902 nxge_add_intrs(p_nxge_t nxgep) 3903 { 3904 3905 int intr_types; 3906 int type = 0; 3907 int ddi_status = DDI_SUCCESS; 3908 nxge_status_t status = NXGE_OK; 3909 3910 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 3911 3912 nxgep->nxge_intr_type.intr_registered = B_FALSE; 3913 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 3914 nxgep->nxge_intr_type.msi_intx_cnt = 0; 3915 nxgep->nxge_intr_type.intr_added = 0; 3916 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 3917 nxgep->nxge_intr_type.intr_type = 0; 3918 3919 if (nxgep->niu_type == N2_NIU) { 3920 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 3921 } else if (nxge_msi_enable) { 3922 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 3923 } 3924 3925 /* Get the supported interrupt types */ 3926 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 3927 != DDI_SUCCESS) { 3928 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 3929 "ddi_intr_get_supported_types failed: status 0x%08x", 3930 ddi_status)); 3931 return (NXGE_ERROR | NXGE_DDI_FAILED); 3932 } 3933 nxgep->nxge_intr_type.intr_types = intr_types; 3934 3935 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3936 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3937 3938 /* 3939 * Solaris MSIX is not supported yet. use MSI for now. 3940 * nxge_msi_enable (1): 3941 * 1 - MSI 2 - MSI-X others - FIXED 3942 */ 3943 switch (nxge_msi_enable) { 3944 default: 3945 type = DDI_INTR_TYPE_FIXED; 3946 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3947 "use fixed (intx emulation) type %08x", 3948 type)); 3949 break; 3950 3951 case 2: 3952 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3953 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3954 if (intr_types & DDI_INTR_TYPE_MSIX) { 3955 type = DDI_INTR_TYPE_MSIX; 3956 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3957 "ddi_intr_get_supported_types: MSIX 0x%08x", 3958 type)); 3959 } else if (intr_types & DDI_INTR_TYPE_MSI) { 3960 type = DDI_INTR_TYPE_MSI; 3961 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3962 "ddi_intr_get_supported_types: MSI 0x%08x", 3963 type)); 3964 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3965 type = DDI_INTR_TYPE_FIXED; 3966 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3967 "ddi_intr_get_supported_types: MSXED0x%08x", 3968 type)); 3969 } 3970 break; 3971 3972 case 1: 3973 if (intr_types & DDI_INTR_TYPE_MSI) { 3974 type = DDI_INTR_TYPE_MSI; 3975 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3976 "ddi_intr_get_supported_types: MSI 0x%08x", 3977 type)); 3978 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 3979 type = DDI_INTR_TYPE_MSIX; 3980 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3981 "ddi_intr_get_supported_types: MSIX 0x%08x", 3982 type)); 3983 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3984 type = DDI_INTR_TYPE_FIXED; 3985 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3986 "ddi_intr_get_supported_types: MSXED0x%08x", 3987 type)); 3988 } 3989 } 3990 3991 nxgep->nxge_intr_type.intr_type = type; 3992 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 3993 type == DDI_INTR_TYPE_FIXED) && 3994 nxgep->nxge_intr_type.niu_msi_enable) { 3995 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 3996 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3997 " nxge_add_intrs: " 3998 " nxge_add_intrs_adv failed: status 0x%08x", 3999 status)); 4000 return (status); 4001 } else { 4002 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 4003 "interrupts registered : type %d", type)); 4004 nxgep->nxge_intr_type.intr_registered = B_TRUE; 4005 4006 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 4007 "\nAdded advanced nxge add_intr_adv " 4008 "intr type 0x%x\n", type)); 4009 4010 return (status); 4011 } 4012 } 4013 4014 if (!nxgep->nxge_intr_type.intr_registered) { 4015 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 4016 "failed to register interrupts")); 4017 return (NXGE_ERROR | NXGE_DDI_FAILED); 4018 } 4019 4020 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 4021 return (status); 4022 } 4023 4024 /*ARGSUSED*/ 4025 static nxge_status_t 4026 nxge_add_soft_intrs(p_nxge_t nxgep) 4027 { 4028 4029 int ddi_status = DDI_SUCCESS; 4030 nxge_status_t status = NXGE_OK; 4031 4032 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs")); 4033 4034 nxgep->resched_id = NULL; 4035 nxgep->resched_running = B_FALSE; 4036 ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW, 4037 &nxgep->resched_id, 4038 NULL, NULL, nxge_reschedule, (caddr_t)nxgep); 4039 if (ddi_status != DDI_SUCCESS) { 4040 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: " 4041 "ddi_add_softintrs failed: status 0x%08x", 4042 ddi_status)); 4043 return (NXGE_ERROR | NXGE_DDI_FAILED); 4044 } 4045 4046 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_ddi_add_soft_intrs")); 4047 4048 return (status); 4049 } 4050 4051 static nxge_status_t 4052 nxge_add_intrs_adv(p_nxge_t nxgep) 4053 { 4054 int intr_type; 4055 p_nxge_intr_t intrp; 4056 4057 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 4058 4059 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4060 intr_type = intrp->intr_type; 4061 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 4062 intr_type)); 4063 4064 switch (intr_type) { 4065 case DDI_INTR_TYPE_MSI: /* 0x2 */ 4066 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 4067 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 4068 4069 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 4070 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 4071 4072 default: 4073 return (NXGE_ERROR); 4074 } 4075 } 4076 4077 4078 /*ARGSUSED*/ 4079 static nxge_status_t 4080 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 4081 { 4082 dev_info_t *dip = nxgep->dip; 4083 p_nxge_ldg_t ldgp; 4084 p_nxge_intr_t intrp; 4085 uint_t *inthandler; 4086 void *arg1, *arg2; 4087 int behavior; 4088 int nintrs, navail; 4089 int nactual, nrequired; 4090 int inum = 0; 4091 int x, y; 4092 int ddi_status = DDI_SUCCESS; 4093 nxge_status_t status = NXGE_OK; 4094 4095 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 4096 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4097 intrp->start_inum = 0; 4098 4099 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 4100 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 4101 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4102 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 4103 "nintrs: %d", ddi_status, nintrs)); 4104 return (NXGE_ERROR | NXGE_DDI_FAILED); 4105 } 4106 4107 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 4108 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 4109 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4110 "ddi_intr_get_navail() failed, status: 0x%x%, " 4111 "nintrs: %d", ddi_status, navail)); 4112 return (NXGE_ERROR | NXGE_DDI_FAILED); 4113 } 4114 4115 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4116 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 4117 nintrs, navail)); 4118 4119 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 4120 /* MSI must be power of 2 */ 4121 if ((navail & 16) == 16) { 4122 navail = 16; 4123 } else if ((navail & 8) == 8) { 4124 navail = 8; 4125 } else if ((navail & 4) == 4) { 4126 navail = 4; 4127 } else if ((navail & 2) == 2) { 4128 navail = 2; 4129 } else { 4130 navail = 1; 4131 } 4132 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4133 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 4134 "navail %d", nintrs, navail)); 4135 } 4136 4137 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 4138 DDI_INTR_ALLOC_NORMAL); 4139 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 4140 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 4141 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 4142 navail, &nactual, behavior); 4143 if (ddi_status != DDI_SUCCESS || nactual == 0) { 4144 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4145 " ddi_intr_alloc() failed: %d", 4146 ddi_status)); 4147 kmem_free(intrp->htable, intrp->intr_size); 4148 return (NXGE_ERROR | NXGE_DDI_FAILED); 4149 } 4150 4151 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 4152 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 4153 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4154 " ddi_intr_get_pri() failed: %d", 4155 ddi_status)); 4156 /* Free already allocated interrupts */ 4157 for (y = 0; y < nactual; y++) { 4158 (void) ddi_intr_free(intrp->htable[y]); 4159 } 4160 4161 kmem_free(intrp->htable, intrp->intr_size); 4162 return (NXGE_ERROR | NXGE_DDI_FAILED); 4163 } 4164 4165 nrequired = 0; 4166 switch (nxgep->niu_type) { 4167 default: 4168 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 4169 break; 4170 4171 case N2_NIU: 4172 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 4173 break; 4174 } 4175 4176 if (status != NXGE_OK) { 4177 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4178 "nxge_add_intrs_adv_typ:nxge_ldgv_init " 4179 "failed: 0x%x", status)); 4180 /* Free already allocated interrupts */ 4181 for (y = 0; y < nactual; y++) { 4182 (void) ddi_intr_free(intrp->htable[y]); 4183 } 4184 4185 kmem_free(intrp->htable, intrp->intr_size); 4186 return (status); 4187 } 4188 4189 ldgp = nxgep->ldgvp->ldgp; 4190 for (x = 0; x < nrequired; x++, ldgp++) { 4191 ldgp->vector = (uint8_t)x; 4192 ldgp->intdata = SID_DATA(ldgp->func, x); 4193 arg1 = ldgp->ldvp; 4194 arg2 = nxgep; 4195 if (ldgp->nldvs == 1) { 4196 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 4197 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4198 "nxge_add_intrs_adv_type: " 4199 "arg1 0x%x arg2 0x%x: " 4200 "1-1 int handler (entry %d intdata 0x%x)\n", 4201 arg1, arg2, 4202 x, ldgp->intdata)); 4203 } else if (ldgp->nldvs > 1) { 4204 inthandler = (uint_t *)ldgp->sys_intr_handler; 4205 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4206 "nxge_add_intrs_adv_type: " 4207 "arg1 0x%x arg2 0x%x: " 4208 "nldevs %d int handler " 4209 "(entry %d intdata 0x%x)\n", 4210 arg1, arg2, 4211 ldgp->nldvs, x, ldgp->intdata)); 4212 } 4213 4214 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4215 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 4216 "htable 0x%llx", x, intrp->htable[x])); 4217 4218 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 4219 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 4220 != DDI_SUCCESS) { 4221 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4222 "==> nxge_add_intrs_adv_type: failed #%d " 4223 "status 0x%x", x, ddi_status)); 4224 for (y = 0; y < intrp->intr_added; y++) { 4225 (void) ddi_intr_remove_handler( 4226 intrp->htable[y]); 4227 } 4228 /* Free already allocated intr */ 4229 for (y = 0; y < nactual; y++) { 4230 (void) ddi_intr_free(intrp->htable[y]); 4231 } 4232 kmem_free(intrp->htable, intrp->intr_size); 4233 4234 (void) nxge_ldgv_uninit(nxgep); 4235 4236 return (NXGE_ERROR | NXGE_DDI_FAILED); 4237 } 4238 intrp->intr_added++; 4239 } 4240 4241 intrp->msi_intx_cnt = nactual; 4242 4243 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 4244 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 4245 navail, nactual, 4246 intrp->msi_intx_cnt, 4247 intrp->intr_added)); 4248 4249 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 4250 4251 (void) nxge_intr_ldgv_init(nxgep); 4252 4253 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type")); 4254 4255 return (status); 4256 } 4257 4258 /*ARGSUSED*/ 4259 static nxge_status_t 4260 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type) 4261 { 4262 dev_info_t *dip = nxgep->dip; 4263 p_nxge_ldg_t ldgp; 4264 p_nxge_intr_t intrp; 4265 uint_t *inthandler; 4266 void *arg1, *arg2; 4267 int behavior; 4268 int nintrs, navail; 4269 int nactual, nrequired; 4270 int inum = 0; 4271 int x, y; 4272 int ddi_status = DDI_SUCCESS; 4273 nxge_status_t status = NXGE_OK; 4274 4275 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix")); 4276 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4277 intrp->start_inum = 0; 4278 4279 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 4280 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 4281 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4282 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 4283 "nintrs: %d", status, nintrs)); 4284 return (NXGE_ERROR | NXGE_DDI_FAILED); 4285 } 4286 4287 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 4288 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 4289 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4290 "ddi_intr_get_navail() failed, status: 0x%x%, " 4291 "nintrs: %d", ddi_status, navail)); 4292 return (NXGE_ERROR | NXGE_DDI_FAILED); 4293 } 4294 4295 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4296 "ddi_intr_get_navail() returned: nintrs %d, naavail %d", 4297 nintrs, navail)); 4298 4299 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 4300 DDI_INTR_ALLOC_NORMAL); 4301 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 4302 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 4303 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 4304 navail, &nactual, behavior); 4305 if (ddi_status != DDI_SUCCESS || nactual == 0) { 4306 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4307 " ddi_intr_alloc() failed: %d", 4308 ddi_status)); 4309 kmem_free(intrp->htable, intrp->intr_size); 4310 return (NXGE_ERROR | NXGE_DDI_FAILED); 4311 } 4312 4313 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 4314 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 4315 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4316 " ddi_intr_get_pri() failed: %d", 4317 ddi_status)); 4318 /* Free already allocated interrupts */ 4319 for (y = 0; y < nactual; y++) { 4320 (void) ddi_intr_free(intrp->htable[y]); 4321 } 4322 4323 kmem_free(intrp->htable, intrp->intr_size); 4324 return (NXGE_ERROR | NXGE_DDI_FAILED); 4325 } 4326 4327 nrequired = 0; 4328 switch (nxgep->niu_type) { 4329 default: 4330 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 4331 break; 4332 4333 case N2_NIU: 4334 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 4335 break; 4336 } 4337 4338 if (status != NXGE_OK) { 4339 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4340 "nxge_add_intrs_adv_type_fix:nxge_ldgv_init " 4341 "failed: 0x%x", status)); 4342 /* Free already allocated interrupts */ 4343 for (y = 0; y < nactual; y++) { 4344 (void) ddi_intr_free(intrp->htable[y]); 4345 } 4346 4347 kmem_free(intrp->htable, intrp->intr_size); 4348 return (status); 4349 } 4350 4351 ldgp = nxgep->ldgvp->ldgp; 4352 for (x = 0; x < nrequired; x++, ldgp++) { 4353 ldgp->vector = (uint8_t)x; 4354 if (nxgep->niu_type != N2_NIU) { 4355 ldgp->intdata = SID_DATA(ldgp->func, x); 4356 } 4357 4358 arg1 = ldgp->ldvp; 4359 arg2 = nxgep; 4360 if (ldgp->nldvs == 1) { 4361 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 4362 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4363 "nxge_add_intrs_adv_type_fix: " 4364 "1-1 int handler(%d) ldg %d ldv %d " 4365 "arg1 $%p arg2 $%p\n", 4366 x, ldgp->ldg, ldgp->ldvp->ldv, 4367 arg1, arg2)); 4368 } else if (ldgp->nldvs > 1) { 4369 inthandler = (uint_t *)ldgp->sys_intr_handler; 4370 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4371 "nxge_add_intrs_adv_type_fix: " 4372 "shared ldv %d int handler(%d) ldv %d ldg %d" 4373 "arg1 0x%016llx arg2 0x%016llx\n", 4374 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 4375 arg1, arg2)); 4376 } 4377 4378 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 4379 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 4380 != DDI_SUCCESS) { 4381 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4382 "==> nxge_add_intrs_adv_type_fix: failed #%d " 4383 "status 0x%x", x, ddi_status)); 4384 for (y = 0; y < intrp->intr_added; y++) { 4385 (void) ddi_intr_remove_handler( 4386 intrp->htable[y]); 4387 } 4388 for (y = 0; y < nactual; y++) { 4389 (void) ddi_intr_free(intrp->htable[y]); 4390 } 4391 /* Free already allocated intr */ 4392 kmem_free(intrp->htable, intrp->intr_size); 4393 4394 (void) nxge_ldgv_uninit(nxgep); 4395 4396 return (NXGE_ERROR | NXGE_DDI_FAILED); 4397 } 4398 intrp->intr_added++; 4399 } 4400 4401 intrp->msi_intx_cnt = nactual; 4402 4403 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 4404 4405 status = nxge_intr_ldgv_init(nxgep); 4406 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 4407 4408 return (status); 4409 } 4410 4411 static void 4412 nxge_remove_intrs(p_nxge_t nxgep) 4413 { 4414 int i, inum; 4415 p_nxge_intr_t intrp; 4416 4417 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 4418 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4419 if (!intrp->intr_registered) { 4420 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4421 "<== nxge_remove_intrs: interrupts not registered")); 4422 return; 4423 } 4424 4425 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 4426 4427 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4428 (void) ddi_intr_block_disable(intrp->htable, 4429 intrp->intr_added); 4430 } else { 4431 for (i = 0; i < intrp->intr_added; i++) { 4432 (void) ddi_intr_disable(intrp->htable[i]); 4433 } 4434 } 4435 4436 for (inum = 0; inum < intrp->intr_added; inum++) { 4437 if (intrp->htable[inum]) { 4438 (void) ddi_intr_remove_handler(intrp->htable[inum]); 4439 } 4440 } 4441 4442 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 4443 if (intrp->htable[inum]) { 4444 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 4445 "nxge_remove_intrs: ddi_intr_free inum %d " 4446 "msi_intx_cnt %d intr_added %d", 4447 inum, 4448 intrp->msi_intx_cnt, 4449 intrp->intr_added)); 4450 4451 (void) ddi_intr_free(intrp->htable[inum]); 4452 } 4453 } 4454 4455 kmem_free(intrp->htable, intrp->intr_size); 4456 intrp->intr_registered = B_FALSE; 4457 intrp->intr_enabled = B_FALSE; 4458 intrp->msi_intx_cnt = 0; 4459 intrp->intr_added = 0; 4460 4461 (void) nxge_ldgv_uninit(nxgep); 4462 4463 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 4464 } 4465 4466 /*ARGSUSED*/ 4467 static void 4468 nxge_remove_soft_intrs(p_nxge_t nxgep) 4469 { 4470 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs")); 4471 if (nxgep->resched_id) { 4472 ddi_remove_softintr(nxgep->resched_id); 4473 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4474 "==> nxge_remove_soft_intrs: removed")); 4475 nxgep->resched_id = NULL; 4476 } 4477 4478 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs")); 4479 } 4480 4481 /*ARGSUSED*/ 4482 static void 4483 nxge_intrs_enable(p_nxge_t nxgep) 4484 { 4485 p_nxge_intr_t intrp; 4486 int i; 4487 int status; 4488 4489 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 4490 4491 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4492 4493 if (!intrp->intr_registered) { 4494 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 4495 "interrupts are not registered")); 4496 return; 4497 } 4498 4499 if (intrp->intr_enabled) { 4500 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4501 "<== nxge_intrs_enable: already enabled")); 4502 return; 4503 } 4504 4505 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4506 status = ddi_intr_block_enable(intrp->htable, 4507 intrp->intr_added); 4508 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 4509 "block enable - status 0x%x total inums #%d\n", 4510 status, intrp->intr_added)); 4511 } else { 4512 for (i = 0; i < intrp->intr_added; i++) { 4513 status = ddi_intr_enable(intrp->htable[i]); 4514 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 4515 "ddi_intr_enable:enable - status 0x%x " 4516 "total inums %d enable inum #%d\n", 4517 status, intrp->intr_added, i)); 4518 if (status == DDI_SUCCESS) { 4519 intrp->intr_enabled = B_TRUE; 4520 } 4521 } 4522 } 4523 4524 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable")); 4525 } 4526 4527 /*ARGSUSED*/ 4528 static void 4529 nxge_intrs_disable(p_nxge_t nxgep) 4530 { 4531 p_nxge_intr_t intrp; 4532 int i; 4533 4534 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable")); 4535 4536 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4537 4538 if (!intrp->intr_registered) { 4539 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: " 4540 "interrupts are not registered")); 4541 return; 4542 } 4543 4544 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4545 (void) ddi_intr_block_disable(intrp->htable, 4546 intrp->intr_added); 4547 } else { 4548 for (i = 0; i < intrp->intr_added; i++) { 4549 (void) ddi_intr_disable(intrp->htable[i]); 4550 } 4551 } 4552 4553 intrp->intr_enabled = B_FALSE; 4554 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable")); 4555 } 4556 4557 static nxge_status_t 4558 nxge_mac_register(p_nxge_t nxgep) 4559 { 4560 mac_register_t *macp; 4561 int status; 4562 4563 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register")); 4564 4565 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 4566 return (NXGE_ERROR); 4567 4568 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 4569 macp->m_driver = nxgep; 4570 macp->m_dip = nxgep->dip; 4571 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; 4572 macp->m_callbacks = &nxge_m_callbacks; 4573 macp->m_min_sdu = 0; 4574 macp->m_max_sdu = nxgep->mac.maxframesize - 4575 sizeof (struct ether_header) - ETHERFCSL - 4; 4576 4577 status = mac_register(macp, &nxgep->mach); 4578 mac_free(macp); 4579 4580 if (status != 0) { 4581 cmn_err(CE_WARN, 4582 "!nxge_mac_register failed (status %d instance %d)", 4583 status, nxgep->instance); 4584 return (NXGE_ERROR); 4585 } 4586 4587 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success " 4588 "(instance %d)", nxgep->instance)); 4589 4590 return (NXGE_OK); 4591 } 4592 4593 void 4594 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp) 4595 { 4596 ssize_t size; 4597 mblk_t *nmp; 4598 uint8_t blk_id; 4599 uint8_t chan; 4600 uint32_t err_id; 4601 err_inject_t *eip; 4602 4603 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject")); 4604 4605 size = 1024; 4606 nmp = mp->b_cont; 4607 eip = (err_inject_t *)nmp->b_rptr; 4608 blk_id = eip->blk_id; 4609 err_id = eip->err_id; 4610 chan = eip->chan; 4611 cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id); 4612 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id); 4613 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 4614 switch (blk_id) { 4615 case MAC_BLK_ID: 4616 break; 4617 case TXMAC_BLK_ID: 4618 break; 4619 case RXMAC_BLK_ID: 4620 break; 4621 case MIF_BLK_ID: 4622 break; 4623 case IPP_BLK_ID: 4624 nxge_ipp_inject_err(nxgep, err_id); 4625 break; 4626 case TXC_BLK_ID: 4627 nxge_txc_inject_err(nxgep, err_id); 4628 break; 4629 case TXDMA_BLK_ID: 4630 nxge_txdma_inject_err(nxgep, err_id, chan); 4631 break; 4632 case RXDMA_BLK_ID: 4633 nxge_rxdma_inject_err(nxgep, err_id, chan); 4634 break; 4635 case ZCP_BLK_ID: 4636 nxge_zcp_inject_err(nxgep, err_id); 4637 break; 4638 case ESPC_BLK_ID: 4639 break; 4640 case FFLP_BLK_ID: 4641 break; 4642 case PHY_BLK_ID: 4643 break; 4644 case ETHER_SERDES_BLK_ID: 4645 break; 4646 case PCIE_SERDES_BLK_ID: 4647 break; 4648 case VIR_BLK_ID: 4649 break; 4650 } 4651 4652 nmp->b_wptr = nmp->b_rptr + size; 4653 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 4654 4655 miocack(wq, mp, (int)size, 0); 4656 } 4657 4658 static int 4659 nxge_init_common_dev(p_nxge_t nxgep) 4660 { 4661 p_nxge_hw_list_t hw_p; 4662 dev_info_t *p_dip; 4663 4664 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 4665 4666 p_dip = nxgep->p_dip; 4667 MUTEX_ENTER(&nxge_common_lock); 4668 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4669 "==> nxge_init_common_dev:func # %d", 4670 nxgep->function_num)); 4671 /* 4672 * Loop through existing per neptune hardware list. 4673 */ 4674 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 4675 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4676 "==> nxge_init_common_device:func # %d " 4677 "hw_p $%p parent dip $%p", 4678 nxgep->function_num, 4679 hw_p, 4680 p_dip)); 4681 if (hw_p->parent_devp == p_dip) { 4682 nxgep->nxge_hw_p = hw_p; 4683 hw_p->ndevs++; 4684 hw_p->nxge_p[nxgep->function_num] = nxgep; 4685 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4686 "==> nxge_init_common_device:func # %d " 4687 "hw_p $%p parent dip $%p " 4688 "ndevs %d (found)", 4689 nxgep->function_num, 4690 hw_p, 4691 p_dip, 4692 hw_p->ndevs)); 4693 break; 4694 } 4695 } 4696 4697 if (hw_p == NULL) { 4698 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4699 "==> nxge_init_common_device:func # %d " 4700 "parent dip $%p (new)", 4701 nxgep->function_num, 4702 p_dip)); 4703 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 4704 hw_p->parent_devp = p_dip; 4705 hw_p->magic = NXGE_NEPTUNE_MAGIC; 4706 nxgep->nxge_hw_p = hw_p; 4707 hw_p->ndevs++; 4708 hw_p->nxge_p[nxgep->function_num] = nxgep; 4709 hw_p->next = nxge_hw_list; 4710 if (nxgep->niu_type == N2_NIU) { 4711 hw_p->niu_type = N2_NIU; 4712 hw_p->platform_type = P_NEPTUNE_NIU; 4713 } else { 4714 hw_p->niu_type = NIU_TYPE_NONE; 4715 hw_p->platform_type = P_NEPTUNE_NONE; 4716 } 4717 4718 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 4719 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 4720 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 4721 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 4722 MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL); 4723 4724 nxge_hw_list = hw_p; 4725 4726 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list); 4727 } 4728 4729 MUTEX_EXIT(&nxge_common_lock); 4730 4731 nxgep->platform_type = hw_p->platform_type; 4732 if (nxgep->niu_type != N2_NIU) { 4733 nxgep->niu_type = hw_p->niu_type; 4734 } 4735 4736 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4737 "==> nxge_init_common_device (nxge_hw_list) $%p", 4738 nxge_hw_list)); 4739 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device")); 4740 4741 return (NXGE_OK); 4742 } 4743 4744 static void 4745 nxge_uninit_common_dev(p_nxge_t nxgep) 4746 { 4747 p_nxge_hw_list_t hw_p, h_hw_p; 4748 dev_info_t *p_dip; 4749 4750 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device")); 4751 if (nxgep->nxge_hw_p == NULL) { 4752 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4753 "<== nxge_uninit_common_device (no common)")); 4754 return; 4755 } 4756 4757 MUTEX_ENTER(&nxge_common_lock); 4758 h_hw_p = nxge_hw_list; 4759 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 4760 p_dip = hw_p->parent_devp; 4761 if (nxgep->nxge_hw_p == hw_p && 4762 p_dip == nxgep->p_dip && 4763 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC && 4764 hw_p->magic == NXGE_NEPTUNE_MAGIC) { 4765 4766 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4767 "==> nxge_uninit_common_device:func # %d " 4768 "hw_p $%p parent dip $%p " 4769 "ndevs %d (found)", 4770 nxgep->function_num, 4771 hw_p, 4772 p_dip, 4773 hw_p->ndevs)); 4774 4775 nxgep->nxge_hw_p = NULL; 4776 if (hw_p->ndevs) { 4777 hw_p->ndevs--; 4778 } 4779 hw_p->nxge_p[nxgep->function_num] = NULL; 4780 if (!hw_p->ndevs) { 4781 MUTEX_DESTROY(&hw_p->nxge_vlan_lock); 4782 MUTEX_DESTROY(&hw_p->nxge_tcam_lock); 4783 MUTEX_DESTROY(&hw_p->nxge_cfg_lock); 4784 MUTEX_DESTROY(&hw_p->nxge_mdio_lock); 4785 MUTEX_DESTROY(&hw_p->nxge_mii_lock); 4786 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4787 "==> nxge_uninit_common_device: " 4788 "func # %d " 4789 "hw_p $%p parent dip $%p " 4790 "ndevs %d (last)", 4791 nxgep->function_num, 4792 hw_p, 4793 p_dip, 4794 hw_p->ndevs)); 4795 4796 if (hw_p == nxge_hw_list) { 4797 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4798 "==> nxge_uninit_common_device:" 4799 "remove head func # %d " 4800 "hw_p $%p parent dip $%p " 4801 "ndevs %d (head)", 4802 nxgep->function_num, 4803 hw_p, 4804 p_dip, 4805 hw_p->ndevs)); 4806 nxge_hw_list = hw_p->next; 4807 } else { 4808 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4809 "==> nxge_uninit_common_device:" 4810 "remove middle func # %d " 4811 "hw_p $%p parent dip $%p " 4812 "ndevs %d (middle)", 4813 nxgep->function_num, 4814 hw_p, 4815 p_dip, 4816 hw_p->ndevs)); 4817 h_hw_p->next = hw_p->next; 4818 } 4819 4820 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t)); 4821 } 4822 break; 4823 } else { 4824 h_hw_p = hw_p; 4825 } 4826 } 4827 4828 MUTEX_EXIT(&nxge_common_lock); 4829 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4830 "==> nxge_uninit_common_device (nxge_hw_list) $%p", 4831 nxge_hw_list)); 4832 4833 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<= nxge_uninit_common_device")); 4834 } 4835 4836 /* 4837 * Determines the number of ports from the niu_type or the platform type. 4838 * Returns the number of ports, or returns zero on failure. 4839 */ 4840 4841 int 4842 nxge_get_nports(p_nxge_t nxgep) 4843 { 4844 int nports = 0; 4845 4846 switch (nxgep->niu_type) { 4847 case N2_NIU: 4848 case NEPTUNE_2_10GF: 4849 nports = 2; 4850 break; 4851 case NEPTUNE_4_1GC: 4852 case NEPTUNE_2_10GF_2_1GC: 4853 case NEPTUNE_1_10GF_3_1GC: 4854 case NEPTUNE_1_1GC_1_10GF_2_1GC: 4855 nports = 4; 4856 break; 4857 default: 4858 switch (nxgep->platform_type) { 4859 case P_NEPTUNE_NIU: 4860 case P_NEPTUNE_ATLAS_2PORT: 4861 nports = 2; 4862 break; 4863 case P_NEPTUNE_ATLAS_4PORT: 4864 case P_NEPTUNE_MARAMBA_P0: 4865 case P_NEPTUNE_MARAMBA_P1: 4866 nports = 4; 4867 break; 4868 default: 4869 break; 4870 } 4871 break; 4872 } 4873 4874 return (nports); 4875 } 4876