/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */

/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 * and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t nxge_msi_enable = 2;
#else
uint32_t nxge_msi_enable = 1;
#endif

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t nxge_jumbo_enable = B_FALSE;
uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/* MAX LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;

/*
 * Debugging flags:
 *	nxge_no_tx_lb:		transmit load balancing
 *	nxge_tx_lb_policy:	0 - TCP port (default)
 *				3 - DEST MAC
 */
uint32_t nxge_no_tx_lb = 0;
uint32_t nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Tunable to limit the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_bcopy_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

rtrace_t npi_rtracebuf;
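/*
 * For example, any of the tunables above can be overridden from /etc/system
 * before the driver loads, using the standard "set module:variable" syntax
 * (the values shown are illustrative only):
 *
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_jumbo_enable = 1
 */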
#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
    HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
    NIU_MINOR_VER, "nxge"
};
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
    struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
    p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_unicst(void *, const uint8_t *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static void nxge_m_resources(void *);
mblk_t *nxge_m_tx(void *arg, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
    mac_addr_slot_t slot);
static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
    boolean_t factory);
static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)

static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);

static mac_callbacks_t nxge_m_callbacks = {
    NXGE_M_CALLBACK_FLAGS,
    nxge_m_stat,
    nxge_m_start,
    nxge_m_stop,
    nxge_m_promisc,
    nxge_m_multicst,
    nxge_m_unicst,
    nxge_m_tx,
    nxge_m_resources,
    nxge_m_ioctl,
    nxge_m_getcapab
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level = 0;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t, ddi_device_acc_attr_t *,
    ddi_device_acc_attr_t *, ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC
};
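/*
 * Note on the access attributes: device registers and descriptors are
 * declared little-endian (DDI_STRUCTURE_LE_ACC above), so the DDI
 * framework performs any byte swapping needed on big-endian (SPARC)
 * hosts; the packet data buffers below are mapped big-endian,
 * presumably so payloads can be read natively on SPARC without a swap.
 */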
/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_BE_ACC,
    DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
    DMA_ATTR_V0,		/* version number */
    0,				/* low address */
    0xffffffffffffffff,		/* high address */
    0xffffffffffffffff,		/* address counter max */
#ifndef NIU_PA_WORKAROUND
    0x100000,			/* alignment */
#else
    0x2000,			/* alignment */
#endif
    0xfc00fc,			/* dlim_burstsizes */
    0x1,			/* minimum transfer size */
    0xffffffffffffffff,		/* maximum transfer size */
    0xffffffffffffffff,		/* maximum segment size */
    1,				/* scatter/gather list length */
    (unsigned int)1,		/* granularity */
    0				/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
    DMA_ATTR_V0,		/* version number */
    0,				/* low address */
    0xffffffffffffffff,		/* high address */
    0xffffffffffffffff,		/* address counter max */
#if defined(_BIG_ENDIAN)
    0x2000,			/* alignment */
#else
    0x1000,			/* alignment */
#endif
    0xfc00fc,			/* dlim_burstsizes */
    0x1,			/* minimum transfer size */
    0xffffffffffffffff,		/* maximum transfer size */
    0xffffffffffffffff,		/* maximum segment size */
    5,				/* scatter/gather list length */
    (unsigned int)1,		/* granularity */
    0				/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
    DMA_ATTR_V0,		/* version number */
    0,				/* low address */
    0xffffffffffffffff,		/* high address */
    0xffffffffffffffff,		/* address counter max */
    0x2000,			/* alignment */
    0xfc00fc,			/* dlim_burstsizes */
    0x1,			/* minimum transfer size */
    0xffffffffffffffff,		/* maximum transfer size */
    0xffffffffffffffff,		/* maximum segment size */
    1,				/* scatter/gather list length */
    (unsigned int)1,		/* granularity */
    DDI_DMA_RELAXED_ORDERING	/* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
    (uint_t)0,			/* dlim_addr_lo */
    (uint_t)0xffffffff,		/* dlim_addr_hi */
    (uint_t)0xffffffff,		/* dlim_cntr_max */
    (uint_t)0xfc00fc,		/* dlim_burstsizes for 32 and 64 bit xfers */
    0x1,			/* dlim_minxfer */
    1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes[] = {0x2000};
#else
size_t alloc_sizes[] = {0x1000, 0x2000, 0x4000, 0x8000,
    0x10000, 0x20000, 0x40000, 0x80000,
    0x100000, 0x200000, 0x400000, 0x800000,
    0x1000000, 0x2000000, 0x4000000};
#endif
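/*
 * A sketch of how the buffer allocators are expected to use alloc_sizes:
 * starting from the largest entry that does not exceed the total request,
 * carve the request into as few chunks as possible, falling back to
 * smaller entries when a large contiguous allocation fails. For example,
 * a 6 MB request would ideally be satisfied by one 4 MB chunk plus one
 * 2 MB chunk rather than 1536 4 KB chunks.
 */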
/*
 * nxge_attach - Device attach(9E) entry point.
 */
static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    p_nxge_t nxgep = NULL;
    int instance;
    int status = DDI_SUCCESS;
    uint8_t portn;
    nxge_mmac_t *mmac_info;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

    /*
     * Get the device instance, since we'll need to setup
     * or retrieve a soft state for this instance.
     */
    instance = ddi_get_instance(dip);

    switch (cmd) {
    case DDI_ATTACH:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
        break;

    case DDI_RESUME:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
        nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
        if (nxgep == NULL) {
            status = DDI_FAILURE;
            break;
        }
        if (nxgep->dip != dip) {
            status = DDI_FAILURE;
            break;
        }
        if (nxgep->suspended == DDI_PM_SUSPEND) {
            status = ddi_dev_is_needed(nxgep->dip, 0, 1);
        } else {
            status = nxge_resume(nxgep);
        }
        goto nxge_attach_exit;

    case DDI_PM_RESUME:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
        nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
        if (nxgep == NULL) {
            status = DDI_FAILURE;
            break;
        }
        if (nxgep->dip != dip) {
            status = DDI_FAILURE;
            break;
        }
        status = nxge_resume(nxgep);
        goto nxge_attach_exit;

    default:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
        status = DDI_FAILURE;
        goto nxge_attach_exit;
    }

    if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
        status = DDI_FAILURE;
        goto nxge_attach_exit;
    }

    nxgep = ddi_get_soft_state(nxge_list, instance);
    if (nxgep == NULL) {
        status = NXGE_ERROR;
        goto nxge_attach_fail2;
    }

    nxgep->nxge_magic = NXGE_MAGIC;

    nxgep->drv_state = 0;
    nxgep->dip = dip;
    nxgep->instance = instance;
    nxgep->p_dip = ddi_get_parent(dip);
    nxgep->nxge_debug_level = nxge_debug_level;
    npi_debug_level = nxge_debug_level;

    nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr,
        &nxge_rx_dma_attr);

    status = nxge_map_regs(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
        goto nxge_attach_fail3;
    }

    status = nxge_init_common_dev(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_init_common_dev failed"));
        goto nxge_attach_fail4;
    }

    if (nxgep->niu_type == NEPTUNE_2_10GF) {
        if (nxgep->function_num > 1) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Unsupported"
                " function %d. Only functions 0 and 1 are "
                "supported for this card.", nxgep->function_num));
            status = NXGE_ERROR;
            goto nxge_attach_fail4;
        }
    }

    portn = NXGE_GET_PORT_NUM(nxgep->function_num);
    nxgep->mac.portnum = portn;
    if ((portn == 0) || (portn == 1))
        nxgep->mac.porttype = PORT_TYPE_XMAC;
    else
        nxgep->mac.porttype = PORT_TYPE_BMAC;
    /*
     * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
     * internally, and the remaining 2 ports use BMAC (1G "Big" MAC).
     * The two types of MACs have different characteristics.
     */
    mmac_info = &nxgep->nxge_mmac_info;
    if (nxgep->function_num < 2) {
        mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
        mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
    } else {
        mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
        mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
    }
    /*
     * Setup the Ndd parameters for this instance.
     */
    nxge_init_param(nxgep);
    /*
     * Setup Register Tracing Buffer.
     */
    npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

    /* init stats ptr */
    nxge_init_statsp(nxgep);

    /*
     * Read the VPD info from the EEPROM into a local data
     * structure, and check the VPD info for validity.
     */
    nxge_vpd_info_get(nxgep);

    status = nxge_xcvr_find(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
            " Couldn't determine card type"
            " .... exit "));
        goto nxge_attach_fail5;
    }

    status = nxge_get_config_properties(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed"));
        goto nxge_attach_fail;
    }

    /*
     * Setup the Kstats for the driver.
     */
    nxge_setup_kstats(nxgep);

    nxge_setup_param(nxgep);

    status = nxge_setup_system_dma_pages(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
        goto nxge_attach_fail;
    }

#if defined(sun4v)
    if (nxgep->niu_type == N2_NIU) {
        nxgep->niu_hsvc_available = B_FALSE;
        bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
        if ((status = hsvc_register(&nxgep->niu_hsvc,
            &nxgep->niu_min_ver)) != 0) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "nxge_attach: "
                "%s: cannot negotiate "
                "hypervisor services "
                "revision %d "
                "group: 0x%lx "
                "major: 0x%lx minor: 0x%lx "
                "errno: %d",
                niu_hsvc.hsvc_modname,
                niu_hsvc.hsvc_rev,
                niu_hsvc.hsvc_group,
                niu_hsvc.hsvc_major,
                niu_hsvc.hsvc_minor,
                status));
            status = DDI_FAILURE;
            goto nxge_attach_fail;
        }

        nxgep->niu_hsvc_available = B_TRUE;
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "NIU Hypervisor service enabled"));
    }
#endif

    nxge_hw_id_init(nxgep);
    nxge_hw_init_niu_common(nxgep);

    status = nxge_setup_mutexes(nxgep);
    if (status != NXGE_OK) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
        goto nxge_attach_fail;
    }

    status = nxge_setup_dev(nxgep);
    if (status != DDI_SUCCESS) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
        goto nxge_attach_fail;
    }

    status = nxge_add_intrs(nxgep);
    if (status != DDI_SUCCESS) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
        goto nxge_attach_fail;
    }

    status = nxge_add_soft_intrs(nxgep);
    if (status != DDI_SUCCESS) {
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed"));
        goto nxge_attach_fail;
    }

    /*
     * Enable interrupts.
     */
    nxge_intrs_enable(nxgep);

    if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "unable to register to mac layer (%d)", status));
        goto nxge_attach_fail;
    }

    mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
        instance));

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

    goto nxge_attach_exit;

nxge_attach_fail:
    nxge_unattach(nxgep);
    goto nxge_attach_fail1;

nxge_attach_fail5:
    /*
     * Tear down the ndd parameters setup.
     */
    nxge_destroy_param(nxgep);

    /*
     * Tear down the kstat setup.
     */
    nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
    if (nxgep->nxge_hw_p) {
        nxge_uninit_common_dev(nxgep);
        nxgep->nxge_hw_p = NULL;
    }

nxge_attach_fail3:
    /*
     * Unmap the register setup.
     */
    nxge_unmap_regs(nxgep);

    nxge_fm_fini(nxgep);

nxge_attach_fail2:
    ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
    if (status != NXGE_OK)
        status = (NXGE_ERROR | NXGE_DDI_FAILED);
    nxgep = NULL;

nxge_attach_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
        status));

    return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    int status = DDI_SUCCESS;
    int instance;
    p_nxge_t nxgep = NULL;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
    instance = ddi_get_instance(dip);
    nxgep = ddi_get_soft_state(nxge_list, instance);
    if (nxgep == NULL) {
        status = DDI_FAILURE;
        goto nxge_detach_exit;
    }

    switch (cmd) {
    case DDI_DETACH:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
        break;

    case DDI_PM_SUSPEND:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
        nxgep->suspended = DDI_PM_SUSPEND;
        nxge_suspend(nxgep);
        break;

    case DDI_SUSPEND:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
        if (nxgep->suspended != DDI_PM_SUSPEND) {
            nxgep->suspended = DDI_SUSPEND;
            nxge_suspend(nxgep);
        }
        break;

    default:
        status = DDI_FAILURE;
    }

    if (cmd != DDI_DETACH)
        goto nxge_detach_exit;

    /*
     * Stop the xcvr polling.
     */
    nxgep->suspended = cmd;

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

    if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "<== nxge_detach status = 0x%08X", status));
        return (DDI_FAILURE);
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "<== nxge_detach (mac_unregister) status = 0x%08X", status));

    nxge_unattach(nxgep);
    nxgep = NULL;

nxge_detach_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
        status));

    return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

    if (nxgep == NULL || nxgep->dev_regs == NULL) {
        return;
    }

    nxgep->nxge_magic = 0;

    if (nxgep->nxge_timerid) {
        nxge_stop_timer(nxgep, nxgep->nxge_timerid);
        nxgep->nxge_timerid = 0;
    }

    if (nxgep->nxge_hw_p) {
        nxge_uninit_common_dev(nxgep);
        nxgep->nxge_hw_p = NULL;
    }

#if defined(sun4v)
    if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
        (void) hsvc_unregister(&nxgep->niu_hsvc);
        nxgep->niu_hsvc_available = B_FALSE;
    }
#endif
    /*
     * Stop any further interrupts.
     */
    nxge_remove_intrs(nxgep);

    /* Remove soft interrupts. */
    nxge_remove_soft_intrs(nxgep);

    /*
     * Stop the device and free resources.
     */
    nxge_destroy_dev(nxgep);

    /*
     * Tear down the ndd parameters setup.
     */
    nxge_destroy_param(nxgep);

    /*
     * Tear down the kstat setup.
     */
    nxge_destroy_kstats(nxgep);

    /*
     * Destroy all mutexes.
     */
    nxge_destroy_mutexes(nxgep);

    /*
     * Remove the list of ndd parameters which
     * were setup during attach.
     */
    if (nxgep->dip) {
        NXGE_DEBUG_MSG((nxgep, OBP_CTL,
            " nxge_unattach: remove all properties"));

        (void) ddi_prop_remove_all(nxgep->dip);
    }

#if NXGE_PROPERTY
    nxge_remove_hard_properties(nxgep);
#endif
    /*
     * Unmap the register setup.
     */
    nxge_unmap_regs(nxgep);

    nxge_fm_fini(nxgep);

    ddi_soft_state_free(nxge_list, nxgep->instance);

    NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
    int ddi_status = DDI_SUCCESS;
    p_dev_regs_t dev_regs;
    char buf[MAXPATHLEN + 1];
    char *devname;
#ifdef NXGE_DEBUG
    char *sysname;
#endif
    off_t regsize;
    nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
    off_t pci_offset;
    uint16_t pcie_devctl;
#endif

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
    nxgep->dev_regs = NULL;
    dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
    dev_regs->nxge_regh = NULL;
    dev_regs->nxge_pciregh = NULL;
    dev_regs->nxge_msix_regh = NULL;
    dev_regs->nxge_vir_regh = NULL;
    dev_regs->nxge_vir2_regh = NULL;
    nxgep->niu_type = NIU_TYPE_NONE;

    devname = ddi_pathname(nxgep->dip, buf);
    ASSERT(strlen(devname) > 0);
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "nxge_map_regs: pathname devname %s", devname));

    if (strstr(devname, n2_siu_name)) {
        /* N2/NIU */
        nxgep->niu_type = N2_NIU;
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: N2/NIU devname %s", devname));
        /* Get the function number. */
        nxgep->function_num =
            (devname[strlen(devname) - 1] == '1' ? 1 : 0);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: N2/NIU function number %d",
            nxgep->function_num));
    } else {
        int *prop_val;
        uint_t prop_len;
        uint8_t func_num;

        if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
            0, "reg", &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
            NXGE_DEBUG_MSG((nxgep, VPD_CTL,
                "Reg property not found"));
            ddi_status = DDI_FAILURE;
            goto nxge_map_regs_fail0;
        } else {
            func_num = (prop_val[0] >> 8) & 0x7;
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "Reg property found: fun # %d", func_num));
            nxgep->function_num = func_num;
            ddi_prop_free(prop_val);
        }
    }

    switch (nxgep->niu_type) {
    default:
        (void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: pci config size 0x%x", regsize));

        ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
            (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs, nxge bus config regs failed"));
            goto nxge_map_regs_fail0;
        }
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_reg: PCI config addr 0x%0llx "
            " handle 0x%0llx", dev_regs->nxge_pciregp,
            dev_regs->nxge_pciregh));
        /*
         * Workaround for a bit-swapping bug in the HW
         * which ends up setting no-snoop = yes,
         * resulting in DMA not being synched properly.
         */
#if !defined(_BIG_ENDIAN)
        /* Workarounds for x86 systems. */
        pci_offset = 0x80 + PCIE_DEVCTL;
        pcie_devctl = 0x0;
        pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
        pcie_devctl |= PCIE_DEVCTL_RO_EN;
        pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
            pcie_devctl);
#endif

        (void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: pio size 0x%x", regsize));
        /* Set up the device mapped register. */
        ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
            (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for Neptune global reg failed"));
            goto nxge_map_regs_fail1;
        }

        /* Set up the MSI/MSI-X mapped register. */
        (void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: msix size 0x%x", regsize));
        ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
            (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for msi reg failed"));
            goto nxge_map_regs_fail2;
        }

        /* Set up the vio region mapped register. */
        (void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: vio size 0x%x", regsize));
        ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
            (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for nxge vio reg failed"));
            goto nxge_map_regs_fail3;
        }
        nxgep->dev_regs = dev_regs;

        NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
        NPI_PCI_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_pciregp);
        NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
        NPI_MSI_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

        NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
        NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

        NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
        NPI_REG_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_regp);

        NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
        NPI_VREG_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

        break;

    case N2_NIU:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
        /*
         * Set up the device mapped register (FWARC 2006/556)
         * (changed back to 1: reg starts at 1!)
         */
        (void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: dev size 0x%x", regsize));
        ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
            (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for N2/NIU, global reg failed "));
            goto nxge_map_regs_fail1;
        }

        /* Set up the first vio region mapped register. */
        (void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: vio (1) size 0x%x", regsize));
        ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
            (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for nxge vio reg failed"));
            goto nxge_map_regs_fail2;
        }

        /* Set up the second vio region mapped register. */
        (void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: vio (3) size 0x%x", regsize));
        ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
            (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for nxge vio2 reg failed"));
            goto nxge_map_regs_fail3;
        }
        nxgep->dev_regs = dev_regs;

        NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
        NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

        NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
        NPI_REG_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_regp);

        NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
        NPI_VREG_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

        NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
        NPI_V2REG_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

        break;
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
        " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

    goto nxge_map_regs_exit;

nxge_map_regs_fail3:
    if (dev_regs->nxge_msix_regh) {
        ddi_regs_map_free(&dev_regs->nxge_msix_regh);
    }
    if (dev_regs->nxge_vir_regh) {
        ddi_regs_map_free(&dev_regs->nxge_vir_regh);
    }
nxge_map_regs_fail2:
    if (dev_regs->nxge_regh) {
        ddi_regs_map_free(&dev_regs->nxge_regh);
    }
nxge_map_regs_fail1:
    if (dev_regs->nxge_pciregh) {
        ddi_regs_map_free(&dev_regs->nxge_pciregh);
    }
nxge_map_regs_fail0:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
    kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
    if (ddi_status != DDI_SUCCESS)
        status |= (NXGE_ERROR | NXGE_DDI_FAILED);
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
    return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
    if (nxgep->dev_regs) {
        if (nxgep->dev_regs->nxge_pciregh) {
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "==> nxge_unmap_regs: bus"));
            ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
            nxgep->dev_regs->nxge_pciregh = NULL;
        }
        if (nxgep->dev_regs->nxge_regh) {
"==> nxge_unmap_regs: device registers")); 1096 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1097 nxgep->dev_regs->nxge_regh = NULL; 1098 } 1099 if (nxgep->dev_regs->nxge_msix_regh) { 1100 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1101 "==> nxge_unmap_regs: device interrupts")); 1102 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); 1103 nxgep->dev_regs->nxge_msix_regh = NULL; 1104 } 1105 if (nxgep->dev_regs->nxge_vir_regh) { 1106 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1107 "==> nxge_unmap_regs: vio region")); 1108 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); 1109 nxgep->dev_regs->nxge_vir_regh = NULL; 1110 } 1111 if (nxgep->dev_regs->nxge_vir2_regh) { 1112 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1113 "==> nxge_unmap_regs: vio2 region")); 1114 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); 1115 nxgep->dev_regs->nxge_vir2_regh = NULL; 1116 } 1117 1118 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); 1119 nxgep->dev_regs = NULL; 1120 } 1121 1122 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); 1123 } 1124 1125 static nxge_status_t 1126 nxge_setup_mutexes(p_nxge_t nxgep) 1127 { 1128 int ddi_status = DDI_SUCCESS; 1129 nxge_status_t status = NXGE_OK; 1130 nxge_classify_t *classify_ptr; 1131 int partition; 1132 1133 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); 1134 1135 /* 1136 * Get the interrupt cookie so the mutexes can be 1137 * Initialized. 1138 */ 1139 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, 1140 &nxgep->interrupt_cookie); 1141 if (ddi_status != DDI_SUCCESS) { 1142 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1143 "<== nxge_setup_mutexes: failed 0x%x", ddi_status)); 1144 goto nxge_setup_mutexes_exit; 1145 } 1146 1147 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL); 1148 MUTEX_INIT(&nxgep->poll_lock, NULL, 1149 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1150 1151 /* 1152 * Initialize mutexes for this device. 1153 */ 1154 MUTEX_INIT(nxgep->genlock, NULL, 1155 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1156 MUTEX_INIT(&nxgep->ouraddr_lock, NULL, 1157 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1158 MUTEX_INIT(&nxgep->mif_lock, NULL, 1159 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1160 RW_INIT(&nxgep->filter_lock, NULL, 1161 RW_DRIVER, (void *)nxgep->interrupt_cookie); 1162 1163 classify_ptr = &nxgep->classifier; 1164 /* 1165 * FFLP Mutexes are never used in interrupt context 1166 * as fflp operation can take very long time to 1167 * complete and hence not suitable to invoke from interrupt 1168 * handlers. 
    MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
        NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
        MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
            NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
        for (partition = 0; partition < MAX_PARTITION; partition++) {
            MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
                NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
        }
    }

nxge_setup_mutexes_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "<== nxge_setup_mutexes status = %x", status));

    if (ddi_status != DDI_SUCCESS)
        status |= (NXGE_ERROR | NXGE_DDI_FAILED);

    return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
    int partition;
    nxge_classify_t *classify_ptr;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
    RW_DESTROY(&nxgep->filter_lock);
    MUTEX_DESTROY(&nxgep->mif_lock);
    MUTEX_DESTROY(&nxgep->ouraddr_lock);
    MUTEX_DESTROY(nxgep->genlock);

    classify_ptr = &nxgep->classifier;
    MUTEX_DESTROY(&classify_ptr->tcam_lock);

    /* Destroy all polling resources. */
    MUTEX_DESTROY(&nxgep->poll_lock);
    cv_destroy(&nxgep->poll_cv);

    /* Free data structures, based on HW type. */
    if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
        MUTEX_DESTROY(&classify_ptr->fcram_lock);
        for (partition = 0; partition < MAX_PARTITION; partition++) {
            MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
        }
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

    if (nxgep->drv_state & STATE_HW_INITIALIZED) {
        return (status);
    }

    /*
     * Allocate system memory for the receive/transmit buffer blocks
     * and receive/transmit descriptor rings.
     */
    status = nxge_alloc_mem_pool(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
        goto nxge_init_fail1;
    }

    /*
     * Initialize and enable TXC registers.
     * (Globally enable the TX controller,
     * enable a port, configure the dma channel bitmap,
     * and configure the max burst size.)
     */
    status = nxge_txc_init(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
        goto nxge_init_fail2;
    }

    /*
     * Initialize and enable TXDMA channels.
     */
    status = nxge_init_txdma_channels(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
        goto nxge_init_fail3;
    }

    /*
     * Initialize and enable RXDMA channels.
     */
    status = nxge_init_rxdma_channels(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
        goto nxge_init_fail4;
    }

    /*
     * Initialize TCAM and FCRAM (Neptune).
     */
    status = nxge_classify_init(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
        goto nxge_init_fail5;
    }

    /*
     * Initialize ZCP.
     */
    status = nxge_zcp_init(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
        goto nxge_init_fail5;
    }

    /*
     * Initialize IPP.
     */
    status = nxge_ipp_init(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
        goto nxge_init_fail5;
    }

    /*
     * Initialize the MAC block.
     */
    status = nxge_mac_init(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
        goto nxge_init_fail5;
    }

    nxge_intrs_enable(nxgep);

    /*
     * Enable hardware interrupts.
     */
    nxge_intr_hw_enable(nxgep);
    nxgep->drv_state |= STATE_HW_INITIALIZED;

    goto nxge_init_exit;

nxge_init_fail5:
    nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
    nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
    (void) nxge_txc_uninit(nxgep);
nxge_init_fail2:
    nxge_free_mem_pool(nxgep);
nxge_init_fail1:
    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
        "<== nxge_init status (failed) = 0x%08x", status));
    return (status);

nxge_init_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
        status));
    return (status);
}

timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
    if ((nxgep->suspended == 0) ||
        (nxgep->suspended == DDI_RESUME)) {
        return (timeout(func, (caddr_t)nxgep,
            drv_usectohz(1000 * msec)));
    }
    return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
    if (timerid) {
        (void) untimeout(timerid);
    }
}
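/*
 * Typical usage of the timer helpers above (illustrative; the callback
 * name is hypothetical): the caller keeps the returned timeout_id_t and
 * cancels it before teardown, e.g.
 *
 *	nxgep->nxge_timerid = nxge_start_timer(nxgep, some_check_func, 1000);
 *	...
 *	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
 *	nxgep->nxge_timerid = 0;
 *
 * This matches how nxge_timerid is managed in nxge_unattach() above and
 * nxge_uninit() below.
 */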
void
nxge_uninit(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "==> nxge_uninit: not initialized"));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "<== nxge_uninit"));
        return;
    }

    /* Stop the timer. */
    if (nxgep->nxge_timerid) {
        nxge_stop_timer(nxgep, nxgep->nxge_timerid);
        nxgep->nxge_timerid = 0;
    }

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
    (void) nxge_intr_hw_disable(nxgep);

    /*
     * Reset the receive MAC side.
     */
    (void) nxge_rx_mac_disable(nxgep);

    /* Disable and soft reset the IPP. */
    (void) nxge_ipp_disable(nxgep);

    /* Free classification resources. */
    (void) nxge_classify_uninit(nxgep);

    /*
     * Reset the transmit/receive DMA side.
     */
    (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
    (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

    nxge_uninit_txdma_channels(nxgep);
    nxge_uninit_rxdma_channels(nxgep);

    /*
     * Reset the transmit MAC side.
     */
    (void) nxge_tx_mac_disable(nxgep);

    nxge_free_mem_pool(nxgep);

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

    nxgep->drv_state &= ~STATE_HW_INITIALIZED;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
        "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
    size_t reg;
#else
    uint64_t reg;
#endif
    uint64_t regdata;
    int i, retry;

    bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
    regdata = 0;
    retry = 1;

    for (i = 0; i < retry; i++) {
        NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
    }
    bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
    size_t reg;
#else
    uint64_t reg;
#endif
    uint64_t buf[2];

    bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
#if defined(__i386)
    reg = (size_t)buf[0];
#else
    reg = buf[0];
#endif

    NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}

nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
    char msg_buffer[1048];
    char prefix_buffer[32];
    int instance;
    uint64_t debug_level;
    int cmn_level = CE_CONT;
    va_list ap;

    debug_level = (nxgep == NULL) ? nxge_debug_level :
        nxgep->nxge_debug_level;

    if ((level & debug_level) ||
        (level == NXGE_NOTE) ||
        (level == NXGE_ERR_CTL)) {
        /* Do the message processing. */
        if (nxge_debug_init == 0) {
            MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
            nxge_debug_init = 1;
        }

        MUTEX_ENTER(&nxgedebuglock);

        if ((level & NXGE_NOTE)) {
            cmn_level = CE_NOTE;
        }

        if (level & NXGE_ERR_CTL) {
            cmn_level = CE_WARN;
        }

        va_start(ap, fmt);
        (void) vsprintf(msg_buffer, fmt, ap);
        va_end(ap);
        if (nxgep == NULL) {
            instance = -1;
            (void) sprintf(prefix_buffer, "%s :", "nxge");
        } else {
            instance = nxgep->instance;
            (void) sprintf(prefix_buffer,
                "%s%d :", "nxge", instance);
        }

        MUTEX_EXIT(&nxgedebuglock);
        cmn_err(cmn_level, "!%s %s\n",
            prefix_buffer, msg_buffer);
    }
}

char *
nxge_dump_packet(char *addr, int size)
{
    uchar_t *ap = (uchar_t *)addr;
    int i;
    static char etherbuf[1024];
    char *cp = etherbuf;
    char digits[] = "0123456789abcdef";

    if (!size)
        size = 60;

    if (size > MAX_DUMP_SZ) {
        /* Dump the leading bytes. */
        for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
            if (*ap > 0x0f)
                *cp++ = digits[*ap >> 4];
            *cp++ = digits[*ap++ & 0xf];
            *cp++ = ':';
        }
        for (i = 0; i < 20; i++)
            *cp++ = '.';
        /* Dump the last MAX_DUMP_SZ/2 bytes. */
        ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
        for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
            if (*ap > 0x0f)
                *cp++ = digits[*ap >> 4];
            *cp++ = digits[*ap++ & 0xf];
            *cp++ = ':';
        }
    } else {
        for (i = 0; i < size; i++) {
            if (*ap > 0x0f)
                *cp++ = digits[*ap >> 4];
            *cp++ = digits[*ap++ & 0xf];
            *cp++ = ':';
        }
    }
    *--cp = 0;
    return (etherbuf);
}
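/*
 * Example use of nxge_dump_packet() (illustrative only): dump the first
 * 60 bytes of a frame under a debug level, e.g.
 *
 *	NXGE_DEBUG_MSG((nxgep, RX_CTL, "pkt: %s",
 *	    nxge_dump_packet((char *)mp->b_rptr, 60)));
 *
 * Note that the returned buffer is static, so the result must be
 * consumed before the next call.
 */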
#ifdef NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
    ddi_acc_handle_t cfg_handle;
    p_pci_cfg_t cfg_ptr;
    ddi_acc_handle_t dev_handle;
    char *dev_ptr;
    ddi_acc_handle_t pci_config_handle;
    uint32_t regval;
    int i;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

    dev_handle = nxgep->dev_regs->nxge_regh;
    dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

    if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
        cfg_handle = nxgep->dev_regs->nxge_pciregh;
        cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
            &cfg_ptr->vendorid));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "\tvendorid 0x%x devid 0x%x",
            NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
            NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
            "bar1c 0x%x",
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
            "base 28 0x%x bar2c 0x%x\n",
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "\nNeptune PCI BAR: base30 0x%x\n",
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

        cfg_handle = nxgep->dev_regs->nxge_pciregh;
        cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "first 0x%llx second 0x%llx third 0x%llx "
            "last 0x%llx ",
            NXGE_PIO_READ64(dev_handle,
                (uint64_t *)(dev_ptr + 0), 0),
            NXGE_PIO_READ64(dev_handle,
                (uint64_t *)(dev_ptr + 8), 0),
            NXGE_PIO_READ64(dev_handle,
                (uint64_t *)(dev_ptr + 16), 0),
            NXGE_PIO_READ64(cfg_handle,
                (uint64_t *)(dev_ptr + 24), 0)));
    }
}
#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

    nxge_intrs_disable(nxgep);
    nxge_destroy_dev(nxgep);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

    nxgep->suspended = DDI_RESUME;
    (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
    (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
    (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
    (void) nxge_rx_mac_enable(nxgep);
    (void) nxge_tx_mac_enable(nxgep);
    nxge_intrs_enable(nxgep);
    nxgep->suspended = 0;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "<== nxge_resume status = 0x%x", status));
    return (status);
}

static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
        nxgep->mac.portnum));

    status = nxge_link_init(nxgep);

    if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "port%d Bad register acc handle", nxgep->mac.portnum));
        status = NXGE_ERROR;
    }

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " nxge_setup_dev status "
            "(xcvr init 0x%08x)", status));
        goto nxge_setup_dev_exit;
    }

nxge_setup_dev_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "<== nxge_setup_dev port %d status = 0x%08x",
        nxgep->mac.portnum, status));

    return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

    (void) nxge_hw_stop(nxgep);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}

static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
    int ddi_status = DDI_SUCCESS;
    uint_t count;
    ddi_dma_cookie_t cookie;
    uint_t iommu_pagesize;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
    nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
    if (nxgep->niu_type != N2_NIU) {
        iommu_pagesize = dvma_pagesize(nxgep->dip);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
            " default_block_size %d iommu_pagesize %d",
            nxgep->sys_page_sz,
            ddi_ptob(nxgep->dip, (ulong_t)1),
            nxgep->rx_default_block_size,
            iommu_pagesize));

        if (iommu_pagesize != 0) {
            if (nxgep->sys_page_sz == iommu_pagesize) {
                if (iommu_pagesize > 0x4000)
                    nxgep->sys_page_sz = 0x4000;
            } else {
                if (nxgep->sys_page_sz > iommu_pagesize)
                    nxgep->sys_page_sz = iommu_pagesize;
            }
        }
    }
    nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
        "default_block_size %d page mask %d",
        nxgep->sys_page_sz,
        ddi_ptob(nxgep->dip, (ulong_t)1),
        nxgep->rx_default_block_size,
        nxgep->sys_page_mask));

    switch (nxgep->sys_page_sz) {
    default:
        nxgep->sys_page_sz = 0x1000;
        nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
        nxgep->rx_default_block_size = 0x1000;
        nxgep->rx_bksize_code = RBR_BKSIZE_4K;
        break;
    case 0x1000:
        nxgep->rx_default_block_size = 0x1000;
        nxgep->rx_bksize_code = RBR_BKSIZE_4K;
        break;
    case 0x2000:
        nxgep->rx_default_block_size = 0x2000;
        nxgep->rx_bksize_code = RBR_BKSIZE_8K;
        break;
    case 0x4000:
        nxgep->rx_default_block_size = 0x4000;
        nxgep->rx_bksize_code = RBR_BKSIZE_16K;
        break;
    case 0x8000:
        nxgep->rx_default_block_size = 0x8000;
        nxgep->rx_bksize_code = RBR_BKSIZE_32K;
        break;
    }

#ifndef USE_RX_BIG_BUF
    nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
    nxgep->rx_default_block_size = 0x2000;
    nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
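    /*
     * Worked example for the page-size logic above: on a host with an
     * 8 KB system page size (sys_page_sz == 0x2000), the page mask
     * becomes ~(0x2000 - 1), i.e. 0xffffe000 in 32 bits, the default
     * RX block size is 8 KB, and the RBR block-size code is
     * RBR_BKSIZE_8K.
     */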
    /*
     * Get the system DMA burst size.
     */
    ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
        DDI_DMA_DONTWAIT, 0,
        &nxgep->dmasparehandle);
    if (ddi_status != DDI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "ddi_dma_alloc_handle: failed "
            " status 0x%x", ddi_status));
        goto nxge_get_soft_properties_exit;
    }

    ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
        (caddr_t)nxgep->dmasparehandle,
        sizeof (nxgep->dmasparehandle),
        DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
        DDI_DMA_DONTWAIT, 0,
        &cookie, &count);
    if (ddi_status != DDI_DMA_MAPPED) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "Binding spare handle to find system"
            " burstsize failed."));
        ddi_status = DDI_FAILURE;
        goto nxge_get_soft_properties_fail1;
    }

    nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
    (void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

nxge_get_soft_properties_fail1:
    ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:
    if (ddi_status != DDI_SUCCESS)
        status |= (NXGE_ERROR | NXGE_DDI_FAILED);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "<== nxge_setup_system_dma_pages status = 0x%08x", status));
    return (status);
}

static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));

    status = nxge_alloc_rx_mem_pool(nxgep);
    if (status != NXGE_OK) {
        return (NXGE_ERROR);
    }

    status = nxge_alloc_tx_mem_pool(nxgep);
    if (status != NXGE_OK) {
        nxge_free_rx_mem_pool(nxgep);
        return (NXGE_ERROR);
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
    return (NXGE_OK);
}

static void
nxge_free_mem_pool(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));

    nxge_free_rx_mem_pool(nxgep);
    nxge_free_tx_mem_pool(nxgep);

    NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}

static nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
{
    int i, j;
    uint32_t ndmas, st_rdc;
    p_nxge_dma_pt_cfg_t p_all_cfgp;
    p_nxge_hw_pt_cfg_t p_cfgp;
    p_nxge_dma_pool_t dma_poolp;
    p_nxge_dma_common_t *dma_buf_p;
    p_nxge_dma_pool_t dma_cntl_poolp;
    p_nxge_dma_common_t *dma_cntl_p;
    size_t rx_buf_alloc_size;
    size_t rx_cntl_alloc_size;
    uint32_t *num_chunks;	/* per dma */
    nxge_status_t status = NXGE_OK;

    uint32_t nxge_port_rbr_size;
    uint32_t nxge_port_rbr_spare_size;
    uint32_t nxge_port_rcr_size;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));

    p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
    p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
    st_rdc = p_cfgp->start_rdc;
    ndmas = p_cfgp->max_rdcs;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        " nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
    /*
     * Allocate memory for each receive DMA channel.
     */
    dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
        KM_SLEEP);
    dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
        sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

    dma_cntl_poolp = (p_nxge_dma_pool_t)
        KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
    dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
        sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

    num_chunks = (uint32_t *)KMEM_ZALLOC(
        sizeof (uint32_t) * ndmas, KM_SLEEP);

    /*
     * Assume that each DMA channel will be configured with the default
     * block size. The rbr block count is rounded up to a multiple of
     * the post batch count (16).
     */
    nxge_port_rbr_size = p_all_cfgp->rbr_size;
    nxge_port_rcr_size = p_all_cfgp->rcr_size;

    if (!nxge_port_rbr_size) {
        nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
    }
    if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
        nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
            (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
    }

    p_all_cfgp->rbr_size = nxge_port_rbr_size;
    nxge_port_rbr_spare_size = nxge_rbr_spare_size;

    if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
        nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
            (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
    }
    if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
        NXGE_DEBUG_MSG((nxgep, MEM_CTL,
            "nxge_alloc_rx_mem_pool: RBR size too high %d, "
            "set to default %d",
            nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
        nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
    }
    if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
        NXGE_DEBUG_MSG((nxgep, MEM_CTL,
            "nxge_alloc_rx_mem_pool: RCR too high %d, "
            "set to default %d",
            nxge_port_rcr_size, RCR_DEFAULT_MAX));
        nxge_port_rcr_size = RCR_DEFAULT_MAX;
    }

    /*
     * N2/NIU has limitations on the descriptor sizes: contiguous
     * memory allocation for data buffers is limited to 4M
     * (contig_mem_alloc), and control buffers must be little-endian
     * (allocated with the ddi/dki mem alloc functions).
     */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
    if (nxgep->niu_type == N2_NIU) {
        nxge_port_rbr_spare_size = 0;
        if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
            (!ISP2(nxge_port_rbr_size))) {
            nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
        }
        if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
            (!ISP2(nxge_port_rcr_size))) {
            nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
        }
    }
#endif

    rx_buf_alloc_size = (nxgep->rx_default_block_size *
        (nxge_port_rbr_size + nxge_port_rbr_spare_size));
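    /*
     * Worked example (illustrative; the actual defaults come from the
     * platform headers): with a 4 KB default block size, a 2048-entry
     * RBR and no spares, rx_buf_alloc_size is 0x1000 * 2048 = 8 MB per
     * channel; rx_cntl_alloc_size below is then 2048 * sizeof (rx_desc_t)
     * plus the RCR entries plus one mailbox structure.
     */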
1955 */ 1956 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 1957 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 1958 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 1959 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 1960 1961 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 1962 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 1963 "nxge_port_rcr_size = %d " 1964 "rx_cntl_alloc_size = %d", 1965 nxge_port_rbr_size, nxge_port_rbr_spare_size, 1966 nxge_port_rcr_size, 1967 rx_cntl_alloc_size)); 1968 1969 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1970 if (nxgep->niu_type == N2_NIU) { 1971 if (!ISP2(rx_buf_alloc_size)) { 1972 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1973 "==> nxge_alloc_rx_mem_pool: " 1974 " must be power of 2")); 1975 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1976 goto nxge_alloc_rx_mem_pool_exit; 1977 } 1978 1979 if (rx_buf_alloc_size > (1 << 22)) { 1980 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1981 "==> nxge_alloc_rx_mem_pool: " 1982 " limit size to 4M")); 1983 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1984 goto nxge_alloc_rx_mem_pool_exit; 1985 } 1986 1987 if (rx_cntl_alloc_size < 0x2000) { 1988 rx_cntl_alloc_size = 0x2000; 1989 } 1990 } 1991 #endif 1992 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 1993 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 1994 1995 /* 1996 * Allocate memory for receive buffers and descriptor rings. 1997 * Replace allocation functions with interface functions provided 1998 * by the partition manager when it is available. 1999 */ 2000 /* 2001 * Allocate memory for the receive buffer blocks. 2002 */ 2003 for (i = 0; i < ndmas; i++) { 2004 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2005 " nxge_alloc_rx_mem_pool to alloc mem: " 2006 " dma %d dma_buf_p %llx &dma_buf_p %llx", 2007 i, dma_buf_p[i], &dma_buf_p[i])); 2008 num_chunks[i] = 0; 2009 status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i], 2010 rx_buf_alloc_size, 2011 nxgep->rx_default_block_size, &num_chunks[i]); 2012 if (status != NXGE_OK) { 2013 break; 2014 } 2015 st_rdc++; 2016 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2017 " nxge_alloc_rx_mem_pool DONE alloc mem: " 2018 "dma %d dma_buf_p %llx &dma_buf_p %llx", i, 2019 dma_buf_p[i], &dma_buf_p[i])); 2020 } 2021 if (i < ndmas) { 2022 goto nxge_alloc_rx_mem_fail1; 2023 } 2024 /* 2025 * Allocate memory for descriptor rings and mailbox. 
2026 */ 2027 st_rdc = p_cfgp->start_rdc; 2028 for (j = 0; j < ndmas; j++) { 2029 status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j], 2030 rx_cntl_alloc_size); 2031 if (status != NXGE_OK) { 2032 break; 2033 } 2034 st_rdc++; 2035 } 2036 if (j < ndmas) { 2037 goto nxge_alloc_rx_mem_fail2; 2038 } 2039 2040 dma_poolp->ndmas = ndmas; 2041 dma_poolp->num_chunks = num_chunks; 2042 dma_poolp->buf_allocated = B_TRUE; 2043 nxgep->rx_buf_pool_p = dma_poolp; 2044 dma_poolp->dma_buf_pool_p = dma_buf_p; 2045 2046 dma_cntl_poolp->ndmas = ndmas; 2047 dma_cntl_poolp->buf_allocated = B_TRUE; 2048 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2049 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2050 2051 goto nxge_alloc_rx_mem_pool_exit; 2052 2053 nxge_alloc_rx_mem_fail2: 2054 /* Free control buffers */ 2055 j--; 2056 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2057 "==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j)); 2058 for (; j >= 0; j--) { 2059 nxge_free_rx_cntl_dma(nxgep, 2060 (p_nxge_dma_common_t)dma_cntl_p[j]); 2061 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2062 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", 2063 j)); 2064 } 2065 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2066 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j)); 2067 2068 nxge_alloc_rx_mem_fail1: 2069 /* Free data buffers */ 2070 i--; 2071 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2072 "==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i)); 2073 for (; i >= 0; i--) { 2074 nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2075 num_chunks[i]); 2076 } 2077 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2078 "==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i)); 2079 2080 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2081 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2082 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2083 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2084 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2085 2086 nxge_alloc_rx_mem_pool_exit: 2087 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2088 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2089 2090 return (status); 2091 } 2092 2093 static void 2094 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2095 { 2096 uint32_t i, ndmas; 2097 p_nxge_dma_pool_t dma_poolp; 2098 p_nxge_dma_common_t *dma_buf_p; 2099 p_nxge_dma_pool_t dma_cntl_poolp; 2100 p_nxge_dma_common_t *dma_cntl_p; 2101 uint32_t *num_chunks; 2102 2103 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2104 2105 dma_poolp = nxgep->rx_buf_pool_p; 2106 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2107 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2108 "<== nxge_free_rx_mem_pool " 2109 "(null rx buf pool or buf not allocated")); 2110 return; 2111 } 2112 2113 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2114 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2115 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2116 "<== nxge_free_rx_mem_pool " 2117 "(null rx cntl buf pool or cntl buf not allocated")); 2118 return; 2119 } 2120 2121 dma_buf_p = dma_poolp->dma_buf_pool_p; 2122 num_chunks = dma_poolp->num_chunks; 2123 2124 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2125 ndmas = dma_cntl_poolp->ndmas; 2126 2127 for (i = 0; i < ndmas; i++) { 2128 nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2129 } 2130 2131 for (i = 0; i < ndmas; i++) { 2132 nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]); 2133 } 2134 2135 for (i = 0; i < ndmas; i++) { 2136 KMEM_FREE(dma_buf_p[i], 2137 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2138 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2139 } 2140 2141 
KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2142 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2143 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2144 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2145 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2146 2147 nxgep->rx_buf_pool_p = NULL; 2148 nxgep->rx_cntl_pool_p = NULL; 2149 2150 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2151 } 2152 2153 2154 static nxge_status_t 2155 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2156 p_nxge_dma_common_t *dmap, 2157 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2158 { 2159 p_nxge_dma_common_t rx_dmap; 2160 nxge_status_t status = NXGE_OK; 2161 size_t total_alloc_size; 2162 size_t allocated = 0; 2163 int i, size_index, array_size; 2164 2165 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2166 2167 rx_dmap = (p_nxge_dma_common_t) 2168 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2169 KM_SLEEP); 2170 2171 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2172 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2173 dma_channel, alloc_size, block_size, dmap)); 2174 2175 total_alloc_size = alloc_size; 2176 2177 #if defined(RX_USE_RECLAIM_POST) 2178 total_alloc_size = alloc_size + alloc_size/4; 2179 #endif 2180 2181 i = 0; 2182 size_index = 0; 2183 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2184 while ((alloc_sizes[size_index] < alloc_size) && 2185 (size_index < array_size)) 2186 size_index++; 2187 if (size_index >= array_size) { 2188 size_index = array_size - 1; 2189 } 2190 2191 while ((allocated < total_alloc_size) && 2192 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2193 rx_dmap[i].dma_chunk_index = i; 2194 rx_dmap[i].block_size = block_size; 2195 rx_dmap[i].alength = alloc_sizes[size_index]; 2196 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2197 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2198 rx_dmap[i].dma_channel = dma_channel; 2199 rx_dmap[i].contig_alloc_type = B_FALSE; 2200 2201 /* 2202 * N2/NIU: data buffers must be contiguous as the driver 2203 * needs to call Hypervisor api to set up 2204 * logical pages. 
2205 	 */
2206 	if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2207 		rx_dmap[i].contig_alloc_type = B_TRUE;
2208 	}
2209 
2210 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2211 	    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
2212 	    "i %d nblocks %d alength %d",
2213 	    dma_channel, i, &rx_dmap[i], block_size,
2214 	    i, rx_dmap[i].nblocks,
2215 	    rx_dmap[i].alength));
2216 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2217 	    &nxge_rx_dma_attr,
2218 	    rx_dmap[i].alength,
2219 	    &nxge_dev_buf_dma_acc_attr,
2220 	    DDI_DMA_READ | DDI_DMA_STREAMING,
2221 	    (p_nxge_dma_common_t)(&rx_dmap[i]));
2222 	if (status != NXGE_OK) {
2223 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2224 		    " nxge_alloc_rx_buf_dma: Alloc Failed "));
2225 		size_index--;
2226 	} else {
2227 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2228 		    " alloc_rx_buf_dma allocated rdc %d "
2229 		    "chunk %d size %x dvma %x bufp %llx ",
2230 		    dma_channel, i, rx_dmap[i].alength,
2231 		    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
2232 		i++;
2233 		allocated += alloc_sizes[size_index];
2234 	}
2235 	}
2236 
2237 
2238 	if (allocated < total_alloc_size) {
2239 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2240 		    "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
2241 		    "allocated 0x%x requested 0x%x",
2242 		    dma_channel,
2243 		    allocated, total_alloc_size));
2244 		status = NXGE_ERROR;
2245 		goto nxge_alloc_rx_mem_fail1;
2246 	}
2247 
2248 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2249 	    "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
2250 	    "allocated 0x%x requested 0x%x",
2251 	    dma_channel,
2252 	    allocated, total_alloc_size));
2253 
2254 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2255 	    " alloc_rx_buf_dma rdc %d allocated %d chunks",
2256 	    dma_channel, i));
2257 	*num_chunks = i;
2258 	*dmap = rx_dmap;
2259 
2260 	goto nxge_alloc_rx_mem_exit;
2261 
2262 nxge_alloc_rx_mem_fail1:
2263 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2264 
2265 nxge_alloc_rx_mem_exit:
2266 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2267 	    "<== nxge_alloc_rx_buf_dma status 0x%08x", status));
2268 
2269 	return (status);
2270 }
2271 
2272 /*ARGSUSED*/
2273 static void
2274 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2275     uint32_t num_chunks)
2276 {
2277 	int		i;
2278 
2279 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2280 	    "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
2281 
2282 	for (i = 0; i < num_chunks; i++) {
2283 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2284 		    "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
2285 		    i, dmap));
2286 		nxge_dma_mem_free(dmap++);
2287 	}
2288 
2289 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma"));
2290 }
2291 
2292 /*ARGSUSED*/
2293 static nxge_status_t
2294 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2295     p_nxge_dma_common_t *dmap, size_t size)
2296 {
2297 	p_nxge_dma_common_t	rx_dmap;
2298 	nxge_status_t		status = NXGE_OK;
2299 
2300 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
2301 
2302 	rx_dmap = (p_nxge_dma_common_t)
2303 	    KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2304 
2305 	rx_dmap->contig_alloc_type = B_FALSE;
2306 
2307 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2308 	    &nxge_desc_dma_attr,
2309 	    size,
2310 	    &nxge_dev_desc_dma_acc_attr,
2311 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2312 	    rx_dmap);
2313 	if (status != NXGE_OK) {
2314 		goto nxge_alloc_rx_cntl_dma_fail1;
2315 	}
2316 
2317 	*dmap = rx_dmap;
2318 	goto nxge_alloc_rx_cntl_dma_exit;
2319 
2320 nxge_alloc_rx_cntl_dma_fail1:
2321 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
2322 
2323 nxge_alloc_rx_cntl_dma_exit:
2324 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2325 	    "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
2326 
2327 	return (status);
2328 }
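
/*
 * nxge_alloc_rx_buf_dma() above fills a channel's buffer request from a
 * small table of chunk sizes: it starts at the smallest alloc_sizes[]
 * entry that covers the request (capped at the largest entry) and steps
 * down to smaller chunks whenever an allocation fails, stopping when the
 * request is met, the table is exhausted, or NXGE_DMA_BLOCK chunks have
 * been used. A minimal stand-alone sketch of that strategy follows; the
 * helper name, the example_alloc() stand-in, the size table, and the
 * NXGE_EXAMPLE_SKETCH guard are hypothetical, and the block is compiled
 * out by default.
 */
#ifdef NXGE_EXAMPLE_SKETCH
static int
nxge_example_chunked_alloc(void *(*example_alloc)(size_t),
    void **chunks, int max_chunks, size_t request)
{
	/* Candidate chunk sizes, ascending (stands in for alloc_sizes[]). */
	static const size_t sizes[] = { 0x40000, 0x100000, 0x400000 };
	const int nsizes = sizeof (sizes) / sizeof (sizes[0]);
	size_t allocated = 0;
	int sz = 0, n = 0;

	/* Smallest size that covers the request, capped at the largest. */
	while (sz < nsizes - 1 && sizes[sz] < request)
		sz++;

	while (allocated < request && sz >= 0 && n < max_chunks) {
		void *p = example_alloc(sizes[sz]);

		if (p == NULL) {
			sz--;	/* fall back to the next smaller chunk */
			continue;
		}
		chunks[n++] = p;
		allocated += sizes[sz];
	}
	return ((allocated >= request) ? n : -1);
}
#endif	/* NXGE_EXAMPLE_SKETCH */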
2329 
2330 /*ARGSUSED*/
2331 static void
2332 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2333 {
2334 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
2335 
2336 	nxge_dma_mem_free(dmap);
2337 
2338 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
2339 }
2340 
2341 static nxge_status_t
2342 nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
2343 {
2344 	nxge_status_t		status = NXGE_OK;
2345 	int			i, j;
2346 	uint32_t		ndmas, st_tdc;
2347 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
2348 	p_nxge_hw_pt_cfg_t	p_cfgp;
2349 	p_nxge_dma_pool_t	dma_poolp;
2350 	p_nxge_dma_common_t	*dma_buf_p;
2351 	p_nxge_dma_pool_t	dma_cntl_poolp;
2352 	p_nxge_dma_common_t	*dma_cntl_p;
2353 	size_t			tx_buf_alloc_size;
2354 	size_t			tx_cntl_alloc_size;
2355 	uint32_t		*num_chunks;	/* per dma */
2356 	uint32_t		bcopy_thresh;
2357 
2358 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
2359 
2360 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2361 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2362 	st_tdc = p_cfgp->start_tdc;
2363 	ndmas = p_cfgp->max_tdcs;
2364 
2365 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: "
2366 	    "p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d",
2367 	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs));
2368 	/*
2369 	 * Allocate memory for each transmit DMA channel.
2370 	 */
2371 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2372 	    KM_SLEEP);
2373 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2374 	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
2375 
2376 	dma_cntl_poolp = (p_nxge_dma_pool_t)
2377 	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2378 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2379 	    sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
2380 
2381 	if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
2382 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2383 		    "nxge_alloc_tx_mem_pool: TDC too high %d, "
2384 		    "set to default %d",
2385 		    nxge_tx_ring_size, TDC_DEFAULT_MAX));
2386 		nxge_tx_ring_size = TDC_DEFAULT_MAX;
2387 	}
2388 
2389 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2390 	/*
2391 	 * N2/NIU has limitations on the descriptor sizes: contiguous
2392 	 * memory allocation on data buffers is limited to 4M
2393 	 * (contig_mem_alloc), and control buffers must be little endian
2394 	 * (must use the ddi/dki mem alloc functions). The transmit ring
2395 	 * is limited to 8K (including the mailbox).
2396 	 */
2397 	if (nxgep->niu_type == N2_NIU) {
2398 		if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
2399 		    (!ISP2(nxge_tx_ring_size))) {
2400 			nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
2401 		}
2402 	}
2403 #endif
2404 
2405 	nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
2406 
2407 	/*
2408 	 * Assume that each DMA channel will be configured with the default
2409 	 * transmit buffer size for copying transmit data.
2410 	 * (For packet payload over this limit, packets will not be
2411 	 * copied.)
2412 	 */
2413 	if (nxgep->niu_type == N2_NIU) {
2414 		bcopy_thresh = TX_BCOPY_SIZE;
2415 	} else {
2416 		bcopy_thresh = nxge_bcopy_thresh;
2417 	}
2418 	tx_buf_alloc_size = (bcopy_thresh * nxge_tx_ring_size);
2419 
2420 	/*
2421 	 * Addresses of the transmit descriptor ring and the
2422 	 * mailbox must be all cache-aligned (64 bytes).
2423 */ 2424 tx_cntl_alloc_size = nxge_tx_ring_size; 2425 tx_cntl_alloc_size *= (sizeof (tx_desc_t)); 2426 tx_cntl_alloc_size += sizeof (txdma_mailbox_t); 2427 2428 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2429 if (nxgep->niu_type == N2_NIU) { 2430 if (!ISP2(tx_buf_alloc_size)) { 2431 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2432 "==> nxge_alloc_tx_mem_pool: " 2433 " must be power of 2")); 2434 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2435 goto nxge_alloc_tx_mem_pool_exit; 2436 } 2437 2438 if (tx_buf_alloc_size > (1 << 22)) { 2439 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2440 "==> nxge_alloc_tx_mem_pool: " 2441 " limit size to 4M")); 2442 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2443 goto nxge_alloc_tx_mem_pool_exit; 2444 } 2445 2446 if (tx_cntl_alloc_size < 0x2000) { 2447 tx_cntl_alloc_size = 0x2000; 2448 } 2449 } 2450 #endif 2451 2452 num_chunks = (uint32_t *)KMEM_ZALLOC( 2453 sizeof (uint32_t) * ndmas, KM_SLEEP); 2454 2455 /* 2456 * Allocate memory for transmit buffers and descriptor rings. 2457 * Replace allocation functions with interface functions provided 2458 * by the partition manager when it is available. 2459 * 2460 * Allocate memory for the transmit buffer pool. 2461 */ 2462 for (i = 0; i < ndmas; i++) { 2463 num_chunks[i] = 0; 2464 status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i], 2465 tx_buf_alloc_size, 2466 bcopy_thresh, &num_chunks[i]); 2467 if (status != NXGE_OK) { 2468 break; 2469 } 2470 st_tdc++; 2471 } 2472 if (i < ndmas) { 2473 goto nxge_alloc_tx_mem_pool_fail1; 2474 } 2475 2476 st_tdc = p_cfgp->start_tdc; 2477 /* 2478 * Allocate memory for descriptor rings and mailbox. 2479 */ 2480 for (j = 0; j < ndmas; j++) { 2481 status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j], 2482 tx_cntl_alloc_size); 2483 if (status != NXGE_OK) { 2484 break; 2485 } 2486 st_tdc++; 2487 } 2488 if (j < ndmas) { 2489 goto nxge_alloc_tx_mem_pool_fail2; 2490 } 2491 2492 dma_poolp->ndmas = ndmas; 2493 dma_poolp->num_chunks = num_chunks; 2494 dma_poolp->buf_allocated = B_TRUE; 2495 dma_poolp->dma_buf_pool_p = dma_buf_p; 2496 nxgep->tx_buf_pool_p = dma_poolp; 2497 2498 dma_cntl_poolp->ndmas = ndmas; 2499 dma_cntl_poolp->buf_allocated = B_TRUE; 2500 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2501 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2502 2503 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2504 "==> nxge_alloc_tx_mem_pool: start_tdc %d " 2505 "ndmas %d poolp->ndmas %d", 2506 st_tdc, ndmas, dma_poolp->ndmas)); 2507 2508 goto nxge_alloc_tx_mem_pool_exit; 2509 2510 nxge_alloc_tx_mem_pool_fail2: 2511 /* Free control buffers */ 2512 j--; 2513 for (; j >= 0; j--) { 2514 nxge_free_tx_cntl_dma(nxgep, 2515 (p_nxge_dma_common_t)dma_cntl_p[j]); 2516 } 2517 2518 nxge_alloc_tx_mem_pool_fail1: 2519 /* Free data buffers */ 2520 i--; 2521 for (; i >= 0; i--) { 2522 nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2523 num_chunks[i]); 2524 } 2525 2526 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2527 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2528 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2529 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2530 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2531 2532 nxge_alloc_tx_mem_pool_exit: 2533 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2534 "<== nxge_alloc_tx_mem_pool:status 0x%08x", status)); 2535 2536 return (status); 2537 } 2538 2539 static nxge_status_t 2540 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2541 p_nxge_dma_common_t *dmap, size_t alloc_size, 2542 size_t block_size, uint32_t *num_chunks) 
2543 { 2544 p_nxge_dma_common_t tx_dmap; 2545 nxge_status_t status = NXGE_OK; 2546 size_t total_alloc_size; 2547 size_t allocated = 0; 2548 int i, size_index, array_size; 2549 2550 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 2551 2552 tx_dmap = (p_nxge_dma_common_t) 2553 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2554 KM_SLEEP); 2555 2556 total_alloc_size = alloc_size; 2557 i = 0; 2558 size_index = 0; 2559 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2560 while ((alloc_sizes[size_index] < alloc_size) && 2561 (size_index < array_size)) 2562 size_index++; 2563 if (size_index >= array_size) { 2564 size_index = array_size - 1; 2565 } 2566 2567 while ((allocated < total_alloc_size) && 2568 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2569 2570 tx_dmap[i].dma_chunk_index = i; 2571 tx_dmap[i].block_size = block_size; 2572 tx_dmap[i].alength = alloc_sizes[size_index]; 2573 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2574 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2575 tx_dmap[i].dma_channel = dma_channel; 2576 tx_dmap[i].contig_alloc_type = B_FALSE; 2577 2578 /* 2579 * N2/NIU: data buffers must be contiguous as the driver 2580 * needs to call Hypervisor api to set up 2581 * logical pages. 2582 */ 2583 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2584 tx_dmap[i].contig_alloc_type = B_TRUE; 2585 } 2586 2587 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2588 &nxge_tx_dma_attr, 2589 tx_dmap[i].alength, 2590 &nxge_dev_buf_dma_acc_attr, 2591 DDI_DMA_WRITE | DDI_DMA_STREAMING, 2592 (p_nxge_dma_common_t)(&tx_dmap[i])); 2593 if (status != NXGE_OK) { 2594 size_index--; 2595 } else { 2596 i++; 2597 allocated += alloc_sizes[size_index]; 2598 } 2599 } 2600 2601 if (allocated < total_alloc_size) { 2602 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2603 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 2604 "allocated 0x%x requested 0x%x", 2605 dma_channel, 2606 allocated, total_alloc_size)); 2607 status = NXGE_ERROR; 2608 goto nxge_alloc_tx_mem_fail1; 2609 } 2610 2611 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2612 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 2613 "allocated 0x%x requested 0x%x", 2614 dma_channel, 2615 allocated, total_alloc_size)); 2616 2617 *num_chunks = i; 2618 *dmap = tx_dmap; 2619 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2620 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2621 *dmap, i)); 2622 goto nxge_alloc_tx_mem_exit; 2623 2624 nxge_alloc_tx_mem_fail1: 2625 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2626 2627 nxge_alloc_tx_mem_exit: 2628 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2629 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 2630 2631 return (status); 2632 } 2633 2634 /*ARGSUSED*/ 2635 static void 2636 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2637 uint32_t num_chunks) 2638 { 2639 int i; 2640 2641 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 2642 2643 for (i = 0; i < num_chunks; i++) { 2644 nxge_dma_mem_free(dmap++); 2645 } 2646 2647 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 2648 } 2649 2650 /*ARGSUSED*/ 2651 static nxge_status_t 2652 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2653 p_nxge_dma_common_t *dmap, size_t size) 2654 { 2655 p_nxge_dma_common_t tx_dmap; 2656 nxge_status_t status = NXGE_OK; 2657 2658 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 2659 tx_dmap = (p_nxge_dma_common_t) 2660 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2661 2662 tx_dmap->contig_alloc_type = B_FALSE; 2663 
2664 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2665 	    &nxge_desc_dma_attr,
2666 	    size,
2667 	    &nxge_dev_desc_dma_acc_attr,
2668 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2669 	    tx_dmap);
2670 	if (status != NXGE_OK) {
2671 		goto nxge_alloc_tx_cntl_dma_fail1;
2672 	}
2673 
2674 	*dmap = tx_dmap;
2675 	goto nxge_alloc_tx_cntl_dma_exit;
2676 
2677 nxge_alloc_tx_cntl_dma_fail1:
2678 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
2679 
2680 nxge_alloc_tx_cntl_dma_exit:
2681 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2682 	    "<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
2683 
2684 	return (status);
2685 }
2686 
2687 /*ARGSUSED*/
2688 static void
2689 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2690 {
2691 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
2692 
2693 	nxge_dma_mem_free(dmap);
2694 
2695 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
2696 }
2697 
2698 static void
2699 nxge_free_tx_mem_pool(p_nxge_t nxgep)
2700 {
2701 	uint32_t		i, ndmas;
2702 	p_nxge_dma_pool_t	dma_poolp;
2703 	p_nxge_dma_common_t	*dma_buf_p;
2704 	p_nxge_dma_pool_t	dma_cntl_poolp;
2705 	p_nxge_dma_common_t	*dma_cntl_p;
2706 	uint32_t		*num_chunks;
2707 
2708 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool"));
2709 
2710 	dma_poolp = nxgep->tx_buf_pool_p;
2711 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2712 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2713 		    "<== nxge_free_tx_mem_pool "
2714 		    "(null tx buf pool or buf not allocated)"));
2715 		return;
2716 	}
2717 
2718 	dma_cntl_poolp = nxgep->tx_cntl_pool_p;
2719 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2720 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2721 		    "<== nxge_free_tx_mem_pool "
2722 		    "(null tx cntl buf pool or cntl buf not allocated)"));
2723 		return;
2724 	}
2725 
2726 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2727 	num_chunks = dma_poolp->num_chunks;
2728 
2729 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2730 	ndmas = dma_cntl_poolp->ndmas;
2731 
2732 	for (i = 0; i < ndmas; i++) {
2733 		nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
2734 	}
2735 
2736 	for (i = 0; i < ndmas; i++) {
2737 		nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]);
2738 	}
2739 
2740 	for (i = 0; i < ndmas; i++) {
2741 		KMEM_FREE(dma_buf_p[i],
2742 		    sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2743 		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
2744 	}
2745 
2746 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2747 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
2748 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
2749 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
2750 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
2751 
2752 	nxgep->tx_buf_pool_p = NULL;
2753 	nxgep->tx_cntl_pool_p = NULL;
2754 
2755 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool"));
2756 }
2757 
2758 /*ARGSUSED*/
2759 static nxge_status_t
2760 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
2761 	struct ddi_dma_attr *dma_attrp,
2762 	size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2763 	p_nxge_dma_common_t dma_p)
2764 {
2765 	caddr_t		kaddrp;
2766 	int		ddi_status = DDI_SUCCESS;
2767 	boolean_t	contig_alloc_type;
2768 
2769 	contig_alloc_type = dma_p->contig_alloc_type;
2770 
2771 	if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
2772 		/*
2773 		 * contig_alloc_type for contiguous memory only allowed
2774 		 * for N2/NIU.
2775 		 */
2776 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2777 		    "nxge_dma_mem_alloc: alloc type not allowed (%d)",
2778 		    dma_p->contig_alloc_type));
2779 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2780 	}
2781 
2782 	dma_p->dma_handle = NULL;
2783 	dma_p->acc_handle = NULL;
2784 	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
2785 	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
2786 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
2787 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2788 	if (ddi_status != DDI_SUCCESS) {
2789 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2790 		    "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2791 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2792 	}
2793 
2794 	switch (contig_alloc_type) {
2795 	case B_FALSE:
2796 		ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length,
2797 		    acc_attr_p,
2798 		    xfer_flags,
2799 		    DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2800 		    &dma_p->acc_handle);
2801 		if (ddi_status != DDI_SUCCESS) {
2802 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2803 			    "nxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2804 			ddi_dma_free_handle(&dma_p->dma_handle);
2805 			dma_p->dma_handle = NULL;
2806 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2807 		}
2808 		if (dma_p->alength < length) {
2809 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2810 			    "nxge_dma_mem_alloc:ddi_dma_mem_alloc returned "
2811 			    "less than requested length."));
2812 			ddi_dma_mem_free(&dma_p->acc_handle);
2813 			ddi_dma_free_handle(&dma_p->dma_handle);
2814 			dma_p->acc_handle = NULL;
2815 			dma_p->dma_handle = NULL;
2816 			return (NXGE_ERROR);
2817 		}
2818 
2819 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2820 		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2821 		    &dma_p->dma_cookie, &dma_p->ncookies);
2822 		if (ddi_status != DDI_DMA_MAPPED) {
2823 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2824 			    "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2825 			    "(status 0x%x ncookies %d.)", ddi_status,
2826 			    dma_p->ncookies));
2827 			if (dma_p->acc_handle) {
2828 				ddi_dma_mem_free(&dma_p->acc_handle);
2829 				dma_p->acc_handle = NULL;
2830 			}
2831 			ddi_dma_free_handle(&dma_p->dma_handle);
2832 			dma_p->dma_handle = NULL;
2833 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2834 		}
2835 
2836 		if (dma_p->ncookies != 1) {
2837 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2838 			    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
2839 			    "> 1 cookie "
2840 			    "(status 0x%x ncookies %d.)", ddi_status,
2841 			    dma_p->ncookies));
2842 			if (dma_p->acc_handle) {
2843 				ddi_dma_mem_free(&dma_p->acc_handle);
2844 				dma_p->acc_handle = NULL;
2845 			}
2846 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2847 			ddi_dma_free_handle(&dma_p->dma_handle);
2848 			dma_p->dma_handle = NULL;
2849 			return (NXGE_ERROR);
2850 		}
2851 		break;
2852 
2853 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2854 	case B_TRUE:
2855 		kaddrp = (caddr_t)contig_mem_alloc(length);
2856 		if (kaddrp == NULL) {
2857 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2858 			    "nxge_dma_mem_alloc:contig_mem_alloc failed."));
2859 			ddi_dma_free_handle(&dma_p->dma_handle);
2860 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2861 		}
2862 
2863 		dma_p->alength = length;
2864 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2865 		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2866 		    &dma_p->dma_cookie, &dma_p->ncookies);
2867 		if (ddi_status != DDI_DMA_MAPPED) {
2868 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2869 			    "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2870 			    "(status 0x%x ncookies %d.)", ddi_status,
2871 			    dma_p->ncookies));
2872 
2873 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2874 			    "==> nxge_dma_mem_alloc: (not mapped)"
2875 			    "length %lu (0x%x) "
2876 			    "free contig kaddrp $%p "
2877 			    "va_to_pa $%p",
2878 			    length, length,
2879 			    kaddrp,
2880 			    va_to_pa(kaddrp)));
2881 
2882 
2883 			contig_mem_free((void *)kaddrp, length);
2884 			ddi_dma_free_handle(&dma_p->dma_handle);
2885 
2886 			dma_p->dma_handle = NULL;
2887 			dma_p->acc_handle = NULL;
2888 			dma_p->alength = 0;
2889 			dma_p->kaddrp = NULL;
2890 
2891 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2892 		}
2893 
2894 		if (dma_p->ncookies != 1 ||
2895 		    (dma_p->dma_cookie.dmac_laddress == 0)) {
2896 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2897 			    "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
2898 			    "cookie or "
2899 			    "dmac_laddress is NULL $%p size %d "
2900 			    " (status 0x%x ncookies %d.)",
2901 			    dma_p->dma_cookie.dmac_laddress,
2902 			    dma_p->dma_cookie.dmac_size,
2903 			    ddi_status,
2904 			    dma_p->ncookies));
2905 
2906 			contig_mem_free((void *)kaddrp, length);
2907 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2908 			ddi_dma_free_handle(&dma_p->dma_handle);
2909 
2910 			dma_p->alength = 0;
2911 			dma_p->dma_handle = NULL;
2912 			dma_p->acc_handle = NULL;
2913 			dma_p->kaddrp = NULL;
2914 
2915 			return (NXGE_ERROR | NXGE_DDI_FAILED);
2916 		}
2917 		break;
2918 
2919 #else
2920 	case B_TRUE:
2921 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2922 		    "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
2923 		return (NXGE_ERROR | NXGE_DDI_FAILED);
2924 #endif
2925 	}
2926 
2927 	dma_p->kaddrp = kaddrp;
2928 	dma_p->last_kaddrp = (unsigned char *)kaddrp +
2929 	    dma_p->alength - RXBUF_64B_ALIGNED;
2930 #if defined(__i386)
2931 	dma_p->ioaddr_pp =
2932 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2933 #else
2934 	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
2935 #endif
2936 	dma_p->last_ioaddr_pp =
2937 #if defined(__i386)
2938 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
2939 #else
2940 	    (unsigned char *)dma_p->dma_cookie.dmac_laddress +
2941 #endif
2942 	    dma_p->alength - RXBUF_64B_ALIGNED;
2943 
2944 	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2945 
2946 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2947 	dma_p->orig_ioaddr_pp =
2948 	    (unsigned char *)dma_p->dma_cookie.dmac_laddress;
2949 	dma_p->orig_alength = length;
2950 	dma_p->orig_kaddrp = kaddrp;
2951 	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
2952 #endif
2953 
2954 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
2955 	    "dma buffer allocated: dma_p $%p "
2956 	    "return dmac_laddress from cookie $%p cookie dmac_size %d "
2957 	    "dma_p->ioaddr_p $%p "
2958 	    "dma_p->orig_ioaddr_p $%p "
2959 	    "orig_vatopa $%p "
2960 	    "alength %d (0x%x) "
2961 	    "kaddrp $%p "
2962 	    "length %d (0x%x)",
2963 	    dma_p,
2964 	    dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
2965 	    dma_p->ioaddr_pp,
2966 	    dma_p->orig_ioaddr_pp,
2967 	    dma_p->orig_vatopa,
2968 	    dma_p->alength, dma_p->alength,
2969 	    kaddrp,
2970 	    length, length));
2971 
2972 	return (NXGE_OK);
2973 }
2974 
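/*
 * nxge_dma_mem_alloc() above tears down state in the reverse order of
 * setup: unbind the handle (only if a bind actually succeeded), free
 * the access handle from ddi_dma_mem_alloc(), then free the DMA handle
 * itself. The sketch below shows that pairing in isolation. It is a
 * minimal illustration only, compiled out by default; the function name
 * and the NXGE_EXAMPLE_SKETCH guard are hypothetical, not part of the
 * driver.
 */
#ifdef NXGE_EXAMPLE_SKETCH
static int
nxge_example_dma_setup_teardown(dev_info_t *dip, ddi_dma_attr_t *attrp,
    ddi_device_acc_attr_t *accp, size_t len)
{
	ddi_dma_handle_t	dh;
	ddi_acc_handle_t	ah;
	ddi_dma_cookie_t	cookie;
	caddr_t			kaddr;
	size_t			real_len;
	uint_t			ncookies;

	/* Step 1: DMA handle. */
	if (ddi_dma_alloc_handle(dip, attrp, DDI_DMA_DONTWAIT, NULL,
	    &dh) != DDI_SUCCESS)
		return (-1);

	/* Step 2: DMA-able memory. */
	if (ddi_dma_mem_alloc(dh, len, accp, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &kaddr, &real_len, &ah) != DDI_SUCCESS) {
		ddi_dma_free_handle(&dh);	/* undo step 1 */
		return (-1);
	}

	/* Step 3: bind, insisting on a single cookie as the driver does. */
	if (ddi_dma_addr_bind_handle(dh, NULL, kaddr, real_len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
	    &cookie, &ncookies) != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&ah);		/* undo step 2 */
		ddi_dma_free_handle(&dh);	/* undo step 1 */
		return (-1);
	}
	if (ncookies != 1) {
		(void) ddi_dma_unbind_handle(dh);	/* bind did succeed */
		ddi_dma_mem_free(&ah);
		ddi_dma_free_handle(&dh);
		return (-1);
	}

	/* ... program the device with cookie.dmac_laddress ... */

	(void) ddi_dma_unbind_handle(dh);
	ddi_dma_mem_free(&ah);
	ddi_dma_free_handle(&dh);
	return (0);
}
#endif	/* NXGE_EXAMPLE_SKETCH */
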
2975 static void
2976 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
2977 {
2978 	if (dma_p->dma_handle != NULL) {
2979 		if (dma_p->ncookies) {
2980 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2981 			dma_p->ncookies = 0;
2982 		}
2983 		ddi_dma_free_handle(&dma_p->dma_handle);
2984 		dma_p->dma_handle = NULL;
2985 	}
2986 
2987 	if (dma_p->acc_handle != NULL) {
2988 		ddi_dma_mem_free(&dma_p->acc_handle);
2989 		dma_p->acc_handle = NULL;
2990 		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2991 	}
2992 
2993 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2994 	if (dma_p->contig_alloc_type &&
2995 	    dma_p->orig_kaddrp && dma_p->orig_alength) {
2996 		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
2997 		    "kaddrp $%p (orig_kaddrp $%p) "
2998 		    "mem type %d "
2999 		    "orig_alength %d "
3000 		    "alength 0x%x (%d)",
3001 		    dma_p->kaddrp,
3002 		    dma_p->orig_kaddrp,
3003 		    dma_p->contig_alloc_type,
3004 		    dma_p->orig_alength,
3005 		    dma_p->alength, dma_p->alength));
3006 
3007 		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
3008 		dma_p->orig_alength = 0;
3009 		dma_p->orig_kaddrp = NULL;
3010 		dma_p->contig_alloc_type = B_FALSE;
3011 	}
3012 #endif
3013 	dma_p->kaddrp = NULL;
3014 	dma_p->alength = 0;
3015 }
3016 
3017 /*
3018  * nxge_m_start() -- start transmitting and receiving.
3019  *
3020  * This function is called by the MAC layer when the first
3021  * stream is opened to prepare the hardware for sending
3022  * and receiving packets.
3023  */
3024 static int
3025 nxge_m_start(void *arg)
3026 {
3027 	p_nxge_t nxgep = (p_nxge_t)arg;
3028 
3029 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3030 
3031 	MUTEX_ENTER(nxgep->genlock);
3032 	if (nxge_init(nxgep) != NXGE_OK) {
3033 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3034 		    "<== nxge_m_start: initialization failed"));
3035 		MUTEX_EXIT(nxgep->genlock);
3036 		return (EIO);
3037 	}
3038 
3039 	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
3040 		goto nxge_m_start_exit;
3041 	/*
3042 	 * Start the timer to check for system errors and tx hangs.
3043 	 */
3044 	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
3045 	    NXGE_CHECK_TIMER);
3046 
3047 	nxgep->link_notify = B_TRUE;
3048 
3049 	nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3050 
3051 nxge_m_start_exit:
3052 	MUTEX_EXIT(nxgep->genlock);
3053 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3054 	return (0);
3055 }
3056 
3057 /*
3058  * nxge_m_stop(): stop transmitting and receiving.
3059  */
3060 static void
3061 nxge_m_stop(void *arg)
3062 {
3063 	p_nxge_t nxgep = (p_nxge_t)arg;
3064 
3065 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
3066 
3067 	if (nxgep->nxge_timerid) {
3068 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
3069 		nxgep->nxge_timerid = 0;
3070 	}
3071 
3072 	MUTEX_ENTER(nxgep->genlock);
3073 	nxge_uninit(nxgep);
3074 
3075 	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
3076 
3077 	MUTEX_EXIT(nxgep->genlock);
3078 
3079 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
3080 }
3081 
3082 static int
3083 nxge_m_unicst(void *arg, const uint8_t *macaddr)
3084 {
3085 	p_nxge_t nxgep = (p_nxge_t)arg;
3086 	struct ether_addr addrp;
3087 
3088 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));
3089 
3090 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
3091 	if (nxge_set_mac_addr(nxgep, &addrp)) {
3092 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3093 		    "<== nxge_m_unicst: set unicast failed"));
3094 		return (EINVAL);
3095 	}
3096 
3097 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));
3098 
3099 	return (0);
3100 }
3101 
3102 static int
3103 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3104 {
3105 	p_nxge_t nxgep = (p_nxge_t)arg;
3106 	struct ether_addr addrp;
3107 
3108 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3109 	    "==> nxge_m_multicst: add %d", add));
3110 
3111 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3112 	if (add) {
3113 		if (nxge_add_mcast_addr(nxgep, &addrp)) {
3114 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3115 			    "<== nxge_m_multicst: add multicast failed"));
3116 			return (EINVAL);
3117 		}
3118 	} else {
3119 		if (nxge_del_mcast_addr(nxgep, &addrp)) {
3120 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3121 			    "<== nxge_m_multicst: del multicast failed"));
3122 			return (EINVAL);
3123 		}
3124 	}
3125 
3126 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3127 
3128 	return (0);
3129 }
3130 
3131 static int
3132 nxge_m_promisc(void *arg, boolean_t on)
3133 {
3134 	p_nxge_t nxgep = (p_nxge_t)arg;
3135 
3136 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3137 
"==> nxge_m_promisc: on %d", on)); 3138 3139 if (nxge_set_promisc(nxgep, on)) { 3140 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3141 "<== nxge_m_promisc: set promisc failed")); 3142 return (EINVAL); 3143 } 3144 3145 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3146 "<== nxge_m_promisc: on %d", on)); 3147 3148 return (0); 3149 } 3150 3151 static void 3152 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3153 { 3154 p_nxge_t nxgep = (p_nxge_t)arg; 3155 struct iocblk *iocp; 3156 boolean_t need_privilege; 3157 int err; 3158 int cmd; 3159 3160 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3161 3162 iocp = (struct iocblk *)mp->b_rptr; 3163 iocp->ioc_error = 0; 3164 need_privilege = B_TRUE; 3165 cmd = iocp->ioc_cmd; 3166 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3167 switch (cmd) { 3168 default: 3169 miocnak(wq, mp, 0, EINVAL); 3170 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3171 return; 3172 3173 case LB_GET_INFO_SIZE: 3174 case LB_GET_INFO: 3175 case LB_GET_MODE: 3176 need_privilege = B_FALSE; 3177 break; 3178 case LB_SET_MODE: 3179 break; 3180 3181 case ND_GET: 3182 need_privilege = B_FALSE; 3183 break; 3184 case ND_SET: 3185 break; 3186 3187 case NXGE_GET_MII: 3188 case NXGE_PUT_MII: 3189 case NXGE_GET64: 3190 case NXGE_PUT64: 3191 case NXGE_GET_TX_RING_SZ: 3192 case NXGE_GET_TX_DESC: 3193 case NXGE_TX_SIDE_RESET: 3194 case NXGE_RX_SIDE_RESET: 3195 case NXGE_GLOBAL_RESET: 3196 case NXGE_RESET_MAC: 3197 case NXGE_TX_REGS_DUMP: 3198 case NXGE_RX_REGS_DUMP: 3199 case NXGE_INT_REGS_DUMP: 3200 case NXGE_VIR_INT_REGS_DUMP: 3201 case NXGE_PUT_TCAM: 3202 case NXGE_GET_TCAM: 3203 case NXGE_RTRACE: 3204 case NXGE_RDUMP: 3205 3206 need_privilege = B_FALSE; 3207 break; 3208 case NXGE_INJECT_ERR: 3209 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3210 nxge_err_inject(nxgep, wq, mp); 3211 break; 3212 } 3213 3214 if (need_privilege) { 3215 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3216 if (err != 0) { 3217 miocnak(wq, mp, 0, err); 3218 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3219 "<== nxge_m_ioctl: no priv")); 3220 return; 3221 } 3222 } 3223 3224 switch (cmd) { 3225 case ND_GET: 3226 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command")); 3227 case ND_SET: 3228 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command")); 3229 nxge_param_ioctl(nxgep, wq, mp, iocp); 3230 break; 3231 3232 case LB_GET_MODE: 3233 case LB_SET_MODE: 3234 case LB_GET_INFO_SIZE: 3235 case LB_GET_INFO: 3236 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3237 break; 3238 3239 case NXGE_GET_MII: 3240 case NXGE_PUT_MII: 3241 case NXGE_PUT_TCAM: 3242 case NXGE_GET_TCAM: 3243 case NXGE_GET64: 3244 case NXGE_PUT64: 3245 case NXGE_GET_TX_RING_SZ: 3246 case NXGE_GET_TX_DESC: 3247 case NXGE_TX_SIDE_RESET: 3248 case NXGE_RX_SIDE_RESET: 3249 case NXGE_GLOBAL_RESET: 3250 case NXGE_RESET_MAC: 3251 case NXGE_TX_REGS_DUMP: 3252 case NXGE_RX_REGS_DUMP: 3253 case NXGE_INT_REGS_DUMP: 3254 case NXGE_VIR_INT_REGS_DUMP: 3255 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3256 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3257 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3258 break; 3259 } 3260 3261 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3262 } 3263 3264 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3265 3266 static void 3267 nxge_m_resources(void *arg) 3268 { 3269 p_nxge_t nxgep = arg; 3270 mac_rx_fifo_t mrf; 3271 p_rx_rcr_rings_t rcr_rings; 3272 p_rx_rcr_ring_t *rcr_p; 3273 uint32_t i, ndmas; 3274 nxge_status_t status; 3275 3276 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 3277 3278 
MUTEX_ENTER(nxgep->genlock); 3279 3280 /* 3281 * CR 6492541 Check to see if the drv_state has been initialized, 3282 * if not * call nxge_init(). 3283 */ 3284 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3285 status = nxge_init(nxgep); 3286 if (status != NXGE_OK) 3287 goto nxge_m_resources_exit; 3288 } 3289 3290 mrf.mrf_type = MAC_RX_FIFO; 3291 mrf.mrf_blank = nxge_rx_hw_blank; 3292 mrf.mrf_arg = (void *)nxgep; 3293 3294 mrf.mrf_normal_blank_time = 128; 3295 mrf.mrf_normal_pkt_count = 8; 3296 rcr_rings = nxgep->rx_rcr_rings; 3297 rcr_p = rcr_rings->rcr_rings; 3298 ndmas = rcr_rings->ndmas; 3299 3300 /* 3301 * Export our receive resources to the MAC layer. 3302 */ 3303 for (i = 0; i < ndmas; i++) { 3304 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle = 3305 mac_resource_add(nxgep->mach, 3306 (mac_resource_t *)&mrf); 3307 3308 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3309 "==> nxge_m_resources: vdma %d dma %d " 3310 "rcrptr 0x%016llx mac_handle 0x%016llx", 3311 i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc, 3312 rcr_p[i], 3313 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle)); 3314 } 3315 3316 nxge_m_resources_exit: 3317 MUTEX_EXIT(nxgep->genlock); 3318 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources")); 3319 } 3320 3321 static void 3322 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory) 3323 { 3324 p_nxge_mmac_stats_t mmac_stats; 3325 int i; 3326 nxge_mmac_t *mmac_info; 3327 3328 mmac_info = &nxgep->nxge_mmac_info; 3329 3330 mmac_stats = &nxgep->statsp->mmac_stats; 3331 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 3332 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 3333 3334 for (i = 0; i < ETHERADDRL; i++) { 3335 if (factory) { 3336 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3337 = mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i]; 3338 } else { 3339 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3340 = mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i]; 3341 } 3342 } 3343 } 3344 3345 /* 3346 * nxge_altmac_set() -- Set an alternate MAC address 3347 */ 3348 static int 3349 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot) 3350 { 3351 uint8_t addrn; 3352 uint8_t portn; 3353 npi_mac_addr_t altmac; 3354 hostinfo_t mac_rdc; 3355 p_nxge_class_pt_cfg_t clscfgp; 3356 3357 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 3358 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 3359 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 3360 3361 portn = nxgep->mac.portnum; 3362 addrn = (uint8_t)slot - 1; 3363 3364 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn, 3365 addrn, &altmac) != NPI_SUCCESS) 3366 return (EIO); 3367 3368 /* 3369 * Set the rdc table number for the host info entry 3370 * for this mac address slot. 3371 */ 3372 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 3373 mac_rdc.value = 0; 3374 mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl; 3375 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 3376 3377 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 3378 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 3379 return (EIO); 3380 } 3381 3382 /* 3383 * Enable comparison with the alternate MAC address. 3384 * While the first alternate addr is enabled by bit 1 of register 3385 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 3386 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 3387 * accordingly before calling npi_mac_altaddr_entry. 
3388 	 */
3389 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
3390 		addrn = (uint8_t)slot - 1;
3391 	else
3392 		addrn = (uint8_t)slot;
3393 
3394 	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
3395 	    != NPI_SUCCESS)
3396 		return (EIO);
3397 
3398 	return (0);
3399 }
3400 
3401 /*
3402  * nxge_m_mmac_add() - find an unused address slot, set the address
3403  * value to the one specified, enable the port to start filtering on
3404  * the new MAC address. Returns 0 on success.
3405  */
3406 static int
3407 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
3408 {
3409 	p_nxge_t nxgep = arg;
3410 	mac_addr_slot_t slot;
3411 	nxge_mmac_t *mmac_info;
3412 	int err;
3413 	nxge_status_t status;
3414 
3415 	mutex_enter(nxgep->genlock);
3416 
3417 	/*
3418 	 * Make sure that nxge is initialized, if _start() has
3419 	 * not been called.
3420 	 */
3421 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3422 		status = nxge_init(nxgep);
3423 		if (status != NXGE_OK) {
3424 			mutex_exit(nxgep->genlock);
3425 			return (ENXIO);
3426 		}
3427 	}
3428 
3429 	mmac_info = &nxgep->nxge_mmac_info;
3430 	if (mmac_info->naddrfree == 0) {
3431 		mutex_exit(nxgep->genlock);
3432 		return (ENOSPC);
3433 	}
3434 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
3435 	    maddr->mma_addrlen)) {
3436 		mutex_exit(nxgep->genlock);
3437 		return (EINVAL);
3438 	}
3439 	/*
3440 	 * Search for the first available slot. Because naddrfree
3441 	 * is not zero, we are guaranteed to find one.
3442 	 * Slot 0 is for the unique (primary) MAC; the first alternate
3443 	 * MAC slot is slot 1.
3444 	 * Each of the first two ports of Neptune has 16 alternate
3445 	 * MAC slots but only the first 7 (or 15) slots have assigned factory
3446 	 * MAC addresses. We first search among the slots without bundled
3447 	 * factory MACs. If we fail to find one in that range, then we
3448 	 * search the slots with bundled factory MACs. A factory MAC
3449 	 * will be wasted while the slot is used with a user MAC address.
3450 	 * But the slot could be used by a factory MAC again after calling
3451 	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
3452 	 */
3453 	if (mmac_info->num_factory_mmac < mmac_info->num_mmac) {
3454 		for (slot = mmac_info->num_factory_mmac + 1;
3455 		    slot <= mmac_info->num_mmac; slot++) {
3456 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3457 				break;
3458 		}
3459 		if (slot > mmac_info->num_mmac) {
3460 			for (slot = 1; slot <= mmac_info->num_factory_mmac;
3461 			    slot++) {
3462 				if (!(mmac_info->mac_pool[slot].flags
3463 				    & MMAC_SLOT_USED))
3464 					break;
3465 			}
3466 		}
3467 	} else {
3468 		for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
3469 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
3470 				break;
3471 		}
3472 	}
3473 	ASSERT(slot <= mmac_info->num_mmac);
3474 	if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) {
3475 		mutex_exit(nxgep->genlock);
3476 		return (err);
3477 	}
3478 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
3479 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
3480 	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
3481 	mmac_info->naddrfree--;
3482 	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3483 
3484 	maddr->mma_slot = slot;
3485 
3486 	mutex_exit(nxgep->genlock);
3487 	return (0);
3488 }
3489 
3490 /*
3491  * This function reserves an unused slot and programs the slot and the HW
3492  * with a factory MAC address.
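 *
 * The slot search in nxge_m_mmac_add() above is a two-pass policy:
 * prefer slots without a bundled factory MAC so that factory addresses
 * are not shadowed by user addresses, then fall back to the
 * factory-capable slots. A stand-alone sketch of that policy follows;
 * the helper name, the slot_flags array, and the NXGE_EXAMPLE_SKETCH
 * guard are hypothetical, and the block is compiled out by default.
 * Slots are 1-based, as in the driver.
 */
#ifdef NXGE_EXAMPLE_SKETCH
static int
nxge_example_find_free_slot(const uint8_t *slot_flags /* [num_mmac + 1] */,
    int num_mmac, int num_factory_mmac, uint8_t used_flag)
{
	int slot;

	/* Pass 1: slots beyond the factory-MAC range. */
	for (slot = num_factory_mmac + 1; slot <= num_mmac; slot++) {
		if (!(slot_flags[slot] & used_flag))
			return (slot);
	}

	/* Pass 2: factory-capable slots 1..num_factory_mmac. */
	for (slot = 1; slot <= num_factory_mmac; slot++) {
		if (!(slot_flags[slot] & used_flag))
			return (slot);
	}

	return (-1);	/* callers check naddrfree first, so unexpected */
}
#endif	/* NXGE_EXAMPLE_SKETCH */
/*
 * (nxge_m_mmac_reserve follows.)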
3493 */ 3494 static int 3495 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 3496 { 3497 p_nxge_t nxgep = arg; 3498 mac_addr_slot_t slot; 3499 nxge_mmac_t *mmac_info; 3500 int err; 3501 nxge_status_t status; 3502 3503 mutex_enter(nxgep->genlock); 3504 3505 /* 3506 * Make sure that nxge is initialized, if _start() has 3507 * not been called. 3508 */ 3509 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3510 status = nxge_init(nxgep); 3511 if (status != NXGE_OK) { 3512 mutex_exit(nxgep->genlock); 3513 return (ENXIO); 3514 } 3515 } 3516 3517 mmac_info = &nxgep->nxge_mmac_info; 3518 if (mmac_info->naddrfree == 0) { 3519 mutex_exit(nxgep->genlock); 3520 return (ENOSPC); 3521 } 3522 3523 slot = maddr->mma_slot; 3524 if (slot == -1) { /* -1: Take the first available slot */ 3525 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 3526 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3527 break; 3528 } 3529 if (slot > mmac_info->num_factory_mmac) { 3530 mutex_exit(nxgep->genlock); 3531 return (ENOSPC); 3532 } 3533 } 3534 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 3535 /* 3536 * Do not support factory MAC at a slot greater than 3537 * num_factory_mmac even when there are available factory 3538 * MAC addresses because the alternate MACs are bundled with 3539 * slot[1] through slot[num_factory_mmac] 3540 */ 3541 mutex_exit(nxgep->genlock); 3542 return (EINVAL); 3543 } 3544 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3545 mutex_exit(nxgep->genlock); 3546 return (EBUSY); 3547 } 3548 /* Verify the address to be reserved */ 3549 if (!mac_unicst_verify(nxgep->mach, 3550 mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 3551 mutex_exit(nxgep->genlock); 3552 return (EINVAL); 3553 } 3554 if (err = nxge_altmac_set(nxgep, 3555 mmac_info->factory_mac_pool[slot], slot)) { 3556 mutex_exit(nxgep->genlock); 3557 return (err); 3558 } 3559 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 3560 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3561 mmac_info->naddrfree--; 3562 3563 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 3564 mutex_exit(nxgep->genlock); 3565 3566 /* Pass info back to the caller */ 3567 maddr->mma_slot = slot; 3568 maddr->mma_addrlen = ETHERADDRL; 3569 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3570 3571 return (0); 3572 } 3573 3574 /* 3575 * Remove the specified mac address and update the HW not to filter 3576 * the mac address anymore. 3577 */ 3578 static int 3579 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 3580 { 3581 p_nxge_t nxgep = arg; 3582 nxge_mmac_t *mmac_info; 3583 uint8_t addrn; 3584 uint8_t portn; 3585 int err = 0; 3586 nxge_status_t status; 3587 3588 mutex_enter(nxgep->genlock); 3589 3590 /* 3591 * Make sure that nxge is initialized, if _start() has 3592 * not been called. 
3593 	 */
3594 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3595 		status = nxge_init(nxgep);
3596 		if (status != NXGE_OK) {
3597 			mutex_exit(nxgep->genlock);
3598 			return (ENXIO);
3599 		}
3600 	}
3601 
3602 	mmac_info = &nxgep->nxge_mmac_info;
3603 	if (slot < 1 || slot > mmac_info->num_mmac) {
3604 		mutex_exit(nxgep->genlock);
3605 		return (EINVAL);
3606 	}
3607 
3608 	portn = nxgep->mac.portnum;
3609 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
3610 		addrn = (uint8_t)slot - 1;
3611 	else
3612 		addrn = (uint8_t)slot;
3613 
3614 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3615 		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
3616 		    == NPI_SUCCESS) {
3617 			mmac_info->naddrfree++;
3618 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
3619 			/*
3620 			 * Regardless of whether the MAC we just stopped
3621 			 * filtering is a user addr or a factory addr, we
3622 			 * must set the MMAC_VENDOR_ADDR flag if this slot
3623 			 * has an associated factory MAC to indicate that
3624 			 * a factory MAC is available.
3625 			 */
3626 			if (slot <= mmac_info->num_factory_mmac) {
3627 				mmac_info->mac_pool[slot].flags
3628 				    |= MMAC_VENDOR_ADDR;
3629 			}
3630 			/*
3631 			 * Clear mac_pool[slot].addr so that kstat shows 0
3632 			 * alternate MAC address if the slot is not used.
3633 			 * (But nxge_m_mmac_get returns the factory MAC even
3634 			 * when the slot is not used!)
3635 			 */
3636 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
3637 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3638 		} else {
3639 			err = EIO;
3640 		}
3641 	} else {
3642 		err = EINVAL;
3643 	}
3644 
3645 	mutex_exit(nxgep->genlock);
3646 	return (err);
3647 }
3648 
3649 
3650 /*
3651  * Modify a MAC address added by nxge_m_mmac_add() or nxge_m_mmac_reserve().
3652  */
3653 static int
3654 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
3655 {
3656 	p_nxge_t nxgep = arg;
3657 	mac_addr_slot_t slot;
3658 	nxge_mmac_t *mmac_info;
3659 	int err = 0;
3660 	nxge_status_t status;
3661 
3662 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
3663 	    maddr->mma_addrlen))
3664 		return (EINVAL);
3665 
3666 	slot = maddr->mma_slot;
3667 
3668 	mutex_enter(nxgep->genlock);
3669 
3670 	/*
3671 	 * Make sure that nxge is initialized, if _start() has
3672 	 * not been called.
3673 	 */
3674 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3675 		status = nxge_init(nxgep);
3676 		if (status != NXGE_OK) {
3677 			mutex_exit(nxgep->genlock);
3678 			return (ENXIO);
3679 		}
3680 	}
3681 
3682 	mmac_info = &nxgep->nxge_mmac_info;
3683 	if (slot < 1 || slot > mmac_info->num_mmac) {
3684 		mutex_exit(nxgep->genlock);
3685 		return (EINVAL);
3686 	}
3687 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
3688 		if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
3689 		    == 0) {
3690 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
3691 			    ETHERADDRL);
3692 			/*
3693 			 * Assume that the MAC passed down from the caller
3694 			 * is not a factory MAC address (the caller should
3695 			 * use mmac_remove followed by mmac_reserve to get
3696 			 * the factory MAC for this slot).
3697 			 */
3698 			mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
3699 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
3700 		}
3701 	} else {
3702 		err = EINVAL;
3703 	}
3704 	mutex_exit(nxgep->genlock);
3705 	return (err);
3706 }
3707 
3708 /*
3709  * nxge_m_mmac_get() - Get the MAC address and other information
3710  * related to the slot. mma_flags should be set to 0 in the call.
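 *
 * The USED/VENDOR combinations in the table below reduce to a simple
 * rule: report the factory MAC whenever the slot has one, otherwise
 * report the user MAC only while the slot is in use. A stand-alone
 * sketch of that rule follows; the helper name and the
 * NXGE_EXAMPLE_SKETCH guard are hypothetical, and the block is
 * compiled out by default.
 */
#ifdef NXGE_EXAMPLE_SKETCH
static const uint8_t *
nxge_example_pick_report_addr(boolean_t used, boolean_t vendor,
    const uint8_t *user_mac, const uint8_t *factory_mac)
{
	if (vendor)
		return (factory_mac);	/* rows (2) and (3) below */
	if (used)
		return (user_mac);	/* row (1) below */
	return (NULL);			/* row (4): report zeros */
}
#endif	/* NXGE_EXAMPLE_SKETCH */
/*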
3711 * Note: although kstat shows MAC address as zero when a slot is 3712 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC 3713 * to the caller as long as the slot is not using a user MAC address. 3714 * The following table shows the rules, 3715 * 3716 * USED VENDOR mma_addr 3717 * ------------------------------------------------------------ 3718 * (1) Slot uses a user MAC: yes no user MAC 3719 * (2) Slot uses a factory MAC: yes yes factory MAC 3720 * (3) Slot is not used but is 3721 * factory MAC capable: no yes factory MAC 3722 * (4) Slot is not used and is 3723 * not factory MAC capable: no no 0 3724 * ------------------------------------------------------------ 3725 */ 3726 static int 3727 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr) 3728 { 3729 nxge_t *nxgep = arg; 3730 mac_addr_slot_t slot; 3731 nxge_mmac_t *mmac_info; 3732 nxge_status_t status; 3733 3734 slot = maddr->mma_slot; 3735 3736 mutex_enter(nxgep->genlock); 3737 3738 /* 3739 * Make sure that nxge is initialized, if _start() has 3740 * not been called. 3741 */ 3742 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3743 status = nxge_init(nxgep); 3744 if (status != NXGE_OK) { 3745 mutex_exit(nxgep->genlock); 3746 return (ENXIO); 3747 } 3748 } 3749 3750 mmac_info = &nxgep->nxge_mmac_info; 3751 3752 if (slot < 1 || slot > mmac_info->num_mmac) { 3753 mutex_exit(nxgep->genlock); 3754 return (EINVAL); 3755 } 3756 maddr->mma_flags = 0; 3757 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) 3758 maddr->mma_flags |= MMAC_SLOT_USED; 3759 3760 if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) { 3761 maddr->mma_flags |= MMAC_VENDOR_ADDR; 3762 bcopy(mmac_info->factory_mac_pool[slot], 3763 maddr->mma_addr, ETHERADDRL); 3764 maddr->mma_addrlen = ETHERADDRL; 3765 } else { 3766 if (maddr->mma_flags & MMAC_SLOT_USED) { 3767 bcopy(mmac_info->mac_pool[slot].addr, 3768 maddr->mma_addr, ETHERADDRL); 3769 maddr->mma_addrlen = ETHERADDRL; 3770 } else { 3771 bzero(maddr->mma_addr, ETHERADDRL); 3772 maddr->mma_addrlen = 0; 3773 } 3774 } 3775 mutex_exit(nxgep->genlock); 3776 return (0); 3777 } 3778 3779 3780 static boolean_t 3781 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3782 { 3783 nxge_t *nxgep = arg; 3784 uint32_t *txflags = cap_data; 3785 multiaddress_capab_t *mmacp = cap_data; 3786 3787 switch (cap) { 3788 case MAC_CAPAB_HCKSUM: 3789 *txflags = HCKSUM_INET_PARTIAL; 3790 break; 3791 case MAC_CAPAB_POLL: 3792 /* 3793 * There's nothing for us to fill in, simply returning 3794 * B_TRUE stating that we support polling is sufficient. 3795 */ 3796 break; 3797 3798 case MAC_CAPAB_MULTIADDRESS: 3799 mutex_enter(nxgep->genlock); 3800 3801 mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac; 3802 mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree; 3803 mmacp->maddr_flag = 0; /* 0 is requried by PSARC2006/265 */ 3804 /* 3805 * maddr_handle is driver's private data, passed back to 3806 * entry point functions as arg. 
3807 */ 3808 mmacp->maddr_handle = nxgep; 3809 mmacp->maddr_add = nxge_m_mmac_add; 3810 mmacp->maddr_remove = nxge_m_mmac_remove; 3811 mmacp->maddr_modify = nxge_m_mmac_modify; 3812 mmacp->maddr_get = nxge_m_mmac_get; 3813 mmacp->maddr_reserve = nxge_m_mmac_reserve; 3814 3815 mutex_exit(nxgep->genlock); 3816 break; 3817 case MAC_CAPAB_LSO: { 3818 mac_capab_lso_t *cap_lso = cap_data; 3819 3820 if (nxgep->soft_lso_enable) { 3821 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 3822 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 3823 nxge_lso_max = NXGE_LSO_MAXLEN; 3824 } 3825 cap_lso->lso_basic_tcp_ipv4.lso_max = nxge_lso_max; 3826 break; 3827 } else { 3828 return (B_FALSE); 3829 } 3830 } 3831 3832 default: 3833 return (B_FALSE); 3834 } 3835 return (B_TRUE); 3836 } 3837 3838 /* 3839 * Module loading and removing entry points. 3840 */ 3841 3842 static struct cb_ops nxge_cb_ops = { 3843 nodev, /* cb_open */ 3844 nodev, /* cb_close */ 3845 nodev, /* cb_strategy */ 3846 nodev, /* cb_print */ 3847 nodev, /* cb_dump */ 3848 nodev, /* cb_read */ 3849 nodev, /* cb_write */ 3850 nodev, /* cb_ioctl */ 3851 nodev, /* cb_devmap */ 3852 nodev, /* cb_mmap */ 3853 nodev, /* cb_segmap */ 3854 nochpoll, /* cb_chpoll */ 3855 ddi_prop_op, /* cb_prop_op */ 3856 NULL, 3857 D_MP, /* cb_flag */ 3858 CB_REV, /* rev */ 3859 nodev, /* int (*cb_aread)() */ 3860 nodev /* int (*cb_awrite)() */ 3861 }; 3862 3863 static struct dev_ops nxge_dev_ops = { 3864 DEVO_REV, /* devo_rev */ 3865 0, /* devo_refcnt */ 3866 nulldev, 3867 nulldev, /* devo_identify */ 3868 nulldev, /* devo_probe */ 3869 nxge_attach, /* devo_attach */ 3870 nxge_detach, /* devo_detach */ 3871 nodev, /* devo_reset */ 3872 &nxge_cb_ops, /* devo_cb_ops */ 3873 (struct bus_ops *)NULL, /* devo_bus_ops */ 3874 ddi_power /* devo_power */ 3875 }; 3876 3877 extern struct mod_ops mod_driverops; 3878 3879 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 3880 3881 /* 3882 * Module linkage information for the kernel. 
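 *
 * Related usage note: the MAC_CAPAB_MULTIADDRESS case above hands the
 * framework a vector of entry points plus maddr_handle, which comes
 * back as their first argument. A hedged sketch of how a consumer
 * holding that capability structure might program one extra unicast
 * address is shown below; the wrapper name and the NXGE_EXAMPLE_SKETCH
 * guard are hypothetical, and the block is compiled out by default.
 */
#ifdef NXGE_EXAMPLE_SKETCH
static int
nxge_example_use_multiaddr(multiaddress_capab_t *cap, const uint8_t *mac)
{
	mac_multi_addr_t	ma;

	if (cap->maddr_naddrfree == 0)
		return (ENOSPC);

	bzero(&ma, sizeof (ma));
	ma.mma_addrlen = ETHERADDRL;
	bcopy(mac, ma.mma_addr, ETHERADDRL);

	/* maddr_add picks a free slot and returns it in ma.mma_slot. */
	return (cap->maddr_add(cap->maddr_handle, &ma));
}
#endif	/* NXGE_EXAMPLE_SKETCH */
/*
 * The module linkage tables follow.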
3883 */ 3884 static struct modldrv nxge_modldrv = { 3885 &mod_driverops, 3886 NXGE_DESC_VER, 3887 &nxge_dev_ops 3888 }; 3889 3890 static struct modlinkage modlinkage = { 3891 MODREV_1, (void *) &nxge_modldrv, NULL 3892 }; 3893 3894 int 3895 _init(void) 3896 { 3897 int status; 3898 3899 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 3900 mac_init_ops(&nxge_dev_ops, "nxge"); 3901 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 3902 if (status != 0) { 3903 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 3904 "failed to init device soft state")); mac_fini_ops(&nxge_dev_ops); 3905 goto _init_exit; 3906 } 3907 status = mod_install(&modlinkage); 3908 if (status != 0) { 3909 ddi_soft_state_fini(&nxge_list); mac_fini_ops(&nxge_dev_ops); 3910 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 3911 goto _init_exit; 3912 } 3913 3914 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 3915 3916 _init_exit: 3917 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 3918 3919 return (status); 3920 } 3921 3922 int 3923 _fini(void) 3924 { 3925 int status; 3926 3927 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 3928 3929 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 3930 3931 if (nxge_mblks_pending) 3932 return (EBUSY); 3933 3934 status = mod_remove(&modlinkage); 3935 if (status != DDI_SUCCESS) { 3936 NXGE_DEBUG_MSG((NULL, MOD_CTL, 3937 "Module removal failed 0x%08x", 3938 status)); 3939 goto _fini_exit; 3940 } 3941 3942 mac_fini_ops(&nxge_dev_ops); 3943 3944 ddi_soft_state_fini(&nxge_list); 3945 3946 MUTEX_DESTROY(&nxge_common_lock); 3947 _fini_exit: 3948 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 3949 3950 return (status); 3951 } 3952 3953 int 3954 _info(struct modinfo *modinfop) 3955 { 3956 int status; 3957 3958 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 3959 status = mod_info(&modlinkage, modinfop); 3960 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 3961 3962 return (status); 3963 } 3964 3965 /*ARGSUSED*/ 3966 static nxge_status_t 3967 nxge_add_intrs(p_nxge_t nxgep) 3968 { 3969 3970 int intr_types; 3971 int type = 0; 3972 int ddi_status = DDI_SUCCESS; 3973 nxge_status_t status = NXGE_OK; 3974 3975 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 3976 3977 nxgep->nxge_intr_type.intr_registered = B_FALSE; 3978 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 3979 nxgep->nxge_intr_type.msi_intx_cnt = 0; 3980 nxgep->nxge_intr_type.intr_added = 0; 3981 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 3982 nxgep->nxge_intr_type.intr_type = 0; 3983 3984 if (nxgep->niu_type == N2_NIU) { 3985 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 3986 } else if (nxge_msi_enable) { 3987 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 3988 } 3989 3990 /* Get the supported interrupt types */ 3991 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 3992 != DDI_SUCCESS) { 3993 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 3994 "ddi_intr_get_supported_types failed: status 0x%08x", 3995 ddi_status)); 3996 return (NXGE_ERROR | NXGE_DDI_FAILED); 3997 } 3998 nxgep->nxge_intr_type.intr_types = intr_types; 3999 4000 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 4001 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 4002 4003 /* 4004 * Solaris MSI-X is not supported yet; use MSI for now.
4005 * nxge_msi_enable (default 1): 4006 * 1 - MSI 2 - MSI-X others - FIXED 4007 */ 4008 switch (nxge_msi_enable) { 4009 default: 4010 type = DDI_INTR_TYPE_FIXED; 4011 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 4012 "use fixed (intx emulation) type %08x", 4013 type)); 4014 break; 4015 4016 case 2: 4017 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 4018 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 4019 if (intr_types & DDI_INTR_TYPE_MSIX) { 4020 type = DDI_INTR_TYPE_MSIX; 4021 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 4022 "ddi_intr_get_supported_types: MSIX 0x%08x", 4023 type)); 4024 } else if (intr_types & DDI_INTR_TYPE_MSI) { 4025 type = DDI_INTR_TYPE_MSI; 4026 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 4027 "ddi_intr_get_supported_types: MSI 0x%08x", 4028 type)); 4029 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 4030 type = DDI_INTR_TYPE_FIXED; 4031 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 4032 "ddi_intr_get_supported_types: FIXED 0x%08x", 4033 type)); 4034 } 4035 break; 4036 4037 case 1: 4038 if (intr_types & DDI_INTR_TYPE_MSI) { 4039 type = DDI_INTR_TYPE_MSI; 4040 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 4041 "ddi_intr_get_supported_types: MSI 0x%08x", 4042 type)); 4043 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 4044 type = DDI_INTR_TYPE_MSIX; 4045 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 4046 "ddi_intr_get_supported_types: MSIX 0x%08x", 4047 type)); 4048 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 4049 type = DDI_INTR_TYPE_FIXED; 4050 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 4051 "ddi_intr_get_supported_types: FIXED 0x%08x", 4052 type)); 4053 } 4054 } 4055 4056 nxgep->nxge_intr_type.intr_type = type; 4057 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 4058 type == DDI_INTR_TYPE_FIXED) && 4059 nxgep->nxge_intr_type.niu_msi_enable) { 4060 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 4061 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4062 " nxge_add_intrs: " 4063 " nxge_add_intrs_adv failed: status 0x%08x", 4064 status)); 4065 return (status); 4066 } else { 4067 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 4068 "interrupts registered : type %d", type)); 4069 nxgep->nxge_intr_type.intr_registered = B_TRUE; 4070 4071 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 4072 "\nAdded advanced nxge add_intr_adv " 4073 "intr type 0x%x\n", type)); 4074 4075 return (status); 4076 } 4077 } 4078 4079 if (!nxgep->nxge_intr_type.intr_registered) { 4080 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 4081 "failed to register interrupts")); 4082 return (NXGE_ERROR | NXGE_DDI_FAILED); 4083 } 4084 4085 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 4086 return (status); 4087 } 4088 4089 /*ARGSUSED*/ 4090 static nxge_status_t 4091 nxge_add_soft_intrs(p_nxge_t nxgep) 4092 { 4093 4094 int ddi_status = DDI_SUCCESS; 4095 nxge_status_t status = NXGE_OK; 4096 4097 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs")); 4098 4099 nxgep->resched_id = NULL; 4100 nxgep->resched_running = B_FALSE; 4101 ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW, 4102 &nxgep->resched_id, 4103 NULL, NULL, nxge_reschedule, (caddr_t)nxgep); 4104 if (ddi_status != DDI_SUCCESS) { 4105 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: " 4106 "ddi_add_softintr failed: status 0x%08x", 4107 ddi_status)); 4108 return (NXGE_ERROR | NXGE_DDI_FAILED); 4109 } 4110 4111 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs")); 4112 4113 return (status); 4114 } 4115
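/*
 * Tuning note: the interrupt type chosen in nxge_add_intrs() above is
 * driven by the nxge_msi_enable global. As an illustrative /etc/system
 * fragment (the values correspond to the switch cases above):
 *
 *	set nxge:nxge_msi_enable = 2	prefer MSI-X, then MSI, then FIXED
 *	set nxge:nxge_msi_enable = 1	prefer MSI, then MSI-X, then FIXED
 *	set nxge:nxge_msi_enable = 0	force FIXED (INTx emulation)
 */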
4116 static nxge_status_t 4117 nxge_add_intrs_adv(p_nxge_t nxgep) 4118 { 4119 int intr_type; 4120 p_nxge_intr_t intrp; 4121 4122 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 4123 4124 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4125 intr_type = intrp->intr_type; 4126 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 4127 intr_type)); 4128 4129 switch (intr_type) { 4130 case DDI_INTR_TYPE_MSI: /* 0x2 */ 4131 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 4132 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 4133 4134 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 4135 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 4136 4137 default: 4138 return (NXGE_ERROR); 4139 } 4140 } 4141 4142 4143 /*ARGSUSED*/ 4144 static nxge_status_t 4145 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 4146 { 4147 dev_info_t *dip = nxgep->dip; 4148 p_nxge_ldg_t ldgp; 4149 p_nxge_intr_t intrp; 4150 uint_t *inthandler; 4151 void *arg1, *arg2; 4152 int behavior; 4153 int nintrs, navail, nrequest; 4154 int nactual, nrequired; 4155 int inum = 0; 4156 int x, y; 4157 int ddi_status = DDI_SUCCESS; 4158 nxge_status_t status = NXGE_OK; 4159 4160 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 4161 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4162 intrp->start_inum = 0; 4163 4164 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 4165 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 4166 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4167 "ddi_intr_get_nintrs() failed, status: 0x%x, " 4168 "nintrs: %d", ddi_status, nintrs)); 4169 return (NXGE_ERROR | NXGE_DDI_FAILED); 4170 } 4171 4172 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 4173 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 4174 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4175 "ddi_intr_get_navail() failed, status: 0x%x, " 4176 "navail: %d", ddi_status, navail)); 4177 return (NXGE_ERROR | NXGE_DDI_FAILED); 4178 } 4179 4180 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4181 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 4182 nintrs, navail)); 4183 4184 /* PSARC/2007/453 MSI-X interrupt limit override */ 4185 if (int_type == DDI_INTR_TYPE_MSIX) { 4186 nrequest = nxge_create_msi_property(nxgep); 4187 if (nrequest < navail) { 4188 navail = nrequest; 4189 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4190 "nxge_add_intrs_adv_type: nintrs %d " 4191 "navail %d (nrequest %d)", 4192 nintrs, navail, nrequest)); 4193 } 4194 } 4195 4196 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 4197 /* MSI must be a power of 2; round navail down to its highest power-of-2 bit */ 4198 if ((navail & 16) == 16) { 4199 navail = 16; 4200 } else if ((navail & 8) == 8) { 4201 navail = 8; 4202 } else if ((navail & 4) == 4) { 4203 navail = 4; 4204 } else if ((navail & 2) == 2) { 4205 navail = 2; 4206 } else { 4207 navail = 1; 4208 } 4209 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4210 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 4211 "navail %d", nintrs, navail)); 4212 } 4213 4214 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ?
DDI_INTR_ALLOC_STRICT : 4215 DDI_INTR_ALLOC_NORMAL); 4216 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 4217 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 4218 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 4219 navail, &nactual, behavior); 4220 if (ddi_status != DDI_SUCCESS || nactual == 0) { 4221 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4222 " ddi_intr_alloc() failed: %d", 4223 ddi_status)); 4224 kmem_free(intrp->htable, intrp->intr_size); 4225 return (NXGE_ERROR | NXGE_DDI_FAILED); 4226 } 4227 4228 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 4229 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 4230 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4231 " ddi_intr_get_pri() failed: %d", 4232 ddi_status)); 4233 /* Free already allocated interrupts */ 4234 for (y = 0; y < nactual; y++) { 4235 (void) ddi_intr_free(intrp->htable[y]); 4236 } 4237 4238 kmem_free(intrp->htable, intrp->intr_size); 4239 return (NXGE_ERROR | NXGE_DDI_FAILED); 4240 } 4241 4242 nrequired = 0; 4243 switch (nxgep->niu_type) { 4244 default: 4245 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 4246 break; 4247 4248 case N2_NIU: 4249 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 4250 break; 4251 } 4252 4253 if (status != NXGE_OK) { 4254 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4255 "nxge_add_intrs_adv_type: nxge_ldgv_init " 4256 "failed: 0x%x", status)); 4257 /* Free already allocated interrupts */ 4258 for (y = 0; y < nactual; y++) { 4259 (void) ddi_intr_free(intrp->htable[y]); 4260 } 4261 4262 kmem_free(intrp->htable, intrp->intr_size); 4263 return (status); 4264 } 4265 4266 ldgp = nxgep->ldgvp->ldgp; 4267 for (x = 0; x < nrequired; x++, ldgp++) { 4268 ldgp->vector = (uint8_t)x; 4269 ldgp->intdata = SID_DATA(ldgp->func, x); 4270 arg1 = ldgp->ldvp; 4271 arg2 = nxgep; 4272 if (ldgp->nldvs == 1) { 4273 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 4274 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4275 "nxge_add_intrs_adv_type: " 4276 "arg1 0x%x arg2 0x%x: " 4277 "1-1 int handler (entry %d intdata 0x%x)\n", 4278 arg1, arg2, 4279 x, ldgp->intdata)); 4280 } else if (ldgp->nldvs > 1) { 4281 inthandler = (uint_t *)ldgp->sys_intr_handler; 4282 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4283 "nxge_add_intrs_adv_type: " 4284 "arg1 0x%x arg2 0x%x: " 4285 "nldvs %d int handler " 4286 "(entry %d intdata 0x%x)\n", 4287 arg1, arg2, 4288 ldgp->nldvs, x, ldgp->intdata)); 4289 } 4290 4291 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4292 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 4293 "htable 0x%llx", x, intrp->htable[x])); 4294 4295 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 4296 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 4297 != DDI_SUCCESS) { 4298 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4299 "==> nxge_add_intrs_adv_type: failed #%d " 4300 "status 0x%x", x, ddi_status)); 4301 for (y = 0; y < intrp->intr_added; y++) { 4302 (void) ddi_intr_remove_handler( 4303 intrp->htable[y]); 4304 } 4305 /* Free already allocated intr */ 4306 for (y = 0; y < nactual; y++) { 4307 (void) ddi_intr_free(intrp->htable[y]); 4308 } 4309 kmem_free(intrp->htable, intrp->intr_size); 4310 4311 (void) nxge_ldgv_uninit(nxgep); 4312 4313 return (NXGE_ERROR | NXGE_DDI_FAILED); 4314 } 4315 intrp->intr_added++; 4316 } 4317 4318 intrp->msi_intx_cnt = nactual; 4319 4320 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 4321 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 4322 navail, nactual, 4323 intrp->msi_intx_cnt, 4324 intrp->intr_added)); 4325 4326 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 4327 4328 (void)
nxge_intr_ldgv_init(nxgep); 4329 4330 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type")); 4331 4332 return (status); 4333 } 4334 4335 /*ARGSUSED*/ 4336 static nxge_status_t 4337 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type) 4338 { 4339 dev_info_t *dip = nxgep->dip; 4340 p_nxge_ldg_t ldgp; 4341 p_nxge_intr_t intrp; 4342 uint_t *inthandler; 4343 void *arg1, *arg2; 4344 int behavior; 4345 int nintrs, navail; 4346 int nactual, nrequired; 4347 int inum = 0; 4348 int x, y; 4349 int ddi_status = DDI_SUCCESS; 4350 nxge_status_t status = NXGE_OK; 4351 4352 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix")); 4353 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4354 intrp->start_inum = 0; 4355 4356 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 4357 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 4358 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4359 "ddi_intr_get_nintrs() failed, status: 0x%x, " 4360 "nintrs: %d", ddi_status, nintrs)); 4361 return (NXGE_ERROR | NXGE_DDI_FAILED); 4362 } 4363 4364 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 4365 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 4366 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4367 "ddi_intr_get_navail() failed, status: 0x%x, " 4368 "navail: %d", ddi_status, navail)); 4369 return (NXGE_ERROR | NXGE_DDI_FAILED); 4370 } 4371 4372 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4373 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 4374 nintrs, navail)); 4375 4376 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 4377 DDI_INTR_ALLOC_NORMAL); 4378 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 4379 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 4380 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 4381 navail, &nactual, behavior); 4382 if (ddi_status != DDI_SUCCESS || nactual == 0) { 4383 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4384 " ddi_intr_alloc() failed: %d", 4385 ddi_status)); 4386 kmem_free(intrp->htable, intrp->intr_size); 4387 return (NXGE_ERROR | NXGE_DDI_FAILED); 4388 } 4389 4390 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 4391 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 4392 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4393 " ddi_intr_get_pri() failed: %d", 4394 ddi_status)); 4395 /* Free already allocated interrupts */ 4396 for (y = 0; y < nactual; y++) { 4397 (void) ddi_intr_free(intrp->htable[y]); 4398 } 4399 4400 kmem_free(intrp->htable, intrp->intr_size); 4401 return (NXGE_ERROR | NXGE_DDI_FAILED); 4402 } 4403 4404 nrequired = 0; 4405 switch (nxgep->niu_type) { 4406 default: 4407 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 4408 break; 4409 4410 case N2_NIU: 4411 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 4412 break; 4413 } 4414 4415 if (status != NXGE_OK) { 4416 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4417 "nxge_add_intrs_adv_type_fix: nxge_ldgv_init " 4418 "failed: 0x%x", status)); 4419 /* Free already allocated interrupts */ 4420 for (y = 0; y < nactual; y++) { 4421 (void) ddi_intr_free(intrp->htable[y]); 4422 } 4423 4424 kmem_free(intrp->htable, intrp->intr_size); 4425 return (status); 4426 } 4427 4428 ldgp = nxgep->ldgvp->ldgp; 4429 for (x = 0; x < nrequired; x++, ldgp++) { 4430 ldgp->vector = (uint8_t)x; 4431 if (nxgep->niu_type != N2_NIU) { 4432 ldgp->intdata = SID_DATA(ldgp->func, x); 4433 } 4434 4435 arg1 = ldgp->ldvp; 4436 arg2 = nxgep; 4437 if (ldgp->nldvs == 1) { 4438 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 4439 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4440 "nxge_add_intrs_adv_type_fix: " 4441
"1-1 int handler(%d) ldg %d ldv %d " 4442 "arg1 $%p arg2 $%p\n", 4443 x, ldgp->ldg, ldgp->ldvp->ldv, 4444 arg1, arg2)); 4445 } else if (ldgp->nldvs > 1) { 4446 inthandler = (uint_t *)ldgp->sys_intr_handler; 4447 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4448 "nxge_add_intrs_adv_type_fix: " 4449 "shared ldv %d int handler(%d) ldv %d ldg %d" 4450 "arg1 0x%016llx arg2 0x%016llx\n", 4451 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 4452 arg1, arg2)); 4453 } 4454 4455 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 4456 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 4457 != DDI_SUCCESS) { 4458 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4459 "==> nxge_add_intrs_adv_type_fix: failed #%d " 4460 "status 0x%x", x, ddi_status)); 4461 for (y = 0; y < intrp->intr_added; y++) { 4462 (void) ddi_intr_remove_handler( 4463 intrp->htable[y]); 4464 } 4465 for (y = 0; y < nactual; y++) { 4466 (void) ddi_intr_free(intrp->htable[y]); 4467 } 4468 /* Free already allocated intr */ 4469 kmem_free(intrp->htable, intrp->intr_size); 4470 4471 (void) nxge_ldgv_uninit(nxgep); 4472 4473 return (NXGE_ERROR | NXGE_DDI_FAILED); 4474 } 4475 intrp->intr_added++; 4476 } 4477 4478 intrp->msi_intx_cnt = nactual; 4479 4480 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 4481 4482 status = nxge_intr_ldgv_init(nxgep); 4483 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 4484 4485 return (status); 4486 } 4487 4488 static void 4489 nxge_remove_intrs(p_nxge_t nxgep) 4490 { 4491 int i, inum; 4492 p_nxge_intr_t intrp; 4493 4494 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 4495 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4496 if (!intrp->intr_registered) { 4497 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4498 "<== nxge_remove_intrs: interrupts not registered")); 4499 return; 4500 } 4501 4502 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 4503 4504 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4505 (void) ddi_intr_block_disable(intrp->htable, 4506 intrp->intr_added); 4507 } else { 4508 for (i = 0; i < intrp->intr_added; i++) { 4509 (void) ddi_intr_disable(intrp->htable[i]); 4510 } 4511 } 4512 4513 for (inum = 0; inum < intrp->intr_added; inum++) { 4514 if (intrp->htable[inum]) { 4515 (void) ddi_intr_remove_handler(intrp->htable[inum]); 4516 } 4517 } 4518 4519 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 4520 if (intrp->htable[inum]) { 4521 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 4522 "nxge_remove_intrs: ddi_intr_free inum %d " 4523 "msi_intx_cnt %d intr_added %d", 4524 inum, 4525 intrp->msi_intx_cnt, 4526 intrp->intr_added)); 4527 4528 (void) ddi_intr_free(intrp->htable[inum]); 4529 } 4530 } 4531 4532 kmem_free(intrp->htable, intrp->intr_size); 4533 intrp->intr_registered = B_FALSE; 4534 intrp->intr_enabled = B_FALSE; 4535 intrp->msi_intx_cnt = 0; 4536 intrp->intr_added = 0; 4537 4538 (void) nxge_ldgv_uninit(nxgep); 4539 4540 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 4541 "#msix-request"); 4542 4543 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 4544 } 4545 4546 /*ARGSUSED*/ 4547 static void 4548 nxge_remove_soft_intrs(p_nxge_t nxgep) 4549 { 4550 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs")); 4551 if (nxgep->resched_id) { 4552 ddi_remove_softintr(nxgep->resched_id); 4553 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4554 "==> nxge_remove_soft_intrs: removed")); 4555 nxgep->resched_id = NULL; 4556 } 4557 4558 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs")); 4559 } 4560 4561 /*ARGSUSED*/ 4562 static void 4563 nxge_intrs_enable(p_nxge_t nxgep) 4564 { 
4565 p_nxge_intr_t intrp; 4566 int i; 4567 int status; 4568 4569 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 4570 4571 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4572 4573 if (!intrp->intr_registered) { 4574 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 4575 "interrupts are not registered")); 4576 return; 4577 } 4578 4579 if (intrp->intr_enabled) { 4580 NXGE_DEBUG_MSG((nxgep, INT_CTL, 4581 "<== nxge_intrs_enable: already enabled")); 4582 return; 4583 } 4584 4585 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4586 status = ddi_intr_block_enable(intrp->htable, 4587 intrp->intr_added); 4588 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 4589 "block enable - status 0x%x total inums #%d\n", 4590 status, intrp->intr_added)); 4591 } else { 4592 for (i = 0; i < intrp->intr_added; i++) { 4593 status = ddi_intr_enable(intrp->htable[i]); 4594 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 4595 "ddi_intr_enable:enable - status 0x%x " 4596 "total inums %d enable inum #%d\n", 4597 status, intrp->intr_added, i)); 4598 if (status == DDI_SUCCESS) { 4599 intrp->intr_enabled = B_TRUE; 4600 } 4601 } 4602 } 4603 4604 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable")); 4605 } 4606 4607 /*ARGSUSED*/ 4608 static void 4609 nxge_intrs_disable(p_nxge_t nxgep) 4610 { 4611 p_nxge_intr_t intrp; 4612 int i; 4613 4614 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable")); 4615 4616 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 4617 4618 if (!intrp->intr_registered) { 4619 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: " 4620 "interrupts are not registered")); 4621 return; 4622 } 4623 4624 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 4625 (void) ddi_intr_block_disable(intrp->htable, 4626 intrp->intr_added); 4627 } else { 4628 for (i = 0; i < intrp->intr_added; i++) { 4629 (void) ddi_intr_disable(intrp->htable[i]); 4630 } 4631 } 4632 4633 intrp->intr_enabled = B_FALSE; 4634 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable")); 4635 } 4636 4637 static nxge_status_t 4638 nxge_mac_register(p_nxge_t nxgep) 4639 { 4640 mac_register_t *macp; 4641 int status; 4642 4643 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register")); 4644 4645 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 4646 return (NXGE_ERROR); 4647 4648 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 4649 macp->m_driver = nxgep; 4650 macp->m_dip = nxgep->dip; 4651 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; 4652 macp->m_callbacks = &nxge_m_callbacks; 4653 macp->m_min_sdu = 0; 4654 macp->m_max_sdu = nxgep->mac.maxframesize - 4655 sizeof (struct ether_header) - ETHERFCSL - 4; 4656 macp->m_margin = VLAN_TAGSZ; 4657 4658 status = mac_register(macp, &nxgep->mach); 4659 mac_free(macp); 4660 4661 if (status != 0) { 4662 cmn_err(CE_WARN, 4663 "!nxge_mac_register failed (status %d instance %d)", 4664 status, nxgep->instance); 4665 return (NXGE_ERROR); 4666 } 4667 4668 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success " 4669 "(instance %d)", nxgep->instance)); 4670 4671 return (NXGE_OK); 4672 } 4673 4674 void 4675 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp) 4676 { 4677 ssize_t size; 4678 mblk_t *nmp; 4679 uint8_t blk_id; 4680 uint8_t chan; 4681 uint32_t err_id; 4682 err_inject_t *eip; 4683 4684 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject")); 4685 4686 size = 1024; 4687 nmp = mp->b_cont; 4688 eip = (err_inject_t *)nmp->b_rptr; 4689 blk_id = eip->blk_id; 4690 err_id = eip->err_id; 4691 chan = eip->chan; 4692 cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id); 4693 
cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id); 4694 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 4695 switch (blk_id) { 4696 case MAC_BLK_ID: 4697 break; 4698 case TXMAC_BLK_ID: 4699 break; 4700 case RXMAC_BLK_ID: 4701 break; 4702 case MIF_BLK_ID: 4703 break; 4704 case IPP_BLK_ID: 4705 nxge_ipp_inject_err(nxgep, err_id); 4706 break; 4707 case TXC_BLK_ID: 4708 nxge_txc_inject_err(nxgep, err_id); 4709 break; 4710 case TXDMA_BLK_ID: 4711 nxge_txdma_inject_err(nxgep, err_id, chan); 4712 break; 4713 case RXDMA_BLK_ID: 4714 nxge_rxdma_inject_err(nxgep, err_id, chan); 4715 break; 4716 case ZCP_BLK_ID: 4717 nxge_zcp_inject_err(nxgep, err_id); 4718 break; 4719 case ESPC_BLK_ID: 4720 break; 4721 case FFLP_BLK_ID: 4722 break; 4723 case PHY_BLK_ID: 4724 break; 4725 case ETHER_SERDES_BLK_ID: 4726 break; 4727 case PCIE_SERDES_BLK_ID: 4728 break; 4729 case VIR_BLK_ID: 4730 break; 4731 } 4732 4733 nmp->b_wptr = nmp->b_rptr + size; 4734 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 4735 4736 miocack(wq, mp, (int)size, 0); 4737 } 4738 4739 static int 4740 nxge_init_common_dev(p_nxge_t nxgep) 4741 { 4742 p_nxge_hw_list_t hw_p; 4743 dev_info_t *p_dip; 4744 4745 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 4746 4747 p_dip = nxgep->p_dip; 4748 MUTEX_ENTER(&nxge_common_lock); 4749 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4750 "==> nxge_init_common_device: func # %d", 4751 nxgep->function_num)); 4752 /* 4753 * Loop through the existing per-Neptune hardware list. 4754 */ 4755 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 4756 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4757 "==> nxge_init_common_device:func # %d " 4758 "hw_p $%p parent dip $%p", 4759 nxgep->function_num, 4760 hw_p, 4761 p_dip)); 4762 if (hw_p->parent_devp == p_dip) { 4763 nxgep->nxge_hw_p = hw_p; 4764 hw_p->ndevs++; 4765 hw_p->nxge_p[nxgep->function_num] = nxgep; 4766 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4767 "==> nxge_init_common_device:func # %d " 4768 "hw_p $%p parent dip $%p " 4769 "ndevs %d (found)", 4770 nxgep->function_num, 4771 hw_p, 4772 p_dip, 4773 hw_p->ndevs)); 4774 break; 4775 } 4776 } 4777 4778 if (hw_p == NULL) { 4779 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4780 "==> nxge_init_common_device:func # %d " 4781 "parent dip $%p (new)", 4782 nxgep->function_num, 4783 p_dip)); 4784 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 4785 hw_p->parent_devp = p_dip; 4786 hw_p->magic = NXGE_NEPTUNE_MAGIC; 4787 nxgep->nxge_hw_p = hw_p; 4788 hw_p->ndevs++; 4789 hw_p->nxge_p[nxgep->function_num] = nxgep; 4790 hw_p->next = nxge_hw_list; 4791 if (nxgep->niu_type == N2_NIU) { 4792 hw_p->niu_type = N2_NIU; 4793 hw_p->platform_type = P_NEPTUNE_NIU; 4794 } else { 4795 hw_p->niu_type = NIU_TYPE_NONE; 4796 hw_p->platform_type = P_NEPTUNE_NONE; 4797 } 4798 4799 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 4800 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 4801 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 4802 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 4803 MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL); 4804 4805 nxge_hw_list = hw_p; 4806 4807 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list); 4808 } 4809 4810 MUTEX_EXIT(&nxge_common_lock); 4811 4812 nxgep->platform_type = hw_p->platform_type; 4813 if (nxgep->niu_type != N2_NIU) { 4814 nxgep->niu_type = hw_p->niu_type; 4815 } 4816 4817 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4818 "==> nxge_init_common_device (nxge_hw_list) $%p", 4819 nxge_hw_list)); 4820 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device")); 4821 4822 return
(NXGE_OK); 4823 } 4824 4825 static void 4826 nxge_uninit_common_dev(p_nxge_t nxgep) 4827 { 4828 p_nxge_hw_list_t hw_p, h_hw_p; 4829 dev_info_t *p_dip; 4830 4831 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device")); 4832 if (nxgep->nxge_hw_p == NULL) { 4833 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4834 "<== nxge_uninit_common_device (no common)")); 4835 return; 4836 } 4837 4838 MUTEX_ENTER(&nxge_common_lock); 4839 h_hw_p = nxge_hw_list; 4840 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 4841 p_dip = hw_p->parent_devp; 4842 if (nxgep->nxge_hw_p == hw_p && 4843 p_dip == nxgep->p_dip && 4844 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC && 4845 hw_p->magic == NXGE_NEPTUNE_MAGIC) { 4846 4847 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4848 "==> nxge_uninit_common_device:func # %d " 4849 "hw_p $%p parent dip $%p " 4850 "ndevs %d (found)", 4851 nxgep->function_num, 4852 hw_p, 4853 p_dip, 4854 hw_p->ndevs)); 4855 4856 nxgep->nxge_hw_p = NULL; 4857 if (hw_p->ndevs) { 4858 hw_p->ndevs--; 4859 } 4860 hw_p->nxge_p[nxgep->function_num] = NULL; 4861 if (!hw_p->ndevs) { 4862 MUTEX_DESTROY(&hw_p->nxge_vlan_lock); 4863 MUTEX_DESTROY(&hw_p->nxge_tcam_lock); 4864 MUTEX_DESTROY(&hw_p->nxge_cfg_lock); 4865 MUTEX_DESTROY(&hw_p->nxge_mdio_lock); 4866 MUTEX_DESTROY(&hw_p->nxge_mii_lock); 4867 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4868 "==> nxge_uninit_common_device: " 4869 "func # %d " 4870 "hw_p $%p parent dip $%p " 4871 "ndevs %d (last)", 4872 nxgep->function_num, 4873 hw_p, 4874 p_dip, 4875 hw_p->ndevs)); 4876 4877 if (hw_p == nxge_hw_list) { 4878 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4879 "==> nxge_uninit_common_device:" 4880 "remove head func # %d " 4881 "hw_p $%p parent dip $%p " 4882 "ndevs %d (head)", 4883 nxgep->function_num, 4884 hw_p, 4885 p_dip, 4886 hw_p->ndevs)); 4887 nxge_hw_list = hw_p->next; 4888 } else { 4889 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4890 "==> nxge_uninit_common_device:" 4891 "remove middle func # %d " 4892 "hw_p $%p parent dip $%p " 4893 "ndevs %d (middle)", 4894 nxgep->function_num, 4895 hw_p, 4896 p_dip, 4897 hw_p->ndevs)); 4898 h_hw_p->next = hw_p->next; 4899 } 4900 4901 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t)); 4902 } 4903 break; 4904 } else { 4905 h_hw_p = hw_p; 4906 } 4907 } 4908 4909 MUTEX_EXIT(&nxge_common_lock); 4910 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4911 "==> nxge_uninit_common_device (nxge_hw_list) $%p", 4912 nxge_hw_list)); 4913 4914 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device")); 4915 } 4916 4917 /* 4918 * Determines the number of ports from the niu_type or the platform type. 4919 * Returns the number of ports, or zero on failure. 4920 */ 4921 4922 int 4923 nxge_get_nports(p_nxge_t nxgep) 4924 { 4925 int nports = 0; 4926 4927 switch (nxgep->niu_type) { 4928 case N2_NIU: 4929 case NEPTUNE_2_10GF: 4930 nports = 2; 4931 break; 4932 case NEPTUNE_4_1GC: 4933 case NEPTUNE_2_10GF_2_1GC: 4934 case NEPTUNE_1_10GF_3_1GC: 4935 case NEPTUNE_1_1GC_1_10GF_2_1GC: 4936 nports = 4; 4937 break; 4938 default: 4939 switch (nxgep->platform_type) { 4940 case P_NEPTUNE_NIU: 4941 case P_NEPTUNE_ATLAS_2PORT: 4942 nports = 2; 4943 break; 4944 case P_NEPTUNE_ATLAS_4PORT: 4945 case P_NEPTUNE_MARAMBA_P0: 4946 case P_NEPTUNE_MARAMBA_P1: 4947 case P_NEPTUNE_ALONSO: 4948 nports = 4; 4949 break; 4950 default: 4951 break; 4952 } 4953 break; 4954 } 4955 4956 return (nports); 4957 } 4958 4959 /* 4960 * The following function supports the 4961 * PSARC/2007/453 MSI-X interrupt limit override.
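 * nxge_create_msi_property() publishes the "#msix-request" property and returns the number of MSI-X vectors to request; nxge_add_intrs_adv_type() uses that value to cap navail before calling ddi_intr_alloc(), and nxge_remove_intrs() removes the property again.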
4962 */ 4963 static int 4964 nxge_create_msi_property(p_nxge_t nxgep) 4965 { 4966 int nmsi; 4967 extern int ncpus; 4968 4969 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property")); 4970 4971 switch (nxgep->mac.portmode) { 4972 case PORT_10G_COPPER: 4973 case PORT_10G_FIBER: 4974 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 4975 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 4976 /* 4977 * The maximum number of MSI-X vectors requested is 8 4978 * (NXGE_MSIX_REQUEST_10G). If the number of CPUs is 4979 * less than 8, request one MSI-X vector per CPU. 4980 */ 4981 if (ncpus >= NXGE_MSIX_REQUEST_10G) { 4982 nmsi = NXGE_MSIX_REQUEST_10G; 4983 } else { 4984 nmsi = ncpus; 4985 } 4986 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4987 "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)", 4988 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 4989 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 4990 break; 4991 4992 default: 4993 nmsi = NXGE_MSIX_REQUEST_1G; 4994 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 4995 "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)", 4996 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 4997 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 4998 break; 4999 } 5000 5001 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property")); 5002 return (nmsi); 5003 } 5004
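/*
 * Illustrative sketch (not part of the driver proper; the guard symbol
 * NXGE_MSI_EXAMPLE below is hypothetical and never defined): the request
 * size computed by nxge_create_msi_property() above reduces to
 * min(ncpus, NXGE_MSIX_REQUEST_10G) for the 10G port modes and to
 * NXGE_MSIX_REQUEST_1G otherwise.
 */
#ifdef NXGE_MSI_EXAMPLE
static int
nxge_msi_request_example(int ncpus_in, boolean_t is_10g)
{
	if (!is_10g)
		return (NXGE_MSIX_REQUEST_1G);

	/* One vector per CPU, capped at the 10G request limit (8) */
	return ((ncpus_in < NXGE_MSIX_REQUEST_10G) ?
	    ncpus_in : NXGE_MSIX_REQUEST_10G);
}
#endif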