/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include	<sys/nxge/nxge_impl.h>
#include	<sys/nxge/nxge_hio.h>
#include	<sys/nxge/nxge_rxdma.h>
#include	<sys/pcie.h>

uint32_t	nxge_use_partition = 0;		/* debug partition flag */
uint32_t	nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 *  and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t	nxge_msi_enable = 2;
#else
uint32_t	nxge_msi_enable = 1;
#endif

uint32_t	nxge_cksum_enable = 0;

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t	nxge_rbr_spare_size = 0;
uint32_t	nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t	nxge_no_msg = B_TRUE;		/* control message display */
uint32_t	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t	nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t	nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t	nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t	nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t	nxge_jumbo_enable = B_FALSE;
uint16_t	nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t	nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t	nxge_tx_scheme = NXGE_USE_SERIAL;

/* MAX LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t	nxge_lso_max = NXGE_LSO_MAXLEN;

/*
 * Debugging flags:
 *		nxge_no_tx_lb : transmit load balancing
 *		nxge_tx_lb_policy: 0 - TCP port (default)
 *				   3 - DEST MAC
 */
uint32_t	nxge_no_tx_lb = 0;
uint32_t	nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
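/*
 * Illustrative example (not driver code): these globals are usually
 * overridden from /etc/system before the module loads, using the
 * standard "set module:variable = value" syntax, e.g.
 *
 *	set nxge:nxge_jumbo_enable = 1
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_msi_enable = 0
 *
 * The values shown are examples only, not recommendations.
 */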
/* Use kmem_alloc() to allocate data buffers. */
#if !defined(__i386)
uint32_t	nxge_use_kmem_alloc = 1;
#else
uint32_t	nxge_use_kmem_alloc = 0;
#endif

rtrace_t npi_rtracebuf;

#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef	NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *,
	size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *,
	size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_unicst(void *, const uint8_t *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static void nxge_m_resources(void *);
mblk_t *nxge_m_tx(void *arg, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	mac_addr_slot_t slot);
void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
	boolean_t factory);
static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
static	boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
	uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
	uint_t, void *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
	const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t,
	void *);

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ 256

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	nxge_m_unicst,
	nxge_m_tx,
	nxge_m_resources,
	nxge_m_ioctl,
	nxge_m_getcapab,
	NULL,
	NULL,
	nxge_m_setprop,
	nxge_m_getprop
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void			*nxge_list = NULL;

void			*nxge_hw_list = NULL;
nxge_os_mutex_t		nxge_common_lock;

extern uint64_t		npi_debug_level;

extern nxge_status_t	nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t	nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t	nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t	nxge_intr_ldgv_init(p_nxge_t);
extern void		nxge_fm_init(p_nxge_t,
	ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *,
	ddi_dma_attr_t *);
extern void		nxge_fm_fini(p_nxge_t);
extern npi_status_t	npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;
/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes [] = {0x2000};
#else
size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
		0x10000, 0x20000, 0x40000, 0x80000,
		0x100000, 0x200000, 0x400000, 0x800000,
		0x1000000, 0x2000000, 0x4000000};
#endif
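/*
 * Illustrative sketch (not driver code): the attribute structures above
 * are consumed by the DDI DMA framework roughly as follows; the handle
 * and length names here are hypothetical:
 *
 *	ddi_dma_handle_t dmah;
 *	caddr_t kaddr;
 *	size_t rlen;
 *	ddi_acc_handle_t acch;
 *
 *	(void) ddi_dma_alloc_handle(dip, &nxge_rx_dma_attr,
 *	    DDI_DMA_DONTWAIT, 0, &dmah);
 *	(void) ddi_dma_mem_alloc(dmah, length, &nxge_dev_buf_dma_acc_attr,
 *	    DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0, &kaddr, &rlen, &acch);
 *
 * nxge_dma_mem_alloc() wraps this sequence; per the comment above, the
 * driver prefers the largest entry of alloc_sizes[] that succeeds so
 * that fewer chunks need to be managed.
 */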
/*
 * nxge_attach -- attach(9E) entry point.
 *
 * Attach a new instance of the driver, or resume a suspended one.
 */

extern void nxge_get_environs(nxge_t *);

static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t	nxgep = NULL;
	int		instance;
	int		status = DDI_SUCCESS;
	uint8_t		portn;
	nxge_mmac_t	*mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
	    &nxge_dev_desc_dma_acc_attr,
	    &nxge_rx_dma_attr);

	/*
	 * Create & initialize the per-Neptune data structure
	 * (even if we're a guest).
	 */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}
	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
		 * internally; the other 2 ports use BMAC (1G "Big" MAC).
		 * The two types of MACs have different characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);

		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * The address_hi (the first int of the reg property)
		 * contains the config handle; bits 28-31 are OBP-specific
		 * info and must be masked off.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
		ddi_prop_free(regp);
	}

	if (isLDOMguest(nxgep)) {
		uchar_t *prop_val;
		uint_t prop_len;

		extern void nxge_get_logical_props(p_nxge_t);

		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
		nxgep->mac.portmode = PORT_LOGICAL;
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
		    "phy-type", "virtual transceiver");

		nxgep->nports = 1;
		nxgep->board_ver = 0;	/* XXX What? */

		/*
		 * local-mac-address property gives us info on which
		 * specific MAC address the Hybrid resource is associated
		 * with.
		 */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "local-mac-address", &prop_val,
		    &prop_len) != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		if (prop_len != ETHERADDRL) {
			ddi_prop_free(prop_val);
			goto nxge_attach_fail5;
		}
		ether_copy(prop_val, nxgep->hio_mac_addr);
		ddi_prop_free(prop_val);
		nxge_get_logical_props(nxgep);

	} else {
		status = nxge_xcvr_find(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
			    " Couldn't determine card type"
			    " .... exit "));
			goto nxge_attach_fail5;
		}

		status = nxge_get_config_properties(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "get_hw create failed"));
			goto nxge_attach_fail;
		}
	}
	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

	nxge_hw_id_init(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		/* Find our VR & channel sets. */
		status = nxge_hio_vr_add(nxgep);
		goto nxge_attach_exit;
	}
#endif

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}
	status = nxge_add_soft_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "add_soft_intr failed"));
		goto nxge_attach_fail;
	}

	/*
	 * Enable interrupts.
	 */
	nxge_intrs_enable(nxgep);

	/* If a guest, register with vio_net instead. */
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;
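	/*
	 * Error exits: the nxge_attach_fail* labels below unwind the attach
	 * sequence in reverse order.  Each numbered label undoes one setup
	 * step and falls through to the next, so a failure at any point
	 * releases only the resources acquired up to that point;
	 * nxge_attach_fail itself simply unattaches everything.
	 */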
nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		status = DDI_SUCCESS;
	int		instance;
	p_nxge_t	nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (isLDOMguest(nxgep)) {
		nxge_hio_unregister(nxgep);
	} else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/* remove soft interrupts */
	nxge_remove_soft_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were set up during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
int
nxge_hsvc_register(
	nxge_t *nxgep)
{
	nxge_status_t status;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			return (DDI_FAILURE);
		}
		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}

	return (DDI_SUCCESS);
}
#endif

static char n2_siu_name[] = "niu";
static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;
	char		buf[MAXPATHLEN + 1];
	char		*devname;
#ifdef	NXGE_DEBUG
	char		*sysname;
#endif
	off_t		regsize;
	nxge_status_t	status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int		*prop_val;
		uint_t		prop_len;
		uint8_t		func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				ddi_prop_free(prop_val);
				return (NXGE_OK);
			}
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * Workaround for a bit-swapping bug in HW that causes
		 * no-snoop to be set, resulting in DMA not being
		 * synched properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;
		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));

	if (isLDOMguest(nxgep)) {
		nxge_guest_regs_map_free(nxgep);
		return;
	}

	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->interrupt_cookie = 0;
	} else {
		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
		    &nxgep->interrupt_cookie);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_setup_mutexes: failed 0x%x",
			    ddi_status));
			goto nxge_setup_mutexes_exit;
		}
	}

	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
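	/*
	 * Note: initializing a mutex with the DDI interrupt block cookie
	 * gives it an interrupt-level priority, so it can be acquired
	 * safely from the ISR as well as from base-level code.  An LDOMs
	 * guest has no local interrupt cookie of its own (its interrupts
	 * arrive through the hypervisor), hence the zero cookie above.
	 */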
	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context
	 * because an FFLP operation can take a very long time to
	 * complete and is therefore not suitable to invoke from
	 * an interrupt handler.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 *  enable the port, configure the dma channel bitmap,
		 *  configure the max burst size).
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}
	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep); /* XXX What changes do I need to make here? */

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}


timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) ||
	    (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}

void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);
	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	regdata;
	int		i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
#if defined(__i386)
	reg = (size_t)buf[0];
#else
	reg = buf[0];
#endif

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}
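/*
 * Note: nxge_get64()/nxge_put64() above are debug peek/poke helpers
 * (used by the driver's ioctl path): the first quadword of the mblk
 * holds the register offset, the second (for nxge_put64) holds the
 * value to write, and nxge_get64 returns the value read in the same
 * mblk.
 */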
nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	/* In case a developer has changed nxge_debug_level. */
	if (nxgep != NULL && nxgep->nxge_debug_level != nxge_debug_level)
		nxgep->nxge_debug_level = nxge_debug_level;

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);

	}
}
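/*
 * Illustrative example: output from NXGE_DEBUG_MSG() is gated by the
 * nxge_debug_level bitmask above, so a developer might enable DDI and
 * DMA tracing by setting the mask (numerically) via /etc/system or
 * mdb -kw, e.g.
 *
 *	set nxge:nxge_debug_level = <numeric value of DDI_CTL | DMA_CTL>
 *
 * NXGE_NOTE and NXGE_ERR_CTL messages are always printed, regardless
 * of the mask.
 */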
char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}

#ifdef	NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t	cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char		*dev_ptr;
	ddi_acc_handle_t pci_config_handle;
	uint32_t	regval;
	int		i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first  0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}
}
#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

	nxge_intrs_disable(nxgep);
	nxge_destroy_dev(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

	nxgep->suspended = DDI_RESUME;
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);
	(void) nxge_tx_mac_enable(nxgep);
	nxge_intrs_enable(nxgep);
	nxgep->suspended = 0;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_resume status = 0x%x", status));
	return (status);
}

static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
	    nxgep->mac.portnum));

	status = nxge_link_init(nxgep);

	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "port%d Bad register acc handle", nxgep->mac.portnum));
		status = NXGE_ERROR;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_setup_dev status "
		    "(xcvr init 0x%08x)", status));
		goto nxge_setup_dev_exit;
	}

nxge_setup_dev_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_dev port %d status = 0x%08x",
	    nxgep->mac.portnum, status));

	return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	(void) nxge_hw_stop(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}

static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
	int			ddi_status = DDI_SUCCESS;
	uint_t			count;
	ddi_dma_cookie_t	cookie;
	uint_t			iommu_pagesize;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
	if (nxgep->niu_type != N2_NIU) {
		iommu_pagesize = dvma_pagesize(nxgep->dip);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
		    " default_block_size %d iommu_pagesize %d",
		    nxgep->sys_page_sz,
		    ddi_ptob(nxgep->dip, (ulong_t)1),
		    nxgep->rx_default_block_size,
		    iommu_pagesize));
		if (iommu_pagesize != 0) {
			if (nxgep->sys_page_sz == iommu_pagesize) {
				if (iommu_pagesize > 0x4000)
					nxgep->sys_page_sz = 0x4000;
			} else {
				if (nxgep->sys_page_sz > iommu_pagesize)
					nxgep->sys_page_sz = iommu_pagesize;
			}
		}
	}
	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    nxgep->sys_page_sz,
	    ddi_ptob(nxgep->dip, (ulong_t)1),
	    nxgep->rx_default_block_size,
	    nxgep->sys_page_mask));


	switch (nxgep->sys_page_sz) {
	default:
		nxgep->sys_page_sz = 0x1000;
		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		nxgep->rx_default_block_size = 0x2000;
		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	case 0x4000:
		nxgep->rx_default_block_size = 0x4000;
		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
		break;
	case 0x8000:
		nxgep->rx_default_block_size = 0x8000;
		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
		break;
	}

#ifndef USE_RX_BIG_BUF
	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
	nxgep->rx_default_block_size = 0x2000;
	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
	/*
	 * Get the system DMA burst size.
	 */
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &nxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed "
		    " status 0x%x", ddi_status));
		goto nxge_get_soft_properties_exit;
	}

	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
	    (caddr_t)nxgep->dmasparehandle,
	    sizeof (nxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "Binding spare handle to find system"
		    " burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto nxge_get_soft_properties_fail1;
	}

	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

nxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_system_dma_pages status = 0x%08x", status));
	return (status);
}
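/*
 * Worked example: on a system with 8 KB MMU pages (and an IOMMU page
 * size no smaller), sys_page_sz remains 0x2000, so the switch above
 * selects a 0x2000 default RX block size (RBR_BKSIZE_8K) and
 * sys_page_mask becomes ~0x1fff.
 */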
nxge_free_tx_mem_pool(nxgep);
2031
2032 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
2033 }
2034
2035 nxge_status_t
2036 nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
2037 {
2038 uint32_t rdc_max;
2039 p_nxge_dma_pt_cfg_t p_all_cfgp;
2040 p_nxge_hw_pt_cfg_t p_cfgp;
2041 p_nxge_dma_pool_t dma_poolp;
2042 p_nxge_dma_common_t *dma_buf_p;
2043 p_nxge_dma_pool_t dma_cntl_poolp;
2044 p_nxge_dma_common_t *dma_cntl_p;
2045 uint32_t *num_chunks; /* per dma */
2046 nxge_status_t status = NXGE_OK;
2047
2048 uint32_t nxge_port_rbr_size;
2049 uint32_t nxge_port_rbr_spare_size;
2050 uint32_t nxge_port_rcr_size;
2051 uint32_t rx_cntl_alloc_size;
2052
2053 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
2054
2055 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2056 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2057 rdc_max = NXGE_MAX_RDCS;
2058
2059 /*
2060 * Allocate memory for the common DMA data structures.
2061 */
2062 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2063 KM_SLEEP);
2064 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2065 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2066
2067 dma_cntl_poolp = (p_nxge_dma_pool_t)
2068 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2069 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2070 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2071
2072 num_chunks = (uint32_t *)KMEM_ZALLOC(
2073 sizeof (uint32_t) * rdc_max, KM_SLEEP);
2074
2075 /*
2076 * Assume that each DMA channel will be configured with
2077 * the default block size.
2078 * The rbr block count is rounded up to a multiple of the batch count (16).
2079 */
2080 nxge_port_rbr_size = p_all_cfgp->rbr_size;
2081 nxge_port_rcr_size = p_all_cfgp->rcr_size;
2082
2083 if (!nxge_port_rbr_size) {
2084 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
2085 }
2086 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
2087 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
2088 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
2089 }
2090
2091 p_all_cfgp->rbr_size = nxge_port_rbr_size;
2092 nxge_port_rbr_spare_size = nxge_rbr_spare_size;
2093
2094 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
2095 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
2096 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
2097 }
2098 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
2099 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2100 "nxge_alloc_rx_mem_pool: RBR size too high %d, "
2101 "set to default %d",
2102 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
2103 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
2104 }
2105 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
2106 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2107 "nxge_alloc_rx_mem_pool: RCR size too high %d, "
2108 "set to default %d",
2109 nxge_port_rcr_size, RCR_DEFAULT_MAX));
2110 nxge_port_rcr_size = RCR_DEFAULT_MAX;
2111 }
2112
2113 /*
2114 * N2/NIU places limits on the descriptor sizes: contiguous memory
2115 * allocation for data buffers is capped at 4M (contig_mem_alloc),
2116 * and control buffers must be little endian (so they must use the
2117 * ddi/dki mem alloc function). 
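 *
 * A worked example of the rounding above (a sketch, using the
 * driver's own defaults): a requested rbr_size of 1000 is not a
 * multiple of NXGE_RXDMA_POST_BATCH (16), so it is rounded up to
 * (1000/16 + 1) * 16 = 1008. On N2/NIU the size is then forced to
 * NXGE_NIU_CONTIG_RBR_MAX below if it exceeds that limit or is not
 * a power of 2.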
2118 */ 2119 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2120 if (nxgep->niu_type == N2_NIU) { 2121 nxge_port_rbr_spare_size = 0; 2122 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2123 (!ISP2(nxge_port_rbr_size))) { 2124 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2125 } 2126 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2127 (!ISP2(nxge_port_rcr_size))) { 2128 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2129 } 2130 } 2131 #endif 2132 2133 /* 2134 * Addresses of receive block ring, receive completion ring and the 2135 * mailbox must be all cache-aligned (64 bytes). 2136 */ 2137 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2138 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2139 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2140 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2141 2142 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2143 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2144 "nxge_port_rcr_size = %d " 2145 "rx_cntl_alloc_size = %d", 2146 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2147 nxge_port_rcr_size, 2148 rx_cntl_alloc_size)); 2149 2150 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2151 if (nxgep->niu_type == N2_NIU) { 2152 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2153 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2154 2155 if (!ISP2(rx_buf_alloc_size)) { 2156 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2157 "==> nxge_alloc_rx_mem_pool: " 2158 " must be power of 2")); 2159 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2160 goto nxge_alloc_rx_mem_pool_exit; 2161 } 2162 2163 if (rx_buf_alloc_size > (1 << 22)) { 2164 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2165 "==> nxge_alloc_rx_mem_pool: " 2166 " limit size to 4M")); 2167 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2168 goto nxge_alloc_rx_mem_pool_exit; 2169 } 2170 2171 if (rx_cntl_alloc_size < 0x2000) { 2172 rx_cntl_alloc_size = 0x2000; 2173 } 2174 } 2175 #endif 2176 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2177 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2178 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2179 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2180 2181 dma_poolp->ndmas = p_cfgp->max_rdcs; 2182 dma_poolp->num_chunks = num_chunks; 2183 dma_poolp->buf_allocated = B_TRUE; 2184 nxgep->rx_buf_pool_p = dma_poolp; 2185 dma_poolp->dma_buf_pool_p = dma_buf_p; 2186 2187 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2188 dma_cntl_poolp->buf_allocated = B_TRUE; 2189 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2190 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2191 2192 /* Allocate the receive rings, too. */ 2193 nxgep->rx_rbr_rings = 2194 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2195 nxgep->rx_rbr_rings->rbr_rings = 2196 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2197 nxgep->rx_rcr_rings = 2198 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2199 nxgep->rx_rcr_rings->rcr_rings = 2200 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2201 nxgep->rx_mbox_areas_p = 2202 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2203 nxgep->rx_mbox_areas_p->rxmbox_areas = 2204 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2205 2206 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2207 p_cfgp->max_rdcs; 2208 2209 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2210 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2211 2212 nxge_alloc_rx_mem_pool_exit: 2213 return (status); 2214 } 2215 2216 /* 2217 * nxge_alloc_rxb 2218 * 2219 * Allocate buffers for an RDC. 
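 *
 * Sizing sketch (names as used in this file): the buffer request made
 * below is
 *
 *	rx_buf_alloc_size = rx_default_block_size *
 *	    (nxge_port_rbr_size + nxge_port_rbr_spare_size);
 *
 * so, purely as an illustration, a 4 KB block size and a 2048-entry
 * RBR with no spares yields an 8 MB request, which
 * nxge_alloc_rx_buf_dma() then satisfies in chunks.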
2220 *
2221 * Arguments:
2222 * 	nxgep
2223 * 	channel		The channel to map into our kernel space.
2224 *
2225 * Notes:
2226 *
2227 * NPI function calls:
2228 *
2229 * NXGE function calls:
2230 *
2231 * Registers accessed:
2232 *
2233 * Context:
2234 *
2235 * Taking apart:
2236 *
2237 * Open questions:
2238 *
2239 */
2240 nxge_status_t
2241 nxge_alloc_rxb(
2242 	p_nxge_t nxgep,
2243 	int channel)
2244 {
2245 	size_t rx_buf_alloc_size;
2246 	nxge_status_t status = NXGE_OK;
2247
2248 	nxge_dma_common_t **data;
2249 	nxge_dma_common_t **control;
2250 	uint32_t *num_chunks;
2251
2252 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb"));
2253
2254 	/*
2255 	 * Allocate memory for the receive buffers and descriptor rings.
2256 	 * Replace these allocation functions with the interface functions
2257 	 * provided by the partition manager if/when they are available.
2258 	 */
2259
2260 	/*
2261 	 * Allocate memory for the receive buffer blocks.
2262 	 */
2263 	rx_buf_alloc_size = (nxgep->rx_default_block_size *
2264 	    (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));
2265
2266 	data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2267 	num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];
2268
2269 	if ((status = nxge_alloc_rx_buf_dma(
2270 	    nxgep, channel, data, rx_buf_alloc_size,
2271 	    nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
2272 		return (status);
2273 	}
2274
2275 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): "
2276 	    "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));
2277
2278 	/*
2279 	 * Allocate memory for descriptor rings and mailbox.
2280 	 */
2281 	control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2282
2283 	if ((status = nxge_alloc_rx_cntl_dma(
2284 	    nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size))
2285 	    != NXGE_OK) {
2286 		nxge_free_rx_cntl_dma(nxgep, *control);
2287 		(*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE;
2288 		nxge_free_rx_buf_dma(nxgep, *data, *num_chunks);
2289 		return (status);
2290 	}
2291
2292 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2293 	    "<== nxge_alloc_rxb: status 0x%08x", status));
2294
2295 	return (status);
2296 }
2297
2298 void
2299 nxge_free_rxb(
2300 	p_nxge_t nxgep,
2301 	int channel)
2302 {
2303 	nxge_dma_common_t *data;
2304 	nxge_dma_common_t *control;
2305 	uint32_t num_chunks;
2306
2307 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb"));
2308
2309 	data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2310 	num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
2311 	nxge_free_rx_buf_dma(nxgep, data, num_chunks);
2312
2313 	nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2314 	nxgep->rx_buf_pool_p->num_chunks[channel] = 0;
2315
2316 	control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2317 	nxge_free_rx_cntl_dma(nxgep, control);
2318
2319 	nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2320
2321 	KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2322 	KMEM_FREE(control, sizeof (nxge_dma_common_t));
2323
2324 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb"));
2325 }
2326
2327 static void
2328 nxge_free_rx_mem_pool(p_nxge_t nxgep)
2329 {
2330 	int rdc_max = NXGE_MAX_RDCS;
2331
2332 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
2333
2334 	if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
2335 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2336 		    "<== nxge_free_rx_mem_pool "
2337 		    "(null rx buf pool or buf not allocated)"));
2338 		return;
2339 	}
2340 	if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
2341 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2342 		    "<== nxge_free_rx_mem_pool 
" 2343 "(null rx cntl buf pool or cntl buf not allocated")); 2344 return; 2345 } 2346 2347 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2348 sizeof (p_nxge_dma_common_t) * rdc_max); 2349 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2350 2351 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2352 sizeof (uint32_t) * rdc_max); 2353 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2354 sizeof (p_nxge_dma_common_t) * rdc_max); 2355 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2356 2357 nxgep->rx_buf_pool_p = 0; 2358 nxgep->rx_cntl_pool_p = 0; 2359 2360 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2361 sizeof (p_rx_rbr_ring_t) * rdc_max); 2362 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2363 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2364 sizeof (p_rx_rcr_ring_t) * rdc_max); 2365 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2366 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2367 sizeof (p_rx_mbox_t) * rdc_max); 2368 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2369 2370 nxgep->rx_rbr_rings = 0; 2371 nxgep->rx_rcr_rings = 0; 2372 nxgep->rx_mbox_areas_p = 0; 2373 2374 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2375 } 2376 2377 2378 static nxge_status_t 2379 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2380 p_nxge_dma_common_t *dmap, 2381 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2382 { 2383 p_nxge_dma_common_t rx_dmap; 2384 nxge_status_t status = NXGE_OK; 2385 size_t total_alloc_size; 2386 size_t allocated = 0; 2387 int i, size_index, array_size; 2388 boolean_t use_kmem_alloc = B_FALSE; 2389 2390 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2391 2392 rx_dmap = (p_nxge_dma_common_t) 2393 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2394 KM_SLEEP); 2395 2396 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2397 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2398 dma_channel, alloc_size, block_size, dmap)); 2399 2400 total_alloc_size = alloc_size; 2401 2402 #if defined(RX_USE_RECLAIM_POST) 2403 total_alloc_size = alloc_size + alloc_size/4; 2404 #endif 2405 2406 i = 0; 2407 size_index = 0; 2408 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2409 while ((alloc_sizes[size_index] < alloc_size) && 2410 (size_index < array_size)) 2411 size_index++; 2412 if (size_index >= array_size) { 2413 size_index = array_size - 1; 2414 } 2415 2416 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2417 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2418 use_kmem_alloc = B_TRUE; 2419 #if defined(__i386) || defined(__amd64) 2420 size_index = 0; 2421 #endif 2422 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2423 "==> nxge_alloc_rx_buf_dma: " 2424 "Neptune use kmem_alloc() - size_index %d", 2425 size_index)); 2426 } 2427 2428 while ((allocated < total_alloc_size) && 2429 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2430 rx_dmap[i].dma_chunk_index = i; 2431 rx_dmap[i].block_size = block_size; 2432 rx_dmap[i].alength = alloc_sizes[size_index]; 2433 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2434 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2435 rx_dmap[i].dma_channel = dma_channel; 2436 rx_dmap[i].contig_alloc_type = B_FALSE; 2437 rx_dmap[i].kmem_alloc_type = B_FALSE; 2438 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2439 2440 /* 2441 * N2/NIU: data buffers must be contiguous as the driver 2442 * needs to call Hypervisor api to set up 2443 * logical pages. 
2444 		 */
2445 		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2446 			rx_dmap[i].contig_alloc_type = B_TRUE;
2447 			rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC;
2448 		} else if (use_kmem_alloc) {
2449 			/* For Neptune, use kmem_alloc */
2450 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2451 			    "==> nxge_alloc_rx_buf_dma: "
2452 			    "Neptune: use kmem_alloc()"));
2453 			rx_dmap[i].kmem_alloc_type = B_TRUE;
2454 			rx_dmap[i].buf_alloc_type = KMEM_ALLOC;
2455 		}
2456
2457 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2458 		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
2459 		    "i %d nblocks %d alength %d",
2460 		    dma_channel, i, &rx_dmap[i], block_size,
2461 		    i, rx_dmap[i].nblocks,
2462 		    rx_dmap[i].alength));
2463 		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2464 		    &nxge_rx_dma_attr,
2465 		    rx_dmap[i].alength,
2466 		    &nxge_dev_buf_dma_acc_attr,
2467 		    DDI_DMA_READ | DDI_DMA_STREAMING,
2468 		    (p_nxge_dma_common_t)(&rx_dmap[i]));
2469 		if (status != NXGE_OK) {
2470 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2471 			    "nxge_alloc_rx_buf_dma: Alloc Failed: "
2472 			    "dma %d size_index %d size requested %d",
2473 			    dma_channel,
2474 			    size_index,
2475 			    rx_dmap[i].alength));
2476 			size_index--;
2477 		} else {
2478 			rx_dmap[i].buf_alloc_state = BUF_ALLOCATED;
2479 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2480 			    " nxge_alloc_rx_buf_dma DONE  alloc mem: "
2481 			    "dma %d dma_buf_p $%p kaddrp $%p alength %d "
2482 			    "buf_alloc_state %d alloc_type %d",
2483 			    dma_channel,
2484 			    &rx_dmap[i],
2485 			    rx_dmap[i].kaddrp,
2486 			    rx_dmap[i].alength,
2487 			    rx_dmap[i].buf_alloc_state,
2488 			    rx_dmap[i].buf_alloc_type));
2489 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2490 			    " alloc_rx_buf_dma allocated rdc %d "
2491 			    "chunk %d size %x dvma %x bufp %llx kaddrp $%p",
2492 			    dma_channel, i, rx_dmap[i].alength,
2493 			    rx_dmap[i].ioaddr_pp, &rx_dmap[i],
2494 			    rx_dmap[i].kaddrp));
2495 			i++;
2496 			allocated += alloc_sizes[size_index];
2497 		}
2498 	}
2499
2500 	if (allocated < total_alloc_size) {
2501 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2502 		    "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
2503 		    "allocated 0x%x requested 0x%x",
2504 		    dma_channel,
2505 		    allocated, total_alloc_size));
2506 		status = NXGE_ERROR;
2507 		goto nxge_alloc_rx_mem_fail1;
2508 	}
2509
2510 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2511 	    "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
2512 	    "allocated 0x%x requested 0x%x",
2513 	    dma_channel,
2514 	    allocated, total_alloc_size));
2515
2516 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2517 	    " alloc_rx_buf_dma rdc %d allocated %d chunks",
2518 	    dma_channel, i));
2519 	*num_chunks = i;
2520 	*dmap = rx_dmap;
2521
2522 	goto nxge_alloc_rx_mem_exit;
2523
2524 nxge_alloc_rx_mem_fail1:
2525 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2526
2527 nxge_alloc_rx_mem_exit:
2528 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2529 	    "<== nxge_alloc_rx_buf_dma status 0x%08x", status));
2530
2531 	return (status);
2532 }
2533
2534 /*ARGSUSED*/
2535 static void
2536 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2537     uint32_t num_chunks)
2538 {
2539 	int i;
2540
2541 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2542 	    "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
2543
2544 	if (dmap == 0)
2545 		return;
2546
2547 	for (i = 0; i < num_chunks; i++) {
2548 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2549 		    "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
2550 		    i, dmap));
2551 		nxge_dma_free_rx_data_buf(dmap++);
2552 	}
2553
2554 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma"));
2555 }
2556
2557 /*ARGSUSED*/
2558 static nxge_status_t
2559 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2560     p_nxge_dma_common_t *dmap, size_t 
size)
2561 {
2562 	p_nxge_dma_common_t rx_dmap;
2563 	nxge_status_t status = NXGE_OK;
2564
2565 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
2566
2567 	rx_dmap = (p_nxge_dma_common_t)
2568 	    KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2569
2570 	rx_dmap->contig_alloc_type = B_FALSE;
2571 	rx_dmap->kmem_alloc_type = B_FALSE;
2572
2573 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2574 	    &nxge_desc_dma_attr,
2575 	    size,
2576 	    &nxge_dev_desc_dma_acc_attr,
2577 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2578 	    rx_dmap);
2579 	if (status != NXGE_OK) {
2580 		goto nxge_alloc_rx_cntl_dma_fail1;
2581 	}
2582
2583 	*dmap = rx_dmap;
2584 	goto nxge_alloc_rx_cntl_dma_exit;
2585
2586 nxge_alloc_rx_cntl_dma_fail1:
2587 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
2588
2589 nxge_alloc_rx_cntl_dma_exit:
2590 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2591 	    "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
2592
2593 	return (status);
2594 }
2595
2596 /*ARGSUSED*/
2597 static void
2598 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2599 {
2600 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
2601
2602 	if (dmap == 0)
2603 		return;
2604
2605 	nxge_dma_mem_free(dmap);
2606
2607 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
2608 }
2609
2610 typedef struct {
2611 	size_t tx_size;
2612 	size_t cr_size;
2613 	size_t threshhold;
2614 } nxge_tdc_sizes_t;
2615
2616 static
2617 nxge_status_t
2618 nxge_tdc_sizes(
2619 	nxge_t *nxgep,
2620 	nxge_tdc_sizes_t *sizes)
2621 {
2622 	uint32_t threshhold;	/* The bcopy() threshold */
2623 	size_t tx_size;		/* Transmit buffer size */
2624 	size_t cr_size;		/* Completion ring size */
2625
2626 	/*
2627 	 * Assume that each DMA channel will be configured with the
2628 	 * default transmit buffer size for copying transmit data.
2629 	 * (If a packet is bigger than this, it will not be copied.)
2630 	 */
2631 	if (nxgep->niu_type == N2_NIU) {
2632 		threshhold = TX_BCOPY_SIZE;
2633 	} else {
2634 		threshhold = nxge_bcopy_thresh;
2635 	}
2636 	tx_size = nxge_tx_ring_size * threshhold;
2637
2638 	cr_size = nxge_tx_ring_size * sizeof (tx_desc_t);
2639 	cr_size += sizeof (txdma_mailbox_t);
2640
2641 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2642 	if (nxgep->niu_type == N2_NIU) {
2643 		if (!ISP2(tx_size)) {
2644 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2645 			    "==> nxge_tdc_sizes: Tx size"
2646 			    " must be a power of 2"));
2647 			return (NXGE_ERROR);
2648 		}
2649
2650 		if (tx_size > (1 << 22)) {
2651 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2652 			    "==> nxge_tdc_sizes: Tx size"
2653 			    " limited to 4M"));
2654 			return (NXGE_ERROR);
2655 		}
2656
2657 		if (cr_size < 0x2000)
2658 			cr_size = 0x2000;
2659 	}
2660 #endif
2661
2662 	sizes->threshhold = threshhold;
2663 	sizes->tx_size = tx_size;
2664 	sizes->cr_size = cr_size;
2665
2666 	return (NXGE_OK);
2667 }
2668 /*
2669 * nxge_alloc_txb
2670 *
2671 * 	Allocate buffers for a TDC.
2672 *
2673 * Arguments:
2674 * 	nxgep
2675 * 	channel		The channel to map into our kernel space. 
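 *
 * Sizing sketch (see nxge_tdc_sizes() above); the numbers here are
 * illustrative only, the real values come from the nxge_tx_ring_size
 * and nxge_bcopy_thresh tunables:
 *
 *	tx_size = ring entries * bcopy threshold
 *	        = 1024 * 2048 = 2 MB, for example
 *	cr_size = ring entries * sizeof (tx_desc_t) +
 *	          sizeof (txdma_mailbox_t)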
2676 *
2677 * Notes:
2678 *
2679 * NPI function calls:
2680 *
2681 * NXGE function calls:
2682 *
2683 * Registers accessed:
2684 *
2685 * Context:
2686 *
2687 * Taking apart:
2688 *
2689 * Open questions:
2690 *
2691 */
2692 nxge_status_t
2693 nxge_alloc_txb(
2694 	p_nxge_t nxgep,
2695 	int channel)
2696 {
2697 	nxge_dma_common_t **dma_buf_p;
2698 	nxge_dma_common_t **dma_cntl_p;
2699 	uint32_t *num_chunks;
2700 	nxge_status_t status = NXGE_OK;
2701
2702 	nxge_tdc_sizes_t sizes;
2703
2704 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb"));
2705
2706 	if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK)
2707 		return (NXGE_ERROR);
2708
2709 	/*
2710 	 * Allocate memory for transmit buffers and descriptor rings.
2711 	 * Replace these allocation functions with the interface functions
2712 	 * provided by the partition manager Real Soon Now.
2713 	 */
2714 	dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2715 	num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel];
2716
2717 	dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2718
2719 	/*
2720 	 * Allocate memory for the transmit buffer pool.
2721 	 */
2726 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2727 	    "sizes: tx: %ld, cr:%ld, th:%ld",
2728 	    sizes.tx_size, sizes.cr_size, sizes.threshhold));
2729
2730 	*num_chunks = 0;
2731 	status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p,
2732 	    sizes.tx_size, sizes.threshhold, num_chunks);
2733 	if (status != NXGE_OK) {
2734 		cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!");
2735 		return (status);
2736 	}
2737
2738 	/*
2739 	 * Allocate memory for descriptor rings and mailbox.
2740 	 */
2741 	status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p,
2742 	    sizes.cr_size);
2743 	if (status != NXGE_OK) {
2744 		nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks);
2745 		cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!");
2746 		return (status);
2747 	}
2748
2749 	return (NXGE_OK);
2750 }
2751
2752 void
2753 nxge_free_txb(
2754 	p_nxge_t nxgep,
2755 	int channel)
2756 {
2757 	nxge_dma_common_t *data;
2758 	nxge_dma_common_t *control;
2759 	uint32_t num_chunks;
2760
2761 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb"));
2762
2763 	data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2764 	num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2765 	nxge_free_tx_buf_dma(nxgep, data, num_chunks);
2766
2767 	nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2768 	nxgep->tx_buf_pool_p->num_chunks[channel] = 0;
2769
2770 	control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2771 	nxge_free_tx_cntl_dma(nxgep, control);
2772
2773 	nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2774
2775 	KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2776 	KMEM_FREE(control, sizeof (nxge_dma_common_t));
2777
2778 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb"));
2779 }
2780
2781 /*
2782 * nxge_alloc_tx_mem_pool
2783 *
2784 * 	This function allocates all of the per-port TDC control data structures.
2785 * 	The per-channel (TDC) data structures are allocated when needed. 
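 *
 * 	The arrays allocated below are sized for NXGE_MAX_TDCS so that a
 * 	channel number can be used as a direct index; ndmas records how
 * 	many of those channels (p_cfgp->tdc.owned) this port actually owns.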
2786 * 2787 * Arguments: 2788 * nxgep 2789 * 2790 * Notes: 2791 * 2792 * Context: 2793 * Any domain 2794 */ 2795 nxge_status_t 2796 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2797 { 2798 nxge_hw_pt_cfg_t *p_cfgp; 2799 nxge_dma_pool_t *dma_poolp; 2800 nxge_dma_common_t **dma_buf_p; 2801 nxge_dma_pool_t *dma_cntl_poolp; 2802 nxge_dma_common_t **dma_cntl_p; 2803 uint32_t *num_chunks; /* per dma */ 2804 int tdc_max; 2805 2806 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2807 2808 p_cfgp = &nxgep->pt_config.hw_config; 2809 tdc_max = NXGE_MAX_TDCS; 2810 2811 /* 2812 * Allocate memory for each transmit DMA channel. 2813 */ 2814 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2815 KM_SLEEP); 2816 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2817 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2818 2819 dma_cntl_poolp = (p_nxge_dma_pool_t) 2820 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2821 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2822 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2823 2824 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2825 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2826 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2827 "set to default %d", 2828 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2829 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2830 } 2831 2832 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2833 /* 2834 * N2/NIU has limitation on the descriptor sizes (contiguous 2835 * memory allocation on data buffers to 4M (contig_mem_alloc) 2836 * and little endian for control buffers (must use the ddi/dki mem alloc 2837 * function). The transmit ring is limited to 8K (includes the 2838 * mailbox). 2839 */ 2840 if (nxgep->niu_type == N2_NIU) { 2841 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2842 (!ISP2(nxge_tx_ring_size))) { 2843 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2844 } 2845 } 2846 #endif 2847 2848 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2849 2850 num_chunks = (uint32_t *)KMEM_ZALLOC( 2851 sizeof (uint32_t) * tdc_max, KM_SLEEP); 2852 2853 dma_poolp->ndmas = p_cfgp->tdc.owned; 2854 dma_poolp->num_chunks = num_chunks; 2855 dma_poolp->dma_buf_pool_p = dma_buf_p; 2856 nxgep->tx_buf_pool_p = dma_poolp; 2857 2858 dma_poolp->buf_allocated = B_TRUE; 2859 2860 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 2861 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2862 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2863 2864 dma_cntl_poolp->buf_allocated = B_TRUE; 2865 2866 nxgep->tx_rings = 2867 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 2868 nxgep->tx_rings->rings = 2869 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 2870 nxgep->tx_mbox_areas_p = 2871 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 2872 nxgep->tx_mbox_areas_p->txmbox_areas_p = 2873 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 2874 2875 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 2876 2877 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2878 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 2879 tdc_max, dma_poolp->ndmas)); 2880 2881 return (NXGE_OK); 2882 } 2883 2884 nxge_status_t 2885 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2886 p_nxge_dma_common_t *dmap, size_t alloc_size, 2887 size_t block_size, uint32_t *num_chunks) 2888 { 2889 p_nxge_dma_common_t tx_dmap; 2890 nxge_status_t status = NXGE_OK; 2891 size_t total_alloc_size; 2892 size_t allocated = 0; 2893 int i, size_index, array_size; 2894 2895 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 2896 2897 tx_dmap = (p_nxge_dma_common_t) 2898 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * 
NXGE_DMA_BLOCK, 2899 KM_SLEEP); 2900 2901 total_alloc_size = alloc_size; 2902 i = 0; 2903 size_index = 0; 2904 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2905 while ((alloc_sizes[size_index] < alloc_size) && 2906 (size_index < array_size)) 2907 size_index++; 2908 if (size_index >= array_size) { 2909 size_index = array_size - 1; 2910 } 2911 2912 while ((allocated < total_alloc_size) && 2913 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2914 2915 tx_dmap[i].dma_chunk_index = i; 2916 tx_dmap[i].block_size = block_size; 2917 tx_dmap[i].alength = alloc_sizes[size_index]; 2918 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2919 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2920 tx_dmap[i].dma_channel = dma_channel; 2921 tx_dmap[i].contig_alloc_type = B_FALSE; 2922 tx_dmap[i].kmem_alloc_type = B_FALSE; 2923 2924 /* 2925 * N2/NIU: data buffers must be contiguous as the driver 2926 * needs to call Hypervisor api to set up 2927 * logical pages. 2928 */ 2929 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2930 tx_dmap[i].contig_alloc_type = B_TRUE; 2931 } 2932 2933 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2934 &nxge_tx_dma_attr, 2935 tx_dmap[i].alength, 2936 &nxge_dev_buf_dma_acc_attr, 2937 DDI_DMA_WRITE | DDI_DMA_STREAMING, 2938 (p_nxge_dma_common_t)(&tx_dmap[i])); 2939 if (status != NXGE_OK) { 2940 size_index--; 2941 } else { 2942 i++; 2943 allocated += alloc_sizes[size_index]; 2944 } 2945 } 2946 2947 if (allocated < total_alloc_size) { 2948 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2949 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 2950 "allocated 0x%x requested 0x%x", 2951 dma_channel, 2952 allocated, total_alloc_size)); 2953 status = NXGE_ERROR; 2954 goto nxge_alloc_tx_mem_fail1; 2955 } 2956 2957 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2958 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 2959 "allocated 0x%x requested 0x%x", 2960 dma_channel, 2961 allocated, total_alloc_size)); 2962 2963 *num_chunks = i; 2964 *dmap = tx_dmap; 2965 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2966 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2967 *dmap, i)); 2968 goto nxge_alloc_tx_mem_exit; 2969 2970 nxge_alloc_tx_mem_fail1: 2971 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2972 2973 nxge_alloc_tx_mem_exit: 2974 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2975 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 2976 2977 return (status); 2978 } 2979 2980 /*ARGSUSED*/ 2981 static void 2982 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2983 uint32_t num_chunks) 2984 { 2985 int i; 2986 2987 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 2988 2989 if (dmap == 0) 2990 return; 2991 2992 for (i = 0; i < num_chunks; i++) { 2993 nxge_dma_mem_free(dmap++); 2994 } 2995 2996 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 2997 } 2998 2999 /*ARGSUSED*/ 3000 nxge_status_t 3001 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3002 p_nxge_dma_common_t *dmap, size_t size) 3003 { 3004 p_nxge_dma_common_t tx_dmap; 3005 nxge_status_t status = NXGE_OK; 3006 3007 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3008 tx_dmap = (p_nxge_dma_common_t) 3009 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3010 3011 tx_dmap->contig_alloc_type = B_FALSE; 3012 tx_dmap->kmem_alloc_type = B_FALSE; 3013 3014 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3015 &nxge_desc_dma_attr, 3016 size, 3017 &nxge_dev_desc_dma_acc_attr, 3018 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3019 tx_dmap); 3020 if (status != NXGE_OK) { 3021 
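		/* The control allocation failed; undo the KMEM_ZALLOC. */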
goto nxge_alloc_tx_cntl_dma_fail1;
3022 	}
3023
3024 	*dmap = tx_dmap;
3025 	goto nxge_alloc_tx_cntl_dma_exit;
3026
3027 nxge_alloc_tx_cntl_dma_fail1:
3028 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
3029
3030 nxge_alloc_tx_cntl_dma_exit:
3031 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3032 	    "<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
3033
3034 	return (status);
3035 }
3036
3037 /*ARGSUSED*/
3038 static void
3039 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
3040 {
3041 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
3042
3043 	if (dmap == 0)
3044 		return;
3045
3046 	nxge_dma_mem_free(dmap);
3047
3048 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
3049 }
3050
3051 /*
3052 * nxge_free_tx_mem_pool
3053 *
3054 * 	This function frees all of the per-port TDC control data structures.
3055 * 	The per-channel (TDC) data structures are freed when the channel
3056 * 	is stopped.
3057 *
3058 * Arguments:
3059 * 	nxgep
3060 *
3061 * Notes:
3062 *
3063 * Context:
3064 * 	Any domain
3065 */
3066 static void
3067 nxge_free_tx_mem_pool(p_nxge_t nxgep)
3068 {
3069 	int tdc_max = NXGE_MAX_TDCS;
3070
3071 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool"));
3072
3073 	if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) {
3074 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3075 		    "<== nxge_free_tx_mem_pool "
3076 		    "(null tx buf pool or buf not allocated)"));
3077 		return;
3078 	}
3079 	if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) {
3080 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3081 		    "<== nxge_free_tx_mem_pool "
3082 		    "(null tx cntl buf pool or cntl buf not allocated)"));
3083 		return;
3084 	}
3085
3086 	/* 1. Free the mailboxes. */
3087 	KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p,
3088 	    sizeof (p_tx_mbox_t) * tdc_max);
3089 	KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
3090
3091 	nxgep->tx_mbox_areas_p = 0;
3092
3093 	/* 2. Free the transmit ring arrays. */
3094 	KMEM_FREE(nxgep->tx_rings->rings,
3095 	    sizeof (p_tx_ring_t) * tdc_max);
3096 	KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t));
3097
3098 	nxgep->tx_rings = 0;
3099
3100 	/* 3. Free the completion ring data structures. */
3101 	KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p,
3102 	    sizeof (p_nxge_dma_common_t) * tdc_max);
3103 	KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t));
3104
3105 	nxgep->tx_cntl_pool_p = 0;
3106
3107 	/* 4. Free the data ring data structures. */
3108 	KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks,
3109 	    sizeof (uint32_t) * tdc_max);
3110 	KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p,
3111 	    sizeof (p_nxge_dma_common_t) * tdc_max);
3112 	KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t));
3113
3114 	nxgep->tx_buf_pool_p = 0;
3115
3116 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool"));
3117 }
3118
3119 /*ARGSUSED*/
3120 static nxge_status_t
3121 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
3122 	struct ddi_dma_attr *dma_attrp,
3123 	size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
3124 	p_nxge_dma_common_t dma_p)
3125 {
3126 	caddr_t kaddrp;
3127 	int ddi_status = DDI_SUCCESS;
3128 	boolean_t contig_alloc_type;
3129 	boolean_t kmem_alloc_type;
3130
3131 	contig_alloc_type = dma_p->contig_alloc_type;
3132
3133 	if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
3134 		/*
3135 		 * contig_alloc_type (contiguous memory) is only allowed
3136 		 * for N2/NIU. 
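		 *
		 * Allocation strategy used below (a sketch of the switch
		 * that follows):
		 *
		 *	contig_alloc_type  kmem_alloc_type  allocator
		 *	------------------------------------------------------
		 *	B_FALSE            B_FALSE          ddi_dma_mem_alloc()
		 *	B_FALSE            B_TRUE           KMEM_ALLOC() + bind
		 *	B_TRUE             (ignored)        contig_mem_alloc()
		 *	                                    (sun4v N2/NIU only)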
3137 		 */
3138 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3139 		    "nxge_dma_mem_alloc: alloc type not allowed (%d)",
3140 		    dma_p->contig_alloc_type));
3141 		return (NXGE_ERROR | NXGE_DDI_FAILED);
3142 	}
3143
3144 	dma_p->dma_handle = NULL;
3145 	dma_p->acc_handle = NULL;
3146 	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
3147 	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
3148 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
3149 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
3150 	if (ddi_status != DDI_SUCCESS) {
3151 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3152 		    "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
3153 		return (NXGE_ERROR | NXGE_DDI_FAILED);
3154 	}
3155
3156 	kmem_alloc_type = dma_p->kmem_alloc_type;
3157
3158 	switch (contig_alloc_type) {
3159 	case B_FALSE:
3160 		switch (kmem_alloc_type) {
3161 		case B_FALSE:
3162 			ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle,
3163 			    length,
3164 			    acc_attr_p,
3165 			    xfer_flags,
3166 			    DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
3167 			    &dma_p->acc_handle);
3168 			if (ddi_status != DDI_SUCCESS) {
3169 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3170 				    "nxge_dma_mem_alloc: "
3171 				    "ddi_dma_mem_alloc failed"));
3172 				ddi_dma_free_handle(&dma_p->dma_handle);
3173 				dma_p->dma_handle = NULL;
3174 				return (NXGE_ERROR | NXGE_DDI_FAILED);
3175 			}
3176 			if (dma_p->alength < length) {
3177 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3178 				    "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
3179 				    "< length."));
3180 				ddi_dma_mem_free(&dma_p->acc_handle);
3181 				ddi_dma_free_handle(&dma_p->dma_handle);
3182 				dma_p->acc_handle = NULL;
3183 				dma_p->dma_handle = NULL;
3184 				return (NXGE_ERROR);
3185 			}
3186
3187 			ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3188 			    NULL,
3189 			    kaddrp, dma_p->alength, xfer_flags,
3190 			    DDI_DMA_DONTWAIT,
3191 			    0, &dma_p->dma_cookie, &dma_p->ncookies);
3192 			if (ddi_status != DDI_DMA_MAPPED) {
3193 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3194 				    "nxge_dma_mem_alloc: ddi_dma_addr_bind "
3195 				    "failed "
3196 				    "(status 0x%x ncookies %d.)", ddi_status,
3197 				    dma_p->ncookies));
3198 				if (dma_p->acc_handle) {
3199 					ddi_dma_mem_free(&dma_p->acc_handle);
3200 					dma_p->acc_handle = NULL;
3201 				}
3202 				ddi_dma_free_handle(&dma_p->dma_handle);
3203 				dma_p->dma_handle = NULL;
3204 				return (NXGE_ERROR | NXGE_DDI_FAILED);
3205 			}
3206
3207 			if (dma_p->ncookies != 1) {
3208 				NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3209 				    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3210 				    "> 1 cookie"
3211 				    "(status 0x%x ncookies %d.)", ddi_status,
3212 				    dma_p->ncookies));
3213 				if (dma_p->acc_handle) {
3214 					ddi_dma_mem_free(&dma_p->acc_handle);
3215 					dma_p->acc_handle = NULL;
3216 				}
3217 				(void) ddi_dma_unbind_handle(dma_p->dma_handle);
3218 				ddi_dma_free_handle(&dma_p->dma_handle);
3219 				dma_p->dma_handle = NULL;
3220 				return (NXGE_ERROR);
3221 			}
3222 			break;
3223
3224 		case B_TRUE:
3225 			kaddrp = KMEM_ALLOC(length, KM_NOSLEEP);
3226 			if (kaddrp == NULL) {
3227 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3228 				    "nxge_dma_mem_alloc: "
3229 				    "kmem_alloc failed"));
3230 				return (NXGE_ERROR);
3231 			}
3232
3233 			dma_p->alength = length;
3234 			ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3235 			    NULL, kaddrp, dma_p->alength, xfer_flags,
3236 			    DDI_DMA_DONTWAIT, 0,
3237 			    &dma_p->dma_cookie, &dma_p->ncookies);
3238 			if (ddi_status != DDI_DMA_MAPPED) {
3239 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3240 				    "nxge_dma_mem_alloc:ddi_dma_addr_bind: "
3241 				    "(kmem_alloc) failed kaddrp $%p length %d "
3242 				    "(status 0x%x (%d) ncookies %d.)",
3243 				    kaddrp, length,
3244 				    ddi_status, ddi_status, dma_p->ncookies));
3245 				KMEM_FREE(kaddrp, length);
3246 				dma_p->acc_handle = NULL;
3247 
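				/*
				 * The bind never completed, so there is no
				 * mapping to undo; only the DMA handle is
				 * left to release.
				 */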
ddi_dma_free_handle(&dma_p->dma_handle);
3248 				dma_p->dma_handle = NULL;
3249 				dma_p->kaddrp = NULL;
3250 				return (NXGE_ERROR | NXGE_DDI_FAILED);
3251 			}
3252
3253 			if (dma_p->ncookies != 1) {
3254 				NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3255 				    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3256 				    "(kmem_alloc) > 1 cookie"
3257 				    "(status 0x%x ncookies %d.)", ddi_status,
3258 				    dma_p->ncookies));
3259 				KMEM_FREE(kaddrp, length);
3260 				dma_p->acc_handle = NULL;
3261 				(void) ddi_dma_unbind_handle(dma_p->dma_handle);
3262 				ddi_dma_free_handle(&dma_p->dma_handle);
3263 				dma_p->dma_handle = NULL;
3264 				dma_p->kaddrp = NULL;
3265 				return (NXGE_ERROR);
3266 			}
3267
3268 			dma_p->kaddrp = kaddrp;
3269
3270 			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
3271 			    "nxge_dma_mem_alloc: kmem_alloc dmap $%p "
3272 			    "kaddr $%p alength %d",
3273 			    dma_p,
3274 			    kaddrp,
3275 			    dma_p->alength));
3276 			break;
3277 		}
3278 		break;
3279
3280 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3281 	case B_TRUE:
3282 		kaddrp = (caddr_t)contig_mem_alloc(length);
3283 		if (kaddrp == NULL) {
3284 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3285 			    "nxge_dma_mem_alloc:contig_mem_alloc failed."));
3286 			ddi_dma_free_handle(&dma_p->dma_handle);
3287 			return (NXGE_ERROR | NXGE_DDI_FAILED);
3288 		}
3289
3290 		dma_p->alength = length;
3291 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
3292 		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
3293 		    &dma_p->dma_cookie, &dma_p->ncookies);
3294 		if (ddi_status != DDI_DMA_MAPPED) {
3295 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3296 			    "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
3297 			    "(status 0x%x ncookies %d.)", ddi_status,
3298 			    dma_p->ncookies));
3299
3300 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3301 			    "==> nxge_dma_mem_alloc: (not mapped)"
3302 			    "length %lu (0x%x) "
3303 			    "free contig kaddrp $%p "
3304 			    "va_to_pa $%p",
3305 			    length, length,
3306 			    kaddrp,
3307 			    va_to_pa(kaddrp)));
3308
3309
3310 			contig_mem_free((void *)kaddrp, length);
3311 			ddi_dma_free_handle(&dma_p->dma_handle);
3312
3313 			dma_p->dma_handle = NULL;
3314 			dma_p->acc_handle = NULL;
3315 			dma_p->alength = 0;
3316 			dma_p->kaddrp = NULL;
3317
3318 			return (NXGE_ERROR | NXGE_DDI_FAILED);
3319 		}
3320
3321 		if (dma_p->ncookies != 1 ||
3322 		    (dma_p->dma_cookie.dmac_laddress == 0)) {
3323 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3324 			    "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
3325 			    "cookie or "
3326 			    "dmac_laddress is NULL $%p size %d "
3327 			    " (status 0x%x ncookies %d.)",
3328 			    dma_p->dma_cookie.dmac_laddress,
3329 			    dma_p->dma_cookie.dmac_size,
3330 			    ddi_status,
3331 			    dma_p->ncookies));
3332
3333 			contig_mem_free((void *)kaddrp, length);
3334 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
3335 			ddi_dma_free_handle(&dma_p->dma_handle);
3336
3337 			dma_p->alength = 0;
3338 			dma_p->dma_handle = NULL;
3339 			dma_p->acc_handle = NULL;
3340 			dma_p->kaddrp = NULL;
3341
3342 			return (NXGE_ERROR | NXGE_DDI_FAILED);
3343 		}
3344 		break;
3345
3346 #else
3347 	case B_TRUE:
3348 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3349 		    "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
3350 		return (NXGE_ERROR | NXGE_DDI_FAILED);
3351 #endif
3352 	}
3353
3354 	dma_p->kaddrp = kaddrp;
3355 	dma_p->last_kaddrp = (unsigned char *)kaddrp +
3356 	    dma_p->alength - RXBUF_64B_ALIGNED;
3357 #if defined(__i386)
3358 	dma_p->ioaddr_pp =
3359 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
3360 #else
3361 	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3362 #endif
3363 	dma_p->last_ioaddr_pp =
3364 #if defined(__i386)
3365 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
3366 #else
3367 	    (unsigned char 
*)dma_p->dma_cookie.dmac_laddress +
3368 #endif
3369 	    dma_p->alength - RXBUF_64B_ALIGNED;
3370
3371 	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
3372
3373 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3374 	dma_p->orig_ioaddr_pp =
3375 	    (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3376 	dma_p->orig_alength = length;
3377 	dma_p->orig_kaddrp = kaddrp;
3378 	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
3379 #endif
3380
3381 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
3382 	    "dma buffer allocated: dma_p $%p "
3383 	    "return dmac_laddress from cookie $%p cookie dmac_size %d "
3384 	    "dma_p->ioaddr_p $%p "
3385 	    "dma_p->orig_ioaddr_p $%p "
3386 	    "orig_vatopa $%p "
3387 	    "alength %d (0x%x) "
3388 	    "kaddrp $%p "
3389 	    "length %d (0x%x)",
3390 	    dma_p,
3391 	    dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
3392 	    dma_p->ioaddr_pp,
3393 	    dma_p->orig_ioaddr_pp,
3394 	    dma_p->orig_vatopa,
3395 	    dma_p->alength, dma_p->alength,
3396 	    kaddrp,
3397 	    length, length));
3398
3399 	return (NXGE_OK);
3400 }
3401
3402 static void
3403 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
3404 {
3405 	if (dma_p->dma_handle != NULL) {
3406 		if (dma_p->ncookies) {
3407 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
3408 			dma_p->ncookies = 0;
3409 		}
3410 		ddi_dma_free_handle(&dma_p->dma_handle);
3411 		dma_p->dma_handle = NULL;
3412 	}
3413
3414 	if (dma_p->acc_handle != NULL) {
3415 		ddi_dma_mem_free(&dma_p->acc_handle);
3416 		dma_p->acc_handle = NULL;
3417 		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3418 	}
3419
3420 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3421 	if (dma_p->contig_alloc_type &&
3422 	    dma_p->orig_kaddrp && dma_p->orig_alength) {
3423 		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
3424 		    "kaddrp $%p (orig_kaddrp $%p)"
3425 		    "mem type %d "
3426 		    "orig_alength %d "
3427 		    "alength 0x%x (%d)",
3428 		    dma_p->kaddrp,
3429 		    dma_p->orig_kaddrp,
3430 		    dma_p->contig_alloc_type,
3431 		    dma_p->orig_alength,
3432 		    dma_p->alength, dma_p->alength));
3433
3434 		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
3435 		dma_p->orig_alength = 0;
3436 		dma_p->orig_kaddrp = NULL;
3437 		dma_p->contig_alloc_type = B_FALSE;
3438 	}
3439 #endif
3440 	dma_p->kaddrp = NULL;
3441 	dma_p->alength = 0;
3442 }
3443
3444 static void
3445 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
3446 {
3447 	uint64_t kaddr;
3448 	uint32_t buf_size;
3449
3450 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));
3451
3452 	if (dma_p->dma_handle != NULL) {
3453 		if (dma_p->ncookies) {
3454 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
3455 			dma_p->ncookies = 0;
3456 		}
3457 		ddi_dma_free_handle(&dma_p->dma_handle);
3458 		dma_p->dma_handle = NULL;
3459 	}
3460
3461 	if (dma_p->acc_handle != NULL) {
3462 		ddi_dma_mem_free(&dma_p->acc_handle);
3463 		dma_p->acc_handle = NULL;
3464 		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3465 	}
3466
3467 	NXGE_DEBUG_MSG((NULL, DMA_CTL,
3468 	    "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
3469 	    dma_p,
3470 	    dma_p->buf_alloc_state));
3471
3472 	if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
3473 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
3474 		    "<== nxge_dma_free_rx_data_buf: "
3475 		    "outstanding data buffers"));
3476 		return;
3477 	}
3478
3479 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3480 	if (dma_p->contig_alloc_type &&
3481 	    dma_p->orig_kaddrp && dma_p->orig_alength) {
3482 		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
3483 		    "kaddrp $%p (orig_kaddrp $%p)"
3484 		    "mem type %d "
3485 		    "orig_alength %d "
3486 		    "alength 0x%x (%d)",
3487 		    dma_p->kaddrp,
3488 		    dma_p->orig_kaddrp,
3489 
dma_p->contig_alloc_type,
3490 		    dma_p->orig_alength,
3491 		    dma_p->alength, dma_p->alength));
3492
3493 		kaddr = (uint64_t)dma_p->orig_kaddrp;
3494 		buf_size = dma_p->orig_alength;
3495 		nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
3496 		dma_p->orig_alength = 0;
3497 		dma_p->orig_kaddrp = NULL;
3498 		dma_p->contig_alloc_type = B_FALSE;
3499 		dma_p->kaddrp = NULL;
3500 		dma_p->alength = 0;
3501 		return;
3502 	}
3503 #endif
3504
3505 	if (dma_p->kmem_alloc_type) {
3506 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
3507 		    "nxge_dma_free_rx_data_buf: free kmem "
3508 		    "kaddrp $%p (orig_kaddrp $%p)"
3509 		    "alloc type %d "
3510 		    "orig_alength %d "
3511 		    "alength 0x%x (%d)",
3512 		    dma_p->kaddrp,
3513 		    dma_p->orig_kaddrp,
3514 		    dma_p->kmem_alloc_type,
3515 		    dma_p->orig_alength,
3516 		    dma_p->alength, dma_p->alength));
3517 #if defined(__i386)
3518 		kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
3519 #else
3520 		kaddr = (uint64_t)dma_p->kaddrp;
3521 #endif
3522 		buf_size = dma_p->orig_alength;
3523 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
3524 		    "nxge_dma_free_rx_data_buf: free dmap $%p "
3525 		    "kaddr $%p buf_size %d",
3526 		    dma_p,
3527 		    kaddr, buf_size));
3528 		nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
3529 		dma_p->alength = 0;
3530 		dma_p->orig_alength = 0;
3531 		dma_p->kaddrp = NULL;
3532 		dma_p->kmem_alloc_type = B_FALSE;
3533 	}
3534
3535 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
3536 }
3537
3538 /*
3539 * nxge_m_start() -- start transmitting and receiving.
3540 *
3541 * This function is called by the MAC layer when the first
3542 * stream is opened to prepare the hardware for sending
3543 * and receiving packets.
3544 */
3545 static int
3546 nxge_m_start(void *arg)
3547 {
3548 	p_nxge_t nxgep = (p_nxge_t)arg;
3549
3550 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3551
3552 	MUTEX_ENTER(nxgep->genlock);
3553 	if (nxge_init(nxgep) != NXGE_OK) {
3554 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3555 		    "<== nxge_m_start: initialization failed"));
3556 		MUTEX_EXIT(nxgep->genlock);
3557 		return (EIO);
3558 	}
3559
3560 	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
3561 		goto nxge_m_start_exit;
3562 	/*
3563 	 * Start the timer that checks for system errors and tx hangs.
3564 	 */
3565 	if (!isLDOMguest(nxgep))
3566 		nxgep->nxge_timerid = nxge_start_timer(nxgep,
3567 		    nxge_check_hw_state, NXGE_CHECK_TIMER);
3568 #if defined(sun4v)
3569 	else
3570 		nxge_hio_start_timer(nxgep);
3571 #endif
3572
3573 	nxgep->link_notify = B_TRUE;
3574
3575 	nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3576
3577 nxge_m_start_exit:
3578 	MUTEX_EXIT(nxgep->genlock);
3579 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3580 	return (0);
3581 }
3582
3583 /*
3584 * nxge_m_stop(): stop transmitting and receiving. 
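 *
 * The check-state timer is stopped before genlock is taken below,
 * presumably so that an in-flight nxge_check_hw_state() callout cannot
 * deadlock against this thread while the hardware is uninitialized.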
3585 */
3586 static void
3587 nxge_m_stop(void *arg)
3588 {
3589 	p_nxge_t nxgep = (p_nxge_t)arg;
3590
3591 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
3592
3593 	if (nxgep->nxge_timerid) {
3594 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
3595 		nxgep->nxge_timerid = 0;
3596 	}
3597
3598 	MUTEX_ENTER(nxgep->genlock);
3599 	nxgep->nxge_mac_state = NXGE_MAC_STOPPING;
3600 	nxge_uninit(nxgep);
3601
3602 	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
3603
3604 	MUTEX_EXIT(nxgep->genlock);
3605
3606 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
3607 }
3608
3609 static int
3610 nxge_m_unicst(void *arg, const uint8_t *macaddr)
3611 {
3612 	p_nxge_t nxgep = (p_nxge_t)arg;
3613 	struct ether_addr addrp;
3614
3615 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));
3616
3617 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
3618 	if (nxge_set_mac_addr(nxgep, &addrp)) {
3619 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3620 		    "<== nxge_m_unicst: set unicast failed"));
3621 		return (EINVAL);
3622 	}
3623
3624 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));
3625
3626 	return (0);
3627 }
3628
3629 static int
3630 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3631 {
3632 	p_nxge_t nxgep = (p_nxge_t)arg;
3633 	struct ether_addr addrp;
3634
3635 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3636 	    "==> nxge_m_multicst: add %d", add));
3637
3638 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3639 	if (add) {
3640 		if (nxge_add_mcast_addr(nxgep, &addrp)) {
3641 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3642 			    "<== nxge_m_multicst: add multicast failed"));
3643 			return (EINVAL);
3644 		}
3645 	} else {
3646 		if (nxge_del_mcast_addr(nxgep, &addrp)) {
3647 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3648 			    "<== nxge_m_multicst: del multicast failed"));
3649 			return (EINVAL);
3650 		}
3651 	}
3652
3653 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3654
3655 	return (0);
3656 }
3657
3658 static int
3659 nxge_m_promisc(void *arg, boolean_t on)
3660 {
3661 	p_nxge_t nxgep = (p_nxge_t)arg;
3662
3663 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3664 	    "==> nxge_m_promisc: on %d", on));
3665
3666 	if (nxge_set_promisc(nxgep, on)) {
3667 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3668 		    "<== nxge_m_promisc: set promisc failed"));
3669 		return (EINVAL);
3670 	}
3671
3672 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3673 	    "<== nxge_m_promisc: on %d", on));
3674
3675 	return (0);
3676 }
3677
3678 static void
3679 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
3680 {
3681 	p_nxge_t nxgep = (p_nxge_t)arg;
3682 	struct iocblk *iocp;
3683 	boolean_t need_privilege;
3684 	int err;
3685 	int cmd;
3686
3687 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));
3688
3689 	iocp = (struct iocblk *)mp->b_rptr;
3690 	iocp->ioc_error = 0;
3691 	need_privilege = B_TRUE;
3692 	cmd = iocp->ioc_cmd;
3693 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
3694 	switch (cmd) {
3695 	default:
3696 		miocnak(wq, mp, 0, EINVAL);
3697 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
3698 		return;
3699
3700 	case LB_GET_INFO_SIZE:
3701 	case LB_GET_INFO:
3702 	case LB_GET_MODE:
3703 		need_privilege = B_FALSE;
3704 		break;
3705 	case LB_SET_MODE:
3706 		break;
3707
3708 	case ND_GET:
3709 		need_privilege = B_FALSE;
3710 		break;
3711 	case ND_SET:
3712 		break;
3713
3714 	case NXGE_GET_MII:
3715 	case NXGE_PUT_MII:
3716 	case NXGE_GET64:
3717 	case NXGE_PUT64:
3718 	case NXGE_GET_TX_RING_SZ:
3719 	case NXGE_GET_TX_DESC:
3720 	case NXGE_TX_SIDE_RESET:
3721 	case NXGE_RX_SIDE_RESET:
3722 	case NXGE_GLOBAL_RESET:
3723 	case NXGE_RESET_MAC:
3724 	case NXGE_TX_REGS_DUMP:
3725 	case NXGE_RX_REGS_DUMP:
3726 	case 
NXGE_INT_REGS_DUMP:
3727 	case NXGE_VIR_INT_REGS_DUMP:
3728 	case NXGE_PUT_TCAM:
3729 	case NXGE_GET_TCAM:
3730 	case NXGE_RTRACE:
3731 	case NXGE_RDUMP:
3732
3733 		need_privilege = B_FALSE;
3734 		break;
3735 	case NXGE_INJECT_ERR:
3736 		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
3737 		nxge_err_inject(nxgep, wq, mp);
3738 		break;
3739 	}
3740
3741 	if (need_privilege) {
3742 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
3743 		if (err != 0) {
3744 			miocnak(wq, mp, 0, err);
3745 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3746 			    "<== nxge_m_ioctl: no priv"));
3747 			return;
3748 		}
3749 	}
3750
3751 	switch (cmd) {
3752 	case ND_GET:
3753 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command"));
		/* FALLTHROUGH */
3754 	case ND_SET:
3755 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command"));
3756 		nxge_param_ioctl(nxgep, wq, mp, iocp);
3757 		break;
3758
3759 	case LB_GET_MODE:
3760 	case LB_SET_MODE:
3761 	case LB_GET_INFO_SIZE:
3762 	case LB_GET_INFO:
3763 		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
3764 		break;
3765
3766 	case NXGE_GET_MII:
3767 	case NXGE_PUT_MII:
3768 	case NXGE_PUT_TCAM:
3769 	case NXGE_GET_TCAM:
3770 	case NXGE_GET64:
3771 	case NXGE_PUT64:
3772 	case NXGE_GET_TX_RING_SZ:
3773 	case NXGE_GET_TX_DESC:
3774 	case NXGE_TX_SIDE_RESET:
3775 	case NXGE_RX_SIDE_RESET:
3776 	case NXGE_GLOBAL_RESET:
3777 	case NXGE_RESET_MAC:
3778 	case NXGE_TX_REGS_DUMP:
3779 	case NXGE_RX_REGS_DUMP:
3780 	case NXGE_INT_REGS_DUMP:
3781 	case NXGE_VIR_INT_REGS_DUMP:
3782 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3783 		    "==> nxge_m_ioctl: cmd 0x%x", cmd));
3784 		nxge_hw_ioctl(nxgep, wq, mp, iocp);
3785 		break;
3786 	}
3787
3788 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
3789 }
3790
3791 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
3792
3793 static void
3794 nxge_m_resources(void *arg)
3795 {
3796 	p_nxge_t nxgep = arg;
3797 	mac_rx_fifo_t mrf;
3798
3799 	nxge_grp_set_t *set = &nxgep->rx_set;
3800 	uint8_t rdc;
3801
3802 	rx_rcr_ring_t *ring;
3803
3804 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources"));
3805
3806 	MUTEX_ENTER(nxgep->genlock);
3807
3808 	if (set->owned.map == 0) {
3809 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
3810 		    "nxge_m_resources: no receive resources"));
3811 		goto nxge_m_resources_exit;
3812 	}
3813
3814 	/*
3815 	 * CR 6492541: Check to see if the drv_state has been initialized;
3816 	 * if not, call nxge_init().
3817 	 */
3818 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3819 		if (nxge_init(nxgep) != NXGE_OK)
3820 			goto nxge_m_resources_exit;
3821 	}
3822
3823 	mrf.mrf_type = MAC_RX_FIFO;
3824 	mrf.mrf_blank = nxge_rx_hw_blank;
3825 	mrf.mrf_arg = (void *)nxgep;
3826
3827 	mrf.mrf_normal_blank_time = 128;
3828 	mrf.mrf_normal_pkt_count = 8;
3829
3830 	/*
3831 	 * Export our receive resources to the MAC layer.
3832 	 */
3833 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
3834 		if ((1 << rdc) & set->owned.map) {
3835 			ring = nxgep->rx_rcr_rings->rcr_rings[rdc];
3836 			if (ring == 0) {
3837 				/*
3838 				 * This is a big deal only if we are
3839 				 * *not* in an LDOMs environment. 
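				 * (In an LDOMs environment some rings in
				 * owned.map may be serviced by another
				 * domain, so a missing RCR ring is expected
				 * there; this is an inference from the
				 * SOLARIS_DOMAIN check below.)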
3840 				 */
3841 				if (nxgep->environs == SOLARIS_DOMAIN) {
3842 					cmn_err(CE_NOTE,
3843 					    "==> nxge_m_resources: "
3844 					    "ring %d == 0", rdc);
3845 				}
3846 				continue;
3847 			}
3848 			ring->rcr_mac_handle = mac_resource_add
3849 			    (nxgep->mach, (mac_resource_t *)&mrf);
3850
3851 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
3852 			    "==> nxge_m_resources: RDC %d RCR %p MAC handle %p",
3853 			    rdc, ring, ring->rcr_mac_handle));
3854 		}
3855 	}
3856
3857 nxge_m_resources_exit:
3858 	MUTEX_EXIT(nxgep->genlock);
3859 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources"));
3860 }
3861
3862 void
3863 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory)
3864 {
3865 	p_nxge_mmac_stats_t mmac_stats;
3866 	int i;
3867 	nxge_mmac_t *mmac_info;
3868
3869 	mmac_info = &nxgep->nxge_mmac_info;
3870
3871 	mmac_stats = &nxgep->statsp->mmac_stats;
3872 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
3873 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
3874
3875 	for (i = 0; i < ETHERADDRL; i++) {
3876 		if (factory) {
3877 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
3878 			    = mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i];
3879 		} else {
3880 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
3881 			    = mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
3882 		}
3883 	}
3884 }
3885
3886 /*
3887 * nxge_altmac_set() -- Set an alternate MAC address
3888 */
3889 static int
3890 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot)
3891 {
3892 	uint8_t addrn;
3893 	uint8_t portn;
3894 	npi_mac_addr_t altmac;
3895 	hostinfo_t mac_rdc;
3896 	p_nxge_class_pt_cfg_t clscfgp;
3897
3898 	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
3899 	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
3900 	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
3901
3902 	portn = nxgep->mac.portnum;
3903 	addrn = (uint8_t)slot - 1;
3904
3905 	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn,
3906 	    addrn, &altmac) != NPI_SUCCESS)
3907 		return (EIO);
3908
3909 	/*
3910 	 * Set the rdc table number for the host info entry
3911 	 * for this mac address slot.
3912 	 */
3913 	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
3914 	mac_rdc.value = 0;
3915 	mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl;
3916 	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;
3917
3918 	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
3919 	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
3920 		return (EIO);
3921 	}
3922
3923 	/*
3924 	 * Enable comparison with the alternate MAC address.
3925 	 * The first alternate address is enabled by bit 1 of register
3926 	 * BMAC_ALTAD_CMPEN but by bit 0 of register XMAC_ADDR_CMPEN,
3927 	 * so the slot must be converted to addrn accordingly before
3928 	 * calling npi_mac_altaddr_enable.
3929 	 */
3930 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
3931 		addrn = (uint8_t)slot - 1;
3932 	else
3933 		addrn = (uint8_t)slot;
3934
3935 	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
3936 	    != NPI_SUCCESS)
3937 		return (EIO);
3938
3939 	return (0);
3940 }
3941
3942 /*
3943 * nxge_m_mmac_add() - find an unused address slot, set the address
3944 * value to the one specified, enable the port to start filtering on
3945 * the new MAC address. Returns 0 on success. 
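 *
 * Error returns, as implemented below: ENXIO if the hardware cannot
 * be initialized, ENOSPC if no slot is free, EINVAL if the address
 * fails mac_unicst_verify(), or the nxge_altmac_set() result (EIO on
 * an NPI failure).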
3946 */ 3947 int 3948 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 3949 { 3950 p_nxge_t nxgep = arg; 3951 mac_addr_slot_t slot; 3952 nxge_mmac_t *mmac_info; 3953 int err; 3954 nxge_status_t status; 3955 3956 mutex_enter(nxgep->genlock); 3957 3958 /* 3959 * Make sure that nxge is initialized, if _start() has 3960 * not been called. 3961 */ 3962 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3963 status = nxge_init(nxgep); 3964 if (status != NXGE_OK) { 3965 mutex_exit(nxgep->genlock); 3966 return (ENXIO); 3967 } 3968 } 3969 3970 mmac_info = &nxgep->nxge_mmac_info; 3971 if (mmac_info->naddrfree == 0) { 3972 mutex_exit(nxgep->genlock); 3973 return (ENOSPC); 3974 } 3975 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3976 maddr->mma_addrlen)) { 3977 mutex_exit(nxgep->genlock); 3978 return (EINVAL); 3979 } 3980 /* 3981 * Search for the first available slot. Because naddrfree 3982 * is not zero, we are guaranteed to find one. 3983 * Slot 0 is for unique (primary) MAC. The first alternate 3984 * MAC slot is slot 1. 3985 * Each of the first two ports of Neptune has 16 alternate 3986 * MAC slots but only the first 7 (of 15) slots have assigned factory 3987 * MAC addresses. We first search among the slots without bundled 3988 * factory MACs. If we fail to find one in that range, then we 3989 * search the slots with bundled factory MACs. A factory MAC 3990 * will be wasted while the slot is used with a user MAC address. 3991 * But the slot could be used by factory MAC again after calling 3992 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 3993 */ 3994 if (mmac_info->num_factory_mmac < mmac_info->num_mmac) { 3995 for (slot = mmac_info->num_factory_mmac + 1; 3996 slot <= mmac_info->num_mmac; slot++) { 3997 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3998 break; 3999 } 4000 if (slot > mmac_info->num_mmac) { 4001 for (slot = 1; slot <= mmac_info->num_factory_mmac; 4002 slot++) { 4003 if (!(mmac_info->mac_pool[slot].flags 4004 & MMAC_SLOT_USED)) 4005 break; 4006 } 4007 } 4008 } else { 4009 for (slot = 1; slot <= mmac_info->num_mmac; slot++) { 4010 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4011 break; 4012 } 4013 } 4014 ASSERT(slot <= mmac_info->num_mmac); 4015 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) { 4016 mutex_exit(nxgep->genlock); 4017 return (err); 4018 } 4019 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 4020 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 4021 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4022 mmac_info->naddrfree--; 4023 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4024 4025 maddr->mma_slot = slot; 4026 4027 mutex_exit(nxgep->genlock); 4028 return (0); 4029 } 4030 4031 /* 4032 * This function reserves an unused slot and programs the slot and the HW 4033 * with a factory mac address. 4034 */ 4035 static int 4036 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 4037 { 4038 p_nxge_t nxgep = arg; 4039 mac_addr_slot_t slot; 4040 nxge_mmac_t *mmac_info; 4041 int err; 4042 nxge_status_t status; 4043 4044 mutex_enter(nxgep->genlock); 4045 4046 /* 4047 * Make sure that nxge is initialized, if _start() has 4048 * not been called. 
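	 *
	 * A caller may pass maddr->mma_slot == -1 to have the driver
	 * pick the first free factory slot; otherwise the requested
	 * slot must fall within [1, num_factory_mmac].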
*/ 4050 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4051 status = nxge_init(nxgep); 4052 if (status != NXGE_OK) { 4053 mutex_exit(nxgep->genlock); 4054 return (ENXIO); 4055 } 4056 } 4057 4058 mmac_info = &nxgep->nxge_mmac_info; 4059 if (mmac_info->naddrfree == 0) { 4060 mutex_exit(nxgep->genlock); 4061 return (ENOSPC); 4062 } 4063 4064 slot = maddr->mma_slot; 4065 if (slot == -1) { /* -1: Take the first available slot */ 4066 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 4067 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4068 break; 4069 } 4070 if (slot > mmac_info->num_factory_mmac) { 4071 mutex_exit(nxgep->genlock); 4072 return (ENOSPC); 4073 } 4074 } 4075 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 4076 /* 4077 * Do not support a factory MAC at a slot greater than 4078 * num_factory_mmac even when there are available factory 4079 * MAC addresses, because the alternate MACs are bundled with 4080 * slot[1] through slot[num_factory_mmac]. 4081 */ 4082 mutex_exit(nxgep->genlock); 4083 return (EINVAL); 4084 } 4085 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4086 mutex_exit(nxgep->genlock); 4087 return (EBUSY); 4088 } 4089 /* Verify the address to be reserved */ 4090 if (!mac_unicst_verify(nxgep->mach, 4091 mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 4092 mutex_exit(nxgep->genlock); 4093 return (EINVAL); 4094 } 4095 if ((err = nxge_altmac_set(nxgep, 4096 mmac_info->factory_mac_pool[slot], slot)) != 0) { 4097 mutex_exit(nxgep->genlock); 4098 return (err); 4099 } 4100 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 4101 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 4102 mmac_info->naddrfree--; 4103 4104 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 4105 mutex_exit(nxgep->genlock); 4106 4107 /* Pass info back to the caller */ 4108 maddr->mma_slot = slot; 4109 maddr->mma_addrlen = ETHERADDRL; 4110 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 4111 4112 return (0); 4113 } 4114 4115 /* 4116 * Remove the specified mac address and update the HW not to filter 4117 * the mac address anymore. 4118 */ 4119 int 4120 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 4121 { 4122 p_nxge_t nxgep = arg; 4123 nxge_mmac_t *mmac_info; 4124 uint8_t addrn; 4125 uint8_t portn; 4126 int err = 0; 4127 nxge_status_t status; 4128 4129 mutex_enter(nxgep->genlock); 4130 4131 /* 4132 * Make sure that nxge is initialized, if _start() has 4133 * not been called. 4134 */ 4135 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4136 status = nxge_init(nxgep); 4137 if (status != NXGE_OK) { 4138 mutex_exit(nxgep->genlock); 4139 return (ENXIO); 4140 } 4141 } 4142 4143 mmac_info = &nxgep->nxge_mmac_info; 4144 if (slot < 1 || slot > mmac_info->num_mmac) { 4145 mutex_exit(nxgep->genlock); 4146 return (EINVAL); 4147 } 4148 4149 portn = nxgep->mac.portnum; 4150 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4151 addrn = (uint8_t)slot - 1; 4152 else 4153 addrn = (uint8_t)slot; 4154 4155 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4156 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 4157 == NPI_SUCCESS) { 4158 mmac_info->naddrfree++; 4159 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 4160 /* 4161 * Regardless of whether the MAC we just stopped 4162 * filtering is a user addr or a factory addr, we must 4163 * set the MMAC_VENDOR_ADDR flag if this slot has an 4164 * associated factory MAC to indicate that a factory 4165 * MAC is available.
*/ 4166 if (slot <= mmac_info->num_factory_mmac) { 4167 mmac_info->mac_pool[slot].flags 4168 |= MMAC_VENDOR_ADDR; 4169 } 4170 /* 4171 * Clear mac_pool[slot].addr so that kstat shows 0 4172 * alternate MAC address if the slot is not used. 4173 * (But nxge_m_mmac_get returns the factory MAC even 4174 * when the slot is not used!) 4175 */ 4176 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 4177 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4178 } else { 4179 err = EIO; 4180 } 4181 } else { 4182 err = EINVAL; 4183 } 4184 4185 mutex_exit(nxgep->genlock); 4186 return (err); 4187 } 4188 4189 /* 4190 * Modify a mac address added by nxge_m_mmac_add() or nxge_m_mmac_reserve(). 4191 */ 4192 static int 4193 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr) 4194 { 4195 p_nxge_t nxgep = arg; 4196 mac_addr_slot_t slot; 4197 nxge_mmac_t *mmac_info; 4198 int err = 0; 4199 nxge_status_t status; 4200 4201 4202 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 4203 maddr->mma_addrlen)) 4204 return (EINVAL); 4205 4206 slot = maddr->mma_slot; 4207 4208 mutex_enter(nxgep->genlock); 4209 4210 /* 4211 * Make sure that nxge is initialized, if _start() has 4212 * not been called. 4213 */ 4214 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4215 status = nxge_init(nxgep); 4216 if (status != NXGE_OK) { 4217 mutex_exit(nxgep->genlock); 4218 return (ENXIO); 4219 } 4220 } 4221 4222 mmac_info = &nxgep->nxge_mmac_info; 4223 if (slot < 1 || slot > mmac_info->num_mmac) { 4224 mutex_exit(nxgep->genlock); 4225 return (EINVAL); 4226 } 4227 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4228 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) 4229 == 0) { 4230 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, 4231 ETHERADDRL); 4232 /* 4233 * Assume that the MAC passed down from the caller 4234 * is not a factory MAC address (the caller should 4235 * call mmac_remove followed by mmac_reserve to use 4236 * the factory MAC for this slot). 4237 */ 4238 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4239 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4240 } 4241 } else { 4242 err = EINVAL; 4243 } 4244 mutex_exit(nxgep->genlock); 4245 return (err); 4246 } 4247 4248 /* 4249 * nxge_m_mmac_get() - Get the MAC address and other information 4250 * related to the slot. mma_flags should be set to 0 in the call. 4251 * Note: although kstat shows MAC address as zero when a slot is 4252 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC 4253 * to the caller as long as the slot is not using a user MAC address. 4254 * The following table shows the rules: 4255 * 4256 * USED VENDOR mma_addr 4257 * ------------------------------------------------------------ 4258 * (1) Slot uses a user MAC: yes no user MAC 4259 * (2) Slot uses a factory MAC: yes yes factory MAC 4260 * (3) Slot is not used but is 4261 * factory MAC capable: no yes factory MAC 4262 * (4) Slot is not used and is 4263 * not factory MAC capable: no no 0 4264 * ------------------------------------------------------------ 4265 */ 4266 static int 4267 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr) 4268 { 4269 nxge_t *nxgep = arg; 4270 mac_addr_slot_t slot; 4271 nxge_mmac_t *mmac_info; 4272 nxge_status_t status; 4273 4274 slot = maddr->mma_slot; 4275 4276 mutex_enter(nxgep->genlock); 4277 4278 /* 4279 * Make sure that nxge is initialized, if _start() has 4280 * not been called.
*/ 4282 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4283 status = nxge_init(nxgep); 4284 if (status != NXGE_OK) { 4285 mutex_exit(nxgep->genlock); 4286 return (ENXIO); 4287 } 4288 } 4289 4290 mmac_info = &nxgep->nxge_mmac_info; 4291 4292 if (slot < 1 || slot > mmac_info->num_mmac) { 4293 mutex_exit(nxgep->genlock); 4294 return (EINVAL); 4295 } 4296 maddr->mma_flags = 0; 4297 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) 4298 maddr->mma_flags |= MMAC_SLOT_USED; 4299 4300 if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) { 4301 maddr->mma_flags |= MMAC_VENDOR_ADDR; 4302 bcopy(mmac_info->factory_mac_pool[slot], 4303 maddr->mma_addr, ETHERADDRL); 4304 maddr->mma_addrlen = ETHERADDRL; 4305 } else { 4306 if (maddr->mma_flags & MMAC_SLOT_USED) { 4307 bcopy(mmac_info->mac_pool[slot].addr, 4308 maddr->mma_addr, ETHERADDRL); 4309 maddr->mma_addrlen = ETHERADDRL; 4310 } else { 4311 bzero(maddr->mma_addr, ETHERADDRL); 4312 maddr->mma_addrlen = 0; 4313 } 4314 } 4315 mutex_exit(nxgep->genlock); 4316 return (0); 4317 } 4318 4319 static boolean_t 4320 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4321 { 4322 nxge_t *nxgep = arg; 4323 uint32_t *txflags = cap_data; 4324 multiaddress_capab_t *mmacp = cap_data; 4325 4326 switch (cap) { 4327 case MAC_CAPAB_HCKSUM: 4328 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4329 "==> nxge_m_getcapab: checksum %d", nxge_cksum_enable)); 4330 if (nxge_cksum_enable) { 4331 *txflags = HCKSUM_INET_PARTIAL; 4332 } 4333 break; 4334 4335 case MAC_CAPAB_POLL: 4336 /* 4337 * There's nothing for us to fill in; simply returning 4338 * B_TRUE to state that we support polling is sufficient. 4339 */ 4340 break; 4341 4342 case MAC_CAPAB_MULTIADDRESS: 4343 mmacp = (multiaddress_capab_t *)cap_data; 4344 mutex_enter(nxgep->genlock); 4345 4346 mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac; 4347 mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree; 4348 mmacp->maddr_flag = 0; /* 0 is required by PSARC/2006/265 */ 4349 /* 4350 * maddr_handle is the driver's private data, passed back to 4351 * the entry point functions as their arg. 4352 */ 4353 mmacp->maddr_handle = nxgep; 4354 mmacp->maddr_add = nxge_m_mmac_add; 4355 mmacp->maddr_remove = nxge_m_mmac_remove; 4356 mmacp->maddr_modify = nxge_m_mmac_modify; 4357 mmacp->maddr_get = nxge_m_mmac_get; 4358 mmacp->maddr_reserve = nxge_m_mmac_reserve; 4359 4360 mutex_exit(nxgep->genlock); 4361 break; 4362 4363 case MAC_CAPAB_LSO: { 4364 mac_capab_lso_t *cap_lso = cap_data; 4365 4366 if (nxgep->soft_lso_enable) { 4367 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4368 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 4369 nxge_lso_max = NXGE_LSO_MAXLEN; 4370 } 4371 cap_lso->lso_basic_tcp_ipv4.lso_max = nxge_lso_max; 4372 break; 4373 } else { 4374 return (B_FALSE); 4375 } 4376 } 4377 4378 #if defined(sun4v) 4379 case MAC_CAPAB_RINGS: { 4380 mac_capab_rings_t *mrings = (mac_capab_rings_t *)cap_data; 4381 4382 /* 4383 * Only the service domain driver responds to 4384 * this capability request. 4385 */ 4386 if (isLDOMservice(nxgep)) { 4387 mrings->mr_handle = (void *)nxgep; 4388 4389 /* 4390 * No dynamic allocation of groups and 4391 * rings at this time. Shares dictate the 4392 * configuration.
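* The ring and group counts filled in below are fixed
* values for now (note the XXX annotations).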
4393 */ 4394 mrings->mr_gadd_ring = NULL; 4395 mrings->mr_grem_ring = NULL; 4396 mrings->mr_rget = NULL; 4397 mrings->mr_gget = nxge_hio_group_get; 4398 4399 if (mrings->mr_type == MAC_RING_TYPE_RX) { 4400 mrings->mr_rnum = 8; /* XXX */ 4401 mrings->mr_gnum = 6; /* XXX */ 4402 } else { 4403 mrings->mr_rnum = 8; /* XXX */ 4404 mrings->mr_gnum = 0; /* XXX */ 4405 } 4406 } else 4407 return (B_FALSE); 4408 break; 4409 } 4410 4411 case MAC_CAPAB_SHARES: { 4412 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4413 4414 /* 4415 * Only the service domain driver responds to 4416 * this capability request. 4417 */ 4418 if (isLDOMservice(nxgep)) { 4419 mshares->ms_snum = 3; 4420 mshares->ms_handle = (void *)nxgep; 4421 mshares->ms_salloc = nxge_hio_share_alloc; 4422 mshares->ms_sfree = nxge_hio_share_free; 4423 mshares->ms_sadd = NULL; 4424 mshares->ms_sremove = NULL; 4425 mshares->ms_squery = nxge_hio_share_query; 4426 } else 4427 return (B_FALSE); 4428 break; 4429 } 4430 #endif 4431 default: 4432 return (B_FALSE); 4433 } 4434 return (B_TRUE); 4435 } 4436 4437 static boolean_t 4438 nxge_param_locked(mac_prop_id_t pr_num) 4439 { 4440 /* 4441 * All adv_* parameters are locked (read-only) while 4442 * the device is in any sort of loopback mode ... 4443 */ 4444 switch (pr_num) { 4445 case DLD_PROP_ADV_1000FDX_CAP: 4446 case DLD_PROP_EN_1000FDX_CAP: 4447 case DLD_PROP_ADV_1000HDX_CAP: 4448 case DLD_PROP_EN_1000HDX_CAP: 4449 case DLD_PROP_ADV_100FDX_CAP: 4450 case DLD_PROP_EN_100FDX_CAP: 4451 case DLD_PROP_ADV_100HDX_CAP: 4452 case DLD_PROP_EN_100HDX_CAP: 4453 case DLD_PROP_ADV_10FDX_CAP: 4454 case DLD_PROP_EN_10FDX_CAP: 4455 case DLD_PROP_ADV_10HDX_CAP: 4456 case DLD_PROP_EN_10HDX_CAP: 4457 case DLD_PROP_AUTONEG: 4458 case DLD_PROP_FLOWCTRL: 4459 return (B_TRUE); 4460 } 4461 return (B_FALSE); 4462 } 4463 4464 /* 4465 * callback functions for set/get of properties 4466 */ 4467 static int 4468 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4469 uint_t pr_valsize, const void *pr_val) 4470 { 4471 nxge_t *nxgep = barg; 4472 p_nxge_param_t param_arr; 4473 p_nxge_stats_t statsp; 4474 int err = 0; 4475 uint8_t val; 4476 uint32_t cur_mtu, new_mtu, old_framesize; 4477 link_flowctrl_t fl; 4478 4479 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4480 param_arr = nxgep->param_arr; 4481 statsp = nxgep->statsp; 4482 mutex_enter(nxgep->genlock); 4483 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4484 nxge_param_locked(pr_num)) { 4485 /* 4486 * All adv_* parameters are locked (read-only) 4487 * while the device is in any sort of loopback mode. 
4488 */ 4489 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4490 "==> nxge_m_setprop: loopback mode: read only")); 4491 mutex_exit(nxgep->genlock); 4492 return (EBUSY); 4493 } 4494 4495 val = *(uint8_t *)pr_val; 4496 switch (pr_num) { 4497 case DLD_PROP_EN_1000FDX_CAP: 4498 nxgep->param_en_1000fdx = val; 4499 param_arr[param_anar_1000fdx].value = val; 4500 4501 goto reprogram; 4502 4503 case DLD_PROP_EN_100FDX_CAP: 4504 nxgep->param_en_100fdx = val; 4505 param_arr[param_anar_100fdx].value = val; 4506 4507 goto reprogram; 4508 4509 case DLD_PROP_EN_10FDX_CAP: 4510 nxgep->param_en_10fdx = val; 4511 param_arr[param_anar_10fdx].value = val; 4512 4513 goto reprogram; 4514 4515 case DLD_PROP_EN_1000HDX_CAP: 4516 case DLD_PROP_EN_100HDX_CAP: 4517 case DLD_PROP_EN_10HDX_CAP: 4518 case DLD_PROP_ADV_1000FDX_CAP: 4519 case DLD_PROP_ADV_1000HDX_CAP: 4520 case DLD_PROP_ADV_100FDX_CAP: 4521 case DLD_PROP_ADV_100HDX_CAP: 4522 case DLD_PROP_ADV_10FDX_CAP: 4523 case DLD_PROP_ADV_10HDX_CAP: 4524 case DLD_PROP_STATUS: 4525 case DLD_PROP_SPEED: 4526 case DLD_PROP_DUPLEX: 4527 err = EINVAL; /* cannot set read-only properties */ 4528 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4529 "==> nxge_m_setprop: read only property %d", 4530 pr_num)); 4531 break; 4532 4533 case DLD_PROP_AUTONEG: 4534 param_arr[param_autoneg].value = val; 4535 4536 goto reprogram; 4537 4538 case DLD_PROP_DEFMTU: 4539 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4540 err = EBUSY; 4541 break; 4542 } 4543 4544 cur_mtu = nxgep->mac.default_mtu; 4545 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4546 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4547 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4548 new_mtu, nxgep->mac.is_jumbo)); 4549 4550 if (new_mtu == cur_mtu) { 4551 err = 0; 4552 break; 4553 } 4554 if (new_mtu < NXGE_DEFAULT_MTU || 4555 new_mtu > NXGE_MAXIMUM_MTU) { 4556 err = EINVAL; 4557 break; 4558 } 4559 4560 if ((new_mtu > NXGE_DEFAULT_MTU) && 4561 !nxgep->mac.is_jumbo) { 4562 err = EINVAL; 4563 break; 4564 } 4565 4566 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4567 nxgep->mac.maxframesize = (uint16_t) 4568 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4569 if (nxge_mac_set_framesize(nxgep)) { 4570 nxgep->mac.maxframesize = 4571 (uint16_t)old_framesize; 4572 err = EINVAL; 4573 break; 4574 } 4575 4576 err = mac_maxsdu_update(nxgep->mach, new_mtu); 4577 if (err) { 4578 nxgep->mac.maxframesize = 4579 (uint16_t)old_framesize; 4580 err = EINVAL; 4581 break; 4582 } 4583 4584 nxgep->mac.default_mtu = new_mtu; 4585 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4586 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4587 new_mtu, nxgep->mac.maxframesize)); 4588 break; 4589 4590 case DLD_PROP_FLOWCTRL: 4591 bcopy(pr_val, &fl, sizeof (fl)); 4592 switch (fl) { 4593 default: 4594 err = EINVAL; 4595 break; 4596 4597 case LINK_FLOWCTRL_NONE: 4598 param_arr[param_anar_pause].value = 0; 4599 break; 4600 4601 case LINK_FLOWCTRL_RX: 4602 param_arr[param_anar_pause].value = 1; 4603 break; 4604 4605 case LINK_FLOWCTRL_TX: 4606 case LINK_FLOWCTRL_BI: 4607 err = EINVAL; 4608 break; 4609 } 4610 4611 reprogram: 4612 if (err == 0) { 4613 if (!nxge_param_link_update(nxgep)) { 4614 err = EINVAL; 4615 } 4616 } 4617 break; 4618 4619 default: 4620 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4621 "==> nxge_m_setprop: private property")); 4622 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 4623 pr_val); 4624 break; 4625 } 4626 4627 mutex_exit(nxgep->genlock); 4628 4629 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4630 "<== nxge_m_setprop (return %d)", err)); 4631 return (err); 4632 } 4633 4634 static int 4635 nxge_m_getprop(void *barg, 
const char *pr_name, mac_prop_id_t pr_num, 4636 uint_t pr_valsize, void *pr_val) 4637 { 4638 nxge_t *nxgep = barg; 4639 p_nxge_param_t param_arr = nxgep->param_arr; 4640 p_nxge_stats_t statsp = nxgep->statsp; 4641 int err = 0; 4642 link_flowctrl_t fl; 4643 uint64_t tmp = 0; 4644 4645 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4646 "==> nxge_m_getprop: pr_num %d", pr_num)); 4647 bzero(pr_val, pr_valsize); 4648 switch (pr_num) { 4649 case DLD_PROP_DUPLEX: 4650 if (pr_valsize < sizeof (uint8_t)) 4651 return (EINVAL); 4652 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4653 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4654 "==> nxge_m_getprop: duplex mode %d", 4655 *(uint8_t *)pr_val)); 4656 break; 4657 4658 case DLD_PROP_SPEED: 4659 if (pr_valsize < sizeof (uint64_t)) 4660 return (EINVAL); 4661 tmp = statsp->mac_stats.link_speed * 1000000ull; 4662 bcopy(&tmp, pr_val, sizeof (tmp)); 4663 break; 4664 4665 case DLD_PROP_STATUS: 4666 if (pr_valsize < sizeof (uint8_t)) 4667 return (EINVAL); 4668 *(uint8_t *)pr_val = statsp->mac_stats.link_up; 4669 break; 4670 4671 case DLD_PROP_AUTONEG: 4672 if (pr_valsize < sizeof (uint8_t)) 4673 return (EINVAL); 4674 *(uint8_t *)pr_val = 4675 param_arr[param_autoneg].value; 4676 break; 4677 4678 4679 case DLD_PROP_DEFMTU: { 4680 if (pr_valsize < sizeof (uint64_t)) 4681 return (EINVAL); 4682 tmp = nxgep->mac.default_mtu; 4683 bcopy(&tmp, pr_val, sizeof (tmp)); 4684 break; 4685 } 4686 4687 case DLD_PROP_FLOWCTRL: 4688 if (pr_valsize < sizeof (link_flowctrl_t)) 4689 return (EINVAL); 4690 4691 fl = LINK_FLOWCTRL_NONE; 4692 if (param_arr[param_anar_pause].value) { 4693 fl = LINK_FLOWCTRL_RX; 4694 } 4695 bcopy(&fl, pr_val, sizeof (fl)); 4696 break; 4697 4698 case DLD_PROP_ADV_1000FDX_CAP: 4699 if (pr_valsize < sizeof (uint8_t)) 4700 return (EINVAL); 4701 *(uint8_t *)pr_val = 4702 param_arr[param_anar_1000fdx].value; 4703 break; 4704 4705 case DLD_PROP_EN_1000FDX_CAP: 4706 if (pr_valsize < sizeof (uint8_t)) 4707 return (EINVAL); 4708 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4709 break; 4710 4711 case DLD_PROP_ADV_100FDX_CAP: 4712 if (pr_valsize < sizeof (uint8_t)) 4713 return (EINVAL); 4714 *(uint8_t *)pr_val = 4715 param_arr[param_anar_100fdx].value; 4716 break; 4717 4718 case DLD_PROP_EN_100FDX_CAP: 4719 if (pr_valsize < sizeof (uint8_t)) 4720 return (EINVAL); 4721 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4722 break; 4723 4724 case DLD_PROP_ADV_10FDX_CAP: 4725 if (pr_valsize < sizeof (uint8_t)) 4726 return (EINVAL); 4727 *(uint8_t *)pr_val = 4728 param_arr[param_anar_10fdx].value; 4729 break; 4730 4731 case DLD_PROP_EN_10FDX_CAP: 4732 if (pr_valsize < sizeof (uint8_t)) 4733 return (EINVAL); 4734 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4735 break; 4736 4737 case DLD_PROP_EN_1000HDX_CAP: 4738 case DLD_PROP_EN_100HDX_CAP: 4739 case DLD_PROP_EN_10HDX_CAP: 4740 case DLD_PROP_ADV_1000HDX_CAP: 4741 case DLD_PROP_ADV_100HDX_CAP: 4742 case DLD_PROP_ADV_10HDX_CAP: 4743 err = EINVAL; 4744 break; 4745 4746 default: 4747 err = nxge_get_priv_prop(nxgep, pr_name, pr_valsize, 4748 pr_val); 4749 } 4750 4751 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4752 4753 return (err); 4754 } 4755 4756 /* ARGSUSED */ 4757 static int 4758 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4759 const void *pr_val) 4760 { 4761 p_nxge_param_t param_arr = nxgep->param_arr; 4762 int err = 0; 4763 long result; 4764 4765 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4766 "==> nxge_set_priv_prop: name %s", pr_name)); 4767 4768 if (strcmp(pr_name, "_accept_jumbo") == 0) { 4769 (void) 
ddi_strtol(pr_val, (char **)NULL, 0, &result); 4770 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4771 "<== nxge_set_priv_prop: name %s " 4772 "pr_val %s result %d " 4773 "param %d is_jumbo %d", 4774 pr_name, pr_val, result, 4775 param_arr[param_accept_jumbo].value, 4776 nxgep->mac.is_jumbo)); 4777 4778 if (result > 1 || result < 0) { 4779 return (EINVAL); 4780 } 4781 if (nxgep->mac.is_jumbo == 4782 (uint32_t)result) { 4783 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4784 "no change (%d %d)", 4785 nxgep->mac.is_jumbo, 4786 result)); 4787 return (0); 4788 } 4789 4790 4791 param_arr[param_accept_jumbo].value = result; 4792 nxgep->mac.is_jumbo = B_FALSE; 4793 if (result) { 4794 nxgep->mac.is_jumbo = B_TRUE; 4795 } 4796 4797 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4798 "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d", 4799 pr_name, result, nxgep->mac.is_jumbo)); 4800 4801 return (err); 4802 } 4803 4804 /* Receive interrupt blanking parameters */ 4805 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4806 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4807 (char *)pr_val, 4808 (caddr_t)&param_arr[param_rxdma_intr_time]); 4809 if (err) { 4810 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4811 "<== nxge_set_priv_prop: " 4812 "unable to set (%s)", pr_name)); 4813 err = EINVAL; 4814 } else { 4815 err = 0; 4816 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4817 "<== nxge_set_priv_prop: " 4818 "set (%s)", pr_name)); 4819 } 4820 4821 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4822 "<== nxge_set_priv_prop: name %s (value %s)", 4823 pr_name, pr_val)); 4824 4825 return (err); 4826 } 4827 4828 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4829 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4830 (char *)pr_val, 4831 (caddr_t)&param_arr[param_rxdma_intr_pkts]); 4832 if (err) { 4833 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4834 "<== nxge_set_priv_prop: " 4835 "unable to set (%s)", pr_name)); 4836 err = EINVAL; 4837 } else { 4838 err = 0; 4839 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4840 "<== nxge_set_priv_prop: " 4841 "set (%s)", pr_name)); 4842 } 4843 4844 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4845 "<== nxge_set_priv_prop: name %s (value %s)", 4846 pr_name, pr_val)); 4847 4848 return (err); 4849 } 4850 4851 /* Classification */ 4852 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4853 if (pr_val == NULL) { 4854 err = EINVAL; 4855 return (err); 4856 } 4857 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4858 4859 err = nxge_param_set_ip_opt(nxgep, NULL, 4860 NULL, (char *)pr_val, 4861 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]); 4862 4863 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4864 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4865 pr_name, result)); 4866 4867 return (err); 4868 } 4869 4870 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 4871 if (pr_val == NULL) { 4872 err = EINVAL; 4873 return (err); 4874 } 4875 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4876 4877 err = nxge_param_set_ip_opt(nxgep, NULL, 4878 NULL, (char *)pr_val, 4879 (caddr_t)&param_arr[param_class_opt_ipv4_udp]); 4880 4881 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4882 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4883 pr_name, result)); 4884 4885 return (err); 4886 } 4887 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 4888 if (pr_val == NULL) { 4889 err = EINVAL; 4890 return (err); 4891 } 4892 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4893 4894 err = nxge_param_set_ip_opt(nxgep, NULL, 4895 NULL, (char *)pr_val, 4896 (caddr_t)&param_arr[param_class_opt_ipv4_ah]); 4897 4898 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4899 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4900 pr_name, result)); 4901 4902 return (err); 4903 } 4904 if
(strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 4905 if (pr_val == NULL) { 4906 err = EINVAL; 4907 return (err); 4908 } 4909 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4910 4911 err = nxge_param_set_ip_opt(nxgep, NULL, 4912 NULL, (char *)pr_val, 4913 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]); 4914 4915 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4916 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4917 pr_name, result)); 4918 4919 return (err); 4920 } 4921 4922 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 4923 if (pr_val == NULL) { 4924 err = EINVAL; 4925 return (err); 4926 } 4927 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4928 4929 err = nxge_param_set_ip_opt(nxgep, NULL, 4930 NULL, (char *)pr_val, 4931 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]); 4932 4933 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4934 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4935 pr_name, result)); 4936 4937 return (err); 4938 } 4939 4940 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 4941 if (pr_val == NULL) { 4942 err = EINVAL; 4943 return (err); 4944 } 4945 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4946 4947 err = nxge_param_set_ip_opt(nxgep, NULL, 4948 NULL, (char *)pr_val, 4949 (caddr_t)&param_arr[param_class_opt_ipv6_udp]); 4950 4951 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4952 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4953 pr_name, result)); 4954 4955 return (err); 4956 } 4957 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 4958 if (pr_val == NULL) { 4959 err = EINVAL; 4960 return (err); 4961 } 4962 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4963 4964 err = nxge_param_set_ip_opt(nxgep, NULL, 4965 NULL, (char *)pr_val, 4966 (caddr_t)&param_arr[param_class_opt_ipv6_ah]); 4967 4968 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4969 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4970 pr_name, result)); 4971 4972 return (err); 4973 } 4974 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 4975 if (pr_val == NULL) { 4976 err = EINVAL; 4977 return (err); 4978 } 4979 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4980 4981 err = nxge_param_set_ip_opt(nxgep, NULL, 4982 NULL, (char *)pr_val, 4983 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]); 4984 4985 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4986 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4987 pr_name, result)); 4988 4989 return (err); 4990 } 4991 4992 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 4993 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4994 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4995 "==> nxge_set_priv_prop: name %s (busy)", pr_name)); 4996 err = EBUSY; 4997 return (err); 4998 } 4999 if (pr_val == NULL) { 5000 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5001 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 5002 err = EINVAL; 5003 return (err); 5004 } 5005 5006 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5007 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5008 "<== nxge_set_priv_prop: name %s " 5009 "(lso %d pr_val %s value %d)", 5010 pr_name, nxgep->soft_lso_enable, pr_val, result)); 5011 5012 if (result > 1 || result < 0) { 5013 return (EINVAL); 5014 } 5015 if (nxgep->soft_lso_enable == (uint32_t)result) { 5016 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5017 "no change (%d %d)", 5018 nxgep->soft_lso_enable, result)); 5019 return (0); 5020 } 5021 5022 5023 nxgep->soft_lso_enable = (int)result; 5024 5025 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5026 "<== nxge_set_priv_prop: name %s (value %d)", 5027 pr_name, result)); 5028 5029 return (err); 5030 } 5031 5032 return (EINVAL); 5033 } 5034 5035 static int 5036 nxge_get_priv_prop(p_nxge_t nxgep, const char
*pr_name, uint_t pr_valsize, 5037 void *pr_val) 5038 { 5039 p_nxge_param_t param_arr = nxgep->param_arr; 5040 char valstr[MAXNAMELEN]; 5041 int err = EINVAL; 5042 uint_t strsize; 5043 5044 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5045 "==> nxge_get_priv_prop: property %s", pr_name)); 5046 5047 /* function number */ 5048 if (strcmp(pr_name, "_function_number") == 0) { 5049 (void) sprintf(valstr, "%d", nxgep->function_num); 5050 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5051 "==> nxge_get_priv_prop: name %s " 5052 "(value %d valstr %s)", 5053 pr_name, nxgep->function_num, valstr)); 5054 5055 err = 0; 5056 goto done; 5057 } 5058 5059 /* Neptune firmware version */ 5060 if (strcmp(pr_name, "_fw_version") == 0) { 5061 (void) sprintf(valstr, "%s", nxgep->vpd_info.ver); 5062 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5063 "==> nxge_get_priv_prop: name %s " 5064 "(value %s valstr %s)", 5065 pr_name, nxgep->vpd_info.ver, valstr)); 5066 5067 err = 0; 5068 goto done; 5069 } 5070 5071 /* port PHY mode */ 5072 if (strcmp(pr_name, "_port_mode") == 0) { 5073 switch (nxgep->mac.portmode) { 5074 case PORT_1G_COPPER: 5075 (void) sprintf(valstr, "1G copper %s", 5076 nxgep->hot_swappable_phy ? 5077 "[hot swappable]" : ""); 5078 break; 5079 case PORT_1G_FIBER: 5080 (void) sprintf(valstr, "1G fiber %s", 5081 nxgep->hot_swappable_phy ? 5082 "[hot swappable]" : ""); 5083 break; 5084 case PORT_10G_COPPER: 5085 (void) sprintf(valstr, "10G copper %s", 5086 nxgep->hot_swappable_phy ? 5087 "[hot swappable]" : ""); 5088 break; 5089 case PORT_10G_FIBER: 5090 (void) sprintf(valstr, "10G fiber %s", 5091 nxgep->hot_swappable_phy ? 5092 "[hot swappable]" : ""); 5093 break; 5094 case PORT_10G_SERDES: 5095 (void) sprintf(valstr, "10G serdes %s", 5096 nxgep->hot_swappable_phy ? 5097 "[hot swappable]" : ""); 5098 break; 5099 case PORT_1G_SERDES: 5100 (void) sprintf(valstr, "1G serdes %s", 5101 nxgep->hot_swappable_phy ? 5102 "[hot swappable]" : ""); 5103 break; 5104 case PORT_1G_RGMII_FIBER: 5105 (void) sprintf(valstr, "1G rgmii fiber %s", 5106 nxgep->hot_swappable_phy ? 5107 "[hot swappable]" : ""); 5108 break; 5109 case PORT_HSP_MODE: 5110 (void) sprintf(valstr, 5111 "phy not present [hot swappable]"); 5112 break; 5113 default: 5114 (void) sprintf(valstr, "unknown %s", 5115 nxgep->hot_swappable_phy ? 5116 "[hot swappable]" : ""); 5117 break; 5118 } 5119 5120 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5121 "==> nxge_get_priv_prop: name %s (value %s)", 5122 pr_name, valstr)); 5123 5124 err = 0; 5125 goto done; 5126 } 5127 5128 /* Hot swappable PHY */ 5129 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5130 (void) sprintf(valstr, "%s", 5131 nxgep->hot_swappable_phy ?
5132 "yes" : "no"); 5133 5134 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5135 "==> nxge_get_priv_prop: name %s " 5136 "(value %d valstr %s)", 5137 pr_name, nxgep->hot_swappable_phy, valstr)); 5138 5139 err = 0; 5140 goto done; 5141 } 5142 5143 5144 /* accept jumbo */ 5145 if (strcmp(pr_name, "_accept_jumbo") == 0) { 5146 (void) sprintf(valstr, "%d", nxgep->mac.is_jumbo); 5147 err = 0; 5148 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5149 "==> nxge_get_priv_prop: name %s (value %d (%d, %d))", 5150 pr_name, 5151 (uint32_t)param_arr[param_accept_jumbo].value, 5152 nxgep->mac.is_jumbo, 5153 nxge_jumbo_enable)); 5154 5155 goto done; 5156 } 5157 5158 /* Receive Interrupt Blanking Parameters */ 5159 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5160 (void) sprintf(valstr, "%d", nxgep->intr_timeout); 5161 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5162 "==> nxge_get_priv_prop: name %s (value %d)", 5163 pr_name, 5164 (uint32_t)nxgep->intr_timeout)); 5165 err = 0; 5166 goto done; 5167 } 5168 5169 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5170 (void) sprintf(valstr, "%d", nxgep->intr_threshold); 5171 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5172 "==> nxge_get_priv_prop: name %s (value %d)", 5173 pr_name, (uint32_t)nxgep->intr_threshold)); 5174 5175 err = 0; 5176 goto done; 5177 } 5178 5179 /* Classification and Load Distribution Configuration */ 5180 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5181 err = nxge_dld_get_ip_opt(nxgep, 5182 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]); 5183 5184 (void) sprintf(valstr, "%x", 5185 (int)param_arr[param_class_opt_ipv4_tcp].value); 5186 5187 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5188 "==> nxge_get_priv_prop: %s", valstr)); 5189 goto done; 5190 } 5191 5192 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5193 err = nxge_dld_get_ip_opt(nxgep, 5194 (caddr_t)&param_arr[param_class_opt_ipv4_udp]); 5195 5196 (void) sprintf(valstr, "%x", 5197 (int)param_arr[param_class_opt_ipv4_udp].value); 5198 5199 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5200 "==> nxge_get_priv_prop: %s", valstr)); 5201 goto done; 5202 } 5203 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5204 err = nxge_dld_get_ip_opt(nxgep, 5205 (caddr_t)&param_arr[param_class_opt_ipv4_ah]); 5206 5207 (void) sprintf(valstr, "%x", 5208 (int)param_arr[param_class_opt_ipv4_ah].value); 5209 5210 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5211 "==> nxge_get_priv_prop: %s", valstr)); 5212 goto done; 5213 } 5214 5215 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5216 err = nxge_dld_get_ip_opt(nxgep, 5217 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]); 5218 5219 (void) sprintf(valstr, "%x", 5220 (int)param_arr[param_class_opt_ipv4_sctp].value); 5221 5222 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5223 "==> nxge_get_priv_prop: %s", valstr)); 5224 goto done; 5225 } 5226 5227 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5228 err = nxge_dld_get_ip_opt(nxgep, 5229 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]); 5230 5231 (void) sprintf(valstr, "%x", 5232 (int)param_arr[param_class_opt_ipv6_tcp].value); 5233 5234 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5235 "==> nxge_get_priv_prop: %s", valstr)); 5236 5237 goto done; 5238 } 5239 5240 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5241 err = nxge_dld_get_ip_opt(nxgep, 5242 (caddr_t)&param_arr[param_class_opt_ipv6_udp]); 5243 5244 (void) sprintf(valstr, "%x", 5245 (int)param_arr[param_class_opt_ipv6_udp].value); 5246 5247 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5248 "==> nxge_get_priv_prop: %s", valstr)); 5249 goto done; 5250 } 5251 5252 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5253 err = nxge_dld_get_ip_opt(nxgep, 5254
(caddr_t)&param_arr[param_class_opt_ipv6_ah]); 5255 5256 (void) sprintf(valstr, "%x", 5257 (int)param_arr[param_class_opt_ipv6_ah].value); 5258 5259 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5260 "==> nxge_get_priv_prop: %s", valstr)); 5261 goto done; 5262 } 5263 5264 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5265 err = nxge_dld_get_ip_opt(nxgep, 5266 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]); 5267 5268 (void) sprintf(valstr, "%x", 5269 (int)param_arr[param_class_opt_ipv6_sctp].value); 5270 5271 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5272 "==> nxge_get_priv_prop: %s", valstr)); 5273 goto done; 5274 } 5275 5276 /* Software LSO */ 5277 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5278 (void) sprintf(valstr, "%d", nxgep->soft_lso_enable); 5279 err = 0; 5280 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5281 "==> nxge_get_priv_prop: name %s (value %d)", 5282 pr_name, nxgep->soft_lso_enable)); 5283 5284 goto done; 5285 } 5286 5287 done: 5288 if (err == 0) { 5289 strsize = (uint_t)strlen(valstr); 5290 if (pr_valsize < strsize) { 5291 err = ENOBUFS; 5292 } else { 5293 (void) strlcpy(pr_val, valstr, pr_valsize); 5294 } 5295 } 5296 5297 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5298 "<== nxge_get_priv_prop: return %d", err)); 5299 return (err); 5300 } 5301 5302 /* 5303 * Module loading and removing entry points. 5304 */ 5305 5306 static struct cb_ops nxge_cb_ops = { 5307 nodev, /* cb_open */ 5308 nodev, /* cb_close */ 5309 nodev, /* cb_strategy */ 5310 nodev, /* cb_print */ 5311 nodev, /* cb_dump */ 5312 nodev, /* cb_read */ 5313 nodev, /* cb_write */ 5314 nodev, /* cb_ioctl */ 5315 nodev, /* cb_devmap */ 5316 nodev, /* cb_mmap */ 5317 nodev, /* cb_segmap */ 5318 nochpoll, /* cb_chpoll */ 5319 ddi_prop_op, /* cb_prop_op */ 5320 NULL, /* cb_str */ 5321 D_MP, /* cb_flag */ 5322 CB_REV, /* rev */ 5323 nodev, /* int (*cb_aread)() */ 5324 nodev /* int (*cb_awrite)() */ 5325 }; 5326 5327 static struct dev_ops nxge_dev_ops = { 5328 DEVO_REV, /* devo_rev */ 5329 0, /* devo_refcnt */ 5330 nulldev, /* devo_getinfo */ 5331 nulldev, /* devo_identify */ 5332 nulldev, /* devo_probe */ 5333 nxge_attach, /* devo_attach */ 5334 nxge_detach, /* devo_detach */ 5335 nodev, /* devo_reset */ 5336 &nxge_cb_ops, /* devo_cb_ops */ 5337 (struct bus_ops *)NULL, /* devo_bus_ops */ 5338 ddi_power /* devo_power */ 5339 }; 5340 5341 extern struct mod_ops mod_driverops; 5342 5343 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5344 5345 /* 5346 * Module linkage information for the kernel.
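* A single modldrv (nxge_modldrv) wraps nxge_dev_ops; _init(),
* _info() and _fini() below pass the resulting modlinkage to
* mod_install(), mod_info() and mod_remove().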
5347 */ 5348 static struct modldrv nxge_modldrv = { 5349 &mod_driverops, 5350 NXGE_DESC_VER, 5351 &nxge_dev_ops 5352 }; 5353 5354 static struct modlinkage modlinkage = { 5355 MODREV_1, (void *) &nxge_modldrv, NULL 5356 }; 5357 5358 int 5359 _init(void) 5360 { 5361 int status; 5362 5363 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5364 mac_init_ops(&nxge_dev_ops, "nxge"); 5365 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5366 if (status != 0) { 5367 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5368 "failed to init device soft state")); 5369 goto _init_exit; 5370 } 5371 status = mod_install(&modlinkage); 5372 if (status != 0) { 5373 ddi_soft_state_fini(&nxge_list); 5374 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5375 goto _init_exit; 5376 } 5377 5378 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5379 5380 _init_exit: 5381 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 5382 5383 return (status); 5384 } 5385 5386 int 5387 _fini(void) 5388 { 5389 int status; 5390 5391 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5392 5393 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5394 5395 if (nxge_mblks_pending) 5396 return (EBUSY); 5397 5398 status = mod_remove(&modlinkage); 5399 if (status != DDI_SUCCESS) { 5400 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5401 "Module removal failed 0x%08x", 5402 status)); 5403 goto _fini_exit; 5404 } 5405 5406 mac_fini_ops(&nxge_dev_ops); 5407 5408 ddi_soft_state_fini(&nxge_list); 5409 5410 MUTEX_DESTROY(&nxge_common_lock); 5411 _fini_exit: 5412 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 5413 5414 return (status); 5415 } 5416 5417 int 5418 _info(struct modinfo *modinfop) 5419 { 5420 int status; 5421 5422 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5423 status = mod_info(&modlinkage, modinfop); 5424 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5425 5426 return (status); 5427 } 5428 5429 /*ARGSUSED*/ 5430 static nxge_status_t 5431 nxge_add_intrs(p_nxge_t nxgep) 5432 { 5433 5434 int intr_types; 5435 int type = 0; 5436 int ddi_status = DDI_SUCCESS; 5437 nxge_status_t status = NXGE_OK; 5438 5439 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 5440 5441 nxgep->nxge_intr_type.intr_registered = B_FALSE; 5442 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 5443 nxgep->nxge_intr_type.msi_intx_cnt = 0; 5444 nxgep->nxge_intr_type.intr_added = 0; 5445 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 5446 nxgep->nxge_intr_type.intr_type = 0; 5447 5448 if (nxgep->niu_type == N2_NIU) { 5449 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5450 } else if (nxge_msi_enable) { 5451 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5452 } 5453 5454 /* Get the supported interrupt types */ 5455 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 5456 != DDI_SUCCESS) { 5457 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 5458 "ddi_intr_get_supported_types failed: status 0x%08x", 5459 ddi_status)); 5460 return (NXGE_ERROR | NXGE_DDI_FAILED); 5461 } 5462 nxgep->nxge_intr_type.intr_types = intr_types; 5463 5464 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5465 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5466 5467 /* 5468 * Solaris MSI-X is not supported yet; use MSI for now. 5469 * nxge_msi_enable: 5470 * 1 - MSI, 2 - MSI-X, others - FIXED 5471 */ 5472 switch (nxge_msi_enable) { 5473 default: 5474 type = DDI_INTR_TYPE_FIXED; 5475 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5476 "use fixed (intx emulation) type %08x", 5477 type)); 5478 break; 5479 5480 case 2: 5481 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5482 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5483 if (intr_types & DDI_INTR_TYPE_MSIX) { 5484 type = DDI_INTR_TYPE_MSIX; 5485 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5486 "ddi_intr_get_supported_types: MSIX 0x%08x", 5487 type)); 5488 } else if (intr_types & DDI_INTR_TYPE_MSI) { 5489 type = DDI_INTR_TYPE_MSI; 5490 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5491 "ddi_intr_get_supported_types: MSI 0x%08x", 5492 type)); 5493 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5494 type = DDI_INTR_TYPE_FIXED; 5495 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5496 "ddi_intr_get_supported_types: FIXED 0x%08x", 5497 type)); 5498 } 5499 break; 5500 5501 case 1: 5502 if (intr_types & DDI_INTR_TYPE_MSI) { 5503 type = DDI_INTR_TYPE_MSI; 5504 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5505 "ddi_intr_get_supported_types: MSI 0x%08x", 5506 type)); 5507 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 5508 type = DDI_INTR_TYPE_MSIX; 5509 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5510 "ddi_intr_get_supported_types: MSIX 0x%08x", 5511 type)); 5512 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5513 type = DDI_INTR_TYPE_FIXED; 5514 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5515 "ddi_intr_get_supported_types: FIXED 0x%08x", 5516 type)); 5517 } 5518 } 5519 5520 nxgep->nxge_intr_type.intr_type = type; 5521 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 5522 type == DDI_INTR_TYPE_FIXED) && 5523 nxgep->nxge_intr_type.niu_msi_enable) { 5524 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 5525 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5526 " nxge_add_intrs: " 5527 " nxge_add_intrs_adv failed: status 0x%08x", 5528 status)); 5529 return (status); 5530 } else { 5531 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5532 "interrupts registered : type %d", type)); 5533 nxgep->nxge_intr_type.intr_registered = B_TRUE; 5534 5535 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5536 "\nAdded advanced nxge add_intr_adv " 5537 "intr type 0x%x\n", type)); 5538 5539 return (status); 5540 } 5541 } 5542 5543 if (!nxgep->nxge_intr_type.intr_registered) { 5544 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 5545 "failed to register interrupts")); 5546 return (NXGE_ERROR | NXGE_DDI_FAILED); 5547 } 5548 5549 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 5550 return (status); 5551 } 5552 5553 /*ARGSUSED*/ 5554 static nxge_status_t 5555 nxge_add_soft_intrs(p_nxge_t nxgep) 5556 { 5557 5558 int ddi_status = DDI_SUCCESS; 5559 nxge_status_t status = NXGE_OK; 5560 5561 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs")); 5562 5563 nxgep->resched_id = NULL; 5564 nxgep->resched_running = B_FALSE; 5565 ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW, 5566 &nxgep->resched_id, 5567 NULL, NULL, nxge_reschedule, (caddr_t)nxgep); 5568 if (ddi_status != DDI_SUCCESS) { 5569 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: " 5570 "ddi_add_softintr failed: status 0x%08x", 5571 ddi_status)); 5572 return (NXGE_ERROR | NXGE_DDI_FAILED); 5573 } 5574 5575 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs")); 5576 5577 return (status); 5578 } 5579
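/*
 * [Editorial sketch -- not part of the original driver.] The two
 * nxge_add_intrs_adv_type*() routines below implement the standard
 * DDI interrupt registration sequence for whole groups of vectors.
 * This minimal, hypothetical helper shows the same sequence for a
 * single FIXED vector; the function name and arguments are
 * illustrative only, and it relies on the DDI headers already pulled
 * in through nxge_impl.h.
 */
static int
example_add_one_fixed_intr(dev_info_t *dip, ddi_intr_handler_t *handler,
    void *arg)
{
	ddi_intr_handle_t h;
	int types, navail, nactual;
	uint_t pri;

	/* Which interrupt types can this device use? */
	if (ddi_intr_get_supported_types(dip, &types) != DDI_SUCCESS ||
	    !(types & DDI_INTR_TYPE_FIXED))
		return (DDI_FAILURE);

	/* How many FIXED vectors are available right now? */
	if (ddi_intr_get_navail(dip, DDI_INTR_TYPE_FIXED, &navail) !=
	    DDI_SUCCESS || navail == 0)
		return (DDI_FAILURE);

	/* Allocate exactly one vector; FIXED uses strict allocation. */
	if (ddi_intr_alloc(dip, &h, DDI_INTR_TYPE_FIXED, 0, 1, &nactual,
	    DDI_INTR_ALLOC_STRICT) != DDI_SUCCESS || nactual == 0)
		return (DDI_FAILURE);

	/* The priority would normally seed the driver's interrupt mutexes. */
	if (ddi_intr_get_pri(h, &pri) != DDI_SUCCESS ||
	    ddi_intr_add_handler(h, handler, arg, NULL) != DDI_SUCCESS) {
		(void) ddi_intr_free(h);
		return (DDI_FAILURE);
	}
	if (ddi_intr_enable(h) != DDI_SUCCESS) {
		(void) ddi_intr_remove_handler(h);
		(void) ddi_intr_free(h);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}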
5580 static nxge_status_t 5581 nxge_add_intrs_adv(p_nxge_t nxgep) 5582 { 5583 int intr_type; 5584 p_nxge_intr_t intrp; 5585 5586 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 5587 5588 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5589 intr_type = intrp->intr_type; 5590 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 5591 intr_type)); 5592 5593 switch (intr_type) { 5594 case DDI_INTR_TYPE_MSI: /* 0x2 */ 5595 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 5596 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 5597 5598 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 5599 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 5600 5601 default: 5602 return (NXGE_ERROR); 5603 } 5604 } 5605 5606 5607 /*ARGSUSED*/ 5608 static nxge_status_t 5609 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 5610 { 5611 dev_info_t *dip = nxgep->dip; 5612 p_nxge_ldg_t ldgp; 5613 p_nxge_intr_t intrp; 5614 uint_t *inthandler; 5615 void *arg1, *arg2; 5616 int behavior; 5617 int nintrs, navail, nrequest; 5618 int nactual, nrequired; 5619 int inum = 0; 5620 int x, y; 5621 int ddi_status = DDI_SUCCESS; 5622 nxge_status_t status = NXGE_OK; 5623 5624 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 5625 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5626 intrp->start_inum = 0; 5627 5628 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 5629 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 5630 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5631 "ddi_intr_get_nintrs() failed, status: 0x%x, " 5632 "nintrs: %d", ddi_status, nintrs)); 5633 return (NXGE_ERROR | NXGE_DDI_FAILED); 5634 } 5635 5636 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 5637 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 5638 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5639 "ddi_intr_get_navail() failed, status: 0x%x, " 5640 "navail: %d", ddi_status, navail)); 5641 return (NXGE_ERROR | NXGE_DDI_FAILED); 5642 } 5643 5644 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5645 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 5646 nintrs, navail)); 5647 5648 /* PSARC/2007/453 MSI-X interrupt limit override */ 5649 if (int_type == DDI_INTR_TYPE_MSIX) { 5650 nrequest = nxge_create_msi_property(nxgep); 5651 if (nrequest < navail) { 5652 navail = nrequest; 5653 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5654 "nxge_add_intrs_adv_type: nintrs %d " 5655 "navail %d (nrequest %d)", 5656 nintrs, navail, nrequest)); 5657 } 5658 } 5659 5660 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 5661 /* MSI must be power of 2 */ 5662 if ((navail & 16) == 16) { 5663 navail = 16; 5664 } else if ((navail & 8) == 8) { 5665 navail = 8; 5666 } else if ((navail & 4) == 4) { 5667 navail = 4; 5668 } else if ((navail & 2) == 2) { 5669 navail = 2; 5670 } else { 5671 navail = 1; 5672 } 5673 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5674 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 5675 "navail %d", nintrs, navail)); 5676 } 5677 5678 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ?
DDI_INTR_ALLOC_STRICT : 5679 DDI_INTR_ALLOC_NORMAL); 5680 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 5681 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 5682 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 5683 navail, &nactual, behavior); 5684 if (ddi_status != DDI_SUCCESS || nactual == 0) { 5685 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5686 " ddi_intr_alloc() failed: %d", 5687 ddi_status)); 5688 kmem_free(intrp->htable, intrp->intr_size); 5689 return (NXGE_ERROR | NXGE_DDI_FAILED); 5690 } 5691 5692 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 5693 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 5694 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5695 " ddi_intr_get_pri() failed: %d", 5696 ddi_status)); 5697 /* Free already allocated interrupts */ 5698 for (y = 0; y < nactual; y++) { 5699 (void) ddi_intr_free(intrp->htable[y]); 5700 } 5701 5702 kmem_free(intrp->htable, intrp->intr_size); 5703 return (NXGE_ERROR | NXGE_DDI_FAILED); 5704 } 5705 5706 nrequired = 0; 5707 switch (nxgep->niu_type) { 5708 default: 5709 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 5710 break; 5711 5712 case N2_NIU: 5713 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 5714 break; 5715 } 5716 5717 if (status != NXGE_OK) { 5718 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5719 "nxge_add_intrs_adv_type: nxge_ldgv_init " 5720 "failed: 0x%x", status)); 5721 /* Free already allocated interrupts */ 5722 for (y = 0; y < nactual; y++) { 5723 (void) ddi_intr_free(intrp->htable[y]); 5724 } 5725 5726 kmem_free(intrp->htable, intrp->intr_size); 5727 return (status); 5728 } 5729 5730 ldgp = nxgep->ldgvp->ldgp; 5731 for (x = 0; x < nrequired; x++, ldgp++) { 5732 ldgp->vector = (uint8_t)x; 5733 ldgp->intdata = SID_DATA(ldgp->func, x); 5734 arg1 = ldgp->ldvp; 5735 arg2 = nxgep; 5736 if (ldgp->nldvs == 1) { 5737 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 5738 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5739 "nxge_add_intrs_adv_type: " 5740 "arg1 0x%x arg2 0x%x: " 5741 "1-1 int handler (entry %d intdata 0x%x)\n", 5742 arg1, arg2, 5743 x, ldgp->intdata)); 5744 } else if (ldgp->nldvs > 1) { 5745 inthandler = (uint_t *)ldgp->sys_intr_handler; 5746 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5747 "nxge_add_intrs_adv_type: " 5748 "arg1 0x%x arg2 0x%x: " 5749 "nldvs %d int handler " 5750 "(entry %d intdata 0x%x)\n", 5751 arg1, arg2, 5752 ldgp->nldvs, x, ldgp->intdata)); 5753 } 5754 5755 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5756 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 5757 "htable 0x%llx", x, intrp->htable[x])); 5758 5759 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 5760 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 5761 != DDI_SUCCESS) { 5762 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5763 "==> nxge_add_intrs_adv_type: failed #%d " 5764 "status 0x%x", x, ddi_status)); 5765 for (y = 0; y < intrp->intr_added; y++) { 5766 (void) ddi_intr_remove_handler( 5767 intrp->htable[y]); 5768 } 5769 /* Free already allocated intr */ 5770 for (y = 0; y < nactual; y++) { 5771 (void) ddi_intr_free(intrp->htable[y]); 5772 } 5773 kmem_free(intrp->htable, intrp->intr_size); 5774 5775 (void) nxge_ldgv_uninit(nxgep); 5776 5777 return (NXGE_ERROR | NXGE_DDI_FAILED); 5778 } 5779 intrp->intr_added++; 5780 } 5781 5782 intrp->msi_intx_cnt = nactual; 5783 5784 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5785 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 5786 navail, nactual, 5787 intrp->msi_intx_cnt, 5788 intrp->intr_added)); 5789 5790 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 5791 5792 (void)
nxge_intr_ldgv_init(nxgep); 5793 5794 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type")); 5795 5796 return (status); 5797 } 5798 5799 /*ARGSUSED*/ 5800 static nxge_status_t 5801 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type) 5802 { 5803 dev_info_t *dip = nxgep->dip; 5804 p_nxge_ldg_t ldgp; 5805 p_nxge_intr_t intrp; 5806 uint_t *inthandler; 5807 void *arg1, *arg2; 5808 int behavior; 5809 int nintrs, navail; 5810 int nactual, nrequired; 5811 int inum = 0; 5812 int x, y; 5813 int ddi_status = DDI_SUCCESS; 5814 nxge_status_t status = NXGE_OK; 5815 5816 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix")); 5817 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5818 intrp->start_inum = 0; 5819 5820 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 5821 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 5822 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5823 "ddi_intr_get_nintrs() failed, status: 0x%x, " 5824 "nintrs: %d", ddi_status, nintrs)); 5825 return (NXGE_ERROR | NXGE_DDI_FAILED); 5826 } 5827 5828 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 5829 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 5830 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5831 "ddi_intr_get_navail() failed, status: 0x%x, " 5832 "navail: %d", ddi_status, navail)); 5833 return (NXGE_ERROR | NXGE_DDI_FAILED); 5834 } 5835 5836 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5837 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 5838 nintrs, navail)); 5839 5840 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 5841 DDI_INTR_ALLOC_NORMAL); 5842 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 5843 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 5844 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 5845 navail, &nactual, behavior); 5846 if (ddi_status != DDI_SUCCESS || nactual == 0) { 5847 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5848 " ddi_intr_alloc() failed: %d", 5849 ddi_status)); 5850 kmem_free(intrp->htable, intrp->intr_size); 5851 return (NXGE_ERROR | NXGE_DDI_FAILED); 5852 } 5853 5854 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 5855 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 5856 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5857 " ddi_intr_get_pri() failed: %d", 5858 ddi_status)); 5859 /* Free already allocated interrupts */ 5860 for (y = 0; y < nactual; y++) { 5861 (void) ddi_intr_free(intrp->htable[y]); 5862 } 5863 5864 kmem_free(intrp->htable, intrp->intr_size); 5865 return (NXGE_ERROR | NXGE_DDI_FAILED); 5866 } 5867 5868 nrequired = 0; 5869 switch (nxgep->niu_type) { 5870 default: 5871 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 5872 break; 5873 5874 case N2_NIU: 5875 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 5876 break; 5877 } 5878 5879 if (status != NXGE_OK) { 5880 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5881 "nxge_add_intrs_adv_type_fix: nxge_ldgv_init " 5882 "failed: 0x%x", status)); 5883 /* Free already allocated interrupts */ 5884 for (y = 0; y < nactual; y++) { 5885 (void) ddi_intr_free(intrp->htable[y]); 5886 } 5887 5888 kmem_free(intrp->htable, intrp->intr_size); 5889 return (status); 5890 } 5891 5892 ldgp = nxgep->ldgvp->ldgp; 5893 for (x = 0; x < nrequired; x++, ldgp++) { 5894 ldgp->vector = (uint8_t)x; 5895 if (nxgep->niu_type != N2_NIU) { 5896 ldgp->intdata = SID_DATA(ldgp->func, x); 5897 } 5898 5899 arg1 = ldgp->ldvp; 5900 arg2 = nxgep; 5901 if (ldgp->nldvs == 1) { 5902 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 5903 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5904 "nxge_add_intrs_adv_type_fix: "
"1-1 int handler(%d) ldg %d ldv %d " 5906 "arg1 $%p arg2 $%p\n", 5907 x, ldgp->ldg, ldgp->ldvp->ldv, 5908 arg1, arg2)); 5909 } else if (ldgp->nldvs > 1) { 5910 inthandler = (uint_t *)ldgp->sys_intr_handler; 5911 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5912 "nxge_add_intrs_adv_type_fix: " 5913 "shared ldv %d int handler(%d) ldv %d ldg %d" 5914 "arg1 0x%016llx arg2 0x%016llx\n", 5915 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 5916 arg1, arg2)); 5917 } 5918 5919 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 5920 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 5921 != DDI_SUCCESS) { 5922 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5923 "==> nxge_add_intrs_adv_type_fix: failed #%d " 5924 "status 0x%x", x, ddi_status)); 5925 for (y = 0; y < intrp->intr_added; y++) { 5926 (void) ddi_intr_remove_handler( 5927 intrp->htable[y]); 5928 } 5929 for (y = 0; y < nactual; y++) { 5930 (void) ddi_intr_free(intrp->htable[y]); 5931 } 5932 /* Free already allocated intr */ 5933 kmem_free(intrp->htable, intrp->intr_size); 5934 5935 (void) nxge_ldgv_uninit(nxgep); 5936 5937 return (NXGE_ERROR | NXGE_DDI_FAILED); 5938 } 5939 intrp->intr_added++; 5940 } 5941 5942 intrp->msi_intx_cnt = nactual; 5943 5944 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 5945 5946 status = nxge_intr_ldgv_init(nxgep); 5947 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 5948 5949 return (status); 5950 } 5951 5952 static void 5953 nxge_remove_intrs(p_nxge_t nxgep) 5954 { 5955 int i, inum; 5956 p_nxge_intr_t intrp; 5957 5958 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 5959 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5960 if (!intrp->intr_registered) { 5961 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5962 "<== nxge_remove_intrs: interrupts not registered")); 5963 return; 5964 } 5965 5966 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 5967 5968 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 5969 (void) ddi_intr_block_disable(intrp->htable, 5970 intrp->intr_added); 5971 } else { 5972 for (i = 0; i < intrp->intr_added; i++) { 5973 (void) ddi_intr_disable(intrp->htable[i]); 5974 } 5975 } 5976 5977 for (inum = 0; inum < intrp->intr_added; inum++) { 5978 if (intrp->htable[inum]) { 5979 (void) ddi_intr_remove_handler(intrp->htable[inum]); 5980 } 5981 } 5982 5983 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 5984 if (intrp->htable[inum]) { 5985 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5986 "nxge_remove_intrs: ddi_intr_free inum %d " 5987 "msi_intx_cnt %d intr_added %d", 5988 inum, 5989 intrp->msi_intx_cnt, 5990 intrp->intr_added)); 5991 5992 (void) ddi_intr_free(intrp->htable[inum]); 5993 } 5994 } 5995 5996 kmem_free(intrp->htable, intrp->intr_size); 5997 intrp->intr_registered = B_FALSE; 5998 intrp->intr_enabled = B_FALSE; 5999 intrp->msi_intx_cnt = 0; 6000 intrp->intr_added = 0; 6001 6002 (void) nxge_ldgv_uninit(nxgep); 6003 6004 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6005 "#msix-request"); 6006 6007 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6008 } 6009 6010 /*ARGSUSED*/ 6011 static void 6012 nxge_remove_soft_intrs(p_nxge_t nxgep) 6013 { 6014 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs")); 6015 if (nxgep->resched_id) { 6016 ddi_remove_softintr(nxgep->resched_id); 6017 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6018 "==> nxge_remove_soft_intrs: removed")); 6019 nxgep->resched_id = NULL; 6020 } 6021 6022 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs")); 6023 } 6024 6025 /*ARGSUSED*/ 6026 static void 6027 nxge_intrs_enable(p_nxge_t nxgep) 6028 { 
6029 p_nxge_intr_t intrp; 6030 int i; 6031 int status; 6032 6033 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6034 6035 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6036 6037 if (!intrp->intr_registered) { 6038 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6039 "interrupts are not registered")); 6040 return; 6041 } 6042 6043 if (intrp->intr_enabled) { 6044 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6045 "<== nxge_intrs_enable: already enabled")); 6046 return; 6047 } 6048 6049 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6050 status = ddi_intr_block_enable(intrp->htable, 6051 intrp->intr_added); 6052 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6053 "block enable - status 0x%x total inums #%d\n", 6054 status, intrp->intr_added)); 6055 } else { 6056 for (i = 0; i < intrp->intr_added; i++) { 6057 status = ddi_intr_enable(intrp->htable[i]); 6058 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6059 "ddi_intr_enable:enable - status 0x%x " 6060 "total inums %d enable inum #%d\n", 6061 status, intrp->intr_added, i)); 6062 if (status == DDI_SUCCESS) { 6063 intrp->intr_enabled = B_TRUE; 6064 } 6065 } 6066 } 6067 6068 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable")); 6069 } 6070 6071 /*ARGSUSED*/ 6072 static void 6073 nxge_intrs_disable(p_nxge_t nxgep) 6074 { 6075 p_nxge_intr_t intrp; 6076 int i; 6077 6078 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable")); 6079 6080 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6081 6082 if (!intrp->intr_registered) { 6083 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: " 6084 "interrupts are not registered")); 6085 return; 6086 } 6087 6088 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6089 (void) ddi_intr_block_disable(intrp->htable, 6090 intrp->intr_added); 6091 } else { 6092 for (i = 0; i < intrp->intr_added; i++) { 6093 (void) ddi_intr_disable(intrp->htable[i]); 6094 } 6095 } 6096 6097 intrp->intr_enabled = B_FALSE; 6098 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable")); 6099 } 6100 6101 static nxge_status_t 6102 nxge_mac_register(p_nxge_t nxgep) 6103 { 6104 mac_register_t *macp; 6105 int status; 6106 6107 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register")); 6108 6109 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 6110 return (NXGE_ERROR); 6111 6112 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 6113 macp->m_driver = nxgep; 6114 macp->m_dip = nxgep->dip; 6115 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; 6116 macp->m_callbacks = &nxge_m_callbacks; 6117 macp->m_min_sdu = 0; 6118 nxgep->mac.default_mtu = nxgep->mac.maxframesize - 6119 NXGE_EHEADER_VLAN_CRC; 6120 macp->m_max_sdu = nxgep->mac.default_mtu; 6121 macp->m_margin = VLAN_TAGSZ; 6122 6123 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 6124 "==> nxge_mac_register: instance %d " 6125 "max_sdu %d margin %d maxframe %d (header %d)", 6126 nxgep->instance, 6127 macp->m_max_sdu, macp->m_margin, 6128 nxgep->mac.maxframesize, 6129 NXGE_EHEADER_VLAN_CRC)); 6130 6131 status = mac_register(macp, &nxgep->mach); 6132 mac_free(macp); 6133 6134 if (status != 0) { 6135 cmn_err(CE_WARN, 6136 "!nxge_mac_register failed (status %d instance %d)", 6137 status, nxgep->instance); 6138 return (NXGE_ERROR); 6139 } 6140 6141 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success " 6142 "(instance %d)", nxgep->instance)); 6143 6144 return (NXGE_OK); 6145 } 6146 6147 void 6148 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp) 6149 { 6150 ssize_t size; 6151 mblk_t *nmp; 6152 uint8_t blk_id; 6153 uint8_t chan; 6154 uint32_t err_id; 6155 err_inject_t 
void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t size;
	mblk_t *nmp;
	uint8_t blk_id;
	uint8_t chan;
	uint32_t err_id;
	err_inject_t *eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;
	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
	switch (blk_id) {
	case MAC_BLK_ID:
		break;
	case TXMAC_BLK_ID:
		break;
	case RXMAC_BLK_ID:
		break;
	case MIF_BLK_ID:
		break;
	case IPP_BLK_ID:
		nxge_ipp_inject_err(nxgep, err_id);
		break;
	case TXC_BLK_ID:
		nxge_txc_inject_err(nxgep, err_id);
		break;
	case TXDMA_BLK_ID:
		nxge_txdma_inject_err(nxgep, err_id, chan);
		break;
	case RXDMA_BLK_ID:
		nxge_rxdma_inject_err(nxgep, err_id, chan);
		break;
	case ZCP_BLK_ID:
		nxge_zcp_inject_err(nxgep, err_id);
		break;
	case ESPC_BLK_ID:
		break;
	case FFLP_BLK_ID:
		break;
	case PHY_BLK_ID:
		break;
	case ETHER_SERDES_BLK_ID:
		break;
	case PCIE_SERDES_BLK_ID:
		break;
	case VIR_BLK_ID:
		break;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

	miocack(wq, mp, (int)size, 0);
}

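/*
 * nxge_init_common_dev
 *
 * Link this function (port) into the shared per-Neptune hardware list,
 * keyed by the parent devinfo node. The first port of a device to
 * attach allocates the nxge_hw_list_t entry, initializes the shared
 * cfg/TCAM/VLAN/MDIO locks and scans the port PHYs; later ports simply
 * increment ndevs and record themselves in the entry's port array.
 */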
static int
nxge_init_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t hw_p;
	dev_info_t *p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

	p_dip = nxgep->p_dip;
	MUTEX_ENTER(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_dev:func # %d",
	    nxgep->function_num));
	/*
	 * Loop through the existing per-Neptune hardware list.
	 */
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "hw_p $%p parent dip $%p",
		    nxgep->function_num,
		    hw_p,
		    p_dip));
		if (hw_p->parent_devp == p_dip) {
			nxgep->nxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->nxge_p[nxgep->function_num] = nxgep;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_init_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num,
		    p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;
		if (nxgep->niu_type == N2_NIU) {
			hw_p->niu_type = N2_NIU;
			hw_p->platform_type = P_NEPTUNE_NIU;
		} else {
			hw_p->niu_type = NIU_TYPE_NONE;
			hw_p->platform_type = P_NEPTUNE_NONE;
		}

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;

		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
	}

	MUTEX_EXIT(&nxge_common_lock);

	nxgep->platform_type = hw_p->platform_type;
	if (nxgep->niu_type != N2_NIU) {
		nxgep->niu_type = hw_p->niu_type;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}

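/*
 * nxge_uninit_common_dev
 *
 * Undo nxge_init_common_dev(): detach this port from the shared
 * hardware list. When the last port of a device detaches (ndevs drops
 * to zero), the shared locks are destroyed, the Hybrid I/O state is
 * torn down, and the list entry is unlinked and freed.
 */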
6393 */ 6394 6395 int 6396 nxge_get_nports(p_nxge_t nxgep) 6397 { 6398 int nports = 0; 6399 6400 switch (nxgep->niu_type) { 6401 case N2_NIU: 6402 case NEPTUNE_2_10GF: 6403 nports = 2; 6404 break; 6405 case NEPTUNE_4_1GC: 6406 case NEPTUNE_2_10GF_2_1GC: 6407 case NEPTUNE_1_10GF_3_1GC: 6408 case NEPTUNE_1_1GC_1_10GF_2_1GC: 6409 case NEPTUNE_2_10GF_2_1GRF: 6410 nports = 4; 6411 break; 6412 default: 6413 switch (nxgep->platform_type) { 6414 case P_NEPTUNE_NIU: 6415 case P_NEPTUNE_ATLAS_2PORT: 6416 nports = 2; 6417 break; 6418 case P_NEPTUNE_ATLAS_4PORT: 6419 case P_NEPTUNE_MARAMBA_P0: 6420 case P_NEPTUNE_MARAMBA_P1: 6421 case P_NEPTUNE_ALONSO: 6422 nports = 4; 6423 break; 6424 default: 6425 break; 6426 } 6427 break; 6428 } 6429 6430 return (nports); 6431 } 6432 6433 /* 6434 * The following two functions are to support 6435 * PSARC/2007/453 MSI-X interrupt limit override. 6436 */ 6437 static int 6438 nxge_create_msi_property(p_nxge_t nxgep) 6439 { 6440 int nmsi; 6441 extern int ncpus; 6442 6443 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property")); 6444 6445 switch (nxgep->mac.portmode) { 6446 case PORT_10G_COPPER: 6447 case PORT_10G_FIBER: 6448 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 6449 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 6450 /* 6451 * The maximum MSI-X requested will be 8. 6452 * If the # of CPUs is less than 8, we will reqeust 6453 * # MSI-X based on the # of CPUs. 6454 */ 6455 if (ncpus >= NXGE_MSIX_REQUEST_10G) { 6456 nmsi = NXGE_MSIX_REQUEST_10G; 6457 } else { 6458 nmsi = ncpus; 6459 } 6460 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6461 "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)", 6462 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6463 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6464 break; 6465 6466 default: 6467 nmsi = NXGE_MSIX_REQUEST_1G; 6468 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6469 "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)", 6470 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6471 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6472 break; 6473 } 6474 6475 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property")); 6476 return (nmsi); 6477 } 6478