1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved. 24 */ 25 26 /* 27 * SunOs MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver. 28 */ 29 #include <sys/nxge/nxge_impl.h> 30 #include <sys/nxge/nxge_hio.h> 31 #include <sys/nxge/nxge_rxdma.h> 32 #include <sys/pcie.h> 33 34 uint32_t nxge_use_partition = 0; /* debug partition flag */ 35 uint32_t nxge_dma_obp_props_only = 1; /* use obp published props */ 36 uint32_t nxge_use_rdc_intr = 1; /* debug to assign rdc intr */ 37 /* 38 * PSARC/2007/453 MSI-X interrupt limit override 39 */ 40 uint32_t nxge_msi_enable = 2; 41 42 /* 43 * Software workaround for a Neptune (PCI-E) 44 * hardware interrupt bug which the hardware 45 * may generate spurious interrupts after the 46 * device interrupt handler was removed. If this flag 47 * is enabled, the driver will reset the 48 * hardware when devices are being detached. 49 */ 50 uint32_t nxge_peu_reset_enable = 0; 51 52 /* 53 * Software workaround for the hardware 54 * checksum bugs that affect packet transmission 55 * and receive: 56 * 57 * Usage of nxge_cksum_offload: 58 * 59 * (1) nxge_cksum_offload = 0 (default): 60 * - transmits packets: 61 * TCP: uses the hardware checksum feature. 62 * UDP: driver will compute the software checksum 63 * based on the partial checksum computed 64 * by the IP layer. 65 * - receives packets 66 * TCP: marks packets checksum flags based on hardware result. 67 * UDP: will not mark checksum flags. 68 * 69 * (2) nxge_cksum_offload = 1: 70 * - transmit packets: 71 * TCP/UDP: uses the hardware checksum feature. 72 * - receives packets 73 * TCP/UDP: marks packet checksum flags based on hardware result. 74 * 75 * (3) nxge_cksum_offload = 2: 76 * - The driver will not register its checksum capability. 77 * Checksum for both TCP and UDP will be computed 78 * by the stack. 79 * - The software LSO is not allowed in this case. 80 * 81 * (4) nxge_cksum_offload > 2: 82 * - Will be treated as it is set to 2 83 * (stack will compute the checksum). 84 * 85 * (5) If the hardware bug is fixed, this workaround 86 * needs to be updated accordingly to reflect 87 * the new hardware revision. 88 */ 89 uint32_t nxge_cksum_offload = 0; 90 91 /* 92 * Globals: tunable parameters (/etc/system or adb) 93 * 94 */ 95 uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT; 96 uint32_t nxge_rbr_spare_size = 0; 97 uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT; 98 uint16_t nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET; 99 uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT; 100 boolean_t nxge_no_msg = B_TRUE; /* control message display */ 101 uint32_t nxge_no_link_notify = 0; /* control DL_NOTIFY */ 102 uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX; 103 uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN; 104 uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN; 105 uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU; 106 nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL; 107 108 /* MAX LSO size */ 109 #define NXGE_LSO_MAXLEN 65535 110 uint32_t nxge_lso_max = NXGE_LSO_MAXLEN; 111 112 113 /* 114 * Add tunable to reduce the amount of time spent in the 115 * ISR doing Rx Processing. 116 */ 117 uint32_t nxge_max_rx_pkts = 1024; 118 119 /* 120 * Tunables to manage the receive buffer blocks. 121 * 122 * nxge_rx_threshold_hi: copy all buffers. 123 * nxge_rx_bcopy_size_type: receive buffer block size type. 124 * nxge_rx_threshold_lo: copy only up to tunable block size type. 125 */ 126 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6; 127 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0; 128 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3; 129 130 uint32_t nxge_use_kmem_alloc = 1; 131 132 rtrace_t npi_rtracebuf; 133 134 /* 135 * The hardware sometimes fails to allow enough time for the link partner 136 * to send an acknowledgement for packets that the hardware sent to it. The 137 * hardware resends the packets earlier than it should be in those instances. 138 * This behavior caused some switches to acknowledge the wrong packets 139 * and it triggered the fatal error. 140 * This software workaround is to set the replay timer to a value 141 * suggested by the hardware team. 142 * 143 * PCI config space replay timer register: 144 * The following replay timeout value is 0xc 145 * for bit 14:18. 146 */ 147 #define PCI_REPLAY_TIMEOUT_CFG_OFFSET 0xb8 148 #define PCI_REPLAY_TIMEOUT_SHIFT 14 149 150 uint32_t nxge_set_replay_timer = 1; 151 uint32_t nxge_replay_timeout = 0xc; 152 153 /* 154 * The transmit serialization sometimes causes 155 * longer sleep before calling the driver transmit 156 * function as it sleeps longer than it should. 157 * The performace group suggests that a time wait tunable 158 * can be used to set the maximum wait time when needed 159 * and the default is set to 1 tick. 160 */ 161 uint32_t nxge_tx_serial_maxsleep = 1; 162 163 #if defined(sun4v) 164 /* 165 * Hypervisor N2/NIU services information. 166 */ 167 /* 168 * The following is the default API supported: 169 * major 1 and minor 1. 170 * 171 * Please update the MAX_NIU_MAJORS, 172 * MAX_NIU_MINORS, and minor number supported 173 * when the newer Hypervior API interfaces 174 * are added. Also, please update nxge_hsvc_register() 175 * if needed. 176 */ 177 static hsvc_info_t niu_hsvc = { 178 HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER, 179 NIU_MINOR_VER, "nxge" 180 }; 181 182 static int nxge_hsvc_register(p_nxge_t); 183 #endif 184 185 /* 186 * Function Prototypes 187 */ 188 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t); 189 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t); 190 static void nxge_unattach(p_nxge_t); 191 static int nxge_quiesce(dev_info_t *); 192 193 #if NXGE_PROPERTY 194 static void nxge_remove_hard_properties(p_nxge_t); 195 #endif 196 197 /* 198 * These two functions are required by nxge_hio.c 199 */ 200 extern int nxge_m_mmac_remove(void *arg, int slot); 201 extern void nxge_grp_cleanup(p_nxge_t nxge); 202 203 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t); 204 205 static nxge_status_t nxge_setup_mutexes(p_nxge_t); 206 static void nxge_destroy_mutexes(p_nxge_t); 207 208 static nxge_status_t nxge_map_regs(p_nxge_t nxgep); 209 static void nxge_unmap_regs(p_nxge_t nxgep); 210 #ifdef NXGE_DEBUG 211 static void nxge_test_map_regs(p_nxge_t nxgep); 212 #endif 213 214 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep); 215 static void nxge_remove_intrs(p_nxge_t nxgep); 216 217 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep); 218 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t); 219 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t); 220 static void nxge_intrs_enable(p_nxge_t nxgep); 221 static void nxge_intrs_disable(p_nxge_t nxgep); 222 223 static void nxge_suspend(p_nxge_t); 224 static nxge_status_t nxge_resume(p_nxge_t); 225 226 static nxge_status_t nxge_setup_dev(p_nxge_t); 227 static void nxge_destroy_dev(p_nxge_t); 228 229 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t); 230 static void nxge_free_mem_pool(p_nxge_t); 231 232 nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t); 233 static void nxge_free_rx_mem_pool(p_nxge_t); 234 235 nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t); 236 static void nxge_free_tx_mem_pool(p_nxge_t); 237 238 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t, 239 struct ddi_dma_attr *, 240 size_t, ddi_device_acc_attr_t *, uint_t, 241 p_nxge_dma_common_t); 242 243 static void nxge_dma_mem_free(p_nxge_dma_common_t); 244 static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t); 245 246 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t, 247 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 248 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 249 250 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t, 251 p_nxge_dma_common_t *, size_t); 252 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 253 254 extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t, 255 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 256 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 257 258 extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t, 259 p_nxge_dma_common_t *, 260 size_t); 261 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 262 263 static int nxge_init_common_dev(p_nxge_t); 264 static void nxge_uninit_common_dev(p_nxge_t); 265 extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *, 266 char *, caddr_t); 267 #if defined(sun4v) 268 extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep); 269 extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm); 270 #endif 271 272 /* 273 * The next declarations are for the GLDv3 interface. 274 */ 275 static int nxge_m_start(void *); 276 static void nxge_m_stop(void *); 277 static int nxge_m_multicst(void *, boolean_t, const uint8_t *); 278 static int nxge_m_promisc(void *, boolean_t); 279 static void nxge_m_ioctl(void *, queue_t *, mblk_t *); 280 nxge_status_t nxge_mac_register(p_nxge_t); 281 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr, 282 int slot, int rdctbl, boolean_t usetbl); 283 void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, 284 boolean_t factory); 285 286 static void nxge_m_getfactaddr(void *, uint_t, uint8_t *); 287 static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *); 288 static int nxge_m_setprop(void *, const char *, mac_prop_id_t, 289 uint_t, const void *); 290 static int nxge_m_getprop(void *, const char *, mac_prop_id_t, 291 uint_t, void *); 292 static void nxge_m_propinfo(void *, const char *, mac_prop_id_t, 293 mac_prop_info_handle_t); 294 static void nxge_priv_propinfo(const char *, mac_prop_info_handle_t); 295 static int nxge_set_priv_prop(nxge_t *, const char *, uint_t, 296 const void *); 297 static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, void *); 298 static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int, 299 mac_ring_info_t *, mac_ring_handle_t); 300 static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t, 301 mac_ring_type_t); 302 static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t, 303 mac_ring_type_t); 304 305 static void nxge_niu_peu_reset(p_nxge_t nxgep); 306 static void nxge_set_pci_replay_timeout(nxge_t *); 307 308 char *nxge_priv_props[] = { 309 "_adv_10gfdx_cap", 310 "_adv_pause_cap", 311 "_function_number", 312 "_fw_version", 313 "_port_mode", 314 "_hot_swap_phy", 315 "_rxdma_intr_time", 316 "_rxdma_intr_pkts", 317 "_class_opt_ipv4_tcp", 318 "_class_opt_ipv4_udp", 319 "_class_opt_ipv4_ah", 320 "_class_opt_ipv4_sctp", 321 "_class_opt_ipv6_tcp", 322 "_class_opt_ipv6_udp", 323 "_class_opt_ipv6_ah", 324 "_class_opt_ipv6_sctp", 325 "_soft_lso_enable", 326 NULL 327 }; 328 329 #define NXGE_NEPTUNE_MAGIC 0x4E584745UL 330 #define MAX_DUMP_SZ 256 331 332 #define NXGE_M_CALLBACK_FLAGS \ 333 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO) 334 335 mac_callbacks_t nxge_m_callbacks = { 336 NXGE_M_CALLBACK_FLAGS, 337 nxge_m_stat, 338 nxge_m_start, 339 nxge_m_stop, 340 nxge_m_promisc, 341 nxge_m_multicst, 342 NULL, 343 NULL, 344 NULL, 345 nxge_m_ioctl, 346 nxge_m_getcapab, 347 NULL, 348 NULL, 349 nxge_m_setprop, 350 nxge_m_getprop, 351 nxge_m_propinfo 352 }; 353 354 void 355 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *); 356 357 /* PSARC/2007/453 MSI-X interrupt limit override. */ 358 #define NXGE_MSIX_REQUEST_10G 8 359 #define NXGE_MSIX_REQUEST_1G 2 360 static int nxge_create_msi_property(p_nxge_t); 361 /* 362 * For applications that care about the 363 * latency, it was requested by PAE and the 364 * customers that the driver has tunables that 365 * allow the user to tune it to a higher number 366 * interrupts to spread the interrupts among 367 * multiple channels. The DDI framework limits 368 * the maximum number of MSI-X resources to allocate 369 * to 8 (ddi_msix_alloc_limit). If more than 8 370 * is set, ddi_msix_alloc_limit must be set accordingly. 371 * The default number of MSI interrupts are set to 372 * 8 for 10G and 2 for 1G link. 373 */ 374 #define NXGE_MSIX_MAX_ALLOWED 32 375 uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G; 376 uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G; 377 378 /* 379 * These global variables control the message 380 * output. 381 */ 382 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG; 383 uint64_t nxge_debug_level; 384 385 /* 386 * This list contains the instance structures for the Neptune 387 * devices present in the system. The lock exists to guarantee 388 * mutually exclusive access to the list. 389 */ 390 void *nxge_list = NULL; 391 void *nxge_hw_list = NULL; 392 nxge_os_mutex_t nxge_common_lock; 393 nxge_os_mutex_t nxgedebuglock; 394 395 extern uint64_t npi_debug_level; 396 397 extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *); 398 extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *); 399 extern nxge_status_t nxge_ldgv_uninit(p_nxge_t); 400 extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t); 401 extern void nxge_fm_init(p_nxge_t, 402 ddi_device_acc_attr_t *, 403 ddi_dma_attr_t *); 404 extern void nxge_fm_fini(p_nxge_t); 405 extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t); 406 407 /* 408 * Count used to maintain the number of buffers being used 409 * by Neptune instances and loaned up to the upper layers. 410 */ 411 uint32_t nxge_mblks_pending = 0; 412 413 /* 414 * Device register access attributes for PIO. 415 */ 416 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = { 417 DDI_DEVICE_ATTR_V1, 418 DDI_STRUCTURE_LE_ACC, 419 DDI_STRICTORDER_ACC, 420 DDI_DEFAULT_ACC 421 }; 422 423 /* 424 * Device descriptor access attributes for DMA. 425 */ 426 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = { 427 DDI_DEVICE_ATTR_V0, 428 DDI_STRUCTURE_LE_ACC, 429 DDI_STRICTORDER_ACC 430 }; 431 432 /* 433 * Device buffer access attributes for DMA. 434 */ 435 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = { 436 DDI_DEVICE_ATTR_V0, 437 DDI_STRUCTURE_BE_ACC, 438 DDI_STRICTORDER_ACC 439 }; 440 441 ddi_dma_attr_t nxge_desc_dma_attr = { 442 DMA_ATTR_V0, /* version number. */ 443 0, /* low address */ 444 0xffffffffffffffff, /* high address */ 445 0xffffffffffffffff, /* address counter max */ 446 #ifndef NIU_PA_WORKAROUND 447 0x100000, /* alignment */ 448 #else 449 0x2000, 450 #endif 451 0xfc00fc, /* dlim_burstsizes */ 452 0x1, /* minimum transfer size */ 453 0xffffffffffffffff, /* maximum transfer size */ 454 0xffffffffffffffff, /* maximum segment size */ 455 1, /* scatter/gather list length */ 456 (unsigned int) 1, /* granularity */ 457 0 /* attribute flags */ 458 }; 459 460 ddi_dma_attr_t nxge_tx_dma_attr = { 461 DMA_ATTR_V0, /* version number. */ 462 0, /* low address */ 463 0xffffffffffffffff, /* high address */ 464 0xffffffffffffffff, /* address counter max */ 465 #if defined(_BIG_ENDIAN) 466 0x2000, /* alignment */ 467 #else 468 0x1000, /* alignment */ 469 #endif 470 0xfc00fc, /* dlim_burstsizes */ 471 0x1, /* minimum transfer size */ 472 0xffffffffffffffff, /* maximum transfer size */ 473 0xffffffffffffffff, /* maximum segment size */ 474 5, /* scatter/gather list length */ 475 (unsigned int) 1, /* granularity */ 476 0 /* attribute flags */ 477 }; 478 479 ddi_dma_attr_t nxge_rx_dma_attr = { 480 DMA_ATTR_V0, /* version number. */ 481 0, /* low address */ 482 0xffffffffffffffff, /* high address */ 483 0xffffffffffffffff, /* address counter max */ 484 0x2000, /* alignment */ 485 0xfc00fc, /* dlim_burstsizes */ 486 0x1, /* minimum transfer size */ 487 0xffffffffffffffff, /* maximum transfer size */ 488 0xffffffffffffffff, /* maximum segment size */ 489 1, /* scatter/gather list length */ 490 (unsigned int) 1, /* granularity */ 491 DDI_DMA_RELAXED_ORDERING /* attribute flags */ 492 }; 493 494 ddi_dma_lim_t nxge_dma_limits = { 495 (uint_t)0, /* dlim_addr_lo */ 496 (uint_t)0xffffffff, /* dlim_addr_hi */ 497 (uint_t)0xffffffff, /* dlim_cntr_max */ 498 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */ 499 0x1, /* dlim_minxfer */ 500 1024 /* dlim_speed */ 501 }; 502 503 dma_method_t nxge_force_dma = DVMA; 504 505 /* 506 * dma chunk sizes. 507 * 508 * Try to allocate the largest possible size 509 * so that fewer number of dma chunks would be managed 510 */ 511 #ifdef NIU_PA_WORKAROUND 512 size_t alloc_sizes [] = {0x2000}; 513 #else 514 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000, 515 0x10000, 0x20000, 0x40000, 0x80000, 516 0x100000, 0x200000, 0x400000, 0x800000, 517 0x1000000, 0x2000000, 0x4000000}; 518 #endif 519 520 /* 521 * Translate "dev_t" to a pointer to the associated "dev_info_t". 522 */ 523 524 extern void nxge_get_environs(nxge_t *); 525 526 static int 527 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 528 { 529 p_nxge_t nxgep = NULL; 530 int instance; 531 int status = DDI_SUCCESS; 532 uint8_t portn; 533 nxge_mmac_t *mmac_info; 534 535 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach")); 536 537 /* 538 * Get the device instance since we'll need to setup 539 * or retrieve a soft state for this instance. 540 */ 541 instance = ddi_get_instance(dip); 542 543 switch (cmd) { 544 case DDI_ATTACH: 545 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH")); 546 break; 547 548 case DDI_RESUME: 549 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME")); 550 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 551 if (nxgep == NULL) { 552 status = DDI_FAILURE; 553 break; 554 } 555 if (nxgep->dip != dip) { 556 status = DDI_FAILURE; 557 break; 558 } 559 if (nxgep->suspended == DDI_PM_SUSPEND) { 560 status = ddi_dev_is_needed(nxgep->dip, 0, 1); 561 } else { 562 status = nxge_resume(nxgep); 563 } 564 goto nxge_attach_exit; 565 566 case DDI_PM_RESUME: 567 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME")); 568 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 569 if (nxgep == NULL) { 570 status = DDI_FAILURE; 571 break; 572 } 573 if (nxgep->dip != dip) { 574 status = DDI_FAILURE; 575 break; 576 } 577 status = nxge_resume(nxgep); 578 goto nxge_attach_exit; 579 580 default: 581 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown")); 582 status = DDI_FAILURE; 583 goto nxge_attach_exit; 584 } 585 586 587 if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) { 588 status = DDI_FAILURE; 589 goto nxge_attach_exit; 590 } 591 592 nxgep = ddi_get_soft_state(nxge_list, instance); 593 if (nxgep == NULL) { 594 status = NXGE_ERROR; 595 goto nxge_attach_fail2; 596 } 597 598 nxgep->nxge_magic = NXGE_MAGIC; 599 600 nxgep->drv_state = 0; 601 nxgep->dip = dip; 602 nxgep->instance = instance; 603 nxgep->p_dip = ddi_get_parent(dip); 604 nxgep->nxge_debug_level = nxge_debug_level; 605 npi_debug_level = nxge_debug_level; 606 607 /* Are we a guest running in a Hybrid I/O environment? */ 608 nxge_get_environs(nxgep); 609 610 status = nxge_map_regs(nxgep); 611 612 if (status != NXGE_OK) { 613 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed")); 614 goto nxge_attach_fail3; 615 } 616 617 nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_rx_dma_attr); 618 619 /* Create & initialize the per-Neptune data structure */ 620 /* (even if we're a guest). */ 621 status = nxge_init_common_dev(nxgep); 622 if (status != NXGE_OK) { 623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 624 "nxge_init_common_dev failed")); 625 goto nxge_attach_fail4; 626 } 627 628 /* 629 * Software workaround: set the replay timer. 630 */ 631 if (nxgep->niu_type != N2_NIU) { 632 nxge_set_pci_replay_timeout(nxgep); 633 } 634 635 #if defined(sun4v) 636 /* This is required by nxge_hio_init(), which follows. */ 637 if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS) 638 goto nxge_attach_fail4; 639 #endif 640 641 if ((status = nxge_hio_init(nxgep)) != NXGE_OK) { 642 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 643 "nxge_hio_init failed")); 644 goto nxge_attach_fail4; 645 } 646 647 if (nxgep->niu_type == NEPTUNE_2_10GF) { 648 if (nxgep->function_num > 1) { 649 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported" 650 " function %d. Only functions 0 and 1 are " 651 "supported for this card.", nxgep->function_num)); 652 status = NXGE_ERROR; 653 goto nxge_attach_fail4; 654 } 655 } 656 657 if (isLDOMguest(nxgep)) { 658 /* 659 * Use the function number here. 660 */ 661 nxgep->mac.portnum = nxgep->function_num; 662 nxgep->mac.porttype = PORT_TYPE_LOGICAL; 663 664 /* XXX We'll set the MAC address counts to 1 for now. */ 665 mmac_info = &nxgep->nxge_mmac_info; 666 mmac_info->num_mmac = 1; 667 mmac_info->naddrfree = 1; 668 } else { 669 portn = NXGE_GET_PORT_NUM(nxgep->function_num); 670 nxgep->mac.portnum = portn; 671 if ((portn == 0) || (portn == 1)) 672 nxgep->mac.porttype = PORT_TYPE_XMAC; 673 else 674 nxgep->mac.porttype = PORT_TYPE_BMAC; 675 /* 676 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC) 677 * internally, the rest 2 ports use BMAC (1G "Big" MAC). 678 * The two types of MACs have different characterizations. 679 */ 680 mmac_info = &nxgep->nxge_mmac_info; 681 if (nxgep->function_num < 2) { 682 mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY; 683 mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY; 684 } else { 685 mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY; 686 mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY; 687 } 688 } 689 /* 690 * Setup the Ndd parameters for the this instance. 691 */ 692 nxge_init_param(nxgep); 693 694 /* 695 * Setup Register Tracing Buffer. 696 */ 697 npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf); 698 699 /* init stats ptr */ 700 nxge_init_statsp(nxgep); 701 702 /* 703 * Copy the vpd info from eeprom to a local data 704 * structure, and then check its validity. 705 */ 706 if (!isLDOMguest(nxgep)) { 707 int *regp; 708 uint_t reglen; 709 int rv; 710 711 nxge_vpd_info_get(nxgep); 712 713 /* Find the NIU config handle. */ 714 rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 715 ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS, 716 "reg", ®p, ®len); 717 718 if (rv != DDI_PROP_SUCCESS) { 719 goto nxge_attach_fail5; 720 } 721 /* 722 * The address_hi, that is the first int, in the reg 723 * property consists of config handle, but need to remove 724 * the bits 28-31 which are OBP specific info. 725 */ 726 nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF; 727 ddi_prop_free(regp); 728 } 729 730 /* 731 * Set the defaults for the MTU size. 732 */ 733 nxge_hw_id_init(nxgep); 734 735 if (isLDOMguest(nxgep)) { 736 uchar_t *prop_val; 737 uint_t prop_len; 738 uint32_t max_frame_size; 739 740 extern void nxge_get_logical_props(p_nxge_t); 741 742 nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR; 743 nxgep->mac.portmode = PORT_LOGICAL; 744 (void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip, 745 "phy-type", "virtual transceiver"); 746 747 nxgep->nports = 1; 748 nxgep->board_ver = 0; /* XXX What? */ 749 750 /* 751 * local-mac-address property gives us info on which 752 * specific MAC address the Hybrid resource is associated 753 * with. 754 */ 755 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0, 756 "local-mac-address", &prop_val, 757 &prop_len) != DDI_PROP_SUCCESS) { 758 goto nxge_attach_fail5; 759 } 760 if (prop_len != ETHERADDRL) { 761 ddi_prop_free(prop_val); 762 goto nxge_attach_fail5; 763 } 764 ether_copy(prop_val, nxgep->hio_mac_addr); 765 ddi_prop_free(prop_val); 766 nxge_get_logical_props(nxgep); 767 768 /* 769 * Enable Jumbo property based on the "max-frame-size" 770 * property value. 771 */ 772 max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY, 773 nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 774 "max-frame-size", NXGE_MTU_DEFAULT_MAX); 775 if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) && 776 (max_frame_size <= TX_JUMBO_MTU)) { 777 nxgep->mac.is_jumbo = B_TRUE; 778 nxgep->mac.maxframesize = (uint16_t)max_frame_size; 779 nxgep->mac.default_mtu = nxgep->mac.maxframesize - 780 NXGE_EHEADER_VLAN_CRC; 781 } 782 } else { 783 status = nxge_xcvr_find(nxgep); 784 785 if (status != NXGE_OK) { 786 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: " 787 " Couldn't determine card type" 788 " .... exit ")); 789 goto nxge_attach_fail5; 790 } 791 792 status = nxge_get_config_properties(nxgep); 793 794 if (status != NXGE_OK) { 795 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 796 "get_hw create failed")); 797 goto nxge_attach_fail; 798 } 799 } 800 801 /* 802 * Setup the Kstats for the driver. 803 */ 804 nxge_setup_kstats(nxgep); 805 806 if (!isLDOMguest(nxgep)) 807 nxge_setup_param(nxgep); 808 809 status = nxge_setup_system_dma_pages(nxgep); 810 if (status != NXGE_OK) { 811 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed")); 812 goto nxge_attach_fail; 813 } 814 815 816 if (!isLDOMguest(nxgep)) 817 nxge_hw_init_niu_common(nxgep); 818 819 status = nxge_setup_mutexes(nxgep); 820 if (status != NXGE_OK) { 821 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed")); 822 goto nxge_attach_fail; 823 } 824 825 #if defined(sun4v) 826 if (isLDOMguest(nxgep)) { 827 /* Find our VR & channel sets. */ 828 status = nxge_hio_vr_add(nxgep); 829 if (status != DDI_SUCCESS) { 830 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 831 "nxge_hio_vr_add failed")); 832 (void) hsvc_unregister(&nxgep->niu_hsvc); 833 nxgep->niu_hsvc_available = B_FALSE; 834 goto nxge_attach_fail; 835 } 836 goto nxge_attach_exit; 837 } 838 #endif 839 840 status = nxge_setup_dev(nxgep); 841 if (status != DDI_SUCCESS) { 842 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed")); 843 goto nxge_attach_fail; 844 } 845 846 status = nxge_add_intrs(nxgep); 847 if (status != DDI_SUCCESS) { 848 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed")); 849 goto nxge_attach_fail; 850 } 851 852 /* If a guest, register with vio_net instead. */ 853 if ((status = nxge_mac_register(nxgep)) != NXGE_OK) { 854 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 855 "unable to register to mac layer (%d)", status)); 856 goto nxge_attach_fail; 857 } 858 859 mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN); 860 861 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 862 "registered to mac (instance %d)", instance)); 863 864 /* nxge_link_monitor calls xcvr.check_link recursively */ 865 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 866 867 goto nxge_attach_exit; 868 869 nxge_attach_fail: 870 nxge_unattach(nxgep); 871 goto nxge_attach_fail1; 872 873 nxge_attach_fail5: 874 /* 875 * Tear down the ndd parameters setup. 876 */ 877 nxge_destroy_param(nxgep); 878 879 /* 880 * Tear down the kstat setup. 881 */ 882 nxge_destroy_kstats(nxgep); 883 884 nxge_attach_fail4: 885 if (nxgep->nxge_hw_p) { 886 nxge_uninit_common_dev(nxgep); 887 nxgep->nxge_hw_p = NULL; 888 } 889 890 nxge_attach_fail3: 891 /* 892 * Unmap the register setup. 893 */ 894 nxge_unmap_regs(nxgep); 895 896 nxge_fm_fini(nxgep); 897 898 nxge_attach_fail2: 899 ddi_soft_state_free(nxge_list, nxgep->instance); 900 901 nxge_attach_fail1: 902 if (status != NXGE_OK) 903 status = (NXGE_ERROR | NXGE_DDI_FAILED); 904 nxgep = NULL; 905 906 nxge_attach_exit: 907 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x", 908 status)); 909 910 return (status); 911 } 912 913 static int 914 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 915 { 916 int status = DDI_SUCCESS; 917 int instance; 918 p_nxge_t nxgep = NULL; 919 920 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach")); 921 instance = ddi_get_instance(dip); 922 nxgep = ddi_get_soft_state(nxge_list, instance); 923 if (nxgep == NULL) { 924 status = DDI_FAILURE; 925 goto nxge_detach_exit; 926 } 927 928 switch (cmd) { 929 case DDI_DETACH: 930 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH")); 931 break; 932 933 case DDI_PM_SUSPEND: 934 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND")); 935 nxgep->suspended = DDI_PM_SUSPEND; 936 nxge_suspend(nxgep); 937 break; 938 939 case DDI_SUSPEND: 940 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND")); 941 if (nxgep->suspended != DDI_PM_SUSPEND) { 942 nxgep->suspended = DDI_SUSPEND; 943 nxge_suspend(nxgep); 944 } 945 break; 946 947 default: 948 status = DDI_FAILURE; 949 } 950 951 if (cmd != DDI_DETACH) 952 goto nxge_detach_exit; 953 954 /* 955 * Stop the xcvr polling. 956 */ 957 nxgep->suspended = cmd; 958 959 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 960 961 if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) { 962 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 963 "<== nxge_detach status = 0x%08X", status)); 964 return (DDI_FAILURE); 965 } 966 967 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 968 "<== nxge_detach (mac_unregister) status = 0x%08X", status)); 969 970 nxge_unattach(nxgep); 971 nxgep = NULL; 972 973 nxge_detach_exit: 974 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X", 975 status)); 976 977 return (status); 978 } 979 980 static void 981 nxge_unattach(p_nxge_t nxgep) 982 { 983 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach")); 984 985 if (nxgep == NULL || nxgep->dev_regs == NULL) { 986 return; 987 } 988 989 nxgep->nxge_magic = 0; 990 991 if (nxgep->nxge_timerid) { 992 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 993 nxgep->nxge_timerid = 0; 994 } 995 996 /* 997 * If this flag is set, it will affect the Neptune 998 * only. 999 */ 1000 if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) { 1001 nxge_niu_peu_reset(nxgep); 1002 } 1003 1004 #if defined(sun4v) 1005 if (isLDOMguest(nxgep)) { 1006 (void) nxge_hio_vr_release(nxgep); 1007 } 1008 #endif 1009 1010 if (nxgep->nxge_hw_p) { 1011 nxge_uninit_common_dev(nxgep); 1012 nxgep->nxge_hw_p = NULL; 1013 } 1014 1015 #if defined(sun4v) 1016 if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) { 1017 (void) hsvc_unregister(&nxgep->niu_hsvc); 1018 nxgep->niu_hsvc_available = B_FALSE; 1019 } 1020 #endif 1021 /* 1022 * Stop any further interrupts. 1023 */ 1024 nxge_remove_intrs(nxgep); 1025 1026 /* 1027 * Stop the device and free resources. 1028 */ 1029 if (!isLDOMguest(nxgep)) { 1030 nxge_destroy_dev(nxgep); 1031 } 1032 1033 /* 1034 * Tear down the ndd parameters setup. 1035 */ 1036 nxge_destroy_param(nxgep); 1037 1038 /* 1039 * Tear down the kstat setup. 1040 */ 1041 nxge_destroy_kstats(nxgep); 1042 1043 /* 1044 * Free any memory allocated for PHY properties 1045 */ 1046 if (nxgep->phy_prop.cnt > 0) { 1047 KMEM_FREE(nxgep->phy_prop.arr, 1048 sizeof (nxge_phy_mdio_val_t) * nxgep->phy_prop.cnt); 1049 nxgep->phy_prop.cnt = 0; 1050 } 1051 1052 /* 1053 * Destroy all mutexes. 1054 */ 1055 nxge_destroy_mutexes(nxgep); 1056 1057 /* 1058 * Remove the list of ndd parameters which 1059 * were setup during attach. 1060 */ 1061 if (nxgep->dip) { 1062 NXGE_DEBUG_MSG((nxgep, OBP_CTL, 1063 " nxge_unattach: remove all properties")); 1064 1065 (void) ddi_prop_remove_all(nxgep->dip); 1066 } 1067 1068 #if NXGE_PROPERTY 1069 nxge_remove_hard_properties(nxgep); 1070 #endif 1071 1072 /* 1073 * Unmap the register setup. 1074 */ 1075 nxge_unmap_regs(nxgep); 1076 1077 nxge_fm_fini(nxgep); 1078 1079 ddi_soft_state_free(nxge_list, nxgep->instance); 1080 1081 NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach")); 1082 } 1083 1084 #if defined(sun4v) 1085 int 1086 nxge_hsvc_register(nxge_t *nxgep) 1087 { 1088 nxge_status_t status; 1089 int i, j; 1090 1091 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hsvc_register")); 1092 if (nxgep->niu_type != N2_NIU) { 1093 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hsvc_register")); 1094 return (DDI_SUCCESS); 1095 } 1096 1097 /* 1098 * Currently, the NIU Hypervisor API supports two major versions: 1099 * version 1 and 2. 1100 * If Hypervisor introduces a higher major or minor version, 1101 * please update NIU_MAJOR_HI and NIU_MINOR_HI accordingly. 1102 */ 1103 nxgep->niu_hsvc_available = B_FALSE; 1104 bcopy(&niu_hsvc, &nxgep->niu_hsvc, 1105 sizeof (hsvc_info_t)); 1106 1107 for (i = NIU_MAJOR_HI; i > 0; i--) { 1108 nxgep->niu_hsvc.hsvc_major = i; 1109 for (j = NIU_MINOR_HI; j >= 0; j--) { 1110 nxgep->niu_hsvc.hsvc_minor = j; 1111 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1112 "nxge_hsvc_register: %s: negotiating " 1113 "hypervisor services revision %d " 1114 "group: 0x%lx major: 0x%lx " 1115 "minor: 0x%lx", 1116 nxgep->niu_hsvc.hsvc_modname, 1117 nxgep->niu_hsvc.hsvc_rev, 1118 nxgep->niu_hsvc.hsvc_group, 1119 nxgep->niu_hsvc.hsvc_major, 1120 nxgep->niu_hsvc.hsvc_minor, 1121 nxgep->niu_min_ver)); 1122 1123 if ((status = hsvc_register(&nxgep->niu_hsvc, 1124 &nxgep->niu_min_ver)) == 0) { 1125 /* Use the supported minor */ 1126 nxgep->niu_hsvc.hsvc_minor = nxgep->niu_min_ver; 1127 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1128 "nxge_hsvc_register: %s: negotiated " 1129 "hypervisor services revision %d " 1130 "group: 0x%lx major: 0x%lx " 1131 "minor: 0x%lx (niu_min_ver 0x%lx)", 1132 nxgep->niu_hsvc.hsvc_modname, 1133 nxgep->niu_hsvc.hsvc_rev, 1134 nxgep->niu_hsvc.hsvc_group, 1135 nxgep->niu_hsvc.hsvc_major, 1136 nxgep->niu_hsvc.hsvc_minor, 1137 nxgep->niu_min_ver)); 1138 1139 nxgep->niu_hsvc_available = B_TRUE; 1140 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1141 "<== nxge_hsvc_register: " 1142 "NIU Hypervisor service enabled")); 1143 return (DDI_SUCCESS); 1144 } 1145 1146 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1147 "nxge_hsvc_register: %s: negotiated failed - " 1148 "try lower major number " 1149 "hypervisor services revision %d " 1150 "group: 0x%lx major: 0x%lx minor: 0x%lx " 1151 "errno: %d", 1152 nxgep->niu_hsvc.hsvc_modname, 1153 nxgep->niu_hsvc.hsvc_rev, 1154 nxgep->niu_hsvc.hsvc_group, 1155 nxgep->niu_hsvc.hsvc_major, 1156 nxgep->niu_hsvc.hsvc_minor, status)); 1157 } 1158 } 1159 1160 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1161 "nxge_hsvc_register: %s: cannot negotiate " 1162 "hypervisor services revision %d group: 0x%lx " 1163 "major: 0x%lx minor: 0x%lx errno: %d", 1164 niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev, 1165 niu_hsvc.hsvc_group, niu_hsvc.hsvc_major, 1166 niu_hsvc.hsvc_minor, status)); 1167 1168 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1169 "<== nxge_hsvc_register: Register to NIU Hypervisor failed")); 1170 1171 return (DDI_FAILURE); 1172 } 1173 #endif 1174 1175 static char n2_siu_name[] = "niu"; 1176 1177 static nxge_status_t 1178 nxge_map_regs(p_nxge_t nxgep) 1179 { 1180 int ddi_status = DDI_SUCCESS; 1181 p_dev_regs_t dev_regs; 1182 char buf[MAXPATHLEN + 1]; 1183 char *devname; 1184 #ifdef NXGE_DEBUG 1185 char *sysname; 1186 #endif 1187 off_t regsize; 1188 nxge_status_t status = NXGE_OK; 1189 #if !defined(_BIG_ENDIAN) 1190 off_t pci_offset; 1191 uint16_t pcie_devctl; 1192 #endif 1193 1194 if (isLDOMguest(nxgep)) { 1195 return (nxge_guest_regs_map(nxgep)); 1196 } 1197 1198 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs")); 1199 nxgep->dev_regs = NULL; 1200 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP); 1201 dev_regs->nxge_regh = NULL; 1202 dev_regs->nxge_pciregh = NULL; 1203 dev_regs->nxge_msix_regh = NULL; 1204 dev_regs->nxge_vir_regh = NULL; 1205 dev_regs->nxge_vir2_regh = NULL; 1206 nxgep->niu_type = NIU_TYPE_NONE; 1207 1208 devname = ddi_pathname(nxgep->dip, buf); 1209 ASSERT(strlen(devname) > 0); 1210 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1211 "nxge_map_regs: pathname devname %s", devname)); 1212 1213 /* 1214 * The driver is running on a N2-NIU system if devname is something 1215 * like "/niu@80/network@0" 1216 */ 1217 if (strstr(devname, n2_siu_name)) { 1218 /* N2/NIU */ 1219 nxgep->niu_type = N2_NIU; 1220 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1221 "nxge_map_regs: N2/NIU devname %s", devname)); 1222 /* 1223 * Get function number: 1224 * - N2/NIU: "/niu@80/network@0" and "/niu@80/network@1" 1225 */ 1226 nxgep->function_num = 1227 (devname[strlen(devname) -1] == '1' ? 1 : 0); 1228 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1229 "nxge_map_regs: N2/NIU function number %d", 1230 nxgep->function_num)); 1231 } else { 1232 int *prop_val; 1233 uint_t prop_len; 1234 uint8_t func_num; 1235 1236 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 1237 0, "reg", 1238 &prop_val, &prop_len) != DDI_PROP_SUCCESS) { 1239 NXGE_DEBUG_MSG((nxgep, VPD_CTL, 1240 "Reg property not found")); 1241 ddi_status = DDI_FAILURE; 1242 goto nxge_map_regs_fail0; 1243 1244 } else { 1245 func_num = (prop_val[0] >> 8) & 0x7; 1246 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1247 "Reg property found: fun # %d", 1248 func_num)); 1249 nxgep->function_num = func_num; 1250 if (isLDOMguest(nxgep)) { 1251 nxgep->function_num /= 2; 1252 return (NXGE_OK); 1253 } 1254 ddi_prop_free(prop_val); 1255 } 1256 } 1257 1258 switch (nxgep->niu_type) { 1259 default: 1260 (void) ddi_dev_regsize(nxgep->dip, 0, ®size); 1261 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1262 "nxge_map_regs: pci config size 0x%x", regsize)); 1263 1264 ddi_status = ddi_regs_map_setup(nxgep->dip, 0, 1265 (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0, 1266 &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh); 1267 if (ddi_status != DDI_SUCCESS) { 1268 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1269 "ddi_map_regs, nxge bus config regs failed")); 1270 goto nxge_map_regs_fail0; 1271 } 1272 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1273 "nxge_map_reg: PCI config addr 0x%0llx " 1274 " handle 0x%0llx", dev_regs->nxge_pciregp, 1275 dev_regs->nxge_pciregh)); 1276 /* 1277 * IMP IMP 1278 * workaround for bit swapping bug in HW 1279 * which ends up in no-snoop = yes 1280 * resulting, in DMA not synched properly 1281 */ 1282 #if !defined(_BIG_ENDIAN) 1283 /* workarounds for x86 systems */ 1284 pci_offset = 0x80 + PCIE_DEVCTL; 1285 pcie_devctl = pci_config_get16(dev_regs->nxge_pciregh, 1286 pci_offset); 1287 pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP; 1288 pcie_devctl |= PCIE_DEVCTL_RO_EN; 1289 pci_config_put16(dev_regs->nxge_pciregh, pci_offset, 1290 pcie_devctl); 1291 #endif 1292 1293 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 1294 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1295 "nxge_map_regs: pio size 0x%x", regsize)); 1296 /* set up the device mapped register */ 1297 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1298 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1299 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1300 if (ddi_status != DDI_SUCCESS) { 1301 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1302 "ddi_map_regs for Neptune global reg failed")); 1303 goto nxge_map_regs_fail1; 1304 } 1305 1306 /* set up the msi/msi-x mapped register */ 1307 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1308 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1309 "nxge_map_regs: msix size 0x%x", regsize)); 1310 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1311 (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0, 1312 &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh); 1313 if (ddi_status != DDI_SUCCESS) { 1314 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1315 "ddi_map_regs for msi reg failed")); 1316 goto nxge_map_regs_fail2; 1317 } 1318 1319 /* set up the vio region mapped register */ 1320 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1321 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1322 "nxge_map_regs: vio size 0x%x", regsize)); 1323 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1324 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1325 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1326 1327 if (ddi_status != DDI_SUCCESS) { 1328 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1329 "ddi_map_regs for nxge vio reg failed")); 1330 goto nxge_map_regs_fail3; 1331 } 1332 nxgep->dev_regs = dev_regs; 1333 1334 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh); 1335 NPI_PCI_ADD_HANDLE_SET(nxgep, 1336 (npi_reg_ptr_t)dev_regs->nxge_pciregp); 1337 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh); 1338 NPI_MSI_ADD_HANDLE_SET(nxgep, 1339 (npi_reg_ptr_t)dev_regs->nxge_msix_regp); 1340 1341 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1342 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1343 1344 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1345 NPI_REG_ADD_HANDLE_SET(nxgep, 1346 (npi_reg_ptr_t)dev_regs->nxge_regp); 1347 1348 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1349 NPI_VREG_ADD_HANDLE_SET(nxgep, 1350 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1351 1352 break; 1353 1354 case N2_NIU: 1355 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU")); 1356 /* 1357 * Set up the device mapped register (FWARC 2006/556) 1358 * (changed back to 1: reg starts at 1!) 1359 */ 1360 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 1361 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1362 "nxge_map_regs: dev size 0x%x", regsize)); 1363 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1364 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1365 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1366 1367 if (ddi_status != DDI_SUCCESS) { 1368 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1369 "ddi_map_regs for N2/NIU, global reg failed ")); 1370 goto nxge_map_regs_fail1; 1371 } 1372 1373 /* set up the first vio region mapped register */ 1374 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1375 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1376 "nxge_map_regs: vio (1) size 0x%x", regsize)); 1377 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1378 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1379 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1380 1381 if (ddi_status != DDI_SUCCESS) { 1382 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1383 "ddi_map_regs for nxge vio reg failed")); 1384 goto nxge_map_regs_fail2; 1385 } 1386 /* set up the second vio region mapped register */ 1387 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1388 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1389 "nxge_map_regs: vio (3) size 0x%x", regsize)); 1390 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1391 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0, 1392 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh); 1393 1394 if (ddi_status != DDI_SUCCESS) { 1395 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1396 "ddi_map_regs for nxge vio2 reg failed")); 1397 goto nxge_map_regs_fail3; 1398 } 1399 nxgep->dev_regs = dev_regs; 1400 1401 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1402 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1403 1404 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1405 NPI_REG_ADD_HANDLE_SET(nxgep, 1406 (npi_reg_ptr_t)dev_regs->nxge_regp); 1407 1408 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1409 NPI_VREG_ADD_HANDLE_SET(nxgep, 1410 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1411 1412 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh); 1413 NPI_V2REG_ADD_HANDLE_SET(nxgep, 1414 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp); 1415 1416 break; 1417 } 1418 1419 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx " 1420 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh)); 1421 1422 goto nxge_map_regs_exit; 1423 nxge_map_regs_fail3: 1424 if (dev_regs->nxge_msix_regh) { 1425 ddi_regs_map_free(&dev_regs->nxge_msix_regh); 1426 } 1427 if (dev_regs->nxge_vir_regh) { 1428 ddi_regs_map_free(&dev_regs->nxge_regh); 1429 } 1430 nxge_map_regs_fail2: 1431 if (dev_regs->nxge_regh) { 1432 ddi_regs_map_free(&dev_regs->nxge_regh); 1433 } 1434 nxge_map_regs_fail1: 1435 if (dev_regs->nxge_pciregh) { 1436 ddi_regs_map_free(&dev_regs->nxge_pciregh); 1437 } 1438 nxge_map_regs_fail0: 1439 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory")); 1440 kmem_free(dev_regs, sizeof (dev_regs_t)); 1441 1442 nxge_map_regs_exit: 1443 if (ddi_status != DDI_SUCCESS) 1444 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1445 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs")); 1446 return (status); 1447 } 1448 1449 static void 1450 nxge_unmap_regs(p_nxge_t nxgep) 1451 { 1452 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs")); 1453 1454 if (isLDOMguest(nxgep)) { 1455 nxge_guest_regs_map_free(nxgep); 1456 return; 1457 } 1458 1459 if (nxgep->dev_regs) { 1460 if (nxgep->dev_regs->nxge_pciregh) { 1461 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1462 "==> nxge_unmap_regs: bus")); 1463 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh); 1464 nxgep->dev_regs->nxge_pciregh = NULL; 1465 } 1466 if (nxgep->dev_regs->nxge_regh) { 1467 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1468 "==> nxge_unmap_regs: device registers")); 1469 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1470 nxgep->dev_regs->nxge_regh = NULL; 1471 } 1472 if (nxgep->dev_regs->nxge_msix_regh) { 1473 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1474 "==> nxge_unmap_regs: device interrupts")); 1475 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); 1476 nxgep->dev_regs->nxge_msix_regh = NULL; 1477 } 1478 if (nxgep->dev_regs->nxge_vir_regh) { 1479 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1480 "==> nxge_unmap_regs: vio region")); 1481 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); 1482 nxgep->dev_regs->nxge_vir_regh = NULL; 1483 } 1484 if (nxgep->dev_regs->nxge_vir2_regh) { 1485 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1486 "==> nxge_unmap_regs: vio2 region")); 1487 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); 1488 nxgep->dev_regs->nxge_vir2_regh = NULL; 1489 } 1490 1491 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); 1492 nxgep->dev_regs = NULL; 1493 } 1494 1495 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); 1496 } 1497 1498 static nxge_status_t 1499 nxge_setup_mutexes(p_nxge_t nxgep) 1500 { 1501 int ddi_status = DDI_SUCCESS; 1502 nxge_status_t status = NXGE_OK; 1503 nxge_classify_t *classify_ptr; 1504 int partition; 1505 1506 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); 1507 1508 /* 1509 * Get the interrupt cookie so the mutexes can be 1510 * Initialized. 1511 */ 1512 if (isLDOMguest(nxgep)) { 1513 nxgep->interrupt_cookie = 0; 1514 } else { 1515 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, 1516 &nxgep->interrupt_cookie); 1517 1518 if (ddi_status != DDI_SUCCESS) { 1519 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1520 "<== nxge_setup_mutexes: failed 0x%x", 1521 ddi_status)); 1522 goto nxge_setup_mutexes_exit; 1523 } 1524 } 1525 1526 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL); 1527 MUTEX_INIT(&nxgep->poll_lock, NULL, 1528 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1529 1530 /* 1531 * Initialize mutexes for this device. 1532 */ 1533 MUTEX_INIT(nxgep->genlock, NULL, 1534 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1535 MUTEX_INIT(&nxgep->ouraddr_lock, NULL, 1536 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1537 MUTEX_INIT(&nxgep->mif_lock, NULL, 1538 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1539 MUTEX_INIT(&nxgep->group_lock, NULL, 1540 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1541 RW_INIT(&nxgep->filter_lock, NULL, 1542 RW_DRIVER, (void *)nxgep->interrupt_cookie); 1543 1544 classify_ptr = &nxgep->classifier; 1545 /* 1546 * FFLP Mutexes are never used in interrupt context 1547 * as fflp operation can take very long time to 1548 * complete and hence not suitable to invoke from interrupt 1549 * handlers. 1550 */ 1551 MUTEX_INIT(&classify_ptr->tcam_lock, NULL, 1552 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1553 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1554 MUTEX_INIT(&classify_ptr->fcram_lock, NULL, 1555 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1556 for (partition = 0; partition < MAX_PARTITION; partition++) { 1557 MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL, 1558 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1559 } 1560 } 1561 1562 nxge_setup_mutexes_exit: 1563 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1564 "<== nxge_setup_mutexes status = %x", status)); 1565 1566 if (ddi_status != DDI_SUCCESS) 1567 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1568 1569 return (status); 1570 } 1571 1572 static void 1573 nxge_destroy_mutexes(p_nxge_t nxgep) 1574 { 1575 int partition; 1576 nxge_classify_t *classify_ptr; 1577 1578 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes")); 1579 RW_DESTROY(&nxgep->filter_lock); 1580 MUTEX_DESTROY(&nxgep->group_lock); 1581 MUTEX_DESTROY(&nxgep->mif_lock); 1582 MUTEX_DESTROY(&nxgep->ouraddr_lock); 1583 MUTEX_DESTROY(nxgep->genlock); 1584 1585 classify_ptr = &nxgep->classifier; 1586 MUTEX_DESTROY(&classify_ptr->tcam_lock); 1587 1588 /* Destroy all polling resources. */ 1589 MUTEX_DESTROY(&nxgep->poll_lock); 1590 cv_destroy(&nxgep->poll_cv); 1591 1592 /* free data structures, based on HW type */ 1593 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1594 MUTEX_DESTROY(&classify_ptr->fcram_lock); 1595 for (partition = 0; partition < MAX_PARTITION; partition++) { 1596 MUTEX_DESTROY(&classify_ptr->hash_lock[partition]); 1597 } 1598 } 1599 1600 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes")); 1601 } 1602 1603 nxge_status_t 1604 nxge_init(p_nxge_t nxgep) 1605 { 1606 nxge_status_t status = NXGE_OK; 1607 1608 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init")); 1609 1610 if (nxgep->drv_state & STATE_HW_INITIALIZED) { 1611 return (status); 1612 } 1613 1614 /* 1615 * Allocate system memory for the receive/transmit buffer blocks 1616 * and receive/transmit descriptor rings. 1617 */ 1618 status = nxge_alloc_mem_pool(nxgep); 1619 if (status != NXGE_OK) { 1620 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n")); 1621 goto nxge_init_fail1; 1622 } 1623 1624 if (!isLDOMguest(nxgep)) { 1625 /* 1626 * Initialize and enable the TXC registers. 1627 * (Globally enable the Tx controller, 1628 * enable the port, configure the dma channel bitmap, 1629 * configure the max burst size). 1630 */ 1631 status = nxge_txc_init(nxgep); 1632 if (status != NXGE_OK) { 1633 NXGE_ERROR_MSG((nxgep, 1634 NXGE_ERR_CTL, "init txc failed\n")); 1635 goto nxge_init_fail2; 1636 } 1637 } 1638 1639 /* 1640 * Initialize and enable TXDMA channels. 1641 */ 1642 status = nxge_init_txdma_channels(nxgep); 1643 if (status != NXGE_OK) { 1644 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n")); 1645 goto nxge_init_fail3; 1646 } 1647 1648 /* 1649 * Initialize and enable RXDMA channels. 1650 */ 1651 status = nxge_init_rxdma_channels(nxgep); 1652 if (status != NXGE_OK) { 1653 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n")); 1654 goto nxge_init_fail4; 1655 } 1656 1657 /* 1658 * The guest domain is now done. 1659 */ 1660 if (isLDOMguest(nxgep)) { 1661 nxgep->drv_state |= STATE_HW_INITIALIZED; 1662 goto nxge_init_exit; 1663 } 1664 1665 /* 1666 * Initialize TCAM and FCRAM (Neptune). 1667 */ 1668 status = nxge_classify_init(nxgep); 1669 if (status != NXGE_OK) { 1670 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n")); 1671 goto nxge_init_fail5; 1672 } 1673 1674 /* 1675 * Initialize ZCP 1676 */ 1677 status = nxge_zcp_init(nxgep); 1678 if (status != NXGE_OK) { 1679 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n")); 1680 goto nxge_init_fail5; 1681 } 1682 1683 /* 1684 * Initialize IPP. 1685 */ 1686 status = nxge_ipp_init(nxgep); 1687 if (status != NXGE_OK) { 1688 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n")); 1689 goto nxge_init_fail5; 1690 } 1691 1692 /* 1693 * Initialize the MAC block. 1694 */ 1695 status = nxge_mac_init(nxgep); 1696 if (status != NXGE_OK) { 1697 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n")); 1698 goto nxge_init_fail5; 1699 } 1700 1701 /* 1702 * Enable the interrrupts for DDI. 1703 */ 1704 nxge_intrs_enable(nxgep); 1705 1706 nxgep->drv_state |= STATE_HW_INITIALIZED; 1707 1708 goto nxge_init_exit; 1709 1710 nxge_init_fail5: 1711 nxge_uninit_rxdma_channels(nxgep); 1712 nxge_init_fail4: 1713 nxge_uninit_txdma_channels(nxgep); 1714 nxge_init_fail3: 1715 if (!isLDOMguest(nxgep)) { 1716 (void) nxge_txc_uninit(nxgep); 1717 } 1718 nxge_init_fail2: 1719 nxge_free_mem_pool(nxgep); 1720 nxge_init_fail1: 1721 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1722 "<== nxge_init status (failed) = 0x%08x", status)); 1723 return (status); 1724 1725 nxge_init_exit: 1726 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x", 1727 status)); 1728 return (status); 1729 } 1730 1731 1732 timeout_id_t 1733 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec) 1734 { 1735 if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) { 1736 return (timeout(func, (caddr_t)nxgep, 1737 drv_usectohz(1000 * msec))); 1738 } 1739 return (NULL); 1740 } 1741 1742 /*ARGSUSED*/ 1743 void 1744 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid) 1745 { 1746 if (timerid) { 1747 (void) untimeout(timerid); 1748 } 1749 } 1750 1751 void 1752 nxge_uninit(p_nxge_t nxgep) 1753 { 1754 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit")); 1755 1756 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1757 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1758 "==> nxge_uninit: not initialized")); 1759 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1760 "<== nxge_uninit")); 1761 return; 1762 } 1763 1764 if (!isLDOMguest(nxgep)) { 1765 /* 1766 * Reset the receive MAC side. 1767 */ 1768 (void) nxge_rx_mac_disable(nxgep); 1769 1770 /* 1771 * Drain the IPP. 1772 */ 1773 (void) nxge_ipp_drain(nxgep); 1774 } 1775 1776 /* stop timer */ 1777 if (nxgep->nxge_timerid) { 1778 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 1779 nxgep->nxge_timerid = 0; 1780 } 1781 1782 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1783 (void) nxge_intr_hw_disable(nxgep); 1784 1785 1786 /* Disable and soft reset the IPP */ 1787 if (!isLDOMguest(nxgep)) 1788 (void) nxge_ipp_disable(nxgep); 1789 1790 /* Free classification resources */ 1791 (void) nxge_classify_uninit(nxgep); 1792 1793 /* 1794 * Reset the transmit/receive DMA side. 1795 */ 1796 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 1797 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1798 1799 nxge_uninit_txdma_channels(nxgep); 1800 nxge_uninit_rxdma_channels(nxgep); 1801 1802 /* 1803 * Reset the transmit MAC side. 1804 */ 1805 (void) nxge_tx_mac_disable(nxgep); 1806 1807 nxge_free_mem_pool(nxgep); 1808 1809 /* 1810 * Start the timer if the reset flag is not set. 1811 * If this reset flag is set, the link monitor 1812 * will not be started in order to stop furthur bus 1813 * activities coming from this interface. 1814 * The driver will start the monitor function 1815 * if the interface was initialized again later. 1816 */ 1817 if (!nxge_peu_reset_enable) { 1818 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1819 } 1820 1821 nxgep->drv_state &= ~STATE_HW_INITIALIZED; 1822 1823 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: " 1824 "nxge_mblks_pending %d", nxge_mblks_pending)); 1825 } 1826 1827 void 1828 nxge_get64(p_nxge_t nxgep, p_mblk_t mp) 1829 { 1830 uint64_t reg; 1831 uint64_t regdata; 1832 int i, retry; 1833 1834 bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t)); 1835 regdata = 0; 1836 retry = 1; 1837 1838 for (i = 0; i < retry; i++) { 1839 NXGE_REG_RD64(nxgep->npi_handle, reg, ®data); 1840 } 1841 bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t)); 1842 } 1843 1844 void 1845 nxge_put64(p_nxge_t nxgep, p_mblk_t mp) 1846 { 1847 uint64_t reg; 1848 uint64_t buf[2]; 1849 1850 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t)); 1851 reg = buf[0]; 1852 1853 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]); 1854 } 1855 1856 /*ARGSUSED*/ 1857 /*VARARGS*/ 1858 void 1859 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...) 1860 { 1861 char msg_buffer[1048]; 1862 char prefix_buffer[32]; 1863 int instance; 1864 uint64_t debug_level; 1865 int cmn_level = CE_CONT; 1866 va_list ap; 1867 1868 if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) { 1869 /* In case a developer has changed nxge_debug_level. */ 1870 if (nxgep->nxge_debug_level != nxge_debug_level) 1871 nxgep->nxge_debug_level = nxge_debug_level; 1872 } 1873 1874 debug_level = (nxgep == NULL) ? nxge_debug_level : 1875 nxgep->nxge_debug_level; 1876 1877 if ((level & debug_level) || 1878 (level == NXGE_NOTE) || 1879 (level == NXGE_ERR_CTL)) { 1880 /* do the msg processing */ 1881 MUTEX_ENTER(&nxgedebuglock); 1882 1883 if ((level & NXGE_NOTE)) { 1884 cmn_level = CE_NOTE; 1885 } 1886 1887 if (level & NXGE_ERR_CTL) { 1888 cmn_level = CE_WARN; 1889 } 1890 1891 va_start(ap, fmt); 1892 (void) vsprintf(msg_buffer, fmt, ap); 1893 va_end(ap); 1894 if (nxgep == NULL) { 1895 instance = -1; 1896 (void) sprintf(prefix_buffer, "%s :", "nxge"); 1897 } else { 1898 instance = nxgep->instance; 1899 (void) sprintf(prefix_buffer, 1900 "%s%d :", "nxge", instance); 1901 } 1902 1903 MUTEX_EXIT(&nxgedebuglock); 1904 cmn_err(cmn_level, "!%s %s\n", 1905 prefix_buffer, msg_buffer); 1906 1907 } 1908 } 1909 1910 char * 1911 nxge_dump_packet(char *addr, int size) 1912 { 1913 uchar_t *ap = (uchar_t *)addr; 1914 int i; 1915 static char etherbuf[1024]; 1916 char *cp = etherbuf; 1917 char digits[] = "0123456789abcdef"; 1918 1919 if (!size) 1920 size = 60; 1921 1922 if (size > MAX_DUMP_SZ) { 1923 /* Dump the leading bytes */ 1924 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1925 if (*ap > 0x0f) 1926 *cp++ = digits[*ap >> 4]; 1927 *cp++ = digits[*ap++ & 0xf]; 1928 *cp++ = ':'; 1929 } 1930 for (i = 0; i < 20; i++) 1931 *cp++ = '.'; 1932 /* Dump the last MAX_DUMP_SZ/2 bytes */ 1933 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2)); 1934 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1935 if (*ap > 0x0f) 1936 *cp++ = digits[*ap >> 4]; 1937 *cp++ = digits[*ap++ & 0xf]; 1938 *cp++ = ':'; 1939 } 1940 } else { 1941 for (i = 0; i < size; i++) { 1942 if (*ap > 0x0f) 1943 *cp++ = digits[*ap >> 4]; 1944 *cp++ = digits[*ap++ & 0xf]; 1945 *cp++ = ':'; 1946 } 1947 } 1948 *--cp = 0; 1949 return (etherbuf); 1950 } 1951 1952 #ifdef NXGE_DEBUG 1953 static void 1954 nxge_test_map_regs(p_nxge_t nxgep) 1955 { 1956 ddi_acc_handle_t cfg_handle; 1957 p_pci_cfg_t cfg_ptr; 1958 ddi_acc_handle_t dev_handle; 1959 char *dev_ptr; 1960 ddi_acc_handle_t pci_config_handle; 1961 uint32_t regval; 1962 int i; 1963 1964 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs")); 1965 1966 dev_handle = nxgep->dev_regs->nxge_regh; 1967 dev_ptr = (char *)nxgep->dev_regs->nxge_regp; 1968 1969 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1970 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1971 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1972 1973 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1974 "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr)); 1975 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1976 "Neptune PCI cfg_ptr vendor id ptr 0x%llx", 1977 &cfg_ptr->vendorid)); 1978 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1979 "\tvendorid 0x%x devid 0x%x", 1980 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0), 1981 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0))); 1982 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1983 "PCI BAR: base 0x%x base14 0x%x base 18 0x%x " 1984 "bar1c 0x%x", 1985 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0), 1986 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0), 1987 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0), 1988 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0))); 1989 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1990 "\nNeptune PCI BAR: base20 0x%x base24 0x%x " 1991 "base 28 0x%x bar2c 0x%x\n", 1992 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0), 1993 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0), 1994 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0), 1995 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0))); 1996 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1997 "\nNeptune PCI BAR: base30 0x%x\n", 1998 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); 1999 2000 cfg_handle = nxgep->dev_regs->nxge_pciregh; 2001 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 2002 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2003 "first 0x%llx second 0x%llx third 0x%llx " 2004 "last 0x%llx ", 2005 NXGE_PIO_READ64(dev_handle, 2006 (uint64_t *)(dev_ptr + 0), 0), 2007 NXGE_PIO_READ64(dev_handle, 2008 (uint64_t *)(dev_ptr + 8), 0), 2009 NXGE_PIO_READ64(dev_handle, 2010 (uint64_t *)(dev_ptr + 16), 0), 2011 NXGE_PIO_READ64(cfg_handle, 2012 (uint64_t *)(dev_ptr + 24), 0))); 2013 } 2014 } 2015 2016 #endif 2017 2018 static void 2019 nxge_suspend(p_nxge_t nxgep) 2020 { 2021 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); 2022 2023 nxge_intrs_disable(nxgep); 2024 nxge_destroy_dev(nxgep); 2025 2026 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); 2027 } 2028 2029 static nxge_status_t 2030 nxge_resume(p_nxge_t nxgep) 2031 { 2032 nxge_status_t status = NXGE_OK; 2033 2034 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); 2035 2036 nxgep->suspended = DDI_RESUME; 2037 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 2038 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 2039 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 2040 (void) nxge_rx_mac_enable(nxgep); 2041 (void) nxge_tx_mac_enable(nxgep); 2042 nxge_intrs_enable(nxgep); 2043 nxgep->suspended = 0; 2044 2045 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2046 "<== nxge_resume status = 0x%x", status)); 2047 return (status); 2048 } 2049 2050 static nxge_status_t 2051 nxge_setup_dev(p_nxge_t nxgep) 2052 { 2053 nxge_status_t status = NXGE_OK; 2054 2055 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 2056 nxgep->mac.portnum)); 2057 2058 status = nxge_link_init(nxgep); 2059 2060 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 2061 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2062 "port%d Bad register acc handle", nxgep->mac.portnum)); 2063 status = NXGE_ERROR; 2064 } 2065 2066 if (status != NXGE_OK) { 2067 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2068 " nxge_setup_dev status " 2069 "(xcvr init 0x%08x)", status)); 2070 goto nxge_setup_dev_exit; 2071 } 2072 2073 nxge_setup_dev_exit: 2074 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2075 "<== nxge_setup_dev port %d status = 0x%08x", 2076 nxgep->mac.portnum, status)); 2077 2078 return (status); 2079 } 2080 2081 static void 2082 nxge_destroy_dev(p_nxge_t nxgep) 2083 { 2084 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 2085 2086 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 2087 2088 (void) nxge_hw_stop(nxgep); 2089 2090 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 2091 } 2092 2093 static nxge_status_t 2094 nxge_setup_system_dma_pages(p_nxge_t nxgep) 2095 { 2096 int ddi_status = DDI_SUCCESS; 2097 uint_t count; 2098 ddi_dma_cookie_t cookie; 2099 uint_t iommu_pagesize; 2100 nxge_status_t status = NXGE_OK; 2101 2102 NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 2103 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 2104 if (nxgep->niu_type != N2_NIU) { 2105 iommu_pagesize = dvma_pagesize(nxgep->dip); 2106 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2107 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2108 " default_block_size %d iommu_pagesize %d", 2109 nxgep->sys_page_sz, 2110 ddi_ptob(nxgep->dip, (ulong_t)1), 2111 nxgep->rx_default_block_size, 2112 iommu_pagesize)); 2113 2114 if (iommu_pagesize != 0) { 2115 if (nxgep->sys_page_sz == iommu_pagesize) { 2116 if (iommu_pagesize > 0x4000) 2117 nxgep->sys_page_sz = 0x4000; 2118 } else { 2119 if (nxgep->sys_page_sz > iommu_pagesize) 2120 nxgep->sys_page_sz = iommu_pagesize; 2121 } 2122 } 2123 } 2124 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2125 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2126 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2127 "default_block_size %d page mask %d", 2128 nxgep->sys_page_sz, 2129 ddi_ptob(nxgep->dip, (ulong_t)1), 2130 nxgep->rx_default_block_size, 2131 nxgep->sys_page_mask)); 2132 2133 2134 switch (nxgep->sys_page_sz) { 2135 default: 2136 nxgep->sys_page_sz = 0x1000; 2137 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2138 nxgep->rx_default_block_size = 0x1000; 2139 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2140 break; 2141 case 0x1000: 2142 nxgep->rx_default_block_size = 0x1000; 2143 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2144 break; 2145 case 0x2000: 2146 nxgep->rx_default_block_size = 0x2000; 2147 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2148 break; 2149 case 0x4000: 2150 nxgep->rx_default_block_size = 0x4000; 2151 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 2152 break; 2153 case 0x8000: 2154 nxgep->rx_default_block_size = 0x8000; 2155 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 2156 break; 2157 } 2158 2159 #ifndef USE_RX_BIG_BUF 2160 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 2161 #else 2162 nxgep->rx_default_block_size = 0x2000; 2163 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2164 #endif 2165 /* 2166 * Get the system DMA burst size. 2167 */ 2168 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2169 DDI_DMA_DONTWAIT, 0, 2170 &nxgep->dmasparehandle); 2171 if (ddi_status != DDI_SUCCESS) { 2172 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2173 "ddi_dma_alloc_handle: failed " 2174 " status 0x%x", ddi_status)); 2175 goto nxge_get_soft_properties_exit; 2176 } 2177 2178 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2179 (caddr_t)nxgep->dmasparehandle, 2180 sizeof (nxgep->dmasparehandle), 2181 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2182 DDI_DMA_DONTWAIT, 0, 2183 &cookie, &count); 2184 if (ddi_status != DDI_DMA_MAPPED) { 2185 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2186 "Binding spare handle to find system" 2187 " burstsize failed.")); 2188 ddi_status = DDI_FAILURE; 2189 goto nxge_get_soft_properties_fail1; 2190 } 2191 2192 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2193 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2194 2195 nxge_get_soft_properties_fail1: 2196 ddi_dma_free_handle(&nxgep->dmasparehandle); 2197 2198 nxge_get_soft_properties_exit: 2199 2200 if (ddi_status != DDI_SUCCESS) 2201 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2202 2203 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2204 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2205 return (status); 2206 } 2207 2208 static nxge_status_t 2209 nxge_alloc_mem_pool(p_nxge_t nxgep) 2210 { 2211 nxge_status_t status = NXGE_OK; 2212 2213 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2214 2215 status = nxge_alloc_rx_mem_pool(nxgep); 2216 if (status != NXGE_OK) { 2217 return (NXGE_ERROR); 2218 } 2219 2220 status = nxge_alloc_tx_mem_pool(nxgep); 2221 if (status != NXGE_OK) { 2222 nxge_free_rx_mem_pool(nxgep); 2223 return (NXGE_ERROR); 2224 } 2225 2226 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2227 return (NXGE_OK); 2228 } 2229 2230 static void 2231 nxge_free_mem_pool(p_nxge_t nxgep) 2232 { 2233 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2234 2235 nxge_free_rx_mem_pool(nxgep); 2236 nxge_free_tx_mem_pool(nxgep); 2237 2238 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 2239 } 2240 2241 nxge_status_t 2242 nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 2243 { 2244 uint32_t rdc_max; 2245 p_nxge_dma_pt_cfg_t p_all_cfgp; 2246 p_nxge_hw_pt_cfg_t p_cfgp; 2247 p_nxge_dma_pool_t dma_poolp; 2248 p_nxge_dma_common_t *dma_buf_p; 2249 p_nxge_dma_pool_t dma_cntl_poolp; 2250 p_nxge_dma_common_t *dma_cntl_p; 2251 uint32_t *num_chunks; /* per dma */ 2252 nxge_status_t status = NXGE_OK; 2253 2254 uint32_t nxge_port_rbr_size; 2255 uint32_t nxge_port_rbr_spare_size; 2256 uint32_t nxge_port_rcr_size; 2257 uint32_t rx_cntl_alloc_size; 2258 2259 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 2260 2261 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2262 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2263 rdc_max = NXGE_MAX_RDCS; 2264 2265 /* 2266 * Allocate memory for the common DMA data structures. 2267 */ 2268 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2269 KM_SLEEP); 2270 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2271 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2272 2273 dma_cntl_poolp = (p_nxge_dma_pool_t) 2274 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2275 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2276 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2277 2278 num_chunks = (uint32_t *)KMEM_ZALLOC( 2279 sizeof (uint32_t) * rdc_max, KM_SLEEP); 2280 2281 /* 2282 * Assume that each DMA channel will be configured with 2283 * the default block size. 2284 * rbr block counts are modulo the batch count (16). 2285 */ 2286 nxge_port_rbr_size = p_all_cfgp->rbr_size; 2287 nxge_port_rcr_size = p_all_cfgp->rcr_size; 2288 2289 if (!nxge_port_rbr_size) { 2290 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 2291 } 2292 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 2293 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2294 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 2295 } 2296 2297 p_all_cfgp->rbr_size = nxge_port_rbr_size; 2298 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 2299 2300 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 2301 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2302 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 2303 } 2304 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 2305 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2306 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 2307 "set to default %d", 2308 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 2309 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 2310 } 2311 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 2312 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2313 "nxge_alloc_rx_mem_pool: RCR too high %d, " 2314 "set to default %d", 2315 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 2316 nxge_port_rcr_size = RCR_DEFAULT_MAX; 2317 } 2318 2319 /* 2320 * N2/NIU has limitation on the descriptor sizes (contiguous 2321 * memory allocation on data buffers to 4M (contig_mem_alloc) 2322 * and little endian for control buffers (must use the ddi/dki mem alloc 2323 * function). 2324 */ 2325 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2326 if (nxgep->niu_type == N2_NIU) { 2327 nxge_port_rbr_spare_size = 0; 2328 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2329 (!ISP2(nxge_port_rbr_size))) { 2330 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2331 } 2332 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2333 (!ISP2(nxge_port_rcr_size))) { 2334 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2335 } 2336 } 2337 #endif 2338 2339 /* 2340 * Addresses of receive block ring, receive completion ring and the 2341 * mailbox must be all cache-aligned (64 bytes). 2342 */ 2343 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2344 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2345 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2346 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2347 2348 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2349 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2350 "nxge_port_rcr_size = %d " 2351 "rx_cntl_alloc_size = %d", 2352 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2353 nxge_port_rcr_size, 2354 rx_cntl_alloc_size)); 2355 2356 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2357 if (nxgep->niu_type == N2_NIU) { 2358 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2359 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2360 2361 if (!ISP2(rx_buf_alloc_size)) { 2362 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2363 "==> nxge_alloc_rx_mem_pool: " 2364 " must be power of 2")); 2365 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2366 goto nxge_alloc_rx_mem_pool_exit; 2367 } 2368 2369 if (rx_buf_alloc_size > (1 << 22)) { 2370 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2371 "==> nxge_alloc_rx_mem_pool: " 2372 " limit size to 4M")); 2373 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2374 goto nxge_alloc_rx_mem_pool_exit; 2375 } 2376 2377 if (rx_cntl_alloc_size < 0x2000) { 2378 rx_cntl_alloc_size = 0x2000; 2379 } 2380 } 2381 #endif 2382 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2383 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2384 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2385 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2386 2387 dma_poolp->ndmas = p_cfgp->max_rdcs; 2388 dma_poolp->num_chunks = num_chunks; 2389 dma_poolp->buf_allocated = B_TRUE; 2390 nxgep->rx_buf_pool_p = dma_poolp; 2391 dma_poolp->dma_buf_pool_p = dma_buf_p; 2392 2393 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2394 dma_cntl_poolp->buf_allocated = B_TRUE; 2395 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2396 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2397 2398 /* Allocate the receive rings, too. */ 2399 nxgep->rx_rbr_rings = 2400 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2401 nxgep->rx_rbr_rings->rbr_rings = 2402 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2403 nxgep->rx_rcr_rings = 2404 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2405 nxgep->rx_rcr_rings->rcr_rings = 2406 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2407 nxgep->rx_mbox_areas_p = 2408 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2409 nxgep->rx_mbox_areas_p->rxmbox_areas = 2410 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2411 2412 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2413 p_cfgp->max_rdcs; 2414 2415 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2416 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2417 2418 nxge_alloc_rx_mem_pool_exit: 2419 return (status); 2420 } 2421 2422 /* 2423 * nxge_alloc_rxb 2424 * 2425 * Allocate buffers for an RDC. 2426 * 2427 * Arguments: 2428 * nxgep 2429 * channel The channel to map into our kernel space. 2430 * 2431 * Notes: 2432 * 2433 * NPI function calls: 2434 * 2435 * NXGE function calls: 2436 * 2437 * Registers accessed: 2438 * 2439 * Context: 2440 * 2441 * Taking apart: 2442 * 2443 * Open questions: 2444 * 2445 */ 2446 nxge_status_t 2447 nxge_alloc_rxb( 2448 p_nxge_t nxgep, 2449 int channel) 2450 { 2451 size_t rx_buf_alloc_size; 2452 nxge_status_t status = NXGE_OK; 2453 2454 nxge_dma_common_t **data; 2455 nxge_dma_common_t **control; 2456 uint32_t *num_chunks; 2457 2458 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2459 2460 /* 2461 * Allocate memory for the receive buffers and descriptor rings. 2462 * Replace these allocation functions with the interface functions 2463 * provided by the partition manager if/when they are available. 2464 */ 2465 2466 /* 2467 * Allocate memory for the receive buffer blocks. 2468 */ 2469 rx_buf_alloc_size = (nxgep->rx_default_block_size * 2470 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); 2471 2472 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2473 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; 2474 2475 if ((status = nxge_alloc_rx_buf_dma( 2476 nxgep, channel, data, rx_buf_alloc_size, 2477 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { 2478 return (status); 2479 } 2480 2481 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " 2482 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); 2483 2484 /* 2485 * Allocate memory for descriptor rings and mailbox. 2486 */ 2487 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2488 2489 if ((status = nxge_alloc_rx_cntl_dma( 2490 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 2491 != NXGE_OK) { 2492 nxge_free_rx_cntl_dma(nxgep, *control); 2493 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 2494 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 2495 return (status); 2496 } 2497 2498 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2499 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2500 2501 return (status); 2502 } 2503 2504 void 2505 nxge_free_rxb( 2506 p_nxge_t nxgep, 2507 int channel) 2508 { 2509 nxge_dma_common_t *data; 2510 nxge_dma_common_t *control; 2511 uint32_t num_chunks; 2512 2513 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2514 2515 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2516 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 2517 nxge_free_rx_buf_dma(nxgep, data, num_chunks); 2518 2519 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2520 nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 2521 2522 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2523 nxge_free_rx_cntl_dma(nxgep, control); 2524 2525 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2526 2527 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2528 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2529 2530 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_alloc_rbb")); 2531 } 2532 2533 static void 2534 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2535 { 2536 int rdc_max = NXGE_MAX_RDCS; 2537 2538 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2539 2540 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 2541 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2542 "<== nxge_free_rx_mem_pool " 2543 "(null rx buf pool or buf not allocated")); 2544 return; 2545 } 2546 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 2547 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2548 "<== nxge_free_rx_mem_pool " 2549 "(null rx cntl buf pool or cntl buf not allocated")); 2550 return; 2551 } 2552 2553 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2554 sizeof (p_nxge_dma_common_t) * rdc_max); 2555 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2556 2557 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2558 sizeof (uint32_t) * rdc_max); 2559 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2560 sizeof (p_nxge_dma_common_t) * rdc_max); 2561 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2562 2563 nxgep->rx_buf_pool_p = 0; 2564 nxgep->rx_cntl_pool_p = 0; 2565 2566 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2567 sizeof (p_rx_rbr_ring_t) * rdc_max); 2568 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2569 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2570 sizeof (p_rx_rcr_ring_t) * rdc_max); 2571 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2572 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2573 sizeof (p_rx_mbox_t) * rdc_max); 2574 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2575 2576 nxgep->rx_rbr_rings = 0; 2577 nxgep->rx_rcr_rings = 0; 2578 nxgep->rx_mbox_areas_p = 0; 2579 2580 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2581 } 2582 2583 2584 static nxge_status_t 2585 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2586 p_nxge_dma_common_t *dmap, 2587 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2588 { 2589 p_nxge_dma_common_t rx_dmap; 2590 nxge_status_t status = NXGE_OK; 2591 size_t total_alloc_size; 2592 size_t allocated = 0; 2593 int i, size_index, array_size; 2594 boolean_t use_kmem_alloc = B_FALSE; 2595 2596 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2597 2598 rx_dmap = (p_nxge_dma_common_t) 2599 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2600 KM_SLEEP); 2601 2602 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2603 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2604 dma_channel, alloc_size, block_size, dmap)); 2605 2606 total_alloc_size = alloc_size; 2607 2608 #if defined(RX_USE_RECLAIM_POST) 2609 total_alloc_size = alloc_size + alloc_size/4; 2610 #endif 2611 2612 i = 0; 2613 size_index = 0; 2614 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2615 while ((size_index < array_size) && 2616 (alloc_sizes[size_index] < alloc_size)) 2617 size_index++; 2618 if (size_index >= array_size) { 2619 size_index = array_size - 1; 2620 } 2621 2622 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2623 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2624 use_kmem_alloc = B_TRUE; 2625 #if defined(__x86) 2626 size_index = 0; 2627 #endif 2628 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2629 "==> nxge_alloc_rx_buf_dma: " 2630 "Neptune use kmem_alloc() - size_index %d", 2631 size_index)); 2632 } 2633 2634 while ((allocated < total_alloc_size) && 2635 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2636 rx_dmap[i].dma_chunk_index = i; 2637 rx_dmap[i].block_size = block_size; 2638 rx_dmap[i].alength = alloc_sizes[size_index]; 2639 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2640 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2641 rx_dmap[i].dma_channel = dma_channel; 2642 rx_dmap[i].contig_alloc_type = B_FALSE; 2643 rx_dmap[i].kmem_alloc_type = B_FALSE; 2644 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2645 2646 /* 2647 * N2/NIU: data buffers must be contiguous as the driver 2648 * needs to call Hypervisor api to set up 2649 * logical pages. 2650 */ 2651 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2652 rx_dmap[i].contig_alloc_type = B_TRUE; 2653 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2654 } else if (use_kmem_alloc) { 2655 /* For Neptune, use kmem_alloc */ 2656 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2657 "==> nxge_alloc_rx_buf_dma: " 2658 "Neptune use kmem_alloc()")); 2659 rx_dmap[i].kmem_alloc_type = B_TRUE; 2660 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2661 } 2662 2663 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2664 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2665 "i %d nblocks %d alength %d", 2666 dma_channel, i, &rx_dmap[i], block_size, 2667 i, rx_dmap[i].nblocks, 2668 rx_dmap[i].alength)); 2669 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2670 &nxge_rx_dma_attr, 2671 rx_dmap[i].alength, 2672 &nxge_dev_buf_dma_acc_attr, 2673 DDI_DMA_READ | DDI_DMA_STREAMING, 2674 (p_nxge_dma_common_t)(&rx_dmap[i])); 2675 if (status != NXGE_OK) { 2676 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2677 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2678 "dma %d size_index %d size requested %d", 2679 dma_channel, 2680 size_index, 2681 rx_dmap[i].alength)); 2682 size_index--; 2683 } else { 2684 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2685 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2686 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2687 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2688 "buf_alloc_state %d alloc_type %d", 2689 dma_channel, 2690 &rx_dmap[i], 2691 rx_dmap[i].kaddrp, 2692 rx_dmap[i].alength, 2693 rx_dmap[i].buf_alloc_state, 2694 rx_dmap[i].buf_alloc_type)); 2695 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2696 " alloc_rx_buf_dma allocated rdc %d " 2697 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2698 dma_channel, i, rx_dmap[i].alength, 2699 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2700 rx_dmap[i].kaddrp)); 2701 i++; 2702 allocated += alloc_sizes[size_index]; 2703 } 2704 } 2705 2706 if (allocated < total_alloc_size) { 2707 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2708 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2709 "allocated 0x%x requested 0x%x", 2710 dma_channel, 2711 allocated, total_alloc_size)); 2712 status = NXGE_ERROR; 2713 goto nxge_alloc_rx_mem_fail1; 2714 } 2715 2716 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2717 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2718 "allocated 0x%x requested 0x%x", 2719 dma_channel, 2720 allocated, total_alloc_size)); 2721 2722 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2723 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2724 dma_channel, i)); 2725 *num_chunks = i; 2726 *dmap = rx_dmap; 2727 2728 goto nxge_alloc_rx_mem_exit; 2729 2730 nxge_alloc_rx_mem_fail1: 2731 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2732 2733 nxge_alloc_rx_mem_exit: 2734 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2735 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2736 2737 return (status); 2738 } 2739 2740 /*ARGSUSED*/ 2741 static void 2742 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2743 uint32_t num_chunks) 2744 { 2745 int i; 2746 2747 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2748 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2749 2750 if (dmap == 0) 2751 return; 2752 2753 for (i = 0; i < num_chunks; i++) { 2754 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2755 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2756 i, dmap)); 2757 nxge_dma_free_rx_data_buf(dmap++); 2758 } 2759 2760 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2761 } 2762 2763 /*ARGSUSED*/ 2764 static nxge_status_t 2765 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2766 p_nxge_dma_common_t *dmap, size_t size) 2767 { 2768 p_nxge_dma_common_t rx_dmap; 2769 nxge_status_t status = NXGE_OK; 2770 2771 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2772 2773 rx_dmap = (p_nxge_dma_common_t) 2774 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2775 2776 rx_dmap->contig_alloc_type = B_FALSE; 2777 rx_dmap->kmem_alloc_type = B_FALSE; 2778 2779 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2780 &nxge_desc_dma_attr, 2781 size, 2782 &nxge_dev_desc_dma_acc_attr, 2783 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2784 rx_dmap); 2785 if (status != NXGE_OK) { 2786 goto nxge_alloc_rx_cntl_dma_fail1; 2787 } 2788 2789 *dmap = rx_dmap; 2790 goto nxge_alloc_rx_cntl_dma_exit; 2791 2792 nxge_alloc_rx_cntl_dma_fail1: 2793 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2794 2795 nxge_alloc_rx_cntl_dma_exit: 2796 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2797 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2798 2799 return (status); 2800 } 2801 2802 /*ARGSUSED*/ 2803 static void 2804 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2805 { 2806 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2807 2808 if (dmap == 0) 2809 return; 2810 2811 nxge_dma_mem_free(dmap); 2812 2813 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2814 } 2815 2816 typedef struct { 2817 size_t tx_size; 2818 size_t cr_size; 2819 size_t threshhold; 2820 } nxge_tdc_sizes_t; 2821 2822 static 2823 nxge_status_t 2824 nxge_tdc_sizes( 2825 nxge_t *nxgep, 2826 nxge_tdc_sizes_t *sizes) 2827 { 2828 uint32_t threshhold; /* The bcopy() threshhold */ 2829 size_t tx_size; /* Transmit buffer size */ 2830 size_t cr_size; /* Completion ring size */ 2831 2832 /* 2833 * Assume that each DMA channel will be configured with the 2834 * default transmit buffer size for copying transmit data. 2835 * (If a packet is bigger than this, it will not be copied.) 2836 */ 2837 if (nxgep->niu_type == N2_NIU) { 2838 threshhold = TX_BCOPY_SIZE; 2839 } else { 2840 threshhold = nxge_bcopy_thresh; 2841 } 2842 tx_size = nxge_tx_ring_size * threshhold; 2843 2844 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 2845 cr_size += sizeof (txdma_mailbox_t); 2846 2847 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2848 if (nxgep->niu_type == N2_NIU) { 2849 if (!ISP2(tx_size)) { 2850 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2851 "==> nxge_tdc_sizes: Tx size" 2852 " must be power of 2")); 2853 return (NXGE_ERROR); 2854 } 2855 2856 if (tx_size > (1 << 22)) { 2857 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2858 "==> nxge_tdc_sizes: Tx size" 2859 " limited to 4M")); 2860 return (NXGE_ERROR); 2861 } 2862 2863 if (cr_size < 0x2000) 2864 cr_size = 0x2000; 2865 } 2866 #endif 2867 2868 sizes->threshhold = threshhold; 2869 sizes->tx_size = tx_size; 2870 sizes->cr_size = cr_size; 2871 2872 return (NXGE_OK); 2873 } 2874 /* 2875 * nxge_alloc_txb 2876 * 2877 * Allocate buffers for an TDC. 2878 * 2879 * Arguments: 2880 * nxgep 2881 * channel The channel to map into our kernel space. 2882 * 2883 * Notes: 2884 * 2885 * NPI function calls: 2886 * 2887 * NXGE function calls: 2888 * 2889 * Registers accessed: 2890 * 2891 * Context: 2892 * 2893 * Taking apart: 2894 * 2895 * Open questions: 2896 * 2897 */ 2898 nxge_status_t 2899 nxge_alloc_txb( 2900 p_nxge_t nxgep, 2901 int channel) 2902 { 2903 nxge_dma_common_t **dma_buf_p; 2904 nxge_dma_common_t **dma_cntl_p; 2905 uint32_t *num_chunks; 2906 nxge_status_t status = NXGE_OK; 2907 2908 nxge_tdc_sizes_t sizes; 2909 2910 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tbb")); 2911 2912 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 2913 return (NXGE_ERROR); 2914 2915 /* 2916 * Allocate memory for transmit buffers and descriptor rings. 2917 * Replace these allocation functions with the interface functions 2918 * provided by the partition manager Real Soon Now. 2919 */ 2920 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2921 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 2922 2923 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2924 2925 /* 2926 * Allocate memory for transmit buffers and descriptor rings. 2927 * Replace allocation functions with interface functions provided 2928 * by the partition manager when it is available. 2929 * 2930 * Allocate memory for the transmit buffer pool. 2931 */ 2932 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2933 "sizes: tx: %ld, cr:%ld, th:%ld", 2934 sizes.tx_size, sizes.cr_size, sizes.threshhold)); 2935 2936 *num_chunks = 0; 2937 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 2938 sizes.tx_size, sizes.threshhold, num_chunks); 2939 if (status != NXGE_OK) { 2940 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 2941 return (status); 2942 } 2943 2944 /* 2945 * Allocate memory for descriptor rings and mailbox. 2946 */ 2947 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 2948 sizes.cr_size); 2949 if (status != NXGE_OK) { 2950 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 2951 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 2952 return (status); 2953 } 2954 2955 return (NXGE_OK); 2956 } 2957 2958 void 2959 nxge_free_txb( 2960 p_nxge_t nxgep, 2961 int channel) 2962 { 2963 nxge_dma_common_t *data; 2964 nxge_dma_common_t *control; 2965 uint32_t num_chunks; 2966 2967 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 2968 2969 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2970 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2971 nxge_free_tx_buf_dma(nxgep, data, num_chunks); 2972 2973 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2974 nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 2975 2976 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2977 nxge_free_tx_cntl_dma(nxgep, control); 2978 2979 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2980 2981 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2982 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2983 2984 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 2985 } 2986 2987 /* 2988 * nxge_alloc_tx_mem_pool 2989 * 2990 * This function allocates all of the per-port TDC control data structures. 2991 * The per-channel (TDC) data structures are allocated when needed. 2992 * 2993 * Arguments: 2994 * nxgep 2995 * 2996 * Notes: 2997 * 2998 * Context: 2999 * Any domain 3000 */ 3001 nxge_status_t 3002 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 3003 { 3004 nxge_hw_pt_cfg_t *p_cfgp; 3005 nxge_dma_pool_t *dma_poolp; 3006 nxge_dma_common_t **dma_buf_p; 3007 nxge_dma_pool_t *dma_cntl_poolp; 3008 nxge_dma_common_t **dma_cntl_p; 3009 uint32_t *num_chunks; /* per dma */ 3010 int tdc_max; 3011 3012 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 3013 3014 p_cfgp = &nxgep->pt_config.hw_config; 3015 tdc_max = NXGE_MAX_TDCS; 3016 3017 /* 3018 * Allocate memory for each transmit DMA channel. 3019 */ 3020 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 3021 KM_SLEEP); 3022 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 3023 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 3024 3025 dma_cntl_poolp = (p_nxge_dma_pool_t) 3026 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 3027 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 3028 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 3029 3030 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 3031 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3032 "nxge_alloc_tx_mem_pool: TDC too high %d, " 3033 "set to default %d", 3034 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 3035 nxge_tx_ring_size = TDC_DEFAULT_MAX; 3036 } 3037 3038 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3039 /* 3040 * N2/NIU has limitation on the descriptor sizes (contiguous 3041 * memory allocation on data buffers to 4M (contig_mem_alloc) 3042 * and little endian for control buffers (must use the ddi/dki mem alloc 3043 * function). The transmit ring is limited to 8K (includes the 3044 * mailbox). 3045 */ 3046 if (nxgep->niu_type == N2_NIU) { 3047 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 3048 (!ISP2(nxge_tx_ring_size))) { 3049 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 3050 } 3051 } 3052 #endif 3053 3054 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 3055 3056 num_chunks = (uint32_t *)KMEM_ZALLOC( 3057 sizeof (uint32_t) * tdc_max, KM_SLEEP); 3058 3059 dma_poolp->ndmas = p_cfgp->tdc.owned; 3060 dma_poolp->num_chunks = num_chunks; 3061 dma_poolp->dma_buf_pool_p = dma_buf_p; 3062 nxgep->tx_buf_pool_p = dma_poolp; 3063 3064 dma_poolp->buf_allocated = B_TRUE; 3065 3066 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 3067 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 3068 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 3069 3070 dma_cntl_poolp->buf_allocated = B_TRUE; 3071 3072 nxgep->tx_rings = 3073 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 3074 nxgep->tx_rings->rings = 3075 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 3076 nxgep->tx_mbox_areas_p = 3077 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 3078 nxgep->tx_mbox_areas_p->txmbox_areas_p = 3079 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 3080 3081 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 3082 3083 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3084 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 3085 tdc_max, dma_poolp->ndmas)); 3086 3087 return (NXGE_OK); 3088 } 3089 3090 nxge_status_t 3091 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 3092 p_nxge_dma_common_t *dmap, size_t alloc_size, 3093 size_t block_size, uint32_t *num_chunks) 3094 { 3095 p_nxge_dma_common_t tx_dmap; 3096 nxge_status_t status = NXGE_OK; 3097 size_t total_alloc_size; 3098 size_t allocated = 0; 3099 int i, size_index, array_size; 3100 3101 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 3102 3103 tx_dmap = (p_nxge_dma_common_t) 3104 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 3105 KM_SLEEP); 3106 3107 total_alloc_size = alloc_size; 3108 i = 0; 3109 size_index = 0; 3110 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3111 while ((size_index < array_size) && 3112 (alloc_sizes[size_index] < alloc_size)) 3113 size_index++; 3114 if (size_index >= array_size) { 3115 size_index = array_size - 1; 3116 } 3117 3118 while ((allocated < total_alloc_size) && 3119 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3120 3121 tx_dmap[i].dma_chunk_index = i; 3122 tx_dmap[i].block_size = block_size; 3123 tx_dmap[i].alength = alloc_sizes[size_index]; 3124 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3125 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3126 tx_dmap[i].dma_channel = dma_channel; 3127 tx_dmap[i].contig_alloc_type = B_FALSE; 3128 tx_dmap[i].kmem_alloc_type = B_FALSE; 3129 3130 /* 3131 * N2/NIU: data buffers must be contiguous as the driver 3132 * needs to call Hypervisor api to set up 3133 * logical pages. 3134 */ 3135 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3136 tx_dmap[i].contig_alloc_type = B_TRUE; 3137 } 3138 3139 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3140 &nxge_tx_dma_attr, 3141 tx_dmap[i].alength, 3142 &nxge_dev_buf_dma_acc_attr, 3143 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3144 (p_nxge_dma_common_t)(&tx_dmap[i])); 3145 if (status != NXGE_OK) { 3146 size_index--; 3147 } else { 3148 i++; 3149 allocated += alloc_sizes[size_index]; 3150 } 3151 } 3152 3153 if (allocated < total_alloc_size) { 3154 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3155 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 3156 "allocated 0x%x requested 0x%x", 3157 dma_channel, 3158 allocated, total_alloc_size)); 3159 status = NXGE_ERROR; 3160 goto nxge_alloc_tx_mem_fail1; 3161 } 3162 3163 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3164 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3165 "allocated 0x%x requested 0x%x", 3166 dma_channel, 3167 allocated, total_alloc_size)); 3168 3169 *num_chunks = i; 3170 *dmap = tx_dmap; 3171 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3172 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3173 *dmap, i)); 3174 goto nxge_alloc_tx_mem_exit; 3175 3176 nxge_alloc_tx_mem_fail1: 3177 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3178 3179 nxge_alloc_tx_mem_exit: 3180 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3181 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3182 3183 return (status); 3184 } 3185 3186 /*ARGSUSED*/ 3187 static void 3188 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3189 uint32_t num_chunks) 3190 { 3191 int i; 3192 3193 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3194 3195 if (dmap == 0) 3196 return; 3197 3198 for (i = 0; i < num_chunks; i++) { 3199 nxge_dma_mem_free(dmap++); 3200 } 3201 3202 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3203 } 3204 3205 /*ARGSUSED*/ 3206 nxge_status_t 3207 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3208 p_nxge_dma_common_t *dmap, size_t size) 3209 { 3210 p_nxge_dma_common_t tx_dmap; 3211 nxge_status_t status = NXGE_OK; 3212 3213 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3214 tx_dmap = (p_nxge_dma_common_t) 3215 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3216 3217 tx_dmap->contig_alloc_type = B_FALSE; 3218 tx_dmap->kmem_alloc_type = B_FALSE; 3219 3220 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3221 &nxge_desc_dma_attr, 3222 size, 3223 &nxge_dev_desc_dma_acc_attr, 3224 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3225 tx_dmap); 3226 if (status != NXGE_OK) { 3227 goto nxge_alloc_tx_cntl_dma_fail1; 3228 } 3229 3230 *dmap = tx_dmap; 3231 goto nxge_alloc_tx_cntl_dma_exit; 3232 3233 nxge_alloc_tx_cntl_dma_fail1: 3234 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3235 3236 nxge_alloc_tx_cntl_dma_exit: 3237 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3238 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3239 3240 return (status); 3241 } 3242 3243 /*ARGSUSED*/ 3244 static void 3245 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3246 { 3247 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3248 3249 if (dmap == 0) 3250 return; 3251 3252 nxge_dma_mem_free(dmap); 3253 3254 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3255 } 3256 3257 /* 3258 * nxge_free_tx_mem_pool 3259 * 3260 * This function frees all of the per-port TDC control data structures. 3261 * The per-channel (TDC) data structures are freed when the channel 3262 * is stopped. 3263 * 3264 * Arguments: 3265 * nxgep 3266 * 3267 * Notes: 3268 * 3269 * Context: 3270 * Any domain 3271 */ 3272 static void 3273 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3274 { 3275 int tdc_max = NXGE_MAX_TDCS; 3276 3277 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3278 3279 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3280 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3281 "<== nxge_free_tx_mem_pool " 3282 "(null tx buf pool or buf not allocated")); 3283 return; 3284 } 3285 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3286 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3287 "<== nxge_free_tx_mem_pool " 3288 "(null tx cntl buf pool or cntl buf not allocated")); 3289 return; 3290 } 3291 3292 /* 1. Free the mailboxes. */ 3293 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3294 sizeof (p_tx_mbox_t) * tdc_max); 3295 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3296 3297 nxgep->tx_mbox_areas_p = 0; 3298 3299 /* 2. Free the transmit ring arrays. */ 3300 KMEM_FREE(nxgep->tx_rings->rings, 3301 sizeof (p_tx_ring_t) * tdc_max); 3302 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3303 3304 nxgep->tx_rings = 0; 3305 3306 /* 3. Free the completion ring data structures. */ 3307 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3308 sizeof (p_nxge_dma_common_t) * tdc_max); 3309 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3310 3311 nxgep->tx_cntl_pool_p = 0; 3312 3313 /* 4. Free the data ring data structures. */ 3314 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3315 sizeof (uint32_t) * tdc_max); 3316 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3317 sizeof (p_nxge_dma_common_t) * tdc_max); 3318 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3319 3320 nxgep->tx_buf_pool_p = 0; 3321 3322 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3323 } 3324 3325 /*ARGSUSED*/ 3326 static nxge_status_t 3327 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3328 struct ddi_dma_attr *dma_attrp, 3329 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3330 p_nxge_dma_common_t dma_p) 3331 { 3332 caddr_t kaddrp; 3333 int ddi_status = DDI_SUCCESS; 3334 boolean_t contig_alloc_type; 3335 boolean_t kmem_alloc_type; 3336 3337 contig_alloc_type = dma_p->contig_alloc_type; 3338 3339 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3340 /* 3341 * contig_alloc_type for contiguous memory only allowed 3342 * for N2/NIU. 3343 */ 3344 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3345 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3346 dma_p->contig_alloc_type)); 3347 return (NXGE_ERROR | NXGE_DDI_FAILED); 3348 } 3349 3350 dma_p->dma_handle = NULL; 3351 dma_p->acc_handle = NULL; 3352 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3353 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3354 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3355 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3356 if (ddi_status != DDI_SUCCESS) { 3357 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3358 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3359 return (NXGE_ERROR | NXGE_DDI_FAILED); 3360 } 3361 3362 kmem_alloc_type = dma_p->kmem_alloc_type; 3363 3364 switch (contig_alloc_type) { 3365 case B_FALSE: 3366 switch (kmem_alloc_type) { 3367 case B_FALSE: 3368 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3369 length, 3370 acc_attr_p, 3371 xfer_flags, 3372 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3373 &dma_p->acc_handle); 3374 if (ddi_status != DDI_SUCCESS) { 3375 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3376 "nxge_dma_mem_alloc: " 3377 "ddi_dma_mem_alloc failed")); 3378 ddi_dma_free_handle(&dma_p->dma_handle); 3379 dma_p->dma_handle = NULL; 3380 return (NXGE_ERROR | NXGE_DDI_FAILED); 3381 } 3382 if (dma_p->alength < length) { 3383 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3384 "nxge_dma_mem_alloc:di_dma_mem_alloc " 3385 "< length.")); 3386 ddi_dma_mem_free(&dma_p->acc_handle); 3387 ddi_dma_free_handle(&dma_p->dma_handle); 3388 dma_p->acc_handle = NULL; 3389 dma_p->dma_handle = NULL; 3390 return (NXGE_ERROR); 3391 } 3392 3393 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3394 NULL, 3395 kaddrp, dma_p->alength, xfer_flags, 3396 DDI_DMA_DONTWAIT, 3397 0, &dma_p->dma_cookie, &dma_p->ncookies); 3398 if (ddi_status != DDI_DMA_MAPPED) { 3399 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3400 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3401 "failed " 3402 "(staus 0x%x ncookies %d.)", ddi_status, 3403 dma_p->ncookies)); 3404 if (dma_p->acc_handle) { 3405 ddi_dma_mem_free(&dma_p->acc_handle); 3406 dma_p->acc_handle = NULL; 3407 } 3408 ddi_dma_free_handle(&dma_p->dma_handle); 3409 dma_p->dma_handle = NULL; 3410 return (NXGE_ERROR | NXGE_DDI_FAILED); 3411 } 3412 3413 if (dma_p->ncookies != 1) { 3414 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3415 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3416 "> 1 cookie" 3417 "(staus 0x%x ncookies %d.)", ddi_status, 3418 dma_p->ncookies)); 3419 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3420 if (dma_p->acc_handle) { 3421 ddi_dma_mem_free(&dma_p->acc_handle); 3422 dma_p->acc_handle = NULL; 3423 } 3424 ddi_dma_free_handle(&dma_p->dma_handle); 3425 dma_p->dma_handle = NULL; 3426 dma_p->acc_handle = NULL; 3427 return (NXGE_ERROR); 3428 } 3429 break; 3430 3431 case B_TRUE: 3432 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3433 if (kaddrp == NULL) { 3434 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3435 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3436 "kmem alloc failed")); 3437 return (NXGE_ERROR); 3438 } 3439 3440 dma_p->alength = length; 3441 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3442 NULL, kaddrp, dma_p->alength, xfer_flags, 3443 DDI_DMA_DONTWAIT, 0, 3444 &dma_p->dma_cookie, &dma_p->ncookies); 3445 if (ddi_status != DDI_DMA_MAPPED) { 3446 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3447 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3448 "(kmem_alloc) failed kaddrp $%p length %d " 3449 "(staus 0x%x (%d) ncookies %d.)", 3450 kaddrp, length, 3451 ddi_status, ddi_status, dma_p->ncookies)); 3452 KMEM_FREE(kaddrp, length); 3453 dma_p->acc_handle = NULL; 3454 ddi_dma_free_handle(&dma_p->dma_handle); 3455 dma_p->dma_handle = NULL; 3456 dma_p->kaddrp = NULL; 3457 return (NXGE_ERROR | NXGE_DDI_FAILED); 3458 } 3459 3460 if (dma_p->ncookies != 1) { 3461 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3462 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3463 "(kmem_alloc) > 1 cookie" 3464 "(staus 0x%x ncookies %d.)", ddi_status, 3465 dma_p->ncookies)); 3466 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3467 KMEM_FREE(kaddrp, length); 3468 ddi_dma_free_handle(&dma_p->dma_handle); 3469 dma_p->dma_handle = NULL; 3470 dma_p->acc_handle = NULL; 3471 dma_p->kaddrp = NULL; 3472 return (NXGE_ERROR); 3473 } 3474 3475 dma_p->kaddrp = kaddrp; 3476 3477 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3478 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3479 "kaddr $%p alength %d", 3480 dma_p, 3481 kaddrp, 3482 dma_p->alength)); 3483 break; 3484 } 3485 break; 3486 3487 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3488 case B_TRUE: 3489 kaddrp = (caddr_t)contig_mem_alloc(length); 3490 if (kaddrp == NULL) { 3491 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3492 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3493 ddi_dma_free_handle(&dma_p->dma_handle); 3494 return (NXGE_ERROR | NXGE_DDI_FAILED); 3495 } 3496 3497 dma_p->alength = length; 3498 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3499 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3500 &dma_p->dma_cookie, &dma_p->ncookies); 3501 if (ddi_status != DDI_DMA_MAPPED) { 3502 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3503 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 3504 "(status 0x%x ncookies %d.)", ddi_status, 3505 dma_p->ncookies)); 3506 3507 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3508 "==> nxge_dma_mem_alloc: (not mapped)" 3509 "length %lu (0x%x) " 3510 "free contig kaddrp $%p " 3511 "va_to_pa $%p", 3512 length, length, 3513 kaddrp, 3514 va_to_pa(kaddrp))); 3515 3516 3517 contig_mem_free((void *)kaddrp, length); 3518 ddi_dma_free_handle(&dma_p->dma_handle); 3519 3520 dma_p->dma_handle = NULL; 3521 dma_p->acc_handle = NULL; 3522 dma_p->alength = 0; 3523 dma_p->kaddrp = NULL; 3524 3525 return (NXGE_ERROR | NXGE_DDI_FAILED); 3526 } 3527 3528 if (dma_p->ncookies != 1 || 3529 (dma_p->dma_cookie.dmac_laddress == 0)) { 3530 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3531 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 3532 "cookie or " 3533 "dmac_laddress is NULL $%p size %d " 3534 " (status 0x%x ncookies %d.)", 3535 ddi_status, 3536 dma_p->dma_cookie.dmac_laddress, 3537 dma_p->dma_cookie.dmac_size, 3538 dma_p->ncookies)); 3539 3540 contig_mem_free((void *)kaddrp, length); 3541 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3542 ddi_dma_free_handle(&dma_p->dma_handle); 3543 3544 dma_p->alength = 0; 3545 dma_p->dma_handle = NULL; 3546 dma_p->acc_handle = NULL; 3547 dma_p->kaddrp = NULL; 3548 3549 return (NXGE_ERROR | NXGE_DDI_FAILED); 3550 } 3551 break; 3552 3553 #else 3554 case B_TRUE: 3555 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3556 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 3557 return (NXGE_ERROR | NXGE_DDI_FAILED); 3558 #endif 3559 } 3560 3561 dma_p->kaddrp = kaddrp; 3562 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3563 dma_p->alength - RXBUF_64B_ALIGNED; 3564 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3565 dma_p->last_ioaddr_pp = 3566 (unsigned char *)dma_p->dma_cookie.dmac_laddress + 3567 dma_p->alength - RXBUF_64B_ALIGNED; 3568 3569 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 3570 3571 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3572 dma_p->orig_ioaddr_pp = 3573 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3574 dma_p->orig_alength = length; 3575 dma_p->orig_kaddrp = kaddrp; 3576 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 3577 #endif 3578 3579 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 3580 "dma buffer allocated: dma_p $%p " 3581 "return dmac_ladress from cookie $%p cookie dmac_size %d " 3582 "dma_p->ioaddr_p $%p " 3583 "dma_p->orig_ioaddr_p $%p " 3584 "orig_vatopa $%p " 3585 "alength %d (0x%x) " 3586 "kaddrp $%p " 3587 "length %d (0x%x)", 3588 dma_p, 3589 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 3590 dma_p->ioaddr_pp, 3591 dma_p->orig_ioaddr_pp, 3592 dma_p->orig_vatopa, 3593 dma_p->alength, dma_p->alength, 3594 kaddrp, 3595 length, length)); 3596 3597 return (NXGE_OK); 3598 } 3599 3600 static void 3601 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 3602 { 3603 if (dma_p->dma_handle != NULL) { 3604 if (dma_p->ncookies) { 3605 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3606 dma_p->ncookies = 0; 3607 } 3608 ddi_dma_free_handle(&dma_p->dma_handle); 3609 dma_p->dma_handle = NULL; 3610 } 3611 3612 if (dma_p->acc_handle != NULL) { 3613 ddi_dma_mem_free(&dma_p->acc_handle); 3614 dma_p->acc_handle = NULL; 3615 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3616 } 3617 3618 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3619 if (dma_p->contig_alloc_type && 3620 dma_p->orig_kaddrp && dma_p->orig_alength) { 3621 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3622 "kaddrp $%p (orig_kaddrp $%p)" 3623 "mem type %d ", 3624 "orig_alength %d " 3625 "alength 0x%x (%d)", 3626 dma_p->kaddrp, 3627 dma_p->orig_kaddrp, 3628 dma_p->contig_alloc_type, 3629 dma_p->orig_alength, 3630 dma_p->alength, dma_p->alength)); 3631 3632 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 3633 dma_p->orig_alength = 0; 3634 dma_p->orig_kaddrp = NULL; 3635 dma_p->contig_alloc_type = B_FALSE; 3636 } 3637 #endif 3638 dma_p->kaddrp = NULL; 3639 dma_p->alength = 0; 3640 } 3641 3642 static void 3643 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p) 3644 { 3645 uint64_t kaddr; 3646 uint32_t buf_size; 3647 3648 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf")); 3649 3650 if (dma_p->dma_handle != NULL) { 3651 if (dma_p->ncookies) { 3652 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3653 dma_p->ncookies = 0; 3654 } 3655 ddi_dma_free_handle(&dma_p->dma_handle); 3656 dma_p->dma_handle = NULL; 3657 } 3658 3659 if (dma_p->acc_handle != NULL) { 3660 ddi_dma_mem_free(&dma_p->acc_handle); 3661 dma_p->acc_handle = NULL; 3662 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3663 } 3664 3665 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3666 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d", 3667 dma_p, 3668 dma_p->buf_alloc_state)); 3669 3670 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) { 3671 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3672 "<== nxge_dma_free_rx_data_buf: " 3673 "outstanding data buffers")); 3674 return; 3675 } 3676 3677 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3678 if (dma_p->contig_alloc_type && 3679 dma_p->orig_kaddrp && dma_p->orig_alength) { 3680 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: " 3681 "kaddrp $%p (orig_kaddrp $%p)" 3682 "mem type %d ", 3683 "orig_alength %d " 3684 "alength 0x%x (%d)", 3685 dma_p->kaddrp, 3686 dma_p->orig_kaddrp, 3687 dma_p->contig_alloc_type, 3688 dma_p->orig_alength, 3689 dma_p->alength, dma_p->alength)); 3690 3691 kaddr = (uint64_t)dma_p->orig_kaddrp; 3692 buf_size = dma_p->orig_alength; 3693 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size); 3694 dma_p->orig_alength = 0; 3695 dma_p->orig_kaddrp = NULL; 3696 dma_p->contig_alloc_type = B_FALSE; 3697 dma_p->kaddrp = NULL; 3698 dma_p->alength = 0; 3699 return; 3700 } 3701 #endif 3702 3703 if (dma_p->kmem_alloc_type) { 3704 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3705 "nxge_dma_free_rx_data_buf: free kmem " 3706 "kaddrp $%p (orig_kaddrp $%p)" 3707 "alloc type %d " 3708 "orig_alength %d " 3709 "alength 0x%x (%d)", 3710 dma_p->kaddrp, 3711 dma_p->orig_kaddrp, 3712 dma_p->kmem_alloc_type, 3713 dma_p->orig_alength, 3714 dma_p->alength, dma_p->alength)); 3715 kaddr = (uint64_t)dma_p->kaddrp; 3716 buf_size = dma_p->orig_alength; 3717 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3718 "nxge_dma_free_rx_data_buf: free dmap $%p " 3719 "kaddr $%p buf_size %d", 3720 dma_p, 3721 kaddr, buf_size)); 3722 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size); 3723 dma_p->alength = 0; 3724 dma_p->orig_alength = 0; 3725 dma_p->kaddrp = NULL; 3726 dma_p->kmem_alloc_type = B_FALSE; 3727 } 3728 3729 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf")); 3730 } 3731 3732 /* 3733 * nxge_m_start() -- start transmitting and receiving. 3734 * 3735 * This function is called by the MAC layer when the first 3736 * stream is open to prepare the hardware ready for sending 3737 * and transmitting packets. 3738 */ 3739 static int 3740 nxge_m_start(void *arg) 3741 { 3742 p_nxge_t nxgep = (p_nxge_t)arg; 3743 3744 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 3745 3746 /* 3747 * Are we already started? 3748 */ 3749 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 3750 return (0); 3751 } 3752 3753 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) { 3754 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 3755 } 3756 3757 /* 3758 * Make sure RX MAC is disabled while we initialize. 3759 */ 3760 if (!isLDOMguest(nxgep)) { 3761 (void) nxge_rx_mac_disable(nxgep); 3762 } 3763 3764 /* 3765 * Grab the global lock. 3766 */ 3767 MUTEX_ENTER(nxgep->genlock); 3768 3769 /* 3770 * Initialize the driver and hardware. 3771 */ 3772 if (nxge_init(nxgep) != NXGE_OK) { 3773 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3774 "<== nxge_m_start: initialization failed")); 3775 MUTEX_EXIT(nxgep->genlock); 3776 return (EIO); 3777 } 3778 3779 /* 3780 * Start timer to check the system error and tx hangs 3781 */ 3782 if (!isLDOMguest(nxgep)) 3783 nxgep->nxge_timerid = nxge_start_timer(nxgep, 3784 nxge_check_hw_state, NXGE_CHECK_TIMER); 3785 #if defined(sun4v) 3786 else 3787 nxge_hio_start_timer(nxgep); 3788 #endif 3789 3790 nxgep->link_notify = B_TRUE; 3791 nxgep->link_check_count = 0; 3792 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 3793 3794 /* 3795 * Let the global lock go, since we are intialized. 3796 */ 3797 MUTEX_EXIT(nxgep->genlock); 3798 3799 /* 3800 * Let the MAC start receiving packets, now that 3801 * we are initialized. 3802 */ 3803 if (!isLDOMguest(nxgep)) { 3804 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 3805 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3806 "<== nxge_m_start: enable of RX mac failed")); 3807 return (EIO); 3808 } 3809 3810 /* 3811 * Enable hardware interrupts. 3812 */ 3813 nxge_intr_hw_enable(nxgep); 3814 } 3815 #if defined(sun4v) 3816 else { 3817 /* 3818 * In guest domain we enable RDCs and their interrupts as 3819 * the last step. 3820 */ 3821 if (nxge_hio_rdc_enable(nxgep) != NXGE_OK) { 3822 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3823 "<== nxge_m_start: enable of RDCs failed")); 3824 return (EIO); 3825 } 3826 3827 if (nxge_hio_rdc_intr_arm(nxgep, B_TRUE) != NXGE_OK) { 3828 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3829 "<== nxge_m_start: intrs enable for RDCs failed")); 3830 return (EIO); 3831 } 3832 } 3833 #endif 3834 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3835 return (0); 3836 } 3837 3838 static boolean_t 3839 nxge_check_groups_stopped(p_nxge_t nxgep) 3840 { 3841 int i; 3842 3843 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 3844 if (nxgep->rx_hio_groups[i].started) 3845 return (B_FALSE); 3846 } 3847 3848 return (B_TRUE); 3849 } 3850 3851 /* 3852 * nxge_m_stop(): stop transmitting and receiving. 3853 */ 3854 static void 3855 nxge_m_stop(void *arg) 3856 { 3857 p_nxge_t nxgep = (p_nxge_t)arg; 3858 boolean_t groups_stopped; 3859 3860 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3861 3862 /* 3863 * Are the groups stopped? 3864 */ 3865 groups_stopped = nxge_check_groups_stopped(nxgep); 3866 ASSERT(groups_stopped == B_TRUE); 3867 if (!groups_stopped) { 3868 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n", 3869 nxgep->instance); 3870 return; 3871 } 3872 3873 if (!isLDOMguest(nxgep)) { 3874 /* 3875 * Disable the RX mac. 3876 */ 3877 (void) nxge_rx_mac_disable(nxgep); 3878 3879 /* 3880 * Wait for the IPP to drain. 3881 */ 3882 (void) nxge_ipp_drain(nxgep); 3883 3884 /* 3885 * Disable hardware interrupts. 3886 */ 3887 nxge_intr_hw_disable(nxgep); 3888 } 3889 #if defined(sun4v) 3890 else { 3891 (void) nxge_hio_rdc_intr_arm(nxgep, B_FALSE); 3892 } 3893 #endif 3894 3895 /* 3896 * Grab the global lock. 3897 */ 3898 MUTEX_ENTER(nxgep->genlock); 3899 3900 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3901 if (nxgep->nxge_timerid) { 3902 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3903 nxgep->nxge_timerid = 0; 3904 } 3905 3906 /* 3907 * Clean up. 3908 */ 3909 nxge_uninit(nxgep); 3910 3911 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3912 3913 /* 3914 * Let go of the global lock. 3915 */ 3916 MUTEX_EXIT(nxgep->genlock); 3917 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3918 } 3919 3920 static int 3921 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3922 { 3923 p_nxge_t nxgep = (p_nxge_t)arg; 3924 struct ether_addr addrp; 3925 3926 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3927 "==> nxge_m_multicst: add %d", add)); 3928 3929 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3930 if (add) { 3931 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3932 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3933 "<== nxge_m_multicst: add multicast failed")); 3934 return (EINVAL); 3935 } 3936 } else { 3937 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3938 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3939 "<== nxge_m_multicst: del multicast failed")); 3940 return (EINVAL); 3941 } 3942 } 3943 3944 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3945 3946 return (0); 3947 } 3948 3949 static int 3950 nxge_m_promisc(void *arg, boolean_t on) 3951 { 3952 p_nxge_t nxgep = (p_nxge_t)arg; 3953 3954 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3955 "==> nxge_m_promisc: on %d", on)); 3956 3957 if (nxge_set_promisc(nxgep, on)) { 3958 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3959 "<== nxge_m_promisc: set promisc failed")); 3960 return (EINVAL); 3961 } 3962 3963 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3964 "<== nxge_m_promisc: on %d", on)); 3965 3966 return (0); 3967 } 3968 3969 static void 3970 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3971 { 3972 p_nxge_t nxgep = (p_nxge_t)arg; 3973 struct iocblk *iocp; 3974 boolean_t need_privilege; 3975 int err; 3976 int cmd; 3977 3978 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3979 3980 iocp = (struct iocblk *)mp->b_rptr; 3981 iocp->ioc_error = 0; 3982 need_privilege = B_TRUE; 3983 cmd = iocp->ioc_cmd; 3984 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3985 switch (cmd) { 3986 default: 3987 miocnak(wq, mp, 0, EINVAL); 3988 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3989 return; 3990 3991 case LB_GET_INFO_SIZE: 3992 case LB_GET_INFO: 3993 case LB_GET_MODE: 3994 need_privilege = B_FALSE; 3995 break; 3996 case LB_SET_MODE: 3997 break; 3998 3999 4000 case NXGE_GET_MII: 4001 case NXGE_PUT_MII: 4002 case NXGE_GET64: 4003 case NXGE_PUT64: 4004 case NXGE_GET_TX_RING_SZ: 4005 case NXGE_GET_TX_DESC: 4006 case NXGE_TX_SIDE_RESET: 4007 case NXGE_RX_SIDE_RESET: 4008 case NXGE_GLOBAL_RESET: 4009 case NXGE_RESET_MAC: 4010 case NXGE_TX_REGS_DUMP: 4011 case NXGE_RX_REGS_DUMP: 4012 case NXGE_INT_REGS_DUMP: 4013 case NXGE_VIR_INT_REGS_DUMP: 4014 case NXGE_PUT_TCAM: 4015 case NXGE_GET_TCAM: 4016 case NXGE_RTRACE: 4017 case NXGE_RDUMP: 4018 case NXGE_RX_CLASS: 4019 case NXGE_RX_HASH: 4020 4021 need_privilege = B_FALSE; 4022 break; 4023 case NXGE_INJECT_ERR: 4024 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 4025 nxge_err_inject(nxgep, wq, mp); 4026 break; 4027 } 4028 4029 if (need_privilege) { 4030 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 4031 if (err != 0) { 4032 miocnak(wq, mp, 0, err); 4033 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4034 "<== nxge_m_ioctl: no priv")); 4035 return; 4036 } 4037 } 4038 4039 switch (cmd) { 4040 4041 case LB_GET_MODE: 4042 case LB_SET_MODE: 4043 case LB_GET_INFO_SIZE: 4044 case LB_GET_INFO: 4045 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 4046 break; 4047 4048 case NXGE_GET_MII: 4049 case NXGE_PUT_MII: 4050 case NXGE_PUT_TCAM: 4051 case NXGE_GET_TCAM: 4052 case NXGE_GET64: 4053 case NXGE_PUT64: 4054 case NXGE_GET_TX_RING_SZ: 4055 case NXGE_GET_TX_DESC: 4056 case NXGE_TX_SIDE_RESET: 4057 case NXGE_RX_SIDE_RESET: 4058 case NXGE_GLOBAL_RESET: 4059 case NXGE_RESET_MAC: 4060 case NXGE_TX_REGS_DUMP: 4061 case NXGE_RX_REGS_DUMP: 4062 case NXGE_INT_REGS_DUMP: 4063 case NXGE_VIR_INT_REGS_DUMP: 4064 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4065 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 4066 nxge_hw_ioctl(nxgep, wq, mp, iocp); 4067 break; 4068 case NXGE_RX_CLASS: 4069 if (nxge_rxclass_ioctl(nxgep, wq, mp->b_cont) < 0) 4070 miocnak(wq, mp, 0, EINVAL); 4071 else 4072 miocack(wq, mp, sizeof (rx_class_cfg_t), 0); 4073 break; 4074 case NXGE_RX_HASH: 4075 4076 if (nxge_rxhash_ioctl(nxgep, wq, mp->b_cont) < 0) 4077 miocnak(wq, mp, 0, EINVAL); 4078 else 4079 miocack(wq, mp, sizeof (cfg_cmd_t), 0); 4080 break; 4081 } 4082 4083 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 4084 } 4085 4086 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 4087 4088 void 4089 nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory) 4090 { 4091 p_nxge_mmac_stats_t mmac_stats; 4092 int i; 4093 nxge_mmac_t *mmac_info; 4094 4095 mmac_info = &nxgep->nxge_mmac_info; 4096 4097 mmac_stats = &nxgep->statsp->mmac_stats; 4098 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 4099 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 4100 4101 for (i = 0; i < ETHERADDRL; i++) { 4102 if (factory) { 4103 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4104 = mmac_info->factory_mac_pool[slot][ 4105 (ETHERADDRL-1) - i]; 4106 } else { 4107 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4108 = mmac_info->mac_pool[slot].addr[ 4109 (ETHERADDRL - 1) - i]; 4110 } 4111 } 4112 } 4113 4114 /* 4115 * nxge_altmac_set() -- Set an alternate MAC address 4116 */ 4117 static int 4118 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot, 4119 int rdctbl, boolean_t usetbl) 4120 { 4121 uint8_t addrn; 4122 uint8_t portn; 4123 npi_mac_addr_t altmac; 4124 hostinfo_t mac_rdc; 4125 p_nxge_class_pt_cfg_t clscfgp; 4126 4127 4128 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 4129 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 4130 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 4131 4132 portn = nxgep->mac.portnum; 4133 addrn = (uint8_t)slot - 1; 4134 4135 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, 4136 nxgep->function_num, addrn, &altmac) != NPI_SUCCESS) 4137 return (EIO); 4138 4139 /* 4140 * Set the rdc table number for the host info entry 4141 * for this mac address slot. 4142 */ 4143 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 4144 mac_rdc.value = 0; 4145 if (usetbl) 4146 mac_rdc.bits.w0.rdc_tbl_num = rdctbl; 4147 else 4148 mac_rdc.bits.w0.rdc_tbl_num = 4149 clscfgp->mac_host_info[addrn].rdctbl; 4150 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 4151 4152 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 4153 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 4154 return (EIO); 4155 } 4156 4157 /* 4158 * Enable comparison with the alternate MAC address. 4159 * While the first alternate addr is enabled by bit 1 of register 4160 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 4161 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 4162 * accordingly before calling npi_mac_altaddr_entry. 4163 */ 4164 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4165 addrn = (uint8_t)slot - 1; 4166 else 4167 addrn = (uint8_t)slot; 4168 4169 if (npi_mac_altaddr_enable(nxgep->npi_handle, 4170 nxgep->function_num, addrn) != NPI_SUCCESS) { 4171 return (EIO); 4172 } 4173 4174 return (0); 4175 } 4176 4177 /* 4178 * nxeg_m_mmac_add_g() - find an unused address slot, set the address 4179 * value to the one specified, enable the port to start filtering on 4180 * the new MAC address. Returns 0 on success. 4181 */ 4182 int 4183 nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl, 4184 boolean_t usetbl) 4185 { 4186 p_nxge_t nxgep = arg; 4187 int slot; 4188 nxge_mmac_t *mmac_info; 4189 int err; 4190 nxge_status_t status; 4191 4192 mutex_enter(nxgep->genlock); 4193 4194 /* 4195 * Make sure that nxge is initialized, if _start() has 4196 * not been called. 4197 */ 4198 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4199 status = nxge_init(nxgep); 4200 if (status != NXGE_OK) { 4201 mutex_exit(nxgep->genlock); 4202 return (ENXIO); 4203 } 4204 } 4205 4206 mmac_info = &nxgep->nxge_mmac_info; 4207 if (mmac_info->naddrfree == 0) { 4208 mutex_exit(nxgep->genlock); 4209 return (ENOSPC); 4210 } 4211 4212 /* 4213 * Search for the first available slot. Because naddrfree 4214 * is not zero, we are guaranteed to find one. 4215 * Each of the first two ports of Neptune has 16 alternate 4216 * MAC slots but only the first 7 (of 15) slots have assigned factory 4217 * MAC addresses. We first search among the slots without bundled 4218 * factory MACs. If we fail to find one in that range, then we 4219 * search the slots with bundled factory MACs. A factory MAC 4220 * will be wasted while the slot is used with a user MAC address. 4221 * But the slot could be used by factory MAC again after calling 4222 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 4223 */ 4224 for (slot = 0; slot <= mmac_info->num_mmac; slot++) { 4225 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4226 break; 4227 } 4228 4229 ASSERT(slot <= mmac_info->num_mmac); 4230 4231 if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl, 4232 usetbl)) != 0) { 4233 mutex_exit(nxgep->genlock); 4234 return (err); 4235 } 4236 4237 bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 4238 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 4239 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4240 mmac_info->naddrfree--; 4241 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4242 4243 mutex_exit(nxgep->genlock); 4244 return (0); 4245 } 4246 4247 /* 4248 * Remove the specified mac address and update the HW not to filter 4249 * the mac address anymore. 4250 */ 4251 int 4252 nxge_m_mmac_remove(void *arg, int slot) 4253 { 4254 p_nxge_t nxgep = arg; 4255 nxge_mmac_t *mmac_info; 4256 uint8_t addrn; 4257 uint8_t portn; 4258 int err = 0; 4259 nxge_status_t status; 4260 4261 mutex_enter(nxgep->genlock); 4262 4263 /* 4264 * Make sure that nxge is initialized, if _start() has 4265 * not been called. 4266 */ 4267 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4268 status = nxge_init(nxgep); 4269 if (status != NXGE_OK) { 4270 mutex_exit(nxgep->genlock); 4271 return (ENXIO); 4272 } 4273 } 4274 4275 mmac_info = &nxgep->nxge_mmac_info; 4276 if (slot < 1 || slot > mmac_info->num_mmac) { 4277 mutex_exit(nxgep->genlock); 4278 return (EINVAL); 4279 } 4280 4281 portn = nxgep->mac.portnum; 4282 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4283 addrn = (uint8_t)slot - 1; 4284 else 4285 addrn = (uint8_t)slot; 4286 4287 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4288 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 4289 == NPI_SUCCESS) { 4290 mmac_info->naddrfree++; 4291 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 4292 /* 4293 * Regardless if the MAC we just stopped filtering 4294 * is a user addr or a facory addr, we must set 4295 * the MMAC_VENDOR_ADDR flag if this slot has an 4296 * associated factory MAC to indicate that a factory 4297 * MAC is available. 4298 */ 4299 if (slot <= mmac_info->num_factory_mmac) { 4300 mmac_info->mac_pool[slot].flags 4301 |= MMAC_VENDOR_ADDR; 4302 } 4303 /* 4304 * Clear mac_pool[slot].addr so that kstat shows 0 4305 * alternate MAC address if the slot is not used. 4306 * (But nxge_m_mmac_get returns the factory MAC even 4307 * when the slot is not used!) 4308 */ 4309 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 4310 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4311 } else { 4312 err = EIO; 4313 } 4314 } else { 4315 err = EINVAL; 4316 } 4317 4318 mutex_exit(nxgep->genlock); 4319 return (err); 4320 } 4321 4322 /* 4323 * The callback to query all the factory addresses. naddr must be the same as 4324 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and 4325 * mcm_addr is the space allocated for keep all the addresses, whose size is 4326 * naddr * MAXMACADDRLEN. 4327 */ 4328 static void 4329 nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr) 4330 { 4331 nxge_t *nxgep = arg; 4332 nxge_mmac_t *mmac_info; 4333 int i; 4334 4335 mutex_enter(nxgep->genlock); 4336 4337 mmac_info = &nxgep->nxge_mmac_info; 4338 ASSERT(naddr == mmac_info->num_factory_mmac); 4339 4340 for (i = 0; i < naddr; i++) { 4341 bcopy(mmac_info->factory_mac_pool[i + 1], 4342 addr + i * MAXMACADDRLEN, ETHERADDRL); 4343 } 4344 4345 mutex_exit(nxgep->genlock); 4346 } 4347 4348 4349 static boolean_t 4350 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4351 { 4352 nxge_t *nxgep = arg; 4353 uint32_t *txflags = cap_data; 4354 4355 switch (cap) { 4356 case MAC_CAPAB_HCKSUM: 4357 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4358 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload)); 4359 if (nxge_cksum_offload <= 1) { 4360 *txflags = HCKSUM_INET_PARTIAL; 4361 } 4362 break; 4363 4364 case MAC_CAPAB_MULTIFACTADDR: { 4365 mac_capab_multifactaddr_t *mfacp = cap_data; 4366 4367 if (!isLDOMguest(nxgep)) { 4368 mutex_enter(nxgep->genlock); 4369 mfacp->mcm_naddr = 4370 nxgep->nxge_mmac_info.num_factory_mmac; 4371 mfacp->mcm_getaddr = nxge_m_getfactaddr; 4372 mutex_exit(nxgep->genlock); 4373 } 4374 break; 4375 } 4376 4377 case MAC_CAPAB_LSO: { 4378 mac_capab_lso_t *cap_lso = cap_data; 4379 4380 if (nxgep->soft_lso_enable) { 4381 if (nxge_cksum_offload <= 1) { 4382 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4383 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 4384 nxge_lso_max = NXGE_LSO_MAXLEN; 4385 } 4386 cap_lso->lso_basic_tcp_ipv4.lso_max = 4387 nxge_lso_max; 4388 } 4389 break; 4390 } else { 4391 return (B_FALSE); 4392 } 4393 } 4394 4395 case MAC_CAPAB_RINGS: { 4396 mac_capab_rings_t *cap_rings = cap_data; 4397 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 4398 4399 mutex_enter(nxgep->genlock); 4400 if (cap_rings->mr_type == MAC_RING_TYPE_RX) { 4401 if (isLDOMguest(nxgep)) { 4402 cap_rings->mr_group_type = 4403 MAC_GROUP_TYPE_STATIC; 4404 cap_rings->mr_rnum = 4405 NXGE_HIO_SHARE_MAX_CHANNELS; 4406 cap_rings->mr_rget = nxge_fill_ring; 4407 cap_rings->mr_gnum = 1; 4408 cap_rings->mr_gget = nxge_hio_group_get; 4409 cap_rings->mr_gaddring = NULL; 4410 cap_rings->mr_gremring = NULL; 4411 } else { 4412 /* 4413 * Service Domain. 4414 */ 4415 cap_rings->mr_group_type = 4416 MAC_GROUP_TYPE_DYNAMIC; 4417 cap_rings->mr_rnum = p_cfgp->max_rdcs; 4418 cap_rings->mr_rget = nxge_fill_ring; 4419 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids; 4420 cap_rings->mr_gget = nxge_hio_group_get; 4421 cap_rings->mr_gaddring = nxge_group_add_ring; 4422 cap_rings->mr_gremring = nxge_group_rem_ring; 4423 } 4424 4425 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4426 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]", 4427 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids)); 4428 } else { 4429 /* 4430 * TX Rings. 4431 */ 4432 if (isLDOMguest(nxgep)) { 4433 cap_rings->mr_group_type = 4434 MAC_GROUP_TYPE_STATIC; 4435 cap_rings->mr_rnum = 4436 NXGE_HIO_SHARE_MAX_CHANNELS; 4437 cap_rings->mr_rget = nxge_fill_ring; 4438 cap_rings->mr_gnum = 0; 4439 cap_rings->mr_gget = NULL; 4440 cap_rings->mr_gaddring = NULL; 4441 cap_rings->mr_gremring = NULL; 4442 } else { 4443 /* 4444 * Service Domain. 4445 */ 4446 cap_rings->mr_group_type = 4447 MAC_GROUP_TYPE_DYNAMIC; 4448 cap_rings->mr_rnum = p_cfgp->tdc.count; 4449 cap_rings->mr_rget = nxge_fill_ring; 4450 4451 /* 4452 * Share capable. 4453 * 4454 * Do not report the default group: hence -1 4455 */ 4456 cap_rings->mr_gnum = 4457 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1; 4458 cap_rings->mr_gget = nxge_hio_group_get; 4459 cap_rings->mr_gaddring = nxge_group_add_ring; 4460 cap_rings->mr_gremring = nxge_group_rem_ring; 4461 } 4462 4463 NXGE_DEBUG_MSG((nxgep, TX_CTL, 4464 "==> nxge_m_getcapab: tx rings # of rings %d", 4465 p_cfgp->tdc.count)); 4466 } 4467 mutex_exit(nxgep->genlock); 4468 break; 4469 } 4470 4471 #if defined(sun4v) 4472 case MAC_CAPAB_SHARES: { 4473 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4474 4475 /* 4476 * Only the service domain driver responds to 4477 * this capability request. 4478 */ 4479 mutex_enter(nxgep->genlock); 4480 if (isLDOMservice(nxgep)) { 4481 mshares->ms_snum = 3; 4482 mshares->ms_handle = (void *)nxgep; 4483 mshares->ms_salloc = nxge_hio_share_alloc; 4484 mshares->ms_sfree = nxge_hio_share_free; 4485 mshares->ms_sadd = nxge_hio_share_add_group; 4486 mshares->ms_sremove = nxge_hio_share_rem_group; 4487 mshares->ms_squery = nxge_hio_share_query; 4488 mshares->ms_sbind = nxge_hio_share_bind; 4489 mshares->ms_sunbind = nxge_hio_share_unbind; 4490 mutex_exit(nxgep->genlock); 4491 } else { 4492 mutex_exit(nxgep->genlock); 4493 return (B_FALSE); 4494 } 4495 break; 4496 } 4497 #endif 4498 default: 4499 return (B_FALSE); 4500 } 4501 return (B_TRUE); 4502 } 4503 4504 static boolean_t 4505 nxge_param_locked(mac_prop_id_t pr_num) 4506 { 4507 /* 4508 * All adv_* parameters are locked (read-only) while 4509 * the device is in any sort of loopback mode ... 4510 */ 4511 switch (pr_num) { 4512 case MAC_PROP_ADV_1000FDX_CAP: 4513 case MAC_PROP_EN_1000FDX_CAP: 4514 case MAC_PROP_ADV_1000HDX_CAP: 4515 case MAC_PROP_EN_1000HDX_CAP: 4516 case MAC_PROP_ADV_100FDX_CAP: 4517 case MAC_PROP_EN_100FDX_CAP: 4518 case MAC_PROP_ADV_100HDX_CAP: 4519 case MAC_PROP_EN_100HDX_CAP: 4520 case MAC_PROP_ADV_10FDX_CAP: 4521 case MAC_PROP_EN_10FDX_CAP: 4522 case MAC_PROP_ADV_10HDX_CAP: 4523 case MAC_PROP_EN_10HDX_CAP: 4524 case MAC_PROP_AUTONEG: 4525 case MAC_PROP_FLOWCTRL: 4526 return (B_TRUE); 4527 } 4528 return (B_FALSE); 4529 } 4530 4531 /* 4532 * callback functions for set/get of properties 4533 */ 4534 static int 4535 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4536 uint_t pr_valsize, const void *pr_val) 4537 { 4538 nxge_t *nxgep = barg; 4539 p_nxge_param_t param_arr = nxgep->param_arr; 4540 p_nxge_stats_t statsp = nxgep->statsp; 4541 int err = 0; 4542 4543 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4544 4545 mutex_enter(nxgep->genlock); 4546 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4547 nxge_param_locked(pr_num)) { 4548 /* 4549 * All adv_* parameters are locked (read-only) 4550 * while the device is in any sort of loopback mode. 4551 */ 4552 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4553 "==> nxge_m_setprop: loopback mode: read only")); 4554 mutex_exit(nxgep->genlock); 4555 return (EBUSY); 4556 } 4557 4558 switch (pr_num) { 4559 case MAC_PROP_EN_1000FDX_CAP: 4560 nxgep->param_en_1000fdx = 4561 param_arr[param_anar_1000fdx].value = *(uint8_t *)pr_val; 4562 goto reprogram; 4563 4564 case MAC_PROP_EN_100FDX_CAP: 4565 nxgep->param_en_100fdx = 4566 param_arr[param_anar_100fdx].value = *(uint8_t *)pr_val; 4567 goto reprogram; 4568 4569 case MAC_PROP_EN_10FDX_CAP: 4570 nxgep->param_en_10fdx = 4571 param_arr[param_anar_10fdx].value = *(uint8_t *)pr_val; 4572 goto reprogram; 4573 4574 case MAC_PROP_AUTONEG: 4575 param_arr[param_autoneg].value = *(uint8_t *)pr_val; 4576 goto reprogram; 4577 4578 case MAC_PROP_MTU: { 4579 uint32_t cur_mtu, new_mtu, old_framesize; 4580 4581 cur_mtu = nxgep->mac.default_mtu; 4582 ASSERT(pr_valsize >= sizeof (new_mtu)); 4583 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4584 4585 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4586 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4587 new_mtu, nxgep->mac.is_jumbo)); 4588 4589 if (new_mtu == cur_mtu) { 4590 err = 0; 4591 break; 4592 } 4593 4594 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4595 err = EBUSY; 4596 break; 4597 } 4598 4599 if ((new_mtu < NXGE_DEFAULT_MTU) || 4600 (new_mtu > NXGE_MAXIMUM_MTU)) { 4601 err = EINVAL; 4602 break; 4603 } 4604 4605 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4606 nxgep->mac.maxframesize = (uint16_t) 4607 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4608 if (nxge_mac_set_framesize(nxgep)) { 4609 nxgep->mac.maxframesize = 4610 (uint16_t)old_framesize; 4611 err = EINVAL; 4612 break; 4613 } 4614 4615 nxgep->mac.default_mtu = new_mtu; 4616 nxgep->mac.is_jumbo = (new_mtu > NXGE_DEFAULT_MTU); 4617 4618 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4619 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4620 new_mtu, nxgep->mac.maxframesize)); 4621 break; 4622 } 4623 4624 case MAC_PROP_FLOWCTRL: { 4625 link_flowctrl_t fl; 4626 4627 ASSERT(pr_valsize >= sizeof (fl)); 4628 bcopy(pr_val, &fl, sizeof (fl)); 4629 4630 switch (fl) { 4631 case LINK_FLOWCTRL_NONE: 4632 param_arr[param_anar_pause].value = 0; 4633 break; 4634 4635 case LINK_FLOWCTRL_RX: 4636 param_arr[param_anar_pause].value = 1; 4637 break; 4638 4639 case LINK_FLOWCTRL_TX: 4640 case LINK_FLOWCTRL_BI: 4641 err = EINVAL; 4642 break; 4643 default: 4644 err = EINVAL; 4645 break; 4646 } 4647 reprogram: 4648 if ((err == 0) && !isLDOMguest(nxgep)) { 4649 if (!nxge_param_link_update(nxgep)) { 4650 err = EINVAL; 4651 } 4652 } else { 4653 err = EINVAL; 4654 } 4655 break; 4656 } 4657 4658 case MAC_PROP_PRIVATE: 4659 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4660 "==> nxge_m_setprop: private property")); 4661 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, pr_val); 4662 break; 4663 4664 default: 4665 err = ENOTSUP; 4666 break; 4667 } 4668 4669 mutex_exit(nxgep->genlock); 4670 4671 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4672 "<== nxge_m_setprop (return %d)", err)); 4673 return (err); 4674 } 4675 4676 static int 4677 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4678 uint_t pr_valsize, void *pr_val) 4679 { 4680 nxge_t *nxgep = barg; 4681 p_nxge_param_t param_arr = nxgep->param_arr; 4682 p_nxge_stats_t statsp = nxgep->statsp; 4683 4684 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4685 "==> nxge_m_getprop: pr_num %d", pr_num)); 4686 4687 switch (pr_num) { 4688 case MAC_PROP_DUPLEX: 4689 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4690 break; 4691 4692 case MAC_PROP_SPEED: { 4693 uint64_t val = statsp->mac_stats.link_speed * 1000000ull; 4694 4695 ASSERT(pr_valsize >= sizeof (val)); 4696 bcopy(&val, pr_val, sizeof (val)); 4697 break; 4698 } 4699 4700 case MAC_PROP_STATUS: { 4701 link_state_t state = statsp->mac_stats.link_up ? 4702 LINK_STATE_UP : LINK_STATE_DOWN; 4703 4704 ASSERT(pr_valsize >= sizeof (state)); 4705 bcopy(&state, pr_val, sizeof (state)); 4706 break; 4707 } 4708 4709 case MAC_PROP_AUTONEG: 4710 *(uint8_t *)pr_val = param_arr[param_autoneg].value; 4711 break; 4712 4713 case MAC_PROP_FLOWCTRL: { 4714 link_flowctrl_t fl = param_arr[param_anar_pause].value != 0 ? 4715 LINK_FLOWCTRL_RX : LINK_FLOWCTRL_NONE; 4716 4717 ASSERT(pr_valsize >= sizeof (fl)); 4718 bcopy(&fl, pr_val, sizeof (fl)); 4719 break; 4720 } 4721 4722 case MAC_PROP_ADV_1000FDX_CAP: 4723 *(uint8_t *)pr_val = param_arr[param_anar_1000fdx].value; 4724 break; 4725 4726 case MAC_PROP_EN_1000FDX_CAP: 4727 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4728 break; 4729 4730 case MAC_PROP_ADV_100FDX_CAP: 4731 *(uint8_t *)pr_val = param_arr[param_anar_100fdx].value; 4732 break; 4733 4734 case MAC_PROP_EN_100FDX_CAP: 4735 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4736 break; 4737 4738 case MAC_PROP_ADV_10FDX_CAP: 4739 *(uint8_t *)pr_val = param_arr[param_anar_10fdx].value; 4740 break; 4741 4742 case MAC_PROP_EN_10FDX_CAP: 4743 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4744 break; 4745 4746 case MAC_PROP_PRIVATE: 4747 return (nxge_get_priv_prop(nxgep, pr_name, pr_valsize, 4748 pr_val)); 4749 4750 default: 4751 return (ENOTSUP); 4752 } 4753 4754 return (0); 4755 } 4756 4757 static void 4758 nxge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4759 mac_prop_info_handle_t prh) 4760 { 4761 nxge_t *nxgep = barg; 4762 p_nxge_stats_t statsp = nxgep->statsp; 4763 4764 /* 4765 * By default permissions are read/write unless specified 4766 * otherwise by the driver. 4767 */ 4768 4769 switch (pr_num) { 4770 case MAC_PROP_DUPLEX: 4771 case MAC_PROP_SPEED: 4772 case MAC_PROP_STATUS: 4773 case MAC_PROP_EN_1000HDX_CAP: 4774 case MAC_PROP_EN_100HDX_CAP: 4775 case MAC_PROP_EN_10HDX_CAP: 4776 case MAC_PROP_ADV_1000FDX_CAP: 4777 case MAC_PROP_ADV_1000HDX_CAP: 4778 case MAC_PROP_ADV_100FDX_CAP: 4779 case MAC_PROP_ADV_100HDX_CAP: 4780 case MAC_PROP_ADV_10FDX_CAP: 4781 case MAC_PROP_ADV_10HDX_CAP: 4782 /* 4783 * Note that read-only properties don't need to 4784 * provide default values since they cannot be 4785 * changed by the administrator. 4786 */ 4787 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 4788 break; 4789 4790 case MAC_PROP_EN_1000FDX_CAP: 4791 case MAC_PROP_EN_100FDX_CAP: 4792 case MAC_PROP_EN_10FDX_CAP: 4793 mac_prop_info_set_default_uint8(prh, 1); 4794 break; 4795 4796 case MAC_PROP_AUTONEG: 4797 mac_prop_info_set_default_uint8(prh, 1); 4798 break; 4799 4800 case MAC_PROP_FLOWCTRL: 4801 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_RX); 4802 break; 4803 4804 case MAC_PROP_MTU: 4805 mac_prop_info_set_range_uint32(prh, 4806 NXGE_DEFAULT_MTU, NXGE_MAXIMUM_MTU); 4807 break; 4808 4809 case MAC_PROP_PRIVATE: 4810 nxge_priv_propinfo(pr_name, prh); 4811 break; 4812 } 4813 4814 mutex_enter(nxgep->genlock); 4815 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4816 nxge_param_locked(pr_num)) { 4817 /* 4818 * Some properties are locked (read-only) while the 4819 * device is in any sort of loopback mode. 4820 */ 4821 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 4822 } 4823 mutex_exit(nxgep->genlock); 4824 } 4825 4826 static void 4827 nxge_priv_propinfo(const char *pr_name, mac_prop_info_handle_t prh) 4828 { 4829 char valstr[64]; 4830 4831 bzero(valstr, sizeof (valstr)); 4832 4833 if (strcmp(pr_name, "_function_number") == 0 || 4834 strcmp(pr_name, "_fw_version") == 0 || 4835 strcmp(pr_name, "_port_mode") == 0 || 4836 strcmp(pr_name, "_hot_swap_phy") == 0) { 4837 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 4838 4839 } else if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4840 (void) snprintf(valstr, sizeof (valstr), 4841 "%d", RXDMA_RCR_TO_DEFAULT); 4842 4843 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4844 (void) snprintf(valstr, sizeof (valstr), 4845 "%d", RXDMA_RCR_PTHRES_DEFAULT); 4846 4847 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 || 4848 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 || 4849 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 || 4850 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 || 4851 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 || 4852 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 || 4853 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 || 4854 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 4855 (void) snprintf(valstr, sizeof (valstr), "%x", 4856 NXGE_CLASS_FLOW_GEN_SERVER); 4857 4858 } else if (strcmp(pr_name, "_soft_lso_enable") == 0) { 4859 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 4860 4861 } else if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 4862 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 4863 4864 } else if (strcmp(pr_name, "_adv_pause_cap") == 0) { 4865 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 4866 } 4867 4868 if (strlen(valstr) > 0) 4869 mac_prop_info_set_default_str(prh, valstr); 4870 } 4871 4872 /* ARGSUSED */ 4873 static int 4874 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4875 const void *pr_val) 4876 { 4877 p_nxge_param_t param_arr = nxgep->param_arr; 4878 int err = 0; 4879 long result; 4880 4881 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4882 "==> nxge_set_priv_prop: name %s", pr_name)); 4883 4884 /* Blanking */ 4885 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4886 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4887 (char *)pr_val, 4888 (caddr_t)¶m_arr[param_rxdma_intr_time]); 4889 if (err) { 4890 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4891 "<== nxge_set_priv_prop: " 4892 "unable to set (%s)", pr_name)); 4893 err = EINVAL; 4894 } else { 4895 err = 0; 4896 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4897 "<== nxge_set_priv_prop: " 4898 "set (%s)", pr_name)); 4899 } 4900 4901 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4902 "<== nxge_set_priv_prop: name %s (value %d)", 4903 pr_name, result)); 4904 4905 return (err); 4906 } 4907 4908 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4909 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4910 (char *)pr_val, 4911 (caddr_t)¶m_arr[param_rxdma_intr_pkts]); 4912 if (err) { 4913 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4914 "<== nxge_set_priv_prop: " 4915 "unable to set (%s)", pr_name)); 4916 err = EINVAL; 4917 } else { 4918 err = 0; 4919 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4920 "<== nxge_set_priv_prop: " 4921 "set (%s)", pr_name)); 4922 } 4923 4924 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4925 "<== nxge_set_priv_prop: name %s (value %d)", 4926 pr_name, result)); 4927 4928 return (err); 4929 } 4930 4931 /* Classification */ 4932 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4933 if (pr_val == NULL) { 4934 err = EINVAL; 4935 return (err); 4936 } 4937 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4938 4939 err = nxge_param_set_ip_opt(nxgep, NULL, 4940 NULL, (char *)pr_val, 4941 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 4942 4943 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4944 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4945 pr_name, result)); 4946 4947 return (err); 4948 } 4949 4950 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 4951 if (pr_val == NULL) { 4952 err = EINVAL; 4953 return (err); 4954 } 4955 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4956 4957 err = nxge_param_set_ip_opt(nxgep, NULL, 4958 NULL, (char *)pr_val, 4959 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 4960 4961 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4962 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4963 pr_name, result)); 4964 4965 return (err); 4966 } 4967 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 4968 if (pr_val == NULL) { 4969 err = EINVAL; 4970 return (err); 4971 } 4972 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4973 4974 err = nxge_param_set_ip_opt(nxgep, NULL, 4975 NULL, (char *)pr_val, 4976 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 4977 4978 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4979 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4980 pr_name, result)); 4981 4982 return (err); 4983 } 4984 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 4985 if (pr_val == NULL) { 4986 err = EINVAL; 4987 return (err); 4988 } 4989 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4990 4991 err = nxge_param_set_ip_opt(nxgep, NULL, 4992 NULL, (char *)pr_val, 4993 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 4994 4995 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4996 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4997 pr_name, result)); 4998 4999 return (err); 5000 } 5001 5002 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5003 if (pr_val == NULL) { 5004 err = EINVAL; 5005 return (err); 5006 } 5007 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5008 5009 err = nxge_param_set_ip_opt(nxgep, NULL, 5010 NULL, (char *)pr_val, 5011 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 5012 5013 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5014 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5015 pr_name, result)); 5016 5017 return (err); 5018 } 5019 5020 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5021 if (pr_val == NULL) { 5022 err = EINVAL; 5023 return (err); 5024 } 5025 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5026 5027 err = nxge_param_set_ip_opt(nxgep, NULL, 5028 NULL, (char *)pr_val, 5029 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 5030 5031 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5032 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5033 pr_name, result)); 5034 5035 return (err); 5036 } 5037 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5038 if (pr_val == NULL) { 5039 err = EINVAL; 5040 return (err); 5041 } 5042 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5043 5044 err = nxge_param_set_ip_opt(nxgep, NULL, 5045 NULL, (char *)pr_val, 5046 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 5047 5048 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5049 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5050 pr_name, result)); 5051 5052 return (err); 5053 } 5054 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5055 if (pr_val == NULL) { 5056 err = EINVAL; 5057 return (err); 5058 } 5059 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5060 5061 err = nxge_param_set_ip_opt(nxgep, NULL, 5062 NULL, (char *)pr_val, 5063 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 5064 5065 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5066 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5067 pr_name, result)); 5068 5069 return (err); 5070 } 5071 5072 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5073 if (pr_val == NULL) { 5074 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5075 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 5076 err = EINVAL; 5077 return (err); 5078 } 5079 5080 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5081 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5082 "<== nxge_set_priv_prop: name %s " 5083 "(lso %d pr_val %s value %d)", 5084 pr_name, nxgep->soft_lso_enable, pr_val, result)); 5085 5086 if (result > 1 || result < 0) { 5087 err = EINVAL; 5088 } else { 5089 if (nxgep->soft_lso_enable == (uint32_t)result) { 5090 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5091 "no change (%d %d)", 5092 nxgep->soft_lso_enable, result)); 5093 return (0); 5094 } 5095 } 5096 5097 nxgep->soft_lso_enable = (int)result; 5098 5099 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5100 "<== nxge_set_priv_prop: name %s (value %d)", 5101 pr_name, result)); 5102 5103 return (err); 5104 } 5105 /* 5106 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the 5107 * following code to be executed. 5108 */ 5109 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5110 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5111 (caddr_t)¶m_arr[param_anar_10gfdx]); 5112 return (err); 5113 } 5114 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5115 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5116 (caddr_t)¶m_arr[param_anar_pause]); 5117 return (err); 5118 } 5119 5120 return (ENOTSUP); 5121 } 5122 5123 static int 5124 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 5125 void *pr_val) 5126 { 5127 p_nxge_param_t param_arr = nxgep->param_arr; 5128 char valstr[MAXNAMELEN]; 5129 int err = ENOTSUP; 5130 uint_t strsize; 5131 5132 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5133 "==> nxge_get_priv_prop: property %s", pr_name)); 5134 5135 /* function number */ 5136 if (strcmp(pr_name, "_function_number") == 0) { 5137 (void) snprintf(valstr, sizeof (valstr), "%d", 5138 nxgep->function_num); 5139 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5140 "==> nxge_get_priv_prop: name %s " 5141 "(value %d valstr %s)", 5142 pr_name, nxgep->function_num, valstr)); 5143 5144 err = 0; 5145 goto done; 5146 } 5147 5148 /* Neptune firmware version */ 5149 if (strcmp(pr_name, "_fw_version") == 0) { 5150 (void) snprintf(valstr, sizeof (valstr), "%s", 5151 nxgep->vpd_info.ver); 5152 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5153 "==> nxge_get_priv_prop: name %s " 5154 "(value %d valstr %s)", 5155 pr_name, nxgep->vpd_info.ver, valstr)); 5156 5157 err = 0; 5158 goto done; 5159 } 5160 5161 /* port PHY mode */ 5162 if (strcmp(pr_name, "_port_mode") == 0) { 5163 switch (nxgep->mac.portmode) { 5164 case PORT_1G_COPPER: 5165 (void) snprintf(valstr, sizeof (valstr), "1G copper %s", 5166 nxgep->hot_swappable_phy ? 5167 "[Hot Swappable]" : ""); 5168 break; 5169 case PORT_1G_FIBER: 5170 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s", 5171 nxgep->hot_swappable_phy ? 5172 "[hot swappable]" : ""); 5173 break; 5174 case PORT_10G_COPPER: 5175 (void) snprintf(valstr, sizeof (valstr), 5176 "10G copper %s", 5177 nxgep->hot_swappable_phy ? 5178 "[hot swappable]" : ""); 5179 break; 5180 case PORT_10G_FIBER: 5181 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s", 5182 nxgep->hot_swappable_phy ? 5183 "[hot swappable]" : ""); 5184 break; 5185 case PORT_10G_SERDES: 5186 (void) snprintf(valstr, sizeof (valstr), 5187 "10G serdes %s", nxgep->hot_swappable_phy ? 5188 "[hot swappable]" : ""); 5189 break; 5190 case PORT_1G_SERDES: 5191 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s", 5192 nxgep->hot_swappable_phy ? 5193 "[hot swappable]" : ""); 5194 break; 5195 case PORT_1G_TN1010: 5196 (void) snprintf(valstr, sizeof (valstr), 5197 "1G TN1010 copper %s", nxgep->hot_swappable_phy ? 5198 "[hot swappable]" : ""); 5199 break; 5200 case PORT_10G_TN1010: 5201 (void) snprintf(valstr, sizeof (valstr), 5202 "10G TN1010 copper %s", nxgep->hot_swappable_phy ? 5203 "[hot swappable]" : ""); 5204 break; 5205 case PORT_1G_RGMII_FIBER: 5206 (void) snprintf(valstr, sizeof (valstr), 5207 "1G rgmii fiber %s", nxgep->hot_swappable_phy ? 5208 "[hot swappable]" : ""); 5209 break; 5210 case PORT_HSP_MODE: 5211 (void) snprintf(valstr, sizeof (valstr), 5212 "phy not present[hot swappable]"); 5213 break; 5214 default: 5215 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5216 nxgep->hot_swappable_phy ? 5217 "[hot swappable]" : ""); 5218 break; 5219 } 5220 5221 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5222 "==> nxge_get_priv_prop: name %s (value %s)", 5223 pr_name, valstr)); 5224 5225 err = 0; 5226 goto done; 5227 } 5228 5229 /* Hot swappable PHY */ 5230 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5231 (void) snprintf(valstr, sizeof (valstr), "%s", 5232 nxgep->hot_swappable_phy ? 5233 "yes" : "no"); 5234 5235 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5236 "==> nxge_get_priv_prop: name %s " 5237 "(value %d valstr %s)", 5238 pr_name, nxgep->hot_swappable_phy, valstr)); 5239 5240 err = 0; 5241 goto done; 5242 } 5243 5244 5245 /* Receive Interrupt Blanking Parameters */ 5246 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5247 err = 0; 5248 (void) snprintf(valstr, sizeof (valstr), "%d", 5249 nxgep->intr_timeout); 5250 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5251 "==> nxge_get_priv_prop: name %s (value %d)", 5252 pr_name, 5253 (uint32_t)nxgep->intr_timeout)); 5254 goto done; 5255 } 5256 5257 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5258 err = 0; 5259 (void) snprintf(valstr, sizeof (valstr), "%d", 5260 nxgep->intr_threshold); 5261 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5262 "==> nxge_get_priv_prop: name %s (value %d)", 5263 pr_name, (uint32_t)nxgep->intr_threshold)); 5264 5265 goto done; 5266 } 5267 5268 /* Classification and Load Distribution Configuration */ 5269 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5270 err = nxge_dld_get_ip_opt(nxgep, 5271 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5272 5273 (void) snprintf(valstr, sizeof (valstr), "%x", 5274 (int)param_arr[param_class_opt_ipv4_tcp].value); 5275 5276 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5277 "==> nxge_get_priv_prop: %s", valstr)); 5278 goto done; 5279 } 5280 5281 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5282 err = nxge_dld_get_ip_opt(nxgep, 5283 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5284 5285 (void) snprintf(valstr, sizeof (valstr), "%x", 5286 (int)param_arr[param_class_opt_ipv4_udp].value); 5287 5288 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5289 "==> nxge_get_priv_prop: %s", valstr)); 5290 goto done; 5291 } 5292 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5293 err = nxge_dld_get_ip_opt(nxgep, 5294 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 5295 5296 (void) snprintf(valstr, sizeof (valstr), "%x", 5297 (int)param_arr[param_class_opt_ipv4_ah].value); 5298 5299 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5300 "==> nxge_get_priv_prop: %s", valstr)); 5301 goto done; 5302 } 5303 5304 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5305 err = nxge_dld_get_ip_opt(nxgep, 5306 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 5307 5308 (void) snprintf(valstr, sizeof (valstr), "%x", 5309 (int)param_arr[param_class_opt_ipv4_sctp].value); 5310 5311 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5312 "==> nxge_get_priv_prop: %s", valstr)); 5313 goto done; 5314 } 5315 5316 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5317 err = nxge_dld_get_ip_opt(nxgep, 5318 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 5319 5320 (void) snprintf(valstr, sizeof (valstr), "%x", 5321 (int)param_arr[param_class_opt_ipv6_tcp].value); 5322 5323 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5324 "==> nxge_get_priv_prop: %s", valstr)); 5325 goto done; 5326 } 5327 5328 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5329 err = nxge_dld_get_ip_opt(nxgep, 5330 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 5331 5332 (void) snprintf(valstr, sizeof (valstr), "%x", 5333 (int)param_arr[param_class_opt_ipv6_udp].value); 5334 5335 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5336 "==> nxge_get_priv_prop: %s", valstr)); 5337 goto done; 5338 } 5339 5340 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5341 err = nxge_dld_get_ip_opt(nxgep, 5342 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 5343 5344 (void) snprintf(valstr, sizeof (valstr), "%x", 5345 (int)param_arr[param_class_opt_ipv6_ah].value); 5346 5347 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5348 "==> nxge_get_priv_prop: %s", valstr)); 5349 goto done; 5350 } 5351 5352 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5353 err = nxge_dld_get_ip_opt(nxgep, 5354 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 5355 5356 (void) snprintf(valstr, sizeof (valstr), "%x", 5357 (int)param_arr[param_class_opt_ipv6_sctp].value); 5358 5359 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5360 "==> nxge_get_priv_prop: %s", valstr)); 5361 goto done; 5362 } 5363 5364 /* Software LSO */ 5365 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5366 (void) snprintf(valstr, sizeof (valstr), 5367 "%d", nxgep->soft_lso_enable); 5368 err = 0; 5369 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5370 "==> nxge_get_priv_prop: name %s (value %d)", 5371 pr_name, nxgep->soft_lso_enable)); 5372 5373 goto done; 5374 } 5375 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5376 err = 0; 5377 if (nxgep->param_arr[param_anar_10gfdx].value != 0) { 5378 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5379 goto done; 5380 } else { 5381 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5382 goto done; 5383 } 5384 } 5385 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5386 err = 0; 5387 if (nxgep->param_arr[param_anar_pause].value != 0) { 5388 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5389 goto done; 5390 } else { 5391 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5392 goto done; 5393 } 5394 } 5395 5396 done: 5397 if (err == 0) { 5398 strsize = (uint_t)strlen(valstr); 5399 if (pr_valsize < strsize) { 5400 err = ENOBUFS; 5401 } else { 5402 (void) strlcpy(pr_val, valstr, pr_valsize); 5403 } 5404 } 5405 5406 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5407 "<== nxge_get_priv_prop: return %d", err)); 5408 return (err); 5409 } 5410 5411 /* 5412 * Module loading and removing entry points. 5413 */ 5414 5415 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach, 5416 nodev, NULL, D_MP, NULL, nxge_quiesce); 5417 5418 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5419 5420 /* 5421 * Module linkage information for the kernel. 5422 */ 5423 static struct modldrv nxge_modldrv = { 5424 &mod_driverops, 5425 NXGE_DESC_VER, 5426 &nxge_dev_ops 5427 }; 5428 5429 static struct modlinkage modlinkage = { 5430 MODREV_1, (void *) &nxge_modldrv, NULL 5431 }; 5432 5433 int 5434 _init(void) 5435 { 5436 int status; 5437 5438 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 5439 5440 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5441 5442 mac_init_ops(&nxge_dev_ops, "nxge"); 5443 5444 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5445 if (status != 0) { 5446 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5447 "failed to init device soft state")); 5448 goto _init_exit; 5449 } 5450 5451 status = mod_install(&modlinkage); 5452 if (status != 0) { 5453 ddi_soft_state_fini(&nxge_list); 5454 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5455 goto _init_exit; 5456 } 5457 5458 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5459 5460 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5461 return (status); 5462 5463 _init_exit: 5464 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5465 MUTEX_DESTROY(&nxgedebuglock); 5466 return (status); 5467 } 5468 5469 int 5470 _fini(void) 5471 { 5472 int status; 5473 5474 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5475 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5476 5477 if (nxge_mblks_pending) 5478 return (EBUSY); 5479 5480 status = mod_remove(&modlinkage); 5481 if (status != DDI_SUCCESS) { 5482 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5483 "Module removal failed 0x%08x", 5484 status)); 5485 goto _fini_exit; 5486 } 5487 5488 mac_fini_ops(&nxge_dev_ops); 5489 5490 ddi_soft_state_fini(&nxge_list); 5491 5492 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5493 5494 MUTEX_DESTROY(&nxge_common_lock); 5495 MUTEX_DESTROY(&nxgedebuglock); 5496 return (status); 5497 5498 _fini_exit: 5499 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5500 return (status); 5501 } 5502 5503 int 5504 _info(struct modinfo *modinfop) 5505 { 5506 int status; 5507 5508 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5509 status = mod_info(&modlinkage, modinfop); 5510 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5511 5512 return (status); 5513 } 5514 5515 /*ARGSUSED*/ 5516 static int 5517 nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5518 { 5519 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5520 p_nxge_t nxgep = rhp->nxgep; 5521 uint32_t channel; 5522 p_tx_ring_t ring; 5523 5524 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5525 ring = nxgep->tx_rings->rings[channel]; 5526 5527 MUTEX_ENTER(&ring->lock); 5528 ASSERT(ring->tx_ring_handle == NULL); 5529 ring->tx_ring_handle = rhp->ring_handle; 5530 MUTEX_EXIT(&ring->lock); 5531 5532 return (0); 5533 } 5534 5535 static void 5536 nxge_tx_ring_stop(mac_ring_driver_t rdriver) 5537 { 5538 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5539 p_nxge_t nxgep = rhp->nxgep; 5540 uint32_t channel; 5541 p_tx_ring_t ring; 5542 5543 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5544 ring = nxgep->tx_rings->rings[channel]; 5545 5546 MUTEX_ENTER(&ring->lock); 5547 ASSERT(ring->tx_ring_handle != NULL); 5548 ring->tx_ring_handle = (mac_ring_handle_t)NULL; 5549 MUTEX_EXIT(&ring->lock); 5550 } 5551 5552 int 5553 nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5554 { 5555 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5556 p_nxge_t nxgep = rhp->nxgep; 5557 uint32_t channel; 5558 p_rx_rcr_ring_t ring; 5559 int i; 5560 5561 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index; 5562 ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 5563 5564 MUTEX_ENTER(&ring->lock); 5565 5566 if (ring->started) { 5567 ASSERT(ring->started == B_FALSE); 5568 MUTEX_EXIT(&ring->lock); 5569 return (0); 5570 } 5571 5572 /* set rcr_ring */ 5573 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) { 5574 if ((nxgep->ldgvp->ldvp[i].is_rxdma) && 5575 (nxgep->ldgvp->ldvp[i].channel == channel)) { 5576 ring->ldvp = &nxgep->ldgvp->ldvp[i]; 5577 ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp; 5578 } 5579 } 5580 5581 ring->rcr_mac_handle = rhp->ring_handle; 5582 ring->rcr_gen_num = mr_gen_num; 5583 ring->started = B_TRUE; 5584 rhp->ring_gen_num = mr_gen_num; 5585 MUTEX_EXIT(&ring->lock); 5586 5587 return (0); 5588 } 5589 5590 static void 5591 nxge_rx_ring_stop(mac_ring_driver_t rdriver) 5592 { 5593 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5594 p_nxge_t nxgep = rhp->nxgep; 5595 uint32_t channel; 5596 p_rx_rcr_ring_t ring; 5597 5598 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index; 5599 ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 5600 5601 MUTEX_ENTER(&ring->lock); 5602 ASSERT(ring->started == B_TRUE); 5603 ring->rcr_mac_handle = NULL; 5604 ring->ldvp = NULL; 5605 ring->ldgp = NULL; 5606 ring->started = B_FALSE; 5607 MUTEX_EXIT(&ring->lock); 5608 } 5609 5610 static int 5611 nxge_ring_get_htable_idx(p_nxge_t nxgep, mac_ring_type_t type, uint32_t channel) 5612 { 5613 int i; 5614 5615 #if defined(sun4v) 5616 if (isLDOMguest(nxgep)) { 5617 return (nxge_hio_get_dc_htable_idx(nxgep, 5618 (type == MAC_RING_TYPE_TX) ? VP_BOUND_TX : VP_BOUND_RX, 5619 channel)); 5620 } 5621 #endif 5622 5623 ASSERT(nxgep->ldgvp != NULL); 5624 5625 switch (type) { 5626 case MAC_RING_TYPE_TX: 5627 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) { 5628 if ((nxgep->ldgvp->ldvp[i].is_txdma) && 5629 (nxgep->ldgvp->ldvp[i].channel == channel)) { 5630 return ((int) 5631 nxgep->ldgvp->ldvp[i].ldgp->htable_idx); 5632 } 5633 } 5634 break; 5635 5636 case MAC_RING_TYPE_RX: 5637 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) { 5638 if ((nxgep->ldgvp->ldvp[i].is_rxdma) && 5639 (nxgep->ldgvp->ldvp[i].channel == channel)) { 5640 return ((int) 5641 nxgep->ldgvp->ldvp[i].ldgp->htable_idx); 5642 } 5643 } 5644 } 5645 5646 return (-1); 5647 } 5648 5649 /* 5650 * Callback funtion for MAC layer to register all rings. 5651 */ 5652 static void 5653 nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index, 5654 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh) 5655 { 5656 p_nxge_t nxgep = (p_nxge_t)arg; 5657 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 5658 p_nxge_intr_t intrp; 5659 uint32_t channel; 5660 int htable_idx; 5661 p_nxge_ring_handle_t rhandlep; 5662 5663 ASSERT(nxgep != NULL); 5664 ASSERT(p_cfgp != NULL); 5665 ASSERT(infop != NULL); 5666 5667 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5668 "==> nxge_fill_ring 0x%x index %d", rtype, index)); 5669 5670 5671 switch (rtype) { 5672 case MAC_RING_TYPE_TX: { 5673 mac_intr_t *mintr = &infop->mri_intr; 5674 5675 NXGE_DEBUG_MSG((nxgep, TX_CTL, 5676 "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d", 5677 rtype, index, p_cfgp->tdc.count)); 5678 5679 ASSERT((index >= 0) && (index < p_cfgp->tdc.count)); 5680 rhandlep = &nxgep->tx_ring_handles[index]; 5681 rhandlep->nxgep = nxgep; 5682 rhandlep->index = index; 5683 rhandlep->ring_handle = rh; 5684 5685 channel = nxgep->pt_config.hw_config.tdc.start + index; 5686 rhandlep->channel = channel; 5687 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5688 htable_idx = nxge_ring_get_htable_idx(nxgep, rtype, 5689 channel); 5690 if (htable_idx >= 0) 5691 mintr->mi_ddi_handle = intrp->htable[htable_idx]; 5692 else 5693 mintr->mi_ddi_handle = NULL; 5694 5695 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5696 infop->mri_start = nxge_tx_ring_start; 5697 infop->mri_stop = nxge_tx_ring_stop; 5698 infop->mri_tx = nxge_tx_ring_send; 5699 infop->mri_stat = nxge_tx_ring_stat; 5700 infop->mri_flags = MAC_RING_TX_SERIALIZE; 5701 break; 5702 } 5703 5704 case MAC_RING_TYPE_RX: { 5705 mac_intr_t nxge_mac_intr; 5706 int nxge_rindex; 5707 p_nxge_intr_t intrp; 5708 5709 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5710 5711 NXGE_DEBUG_MSG((nxgep, RX_CTL, 5712 "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d", 5713 rtype, index, p_cfgp->max_rdcs)); 5714 5715 /* 5716 * 'index' is the ring index within the group. 5717 * Find the ring index in the nxge instance. 5718 */ 5719 nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index); 5720 channel = nxgep->pt_config.hw_config.start_rdc + index; 5721 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5722 5723 ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs)); 5724 rhandlep = &nxgep->rx_ring_handles[nxge_rindex]; 5725 rhandlep->nxgep = nxgep; 5726 rhandlep->index = nxge_rindex; 5727 rhandlep->ring_handle = rh; 5728 rhandlep->channel = channel; 5729 5730 /* 5731 * Entrypoint to enable interrupt (disable poll) and 5732 * disable interrupt (enable poll). 5733 */ 5734 bzero(&nxge_mac_intr, sizeof (nxge_mac_intr)); 5735 nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep; 5736 nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll; 5737 nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll; 5738 5739 htable_idx = nxge_ring_get_htable_idx(nxgep, rtype, 5740 channel); 5741 if (htable_idx >= 0) 5742 nxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx]; 5743 else 5744 nxge_mac_intr.mi_ddi_handle = NULL; 5745 5746 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5747 infop->mri_start = nxge_rx_ring_start; 5748 infop->mri_stop = nxge_rx_ring_stop; 5749 infop->mri_intr = nxge_mac_intr; 5750 infop->mri_poll = nxge_rx_poll; 5751 infop->mri_stat = nxge_rx_ring_stat; 5752 infop->mri_flags = MAC_RING_RX_ENQUEUE; 5753 break; 5754 } 5755 5756 default: 5757 break; 5758 } 5759 5760 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x", rtype)); 5761 } 5762 5763 static void 5764 nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5765 mac_ring_type_t type) 5766 { 5767 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5768 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5769 nxge_t *nxge; 5770 nxge_grp_t *grp; 5771 nxge_rdc_grp_t *rdc_grp; 5772 uint16_t channel; /* device-wise ring id */ 5773 int dev_gindex; 5774 int rv; 5775 5776 nxge = rgroup->nxgep; 5777 5778 switch (type) { 5779 case MAC_RING_TYPE_TX: 5780 /* 5781 * nxge_grp_dc_add takes a channel number which is a 5782 * "devise" ring ID. 5783 */ 5784 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5785 5786 /* 5787 * Remove the ring from the default group 5788 */ 5789 if (rgroup->gindex != 0) { 5790 (void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5791 } 5792 5793 /* 5794 * nxge->tx_set.group[] is an array of groups indexed by 5795 * a "port" group ID. 5796 */ 5797 grp = nxge->tx_set.group[rgroup->gindex]; 5798 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5799 if (rv != 0) { 5800 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5801 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5802 } 5803 break; 5804 5805 case MAC_RING_TYPE_RX: 5806 /* 5807 * nxge->rx_set.group[] is an array of groups indexed by 5808 * a "port" group ID. 5809 */ 5810 grp = nxge->rx_set.group[rgroup->gindex]; 5811 5812 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5813 rgroup->gindex; 5814 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5815 5816 /* 5817 * nxge_grp_dc_add takes a channel number which is a 5818 * "devise" ring ID. 5819 */ 5820 channel = nxge->pt_config.hw_config.start_rdc + rhandle->index; 5821 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel); 5822 if (rv != 0) { 5823 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5824 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5825 } 5826 5827 rdc_grp->map |= (1 << channel); 5828 rdc_grp->max_rdcs++; 5829 5830 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl); 5831 break; 5832 } 5833 } 5834 5835 static void 5836 nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5837 mac_ring_type_t type) 5838 { 5839 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5840 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5841 nxge_t *nxge; 5842 uint16_t channel; /* device-wise ring id */ 5843 nxge_rdc_grp_t *rdc_grp; 5844 int dev_gindex; 5845 5846 nxge = rgroup->nxgep; 5847 5848 switch (type) { 5849 case MAC_RING_TYPE_TX: 5850 dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid + 5851 rgroup->gindex; 5852 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5853 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5854 5855 /* 5856 * Add the ring back to the default group 5857 */ 5858 if (rgroup->gindex != 0) { 5859 nxge_grp_t *grp; 5860 grp = nxge->tx_set.group[0]; 5861 (void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5862 } 5863 break; 5864 5865 case MAC_RING_TYPE_RX: 5866 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5867 rgroup->gindex; 5868 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5869 channel = rdc_grp->start_rdc + rhandle->index; 5870 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 5871 5872 rdc_grp->map &= ~(1 << channel); 5873 rdc_grp->max_rdcs--; 5874 5875 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl); 5876 break; 5877 } 5878 } 5879 5880 5881 /*ARGSUSED*/ 5882 static nxge_status_t 5883 nxge_add_intrs(p_nxge_t nxgep) 5884 { 5885 5886 int intr_types; 5887 int type = 0; 5888 int ddi_status = DDI_SUCCESS; 5889 nxge_status_t status = NXGE_OK; 5890 5891 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 5892 5893 nxgep->nxge_intr_type.intr_registered = B_FALSE; 5894 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 5895 nxgep->nxge_intr_type.msi_intx_cnt = 0; 5896 nxgep->nxge_intr_type.intr_added = 0; 5897 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 5898 nxgep->nxge_intr_type.intr_type = 0; 5899 5900 if (nxgep->niu_type == N2_NIU) { 5901 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5902 } else if (nxge_msi_enable) { 5903 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5904 } 5905 5906 /* Get the supported interrupt types */ 5907 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 5908 != DDI_SUCCESS) { 5909 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 5910 "ddi_intr_get_supported_types failed: status 0x%08x", 5911 ddi_status)); 5912 return (NXGE_ERROR | NXGE_DDI_FAILED); 5913 } 5914 nxgep->nxge_intr_type.intr_types = intr_types; 5915 5916 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5917 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5918 5919 /* 5920 * Solaris MSIX is not supported yet. use MSI for now. 5921 * nxge_msi_enable (1): 5922 * 1 - MSI 2 - MSI-X others - FIXED 5923 */ 5924 switch (nxge_msi_enable) { 5925 default: 5926 type = DDI_INTR_TYPE_FIXED; 5927 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5928 "use fixed (intx emulation) type %08x", 5929 type)); 5930 break; 5931 5932 case 2: 5933 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5934 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5935 if (intr_types & DDI_INTR_TYPE_MSIX) { 5936 type = DDI_INTR_TYPE_MSIX; 5937 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5938 "ddi_intr_get_supported_types: MSIX 0x%08x", 5939 type)); 5940 } else if (intr_types & DDI_INTR_TYPE_MSI) { 5941 type = DDI_INTR_TYPE_MSI; 5942 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5943 "ddi_intr_get_supported_types: MSI 0x%08x", 5944 type)); 5945 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5946 type = DDI_INTR_TYPE_FIXED; 5947 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5948 "ddi_intr_get_supported_types: MSXED0x%08x", 5949 type)); 5950 } 5951 break; 5952 5953 case 1: 5954 if (intr_types & DDI_INTR_TYPE_MSI) { 5955 type = DDI_INTR_TYPE_MSI; 5956 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5957 "ddi_intr_get_supported_types: MSI 0x%08x", 5958 type)); 5959 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 5960 type = DDI_INTR_TYPE_MSIX; 5961 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5962 "ddi_intr_get_supported_types: MSIX 0x%08x", 5963 type)); 5964 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5965 type = DDI_INTR_TYPE_FIXED; 5966 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5967 "ddi_intr_get_supported_types: MSXED0x%08x", 5968 type)); 5969 } 5970 } 5971 5972 nxgep->nxge_intr_type.intr_type = type; 5973 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 5974 type == DDI_INTR_TYPE_FIXED) && 5975 nxgep->nxge_intr_type.niu_msi_enable) { 5976 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 5977 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5978 " nxge_add_intrs: " 5979 " nxge_add_intrs_adv failed: status 0x%08x", 5980 status)); 5981 return (status); 5982 } else { 5983 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5984 "interrupts registered : type %d", type)); 5985 nxgep->nxge_intr_type.intr_registered = B_TRUE; 5986 5987 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5988 "\nAdded advanced nxge add_intr_adv " 5989 "intr type 0x%x\n", type)); 5990 5991 return (status); 5992 } 5993 } 5994 5995 if (!nxgep->nxge_intr_type.intr_registered) { 5996 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 5997 "failed to register interrupts")); 5998 return (NXGE_ERROR | NXGE_DDI_FAILED); 5999 } 6000 6001 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 6002 return (status); 6003 } 6004 6005 static nxge_status_t 6006 nxge_add_intrs_adv(p_nxge_t nxgep) 6007 { 6008 int intr_type; 6009 p_nxge_intr_t intrp; 6010 6011 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 6012 6013 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6014 intr_type = intrp->intr_type; 6015 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 6016 intr_type)); 6017 6018 switch (intr_type) { 6019 case DDI_INTR_TYPE_MSI: /* 0x2 */ 6020 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 6021 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 6022 6023 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 6024 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 6025 6026 default: 6027 return (NXGE_ERROR); 6028 } 6029 } 6030 6031 6032 /*ARGSUSED*/ 6033 static nxge_status_t 6034 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 6035 { 6036 dev_info_t *dip = nxgep->dip; 6037 p_nxge_ldg_t ldgp; 6038 p_nxge_intr_t intrp; 6039 ddi_intr_handler_t *inthandler; 6040 void *arg1, *arg2; 6041 int behavior; 6042 int nintrs, navail, nrequest; 6043 int nactual, nrequired; 6044 int inum = 0; 6045 int x, y; 6046 int ddi_status = DDI_SUCCESS; 6047 nxge_status_t status = NXGE_OK; 6048 6049 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 6050 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6051 intrp->start_inum = 0; 6052 6053 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 6054 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 6055 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6056 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 6057 "nintrs: %d", ddi_status, nintrs)); 6058 return (NXGE_ERROR | NXGE_DDI_FAILED); 6059 } 6060 6061 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 6062 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 6063 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6064 "ddi_intr_get_navail() failed, status: 0x%x%, " 6065 "nintrs: %d", ddi_status, navail)); 6066 return (NXGE_ERROR | NXGE_DDI_FAILED); 6067 } 6068 6069 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6070 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 6071 nintrs, navail)); 6072 6073 /* PSARC/2007/453 MSI-X interrupt limit override */ 6074 if (int_type == DDI_INTR_TYPE_MSIX) { 6075 nrequest = nxge_create_msi_property(nxgep); 6076 if (nrequest < navail) { 6077 navail = nrequest; 6078 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6079 "nxge_add_intrs_adv_type: nintrs %d " 6080 "navail %d (nrequest %d)", 6081 nintrs, navail, nrequest)); 6082 } 6083 } 6084 6085 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 6086 /* MSI must be power of 2 */ 6087 if ((navail & 16) == 16) { 6088 navail = 16; 6089 } else if ((navail & 8) == 8) { 6090 navail = 8; 6091 } else if ((navail & 4) == 4) { 6092 navail = 4; 6093 } else if ((navail & 2) == 2) { 6094 navail = 2; 6095 } else { 6096 navail = 1; 6097 } 6098 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6099 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 6100 "navail %d", nintrs, navail)); 6101 } 6102 6103 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 6104 DDI_INTR_ALLOC_NORMAL); 6105 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 6106 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 6107 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 6108 navail, &nactual, behavior); 6109 if (ddi_status != DDI_SUCCESS || nactual == 0) { 6110 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6111 " ddi_intr_alloc() failed: %d", 6112 ddi_status)); 6113 kmem_free(intrp->htable, intrp->intr_size); 6114 return (NXGE_ERROR | NXGE_DDI_FAILED); 6115 } 6116 6117 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 6118 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 6119 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6120 " ddi_intr_get_pri() failed: %d", 6121 ddi_status)); 6122 /* Free already allocated interrupts */ 6123 for (y = 0; y < nactual; y++) { 6124 (void) ddi_intr_free(intrp->htable[y]); 6125 } 6126 6127 kmem_free(intrp->htable, intrp->intr_size); 6128 return (NXGE_ERROR | NXGE_DDI_FAILED); 6129 } 6130 6131 nrequired = 0; 6132 switch (nxgep->niu_type) { 6133 default: 6134 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 6135 break; 6136 6137 case N2_NIU: 6138 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 6139 break; 6140 } 6141 6142 if (status != NXGE_OK) { 6143 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6144 "nxge_add_intrs_adv_typ:nxge_ldgv_init " 6145 "failed: 0x%x", status)); 6146 /* Free already allocated interrupts */ 6147 for (y = 0; y < nactual; y++) { 6148 (void) ddi_intr_free(intrp->htable[y]); 6149 } 6150 6151 kmem_free(intrp->htable, intrp->intr_size); 6152 return (status); 6153 } 6154 6155 ldgp = nxgep->ldgvp->ldgp; 6156 for (x = 0; x < nrequired; x++, ldgp++) { 6157 ldgp->vector = (uint8_t)x; 6158 ldgp->intdata = SID_DATA(ldgp->func, x); 6159 arg1 = ldgp->ldvp; 6160 arg2 = nxgep; 6161 if (ldgp->nldvs == 1) { 6162 inthandler = ldgp->ldvp->ldv_intr_handler; 6163 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6164 "nxge_add_intrs_adv_type: " 6165 "arg1 0x%x arg2 0x%x: " 6166 "1-1 int handler (entry %d intdata 0x%x)\n", 6167 arg1, arg2, 6168 x, ldgp->intdata)); 6169 } else if (ldgp->nldvs > 1) { 6170 inthandler = ldgp->sys_intr_handler; 6171 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6172 "nxge_add_intrs_adv_type: " 6173 "arg1 0x%x arg2 0x%x: " 6174 "nldevs %d int handler " 6175 "(entry %d intdata 0x%x)\n", 6176 arg1, arg2, 6177 ldgp->nldvs, x, ldgp->intdata)); 6178 } else { 6179 inthandler = NULL; 6180 } 6181 6182 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6183 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 6184 "htable 0x%llx", x, intrp->htable[x])); 6185 6186 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6187 inthandler, arg1, arg2)) != DDI_SUCCESS) { 6188 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6189 "==> nxge_add_intrs_adv_type: failed #%d " 6190 "status 0x%x", x, ddi_status)); 6191 for (y = 0; y < intrp->intr_added; y++) { 6192 (void) ddi_intr_remove_handler( 6193 intrp->htable[y]); 6194 } 6195 /* Free already allocated intr */ 6196 for (y = 0; y < nactual; y++) { 6197 (void) ddi_intr_free(intrp->htable[y]); 6198 } 6199 kmem_free(intrp->htable, intrp->intr_size); 6200 6201 (void) nxge_ldgv_uninit(nxgep); 6202 6203 return (NXGE_ERROR | NXGE_DDI_FAILED); 6204 } 6205 6206 ldgp->htable_idx = x; 6207 intrp->intr_added++; 6208 } 6209 6210 intrp->msi_intx_cnt = nactual; 6211 6212 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6213 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 6214 navail, nactual, 6215 intrp->msi_intx_cnt, 6216 intrp->intr_added)); 6217 6218 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6219 6220 (void) nxge_intr_ldgv_init(nxgep); 6221 6222 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type")); 6223 6224 return (status); 6225 } 6226 6227 /*ARGSUSED*/ 6228 static nxge_status_t 6229 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type) 6230 { 6231 dev_info_t *dip = nxgep->dip; 6232 p_nxge_ldg_t ldgp; 6233 p_nxge_intr_t intrp; 6234 ddi_intr_handler_t *inthandler; 6235 void *arg1, *arg2; 6236 int behavior; 6237 int nintrs, navail; 6238 int nactual, nrequired; 6239 int inum = 0; 6240 int x, y; 6241 int ddi_status = DDI_SUCCESS; 6242 nxge_status_t status = NXGE_OK; 6243 6244 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix")); 6245 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6246 intrp->start_inum = 0; 6247 6248 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 6249 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 6250 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6251 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 6252 "nintrs: %d", status, nintrs)); 6253 return (NXGE_ERROR | NXGE_DDI_FAILED); 6254 } 6255 6256 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 6257 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 6258 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6259 "ddi_intr_get_navail() failed, status: 0x%x%, " 6260 "nintrs: %d", ddi_status, navail)); 6261 return (NXGE_ERROR | NXGE_DDI_FAILED); 6262 } 6263 6264 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6265 "ddi_intr_get_navail() returned: nintrs %d, naavail %d", 6266 nintrs, navail)); 6267 6268 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 6269 DDI_INTR_ALLOC_NORMAL); 6270 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 6271 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 6272 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 6273 navail, &nactual, behavior); 6274 if (ddi_status != DDI_SUCCESS || nactual == 0) { 6275 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6276 " ddi_intr_alloc() failed: %d", 6277 ddi_status)); 6278 kmem_free(intrp->htable, intrp->intr_size); 6279 return (NXGE_ERROR | NXGE_DDI_FAILED); 6280 } 6281 6282 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 6283 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 6284 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6285 " ddi_intr_get_pri() failed: %d", 6286 ddi_status)); 6287 /* Free already allocated interrupts */ 6288 for (y = 0; y < nactual; y++) { 6289 (void) ddi_intr_free(intrp->htable[y]); 6290 } 6291 6292 kmem_free(intrp->htable, intrp->intr_size); 6293 return (NXGE_ERROR | NXGE_DDI_FAILED); 6294 } 6295 6296 nrequired = 0; 6297 switch (nxgep->niu_type) { 6298 default: 6299 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 6300 break; 6301 6302 case N2_NIU: 6303 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 6304 break; 6305 } 6306 6307 if (status != NXGE_OK) { 6308 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6309 "nxge_add_intrs_adv_type_fix:nxge_ldgv_init " 6310 "failed: 0x%x", status)); 6311 /* Free already allocated interrupts */ 6312 for (y = 0; y < nactual; y++) { 6313 (void) ddi_intr_free(intrp->htable[y]); 6314 } 6315 6316 kmem_free(intrp->htable, intrp->intr_size); 6317 return (status); 6318 } 6319 6320 ldgp = nxgep->ldgvp->ldgp; 6321 for (x = 0; x < nrequired; x++, ldgp++) { 6322 ldgp->vector = (uint8_t)x; 6323 if (nxgep->niu_type != N2_NIU) { 6324 ldgp->intdata = SID_DATA(ldgp->func, x); 6325 } 6326 6327 arg1 = ldgp->ldvp; 6328 arg2 = nxgep; 6329 if (ldgp->nldvs == 1) { 6330 inthandler = ldgp->ldvp->ldv_intr_handler; 6331 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6332 "nxge_add_intrs_adv_type_fix: " 6333 "1-1 int handler(%d) ldg %d ldv %d " 6334 "arg1 $%p arg2 $%p\n", 6335 x, ldgp->ldg, ldgp->ldvp->ldv, 6336 arg1, arg2)); 6337 } else if (ldgp->nldvs > 1) { 6338 inthandler = ldgp->sys_intr_handler; 6339 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6340 "nxge_add_intrs_adv_type_fix: " 6341 "shared ldv %d int handler(%d) ldv %d ldg %d" 6342 "arg1 0x%016llx arg2 0x%016llx\n", 6343 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6344 arg1, arg2)); 6345 } else { 6346 inthandler = NULL; 6347 } 6348 6349 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6350 inthandler, arg1, arg2)) != DDI_SUCCESS) { 6351 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6352 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6353 "status 0x%x", x, ddi_status)); 6354 for (y = 0; y < intrp->intr_added; y++) { 6355 (void) ddi_intr_remove_handler( 6356 intrp->htable[y]); 6357 } 6358 for (y = 0; y < nactual; y++) { 6359 (void) ddi_intr_free(intrp->htable[y]); 6360 } 6361 /* Free already allocated intr */ 6362 kmem_free(intrp->htable, intrp->intr_size); 6363 6364 (void) nxge_ldgv_uninit(nxgep); 6365 6366 return (NXGE_ERROR | NXGE_DDI_FAILED); 6367 } 6368 6369 ldgp->htable_idx = x; 6370 intrp->intr_added++; 6371 } 6372 6373 intrp->msi_intx_cnt = nactual; 6374 6375 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6376 6377 status = nxge_intr_ldgv_init(nxgep); 6378 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6379 6380 return (status); 6381 } 6382 6383 static void 6384 nxge_remove_intrs(p_nxge_t nxgep) 6385 { 6386 int i, inum; 6387 p_nxge_intr_t intrp; 6388 6389 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6390 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6391 if (!intrp->intr_registered) { 6392 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6393 "<== nxge_remove_intrs: interrupts not registered")); 6394 return; 6395 } 6396 6397 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 6398 6399 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6400 (void) ddi_intr_block_disable(intrp->htable, 6401 intrp->intr_added); 6402 } else { 6403 for (i = 0; i < intrp->intr_added; i++) { 6404 (void) ddi_intr_disable(intrp->htable[i]); 6405 } 6406 } 6407 6408 for (inum = 0; inum < intrp->intr_added; inum++) { 6409 if (intrp->htable[inum]) { 6410 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6411 } 6412 } 6413 6414 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6415 if (intrp->htable[inum]) { 6416 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6417 "nxge_remove_intrs: ddi_intr_free inum %d " 6418 "msi_intx_cnt %d intr_added %d", 6419 inum, 6420 intrp->msi_intx_cnt, 6421 intrp->intr_added)); 6422 6423 (void) ddi_intr_free(intrp->htable[inum]); 6424 } 6425 } 6426 6427 kmem_free(intrp->htable, intrp->intr_size); 6428 intrp->intr_registered = B_FALSE; 6429 intrp->intr_enabled = B_FALSE; 6430 intrp->msi_intx_cnt = 0; 6431 intrp->intr_added = 0; 6432 6433 (void) nxge_ldgv_uninit(nxgep); 6434 6435 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6436 "#msix-request"); 6437 6438 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6439 } 6440 6441 /*ARGSUSED*/ 6442 static void 6443 nxge_intrs_enable(p_nxge_t nxgep) 6444 { 6445 p_nxge_intr_t intrp; 6446 int i; 6447 int status; 6448 6449 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6450 6451 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6452 6453 if (!intrp->intr_registered) { 6454 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6455 "interrupts are not registered")); 6456 return; 6457 } 6458 6459 if (intrp->intr_enabled) { 6460 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6461 "<== nxge_intrs_enable: already enabled")); 6462 return; 6463 } 6464 6465 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6466 status = ddi_intr_block_enable(intrp->htable, 6467 intrp->intr_added); 6468 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6469 "block enable - status 0x%x total inums #%d\n", 6470 status, intrp->intr_added)); 6471 } else { 6472 for (i = 0; i < intrp->intr_added; i++) { 6473 status = ddi_intr_enable(intrp->htable[i]); 6474 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6475 "ddi_intr_enable:enable - status 0x%x " 6476 "total inums %d enable inum #%d\n", 6477 status, intrp->intr_added, i)); 6478 if (status == DDI_SUCCESS) { 6479 intrp->intr_enabled = B_TRUE; 6480 } 6481 } 6482 } 6483 6484 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable")); 6485 } 6486 6487 /*ARGSUSED*/ 6488 static void 6489 nxge_intrs_disable(p_nxge_t nxgep) 6490 { 6491 p_nxge_intr_t intrp; 6492 int i; 6493 6494 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable")); 6495 6496 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6497 6498 if (!intrp->intr_registered) { 6499 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: " 6500 "interrupts are not registered")); 6501 return; 6502 } 6503 6504 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6505 (void) ddi_intr_block_disable(intrp->htable, 6506 intrp->intr_added); 6507 } else { 6508 for (i = 0; i < intrp->intr_added; i++) { 6509 (void) ddi_intr_disable(intrp->htable[i]); 6510 } 6511 } 6512 6513 intrp->intr_enabled = B_FALSE; 6514 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable")); 6515 } 6516 6517 nxge_status_t 6518 nxge_mac_register(p_nxge_t nxgep) 6519 { 6520 mac_register_t *macp; 6521 int status; 6522 6523 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register")); 6524 6525 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 6526 return (NXGE_ERROR); 6527 6528 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 6529 macp->m_driver = nxgep; 6530 macp->m_dip = nxgep->dip; 6531 if (!isLDOMguest(nxgep)) { 6532 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; 6533 } else { 6534 macp->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP); 6535 macp->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP); 6536 (void) memset(macp->m_src_addr, 0xff, sizeof (MAXMACADDRLEN)); 6537 } 6538 macp->m_callbacks = &nxge_m_callbacks; 6539 macp->m_min_sdu = 0; 6540 nxgep->mac.default_mtu = nxgep->mac.maxframesize - 6541 NXGE_EHEADER_VLAN_CRC; 6542 macp->m_max_sdu = nxgep->mac.default_mtu; 6543 macp->m_margin = VLAN_TAGSZ; 6544 macp->m_priv_props = nxge_priv_props; 6545 if (isLDOMguest(nxgep)) 6546 macp->m_v12n = MAC_VIRT_LEVEL1; 6547 else 6548 macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1; 6549 6550 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 6551 "==> nxge_mac_register: instance %d " 6552 "max_sdu %d margin %d maxframe %d (header %d)", 6553 nxgep->instance, 6554 macp->m_max_sdu, macp->m_margin, 6555 nxgep->mac.maxframesize, 6556 NXGE_EHEADER_VLAN_CRC)); 6557 6558 status = mac_register(macp, &nxgep->mach); 6559 if (isLDOMguest(nxgep)) { 6560 KMEM_FREE(macp->m_src_addr, MAXMACADDRLEN); 6561 KMEM_FREE(macp->m_dst_addr, MAXMACADDRLEN); 6562 } 6563 mac_free(macp); 6564 6565 if (status != 0) { 6566 cmn_err(CE_WARN, 6567 "!nxge_mac_register failed (status %d instance %d)", 6568 status, nxgep->instance); 6569 return (NXGE_ERROR); 6570 } 6571 6572 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success " 6573 "(instance %d)", nxgep->instance)); 6574 6575 return (NXGE_OK); 6576 } 6577 6578 void 6579 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp) 6580 { 6581 ssize_t size; 6582 mblk_t *nmp; 6583 uint8_t blk_id; 6584 uint8_t chan; 6585 uint32_t err_id; 6586 err_inject_t *eip; 6587 6588 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject")); 6589 6590 size = 1024; 6591 nmp = mp->b_cont; 6592 eip = (err_inject_t *)nmp->b_rptr; 6593 blk_id = eip->blk_id; 6594 err_id = eip->err_id; 6595 chan = eip->chan; 6596 cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id); 6597 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id); 6598 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 6599 switch (blk_id) { 6600 case MAC_BLK_ID: 6601 break; 6602 case TXMAC_BLK_ID: 6603 break; 6604 case RXMAC_BLK_ID: 6605 break; 6606 case MIF_BLK_ID: 6607 break; 6608 case IPP_BLK_ID: 6609 nxge_ipp_inject_err(nxgep, err_id); 6610 break; 6611 case TXC_BLK_ID: 6612 nxge_txc_inject_err(nxgep, err_id); 6613 break; 6614 case TXDMA_BLK_ID: 6615 nxge_txdma_inject_err(nxgep, err_id, chan); 6616 break; 6617 case RXDMA_BLK_ID: 6618 nxge_rxdma_inject_err(nxgep, err_id, chan); 6619 break; 6620 case ZCP_BLK_ID: 6621 nxge_zcp_inject_err(nxgep, err_id); 6622 break; 6623 case ESPC_BLK_ID: 6624 break; 6625 case FFLP_BLK_ID: 6626 break; 6627 case PHY_BLK_ID: 6628 break; 6629 case ETHER_SERDES_BLK_ID: 6630 break; 6631 case PCIE_SERDES_BLK_ID: 6632 break; 6633 case VIR_BLK_ID: 6634 break; 6635 } 6636 6637 nmp->b_wptr = nmp->b_rptr + size; 6638 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 6639 6640 miocack(wq, mp, (int)size, 0); 6641 } 6642 6643 static int 6644 nxge_init_common_dev(p_nxge_t nxgep) 6645 { 6646 p_nxge_hw_list_t hw_p; 6647 dev_info_t *p_dip; 6648 6649 ASSERT(nxgep != NULL); 6650 6651 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 6652 6653 p_dip = nxgep->p_dip; 6654 MUTEX_ENTER(&nxge_common_lock); 6655 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6656 "==> nxge_init_common_dev:func # %d", 6657 nxgep->function_num)); 6658 /* 6659 * Loop through existing per neptune hardware list. 6660 */ 6661 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6662 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6663 "==> nxge_init_common_device:func # %d " 6664 "hw_p $%p parent dip $%p", 6665 nxgep->function_num, 6666 hw_p, 6667 p_dip)); 6668 if (hw_p->parent_devp == p_dip) { 6669 nxgep->nxge_hw_p = hw_p; 6670 hw_p->ndevs++; 6671 hw_p->nxge_p[nxgep->function_num] = nxgep; 6672 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6673 "==> nxge_init_common_device:func # %d " 6674 "hw_p $%p parent dip $%p " 6675 "ndevs %d (found)", 6676 nxgep->function_num, 6677 hw_p, 6678 p_dip, 6679 hw_p->ndevs)); 6680 break; 6681 } 6682 } 6683 6684 if (hw_p == NULL) { 6685 6686 char **prop_val; 6687 uint_t prop_len; 6688 int i; 6689 6690 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6691 "==> nxge_init_common_device:func # %d " 6692 "parent dip $%p (new)", 6693 nxgep->function_num, 6694 p_dip)); 6695 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 6696 hw_p->parent_devp = p_dip; 6697 hw_p->magic = NXGE_NEPTUNE_MAGIC; 6698 nxgep->nxge_hw_p = hw_p; 6699 hw_p->ndevs++; 6700 hw_p->nxge_p[nxgep->function_num] = nxgep; 6701 hw_p->next = nxge_hw_list; 6702 if (nxgep->niu_type == N2_NIU) { 6703 hw_p->niu_type = N2_NIU; 6704 hw_p->platform_type = P_NEPTUNE_NIU; 6705 hw_p->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY; 6706 } else { 6707 hw_p->niu_type = NIU_TYPE_NONE; 6708 hw_p->platform_type = P_NEPTUNE_NONE; 6709 hw_p->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY; 6710 } 6711 6712 hw_p->tcam = KMEM_ZALLOC(sizeof (tcam_flow_spec_t) * 6713 hw_p->tcam_size, KM_SLEEP); 6714 6715 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 6716 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 6717 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 6718 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 6719 6720 nxge_hw_list = hw_p; 6721 6722 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0, 6723 "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) { 6724 for (i = 0; i < prop_len; i++) { 6725 if ((strcmp((caddr_t)prop_val[i], 6726 NXGE_ROCK_COMPATIBLE) == 0)) { 6727 hw_p->platform_type = P_NEPTUNE_ROCK; 6728 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6729 "ROCK hw_p->platform_type %d", 6730 hw_p->platform_type)); 6731 break; 6732 } 6733 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6734 "nxge_init_common_dev: read compatible" 6735 " property[%d] val[%s]", 6736 i, (caddr_t)prop_val[i])); 6737 } 6738 } 6739 6740 ddi_prop_free(prop_val); 6741 6742 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list); 6743 } 6744 6745 MUTEX_EXIT(&nxge_common_lock); 6746 6747 nxgep->platform_type = hw_p->platform_type; 6748 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d", 6749 nxgep->platform_type)); 6750 if (nxgep->niu_type != N2_NIU) { 6751 nxgep->niu_type = hw_p->niu_type; 6752 } 6753 6754 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6755 "==> nxge_init_common_device (nxge_hw_list) $%p", 6756 nxge_hw_list)); 6757 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device")); 6758 6759 return (NXGE_OK); 6760 } 6761 6762 static void 6763 nxge_uninit_common_dev(p_nxge_t nxgep) 6764 { 6765 p_nxge_hw_list_t hw_p, h_hw_p; 6766 p_nxge_dma_pt_cfg_t p_dma_cfgp; 6767 p_nxge_hw_pt_cfg_t p_cfgp; 6768 dev_info_t *p_dip; 6769 6770 ASSERT(nxgep != NULL); 6771 6772 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device")); 6773 if (nxgep->nxge_hw_p == NULL) { 6774 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6775 "<== nxge_uninit_common_device (no common)")); 6776 return; 6777 } 6778 6779 MUTEX_ENTER(&nxge_common_lock); 6780 h_hw_p = nxge_hw_list; 6781 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6782 p_dip = hw_p->parent_devp; 6783 if (nxgep->nxge_hw_p == hw_p && 6784 p_dip == nxgep->p_dip && 6785 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC && 6786 hw_p->magic == NXGE_NEPTUNE_MAGIC) { 6787 6788 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6789 "==> nxge_uninit_common_device:func # %d " 6790 "hw_p $%p parent dip $%p " 6791 "ndevs %d (found)", 6792 nxgep->function_num, 6793 hw_p, 6794 p_dip, 6795 hw_p->ndevs)); 6796 6797 /* 6798 * Release the RDC table, a shared resoruce 6799 * of the nxge hardware. The RDC table was 6800 * assigned to this instance of nxge in 6801 * nxge_use_cfg_dma_config(). 6802 */ 6803 if (!isLDOMguest(nxgep)) { 6804 p_dma_cfgp = 6805 (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 6806 p_cfgp = 6807 (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config; 6808 (void) nxge_fzc_rdc_tbl_unbind(nxgep, 6809 p_cfgp->def_mac_rxdma_grpid); 6810 6811 /* Cleanup any outstanding groups. */ 6812 nxge_grp_cleanup(nxgep); 6813 } 6814 6815 if (hw_p->ndevs) { 6816 hw_p->ndevs--; 6817 } 6818 hw_p->nxge_p[nxgep->function_num] = NULL; 6819 if (!hw_p->ndevs) { 6820 KMEM_FREE(hw_p->tcam, 6821 sizeof (tcam_flow_spec_t) * 6822 hw_p->tcam_size); 6823 MUTEX_DESTROY(&hw_p->nxge_vlan_lock); 6824 MUTEX_DESTROY(&hw_p->nxge_tcam_lock); 6825 MUTEX_DESTROY(&hw_p->nxge_cfg_lock); 6826 MUTEX_DESTROY(&hw_p->nxge_mdio_lock); 6827 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6828 "==> nxge_uninit_common_device: " 6829 "func # %d " 6830 "hw_p $%p parent dip $%p " 6831 "ndevs %d (last)", 6832 nxgep->function_num, 6833 hw_p, 6834 p_dip, 6835 hw_p->ndevs)); 6836 6837 nxge_hio_uninit(nxgep); 6838 6839 if (hw_p == nxge_hw_list) { 6840 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6841 "==> nxge_uninit_common_device:" 6842 "remove head func # %d " 6843 "hw_p $%p parent dip $%p " 6844 "ndevs %d (head)", 6845 nxgep->function_num, 6846 hw_p, 6847 p_dip, 6848 hw_p->ndevs)); 6849 nxge_hw_list = hw_p->next; 6850 } else { 6851 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6852 "==> nxge_uninit_common_device:" 6853 "remove middle func # %d " 6854 "hw_p $%p parent dip $%p " 6855 "ndevs %d (middle)", 6856 nxgep->function_num, 6857 hw_p, 6858 p_dip, 6859 hw_p->ndevs)); 6860 h_hw_p->next = hw_p->next; 6861 } 6862 6863 nxgep->nxge_hw_p = NULL; 6864 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t)); 6865 } 6866 break; 6867 } else { 6868 h_hw_p = hw_p; 6869 } 6870 } 6871 6872 MUTEX_EXIT(&nxge_common_lock); 6873 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6874 "==> nxge_uninit_common_device (nxge_hw_list) $%p", 6875 nxge_hw_list)); 6876 6877 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<= nxge_uninit_common_device")); 6878 } 6879 6880 /* 6881 * Determines the number of ports from the niu_type or the platform type. 6882 * Returns the number of ports, or returns zero on failure. 6883 */ 6884 6885 int 6886 nxge_get_nports(p_nxge_t nxgep) 6887 { 6888 int nports = 0; 6889 6890 switch (nxgep->niu_type) { 6891 case N2_NIU: 6892 case NEPTUNE_2_10GF: 6893 nports = 2; 6894 break; 6895 case NEPTUNE_4_1GC: 6896 case NEPTUNE_2_10GF_2_1GC: 6897 case NEPTUNE_1_10GF_3_1GC: 6898 case NEPTUNE_1_1GC_1_10GF_2_1GC: 6899 case NEPTUNE_2_10GF_2_1GRF: 6900 nports = 4; 6901 break; 6902 default: 6903 switch (nxgep->platform_type) { 6904 case P_NEPTUNE_NIU: 6905 case P_NEPTUNE_ATLAS_2PORT: 6906 nports = 2; 6907 break; 6908 case P_NEPTUNE_ATLAS_4PORT: 6909 case P_NEPTUNE_MARAMBA_P0: 6910 case P_NEPTUNE_MARAMBA_P1: 6911 case P_NEPTUNE_ROCK: 6912 case P_NEPTUNE_ALONSO: 6913 nports = 4; 6914 break; 6915 default: 6916 break; 6917 } 6918 break; 6919 } 6920 6921 return (nports); 6922 } 6923 6924 /* 6925 * The following two functions are to support 6926 * PSARC/2007/453 MSI-X interrupt limit override. 6927 */ 6928 static int 6929 nxge_create_msi_property(p_nxge_t nxgep) 6930 { 6931 int nmsi; 6932 extern int ncpus; 6933 6934 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property")); 6935 6936 switch (nxgep->mac.portmode) { 6937 case PORT_10G_COPPER: 6938 case PORT_10G_FIBER: 6939 case PORT_10G_TN1010: 6940 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 6941 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 6942 /* 6943 * The maximum MSI-X requested will be 8. 6944 * If the # of CPUs is less than 8, we will request 6945 * # MSI-X based on the # of CPUs (default). 6946 */ 6947 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6948 "==>nxge_create_msi_property (10G): nxge_msix_10g_intrs %d", 6949 nxge_msix_10g_intrs)); 6950 if ((nxge_msix_10g_intrs == 0) || 6951 (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) { 6952 nmsi = NXGE_MSIX_REQUEST_10G; 6953 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6954 "==>nxge_create_msi_property (10G): reset to 8")); 6955 } else { 6956 nmsi = nxge_msix_10g_intrs; 6957 } 6958 6959 /* 6960 * If # of interrupts requested is 8 (default), 6961 * the checking of the number of cpus will be 6962 * be maintained. 6963 */ 6964 if ((nmsi == NXGE_MSIX_REQUEST_10G) && 6965 (ncpus < nmsi)) { 6966 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6967 "==>nxge_create_msi_property (10G): reset to 8")); 6968 nmsi = ncpus; 6969 } 6970 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6971 "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)", 6972 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6973 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6974 break; 6975 6976 default: 6977 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 6978 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 6979 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6980 "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d", 6981 nxge_msix_1g_intrs)); 6982 if ((nxge_msix_1g_intrs == 0) || 6983 (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) { 6984 nmsi = NXGE_MSIX_REQUEST_1G; 6985 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6986 "==>nxge_create_msi_property (1G): reset to 2")); 6987 } else { 6988 nmsi = nxge_msix_1g_intrs; 6989 } 6990 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6991 "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)", 6992 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6993 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6994 break; 6995 } 6996 6997 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property")); 6998 return (nmsi); 6999 } 7000 7001 /* 7002 * The following is a software around for the Neptune hardware's 7003 * interrupt bugs; The Neptune hardware may generate spurious interrupts when 7004 * an interrupr handler is removed. 7005 */ 7006 #define NXGE_PCI_PORT_LOGIC_OFFSET 0x98 7007 #define NXGE_PIM_RESET (1ULL << 29) 7008 #define NXGE_GLU_RESET (1ULL << 30) 7009 #define NXGE_NIU_RESET (1ULL << 31) 7010 #define NXGE_PCI_RESET_ALL (NXGE_PIM_RESET | \ 7011 NXGE_GLU_RESET | \ 7012 NXGE_NIU_RESET) 7013 7014 #define NXGE_WAIT_QUITE_TIME 200000 7015 #define NXGE_WAIT_QUITE_RETRY 40 7016 #define NXGE_PCI_RESET_WAIT 1000000 /* one second */ 7017 7018 static void 7019 nxge_niu_peu_reset(p_nxge_t nxgep) 7020 { 7021 uint32_t rvalue; 7022 p_nxge_hw_list_t hw_p; 7023 p_nxge_t fnxgep; 7024 int i, j; 7025 7026 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset")); 7027 if ((hw_p = nxgep->nxge_hw_p) == NULL) { 7028 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 7029 "==> nxge_niu_peu_reset: NULL hardware pointer")); 7030 return; 7031 } 7032 7033 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 7034 "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d", 7035 hw_p->flags, nxgep->nxge_link_poll_timerid, 7036 nxgep->nxge_timerid)); 7037 7038 MUTEX_ENTER(&hw_p->nxge_cfg_lock); 7039 /* 7040 * Make sure other instances from the same hardware 7041 * stop sending PIO and in quiescent state. 7042 */ 7043 for (i = 0; i < NXGE_MAX_PORTS; i++) { 7044 fnxgep = hw_p->nxge_p[i]; 7045 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 7046 "==> nxge_niu_peu_reset: checking entry %d " 7047 "nxgep $%p", i, fnxgep)); 7048 #ifdef NXGE_DEBUG 7049 if (fnxgep) { 7050 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 7051 "==> nxge_niu_peu_reset: entry %d (function %d) " 7052 "link timer id %d hw timer id %d", 7053 i, fnxgep->function_num, 7054 fnxgep->nxge_link_poll_timerid, 7055 fnxgep->nxge_timerid)); 7056 } 7057 #endif 7058 if (fnxgep && fnxgep != nxgep && 7059 (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) { 7060 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 7061 "==> nxge_niu_peu_reset: checking $%p " 7062 "(function %d) timer ids", 7063 fnxgep, fnxgep->function_num)); 7064 for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) { 7065 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 7066 "==> nxge_niu_peu_reset: waiting")); 7067 NXGE_DELAY(NXGE_WAIT_QUITE_TIME); 7068 if (!fnxgep->nxge_timerid && 7069 !fnxgep->nxge_link_poll_timerid) { 7070 break; 7071 } 7072 } 7073 NXGE_DELAY(NXGE_WAIT_QUITE_TIME); 7074 if (fnxgep->nxge_timerid || 7075 fnxgep->nxge_link_poll_timerid) { 7076 MUTEX_EXIT(&hw_p->nxge_cfg_lock); 7077 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 7078 "<== nxge_niu_peu_reset: cannot reset " 7079 "hardware (devices are still in use)")); 7080 return; 7081 } 7082 } 7083 } 7084 7085 if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) { 7086 hw_p->flags |= COMMON_RESET_NIU_PCI; 7087 rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh, 7088 NXGE_PCI_PORT_LOGIC_OFFSET); 7089 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 7090 "nxge_niu_peu_reset: read offset 0x%x (%d) " 7091 "(data 0x%x)", 7092 NXGE_PCI_PORT_LOGIC_OFFSET, 7093 NXGE_PCI_PORT_LOGIC_OFFSET, 7094 rvalue)); 7095 7096 rvalue |= NXGE_PCI_RESET_ALL; 7097 pci_config_put32(nxgep->dev_regs->nxge_pciregh, 7098 NXGE_PCI_PORT_LOGIC_OFFSET, rvalue); 7099 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 7100 "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x", 7101 rvalue)); 7102 7103 NXGE_DELAY(NXGE_PCI_RESET_WAIT); 7104 } 7105 7106 MUTEX_EXIT(&hw_p->nxge_cfg_lock); 7107 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset")); 7108 } 7109 7110 static void 7111 nxge_set_pci_replay_timeout(p_nxge_t nxgep) 7112 { 7113 p_dev_regs_t dev_regs; 7114 uint32_t value; 7115 7116 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout")); 7117 7118 if (!nxge_set_replay_timer) { 7119 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7120 "==> nxge_set_pci_replay_timeout: will not change " 7121 "the timeout")); 7122 return; 7123 } 7124 7125 dev_regs = nxgep->dev_regs; 7126 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7127 "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p", 7128 dev_regs, dev_regs->nxge_pciregh)); 7129 7130 if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) { 7131 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7132 "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or " 7133 "no PCI handle", 7134 dev_regs)); 7135 return; 7136 } 7137 value = (pci_config_get32(dev_regs->nxge_pciregh, 7138 PCI_REPLAY_TIMEOUT_CFG_OFFSET) | 7139 (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT)); 7140 7141 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7142 "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x " 7143 "(timeout value to set 0x%x at offset 0x%x) value 0x%x", 7144 pci_config_get32(dev_regs->nxge_pciregh, 7145 PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout, 7146 PCI_REPLAY_TIMEOUT_CFG_OFFSET, value)); 7147 7148 pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET, 7149 value); 7150 7151 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7152 "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x", 7153 pci_config_get32(dev_regs->nxge_pciregh, 7154 PCI_REPLAY_TIMEOUT_CFG_OFFSET))); 7155 7156 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout")); 7157 } 7158 7159 /* 7160 * quiesce(9E) entry point. 7161 * 7162 * This function is called when the system is single-threaded at high 7163 * PIL with preemption disabled. Therefore, this function must not be 7164 * blocked. 7165 * 7166 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 7167 * DDI_FAILURE indicates an error condition and should almost never happen. 7168 */ 7169 static int 7170 nxge_quiesce(dev_info_t *dip) 7171 { 7172 int instance = ddi_get_instance(dip); 7173 p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 7174 7175 if (nxgep == NULL) 7176 return (DDI_FAILURE); 7177 7178 /* Turn off debugging */ 7179 nxge_debug_level = NO_DEBUG; 7180 nxgep->nxge_debug_level = NO_DEBUG; 7181 npi_debug_level = NO_DEBUG; 7182 7183 /* 7184 * Stop link monitor only when linkchkmod is interrupt based 7185 */ 7186 if (nxgep->mac.linkchkmode == LINKCHK_INTR) { 7187 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 7188 } 7189 7190 (void) nxge_intr_hw_disable(nxgep); 7191 7192 /* 7193 * Reset the receive MAC side. 7194 */ 7195 (void) nxge_rx_mac_disable(nxgep); 7196 7197 /* Disable and soft reset the IPP */ 7198 if (!isLDOMguest(nxgep)) 7199 (void) nxge_ipp_disable(nxgep); 7200 7201 /* 7202 * Reset the transmit/receive DMA side. 7203 */ 7204 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 7205 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 7206 7207 /* 7208 * Reset the transmit MAC side. 7209 */ 7210 (void) nxge_tx_mac_disable(nxgep); 7211 7212 return (DDI_SUCCESS); 7213 } 7214