/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include	<sys/nxge/nxge_impl.h>
#include	<sys/nxge/nxge_hio.h>
#include	<sys/nxge/nxge_rxdma.h>
#include	<sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use OBP-published properties */
uint32_t nxge_use_rdc_intr = 1;		/* debug flag to assign RDC intr */
/*
 * PSARC/2007/453 MSI-X interrupt limit override
 */
uint32_t nxge_msi_enable = 2;

/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed. If this flag is enabled, the
 * driver resets the hardware when devices are being detached.
 */
uint32_t nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware
 * checksum bugs that affect packet transmission
 * and receive:
 *
 * Usage of nxge_cksum_offload:
 *
 *  (1) nxge_cksum_offload = 0 (default):
 *	- transmit packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: the driver computes the software checksum
 *	       based on the partial checksum computed
 *	       by the IP layer.
 *	- receive packets:
 *	  TCP: marks packet checksum flags based on the hardware result.
 *	  UDP: does not mark checksum flags.
 *
 *  (2) nxge_cksum_offload = 1:
 *	- transmit packets:
 *	  TCP/UDP: uses the hardware checksum feature.
 *	- receive packets:
 *	  TCP/UDP: marks packet checksum flags based on the hardware result.
 *
 *  (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksums for both TCP and UDP will be computed
 *	  by the stack.
 *	- Software LSO is not allowed in this case.
 *
 *  (4) nxge_cksum_offload > 2:
 *	- Treated as if it were set to 2
 *	  (the stack computes the checksum).
 *
 *  (5) If the hardware bug is fixed, this workaround
 *	needs to be updated accordingly to reflect
 *	the new hardware revision.
 */
uint32_t nxge_cksum_offload = 0;
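
/*
 * Illustrative /etc/system entry (an example, not a recommendation):
 * to offload both TCP and UDP checksums to the hardware, add
 *
 *	set nxge:nxge_cksum_offload = 1
 *
 * and reboot. The default of 0 remains the safe setting while the
 * hardware checksum bugs described above are present.
 */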

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint16_t nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/* MAX LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;
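
/*
 * Example /etc/system entries for the tunables above (the values
 * shown are illustrative only, not recommendations):
 *
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_lso_max = 65535
 */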

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_buf_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to the tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if defined(__sparc)
uint32_t nxge_use_kmem_alloc = 1;
#elif defined(__i386)
uint32_t nxge_use_kmem_alloc = 0;
#else
uint32_t nxge_use_kmem_alloc = 1;
#endif

rtrace_t npi_rtracebuf;

/*
 * The hardware sometimes fails to allow enough time for the link partner
 * to send an acknowledgement for packets that the hardware sent to it.
 * In those instances the hardware resends the packets earlier than it
 * should. This behavior caused some switches to acknowledge the wrong
 * packets, which triggered a fatal error.
 * This software workaround sets the replay timer to a value
 * suggested by the hardware team.
 *
 * PCI config space replay timer register:
 * the replay timeout value below is 0xc
 * for bits 18:14.
 */
#define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
#define	PCI_REPLAY_TIMEOUT_SHIFT	14

uint32_t nxge_set_replay_timer = 1;
uint32_t nxge_replay_timeout = 0xc;
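
/*
 * A minimal sketch of how the workaround applies the timeout; the
 * actual update is done in nxge_set_pci_replay_timeout() below. The
 * 5-bit field mask shown here is an assumption based on bits 18:14:
 *
 *	uint32_t mask = 0x1f << PCI_REPLAY_TIMEOUT_SHIFT;
 *	uint32_t val;
 *
 *	val = pci_config_get32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	val = (val & ~mask) |
 *	    (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, val);
 */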

/*
 * Transmit serialization can sometimes sleep longer than it should
 * before calling the driver transmit function. The performance group
 * suggested a tunable to cap the maximum wait time when needed;
 * the default is 1 tick.
 */
uint32_t nxge_tx_serial_maxsleep = 1;

#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
/*
 * The following is the default API supported:
 * major 1 and minor 1.
 *
 * Please update MAX_NIU_MAJORS, MAX_NIU_MINORS, and the supported
 * minor numbers when newer Hypervisor API interfaces are added.
 * Also update nxge_hsvc_register() if needed.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);
static int nxge_quiesce(dev_info_t *);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_remove(void *arg, int slot);
extern void nxge_grp_cleanup(p_nxge_t nxge);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
	char *, caddr_t);
#if defined(sun4v)
extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep);
extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm);
#endif

/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	int slot, int rdctbl, boolean_t usetbl);
void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
	boolean_t factory);

static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, void *);
static void nxge_m_propinfo(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);
static void nxge_priv_propinfo(const char *, mac_prop_info_handle_t);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
    const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, void *);
static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
    mac_ring_info_t *, mac_ring_handle_t);
static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);
static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);

static void nxge_niu_peu_reset(p_nxge_t nxgep);
static void nxge_set_pci_replay_timeout(nxge_t *);

char *nxge_priv_props[] = {
	"_adv_10gfdx_cap",
	"_adv_pause_cap",
	"_function_number",
	"_fw_version",
	"_port_mode",
	"_hot_swap_phy",
	"_rxdma_intr_time",
	"_rxdma_intr_pkts",
	"_class_opt_ipv4_tcp",
	"_class_opt_ipv4_udp",
	"_class_opt_ipv4_ah",
	"_class_opt_ipv4_sctp",
	"_class_opt_ipv6_tcp",
	"_class_opt_ipv6_udp",
	"_class_opt_ipv6_ah",
	"_class_opt_ipv6_sctp",
	"_soft_lso_enable",
	NULL
};

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	NULL,
	NULL,
	NULL,
	nxge_m_ioctl,
	nxge_m_getcapab,
	NULL,
	NULL,
	nxge_m_setprop,
	nxge_m_getprop,
	nxge_m_propinfo
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);
/*
 * For latency-sensitive applications, PAE and customers requested
 * tunables that let the user raise the number of interrupts in order
 * to spread them among multiple channels. The DDI framework limits
 * the maximum number of MSI-X resources that can be allocated to 8
 * (ddi_msix_alloc_limit). If more than 8 are requested,
 * ddi_msix_alloc_limit must be raised accordingly.
 * The default number of MSI interrupts is 8 for a 10G link
 * and 2 for a 1G link.
 */
#define	NXGE_MSIX_MAX_ALLOWED	32
uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
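
/*
 * Illustrative /etc/system entries (values are examples only): request
 * 16 MSI-X interrupts on a 10G port. Because this exceeds the DDI
 * default limit of 8, ddi_msix_alloc_limit must be raised as well:
 *
 *	set ddi_msix_alloc_limit = 16
 *	set nxge:nxge_msix_10g_intrs = 16
 */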

/*
 * These global variables control the message output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;
void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;
nxge_os_mutex_t nxgedebuglock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t,
	ddi_device_acc_attr_t *,
	ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes[] = {0x2000};
#else
size_t alloc_sizes[] = {0x1000, 0x2000, 0x4000, 0x8000,
    0x10000, 0x20000, 0x40000, 0x80000,
    0x100000, 0x200000, 0x400000, 0x800000,
    0x1000000, 0x2000000, 0x4000000};
#endif

/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);

static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_rx_dma_attr);

	/*
	 * Create & initialize the per-Neptune data structure
	 * (even if we're a guest).
	 */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	/*
	 * Software workaround: set the replay timer.
	 */
	if (nxgep->niu_type != N2_NIU) {
		nxge_set_pci_replay_timeout(nxgep);
	}

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail4;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
		 * internally, and the other 2 ports use BMAC (1G "Big" MAC).
		 * The two types of MACs have different characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup the Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);

		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * The address_hi (the first int in the reg property)
		 * contains the config handle, but bits 28-31 are
		 * OBP-specific info and must be removed.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
		ddi_prop_free(regp);
	}

	/*
	 * Set the defaults for the MTU size.
	 */
	nxge_hw_id_init(nxgep);

	if (isLDOMguest(nxgep)) {
		uchar_t *prop_val;
		uint_t prop_len;
		uint32_t max_frame_size;

		extern void nxge_get_logical_props(p_nxge_t);

		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
		nxgep->mac.portmode = PORT_LOGICAL;
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
		    "phy-type", "virtual transceiver");

		nxgep->nports = 1;
		nxgep->board_ver = 0;	/* XXX What? */

		/*
		 * The local-mac-address property tells us which
		 * specific MAC address the Hybrid resource is
		 * associated with.
		 */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "local-mac-address", &prop_val,
		    &prop_len) != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		if (prop_len != ETHERADDRL) {
			ddi_prop_free(prop_val);
			goto nxge_attach_fail5;
		}
		ether_copy(prop_val, nxgep->hio_mac_addr);
		ddi_prop_free(prop_val);
		nxge_get_logical_props(nxgep);

		/*
		 * Enable jumbo frames based on the "max-frame-size"
		 * property value.
		 */
		max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
		    nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "max-frame-size", NXGE_MTU_DEFAULT_MAX);
		if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
		    (max_frame_size <= TX_JUMBO_MTU)) {
			nxgep->mac.is_jumbo = B_TRUE;
			nxgep->mac.maxframesize = (uint16_t)max_frame_size;
			nxgep->mac.default_mtu = nxgep->mac.maxframesize -
			    NXGE_EHEADER_VLAN_CRC;
		}
	} else {
		status = nxge_xcvr_find(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
			    " Couldn't determine card type"
			    " .... exit "));
			goto nxge_attach_fail5;
		}

		status = nxge_get_config_properties(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "get_hw create failed"));
			goto nxge_attach_fail;
		}
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

	if (!isLDOMguest(nxgep))
		nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		/* Find our VR & channel sets. */
		status = nxge_hio_vr_add(nxgep);
		if (status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_hio_vr_add failed"));
			(void) hsvc_unregister(&nxgep->niu_hsvc);
			nxgep->niu_hsvc_available = B_FALSE;
			goto nxge_attach_fail;
		}
		goto nxge_attach_exit;
	}
#endif

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}

	/* If a guest, register with vio_net instead. */
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	/* nxge_link_monitor calls xcvr.check_link recursively */
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int status = DDI_SUCCESS;
	int instance;
	p_nxge_t nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}
1101 */ 1102 nxgep->niu_hsvc_available = B_FALSE; 1103 bcopy(&niu_hsvc, &nxgep->niu_hsvc, 1104 sizeof (hsvc_info_t)); 1105 1106 for (i = NIU_MAJOR_HI; i > 0; i--) { 1107 nxgep->niu_hsvc.hsvc_major = i; 1108 for (j = NIU_MINOR_HI; j >= 0; j--) { 1109 nxgep->niu_hsvc.hsvc_minor = j; 1110 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1111 "nxge_hsvc_register: %s: negotiating " 1112 "hypervisor services revision %d " 1113 "group: 0x%lx major: 0x%lx " 1114 "minor: 0x%lx", 1115 nxgep->niu_hsvc.hsvc_modname, 1116 nxgep->niu_hsvc.hsvc_rev, 1117 nxgep->niu_hsvc.hsvc_group, 1118 nxgep->niu_hsvc.hsvc_major, 1119 nxgep->niu_hsvc.hsvc_minor, 1120 nxgep->niu_min_ver)); 1121 1122 if ((status = hsvc_register(&nxgep->niu_hsvc, 1123 &nxgep->niu_min_ver)) == 0) { 1124 /* Use the supported minor */ 1125 nxgep->niu_hsvc.hsvc_minor = nxgep->niu_min_ver; 1126 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1127 "nxge_hsvc_register: %s: negotiated " 1128 "hypervisor services revision %d " 1129 "group: 0x%lx major: 0x%lx " 1130 "minor: 0x%lx (niu_min_ver 0x%lx)", 1131 nxgep->niu_hsvc.hsvc_modname, 1132 nxgep->niu_hsvc.hsvc_rev, 1133 nxgep->niu_hsvc.hsvc_group, 1134 nxgep->niu_hsvc.hsvc_major, 1135 nxgep->niu_hsvc.hsvc_minor, 1136 nxgep->niu_min_ver)); 1137 1138 nxgep->niu_hsvc_available = B_TRUE; 1139 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1140 "<== nxge_hsvc_register: " 1141 "NIU Hypervisor service enabled")); 1142 return (DDI_SUCCESS); 1143 } 1144 1145 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1146 "nxge_hsvc_register: %s: negotiated failed - " 1147 "try lower major number " 1148 "hypervisor services revision %d " 1149 "group: 0x%lx major: 0x%lx minor: 0x%lx " 1150 "errno: %d", 1151 nxgep->niu_hsvc.hsvc_modname, 1152 nxgep->niu_hsvc.hsvc_rev, 1153 nxgep->niu_hsvc.hsvc_group, 1154 nxgep->niu_hsvc.hsvc_major, 1155 nxgep->niu_hsvc.hsvc_minor, status)); 1156 } 1157 } 1158 1159 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1160 "nxge_hsvc_register: %s: cannot negotiate " 1161 "hypervisor services revision %d group: 0x%lx " 1162 "major: 0x%lx minor: 0x%lx errno: %d", 1163 niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev, 1164 niu_hsvc.hsvc_group, niu_hsvc.hsvc_major, 1165 niu_hsvc.hsvc_minor, status)); 1166 1167 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1168 "<== nxge_hsvc_register: Register to NIU Hypervisor failed")); 1169 1170 return (DDI_FAILURE); 1171 } 1172 #endif 1173 1174 static char n2_siu_name[] = "niu"; 1175 1176 static nxge_status_t 1177 nxge_map_regs(p_nxge_t nxgep) 1178 { 1179 int ddi_status = DDI_SUCCESS; 1180 p_dev_regs_t dev_regs; 1181 char buf[MAXPATHLEN + 1]; 1182 char *devname; 1183 #ifdef NXGE_DEBUG 1184 char *sysname; 1185 #endif 1186 off_t regsize; 1187 nxge_status_t status = NXGE_OK; 1188 #if !defined(_BIG_ENDIAN) 1189 off_t pci_offset; 1190 uint16_t pcie_devctl; 1191 #endif 1192 1193 if (isLDOMguest(nxgep)) { 1194 return (nxge_guest_regs_map(nxgep)); 1195 } 1196 1197 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs")); 1198 nxgep->dev_regs = NULL; 1199 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP); 1200 dev_regs->nxge_regh = NULL; 1201 dev_regs->nxge_pciregh = NULL; 1202 dev_regs->nxge_msix_regh = NULL; 1203 dev_regs->nxge_vir_regh = NULL; 1204 dev_regs->nxge_vir2_regh = NULL; 1205 nxgep->niu_type = NIU_TYPE_NONE; 1206 1207 devname = ddi_pathname(nxgep->dip, buf); 1208 ASSERT(strlen(devname) > 0); 1209 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1210 "nxge_map_regs: pathname devname %s", devname)); 1211 1212 /* 1213 * The driver is running on a N2-NIU system if devname is something 1214 * like "/niu@80/network@0" 1215 */ 1216 if (strstr(devname, n2_siu_name)) { 1217 

	/*
	 * Currently, the NIU Hypervisor API supports two major versions:
	 * version 1 and version 2.
	 * If the Hypervisor introduces a higher major or minor version,
	 * please update NIU_MAJOR_HI and NIU_MINOR_HI accordingly.
	 */
	nxgep->niu_hsvc_available = B_FALSE;
	bcopy(&niu_hsvc, &nxgep->niu_hsvc,
	    sizeof (hsvc_info_t));

	for (i = NIU_MAJOR_HI; i > 0; i--) {
		nxgep->niu_hsvc.hsvc_major = i;
		for (j = NIU_MINOR_HI; j >= 0; j--) {
			nxgep->niu_hsvc.hsvc_minor = j;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_hsvc_register: %s: negotiating "
			    "hypervisor services revision %d "
			    "group: 0x%lx major: 0x%lx "
			    "minor: 0x%lx",
			    nxgep->niu_hsvc.hsvc_modname,
			    nxgep->niu_hsvc.hsvc_rev,
			    nxgep->niu_hsvc.hsvc_group,
			    nxgep->niu_hsvc.hsvc_major,
			    nxgep->niu_hsvc.hsvc_minor,
			    nxgep->niu_min_ver));

			if ((status = hsvc_register(&nxgep->niu_hsvc,
			    &nxgep->niu_min_ver)) == 0) {
				/* Use the supported minor */
				nxgep->niu_hsvc.hsvc_minor =
				    nxgep->niu_min_ver;
				NXGE_DEBUG_MSG((nxgep, DDI_CTL,
				    "nxge_hsvc_register: %s: negotiated "
				    "hypervisor services revision %d "
				    "group: 0x%lx major: 0x%lx "
				    "minor: 0x%lx (niu_min_ver 0x%lx)",
				    nxgep->niu_hsvc.hsvc_modname,
				    nxgep->niu_hsvc.hsvc_rev,
				    nxgep->niu_hsvc.hsvc_group,
				    nxgep->niu_hsvc.hsvc_major,
				    nxgep->niu_hsvc.hsvc_minor,
				    nxgep->niu_min_ver));

				nxgep->niu_hsvc_available = B_TRUE;
				NXGE_DEBUG_MSG((nxgep, DDI_CTL,
				    "<== nxge_hsvc_register: "
				    "NIU Hypervisor service enabled"));
				return (DDI_SUCCESS);
			}

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_hsvc_register: %s: negotiation failed - "
			    "trying a lower major number "
			    "hypervisor services revision %d "
			    "group: 0x%lx major: 0x%lx minor: 0x%lx "
			    "errno: %d",
			    nxgep->niu_hsvc.hsvc_modname,
			    nxgep->niu_hsvc.hsvc_rev,
			    nxgep->niu_hsvc.hsvc_group,
			    nxgep->niu_hsvc.hsvc_major,
			    nxgep->niu_hsvc.hsvc_minor, status));
		}
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "nxge_hsvc_register: %s: cannot negotiate "
	    "hypervisor services revision %d group: 0x%lx "
	    "major: 0x%lx minor: 0x%lx errno: %d",
	    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
	    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
	    niu_hsvc.hsvc_minor, status));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_hsvc_register: Register to NIU Hypervisor failed"));

	return (DDI_FAILURE);
}
#endif

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;
	char		buf[MAXPATHLEN + 1];
	char		*devname;
#ifdef	NXGE_DEBUG
	char		*sysname;
#endif
	off_t		regsize;
	nxge_status_t	status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t		pci_offset;
	uint16_t	pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	/*
	 * The driver is running on an N2-NIU system if devname is
	 * something like "/niu@80/network@0".
	 */
	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/*
		 * Get the function number:
		 * - N2/NIU: "/niu@80/network@0" and "/niu@80/network@1"
		 */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int		*prop_val;
		uint_t		prop_len;
		uint8_t		func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				return (NXGE_OK);
			}
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMPORTANT:
		 * Workaround for a bit-swapping bug in the hardware
		 * which ends up setting no-snoop = yes, resulting in
		 * DMA not being synched properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = pci_config_get16(dev_regs->nxge_pciregh,
		    pci_offset);
		pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		/* free the vio handle (the original freed nxge_regh here) */
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));

	if (isLDOMguest(nxgep)) {
		nxge_guest_regs_map_free(nxgep);
		return;
	}

	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->interrupt_cookie = 0;
	} else {
		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
		    &nxgep->interrupt_cookie);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_setup_mutexes: failed 0x%x",
			    ddi_status));
			goto nxge_setup_mutexes_exit;
		}
	}

	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);

	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context,
	 * as FFLP operations can take a very long time to complete
	 * and are hence not suitable to invoke from interrupt
	 * handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 * enable the port, configure the dma channel bitmap,
		 * and configure the max burst size.)
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Enable the interrupts for DDI.
	 */
	nxge_intrs_enable(nxgep);

	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}


timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}
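
/*
 * Illustrative use of the timer helpers above; the callback name and
 * the 1000 msec interval are hypothetical placeholders:
 *
 *	nxgep->nxge_timerid = nxge_start_timer(nxgep,
 *	    my_periodic_check, 1000);
 *	...
 *	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
 *	nxgep->nxge_timerid = 0;
 */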
1771 */ 1772 (void) nxge_ipp_drain(nxgep); 1773 } 1774 1775 /* stop timer */ 1776 if (nxgep->nxge_timerid) { 1777 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 1778 nxgep->nxge_timerid = 0; 1779 } 1780 1781 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1782 (void) nxge_intr_hw_disable(nxgep); 1783 1784 1785 /* Disable and soft reset the IPP */ 1786 if (!isLDOMguest(nxgep)) 1787 (void) nxge_ipp_disable(nxgep); 1788 1789 /* Free classification resources */ 1790 (void) nxge_classify_uninit(nxgep); 1791 1792 /* 1793 * Reset the transmit/receive DMA side. 1794 */ 1795 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 1796 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1797 1798 nxge_uninit_txdma_channels(nxgep); 1799 nxge_uninit_rxdma_channels(nxgep); 1800 1801 /* 1802 * Reset the transmit MAC side. 1803 */ 1804 (void) nxge_tx_mac_disable(nxgep); 1805 1806 nxge_free_mem_pool(nxgep); 1807 1808 /* 1809 * Start the timer if the reset flag is not set. 1810 * If this reset flag is set, the link monitor 1811 * will not be started in order to stop furthur bus 1812 * activities coming from this interface. 1813 * The driver will start the monitor function 1814 * if the interface was initialized again later. 1815 */ 1816 if (!nxge_peu_reset_enable) { 1817 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1818 } 1819 1820 nxgep->drv_state &= ~STATE_HW_INITIALIZED; 1821 1822 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: " 1823 "nxge_mblks_pending %d", nxge_mblks_pending)); 1824 } 1825 1826 void 1827 nxge_get64(p_nxge_t nxgep, p_mblk_t mp) 1828 { 1829 uint64_t reg; 1830 uint64_t regdata; 1831 int i, retry; 1832 1833 bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t)); 1834 regdata = 0; 1835 retry = 1; 1836 1837 for (i = 0; i < retry; i++) { 1838 NXGE_REG_RD64(nxgep->npi_handle, reg, ®data); 1839 } 1840 bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t)); 1841 } 1842 1843 void 1844 nxge_put64(p_nxge_t nxgep, p_mblk_t mp) 1845 { 1846 uint64_t reg; 1847 uint64_t buf[2]; 1848 1849 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t)); 1850 reg = buf[0]; 1851 1852 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]); 1853 } 1854 1855 /*ARGSUSED*/ 1856 /*VARARGS*/ 1857 void 1858 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...) 1859 { 1860 char msg_buffer[1048]; 1861 char prefix_buffer[32]; 1862 int instance; 1863 uint64_t debug_level; 1864 int cmn_level = CE_CONT; 1865 va_list ap; 1866 1867 if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) { 1868 /* In case a developer has changed nxge_debug_level. */ 1869 if (nxgep->nxge_debug_level != nxge_debug_level) 1870 nxgep->nxge_debug_level = nxge_debug_level; 1871 } 1872 1873 debug_level = (nxgep == NULL) ? 

#ifdef NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t	cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char		*dev_ptr;
	ddi_acc_handle_t pci_config_handle;
	uint32_t	regval;
	int		i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first 0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(cfg_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}
}
#endif
NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); 1998 2001 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2002 "first 0x%llx second 0x%llx third 0x%llx " 2003 "last 0x%llx ", 2004 NXGE_PIO_READ64(dev_handle, 2005 (uint64_t *)(dev_ptr + 0), 0), 2006 NXGE_PIO_READ64(dev_handle, 2007 (uint64_t *)(dev_ptr + 8), 0), 2008 NXGE_PIO_READ64(dev_handle, 2009 (uint64_t *)(dev_ptr + 16), 0), 2010 NXGE_PIO_READ64(dev_handle, 2011 (uint64_t *)(dev_ptr + 24), 0))); 2012 } 2013 } 2014 2015 #endif 2016 2017 static void 2018 nxge_suspend(p_nxge_t nxgep) 2019 { 2020 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); 2021 2022 nxge_intrs_disable(nxgep); 2023 nxge_destroy_dev(nxgep); 2024 2025 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); 2026 } 2027 2028 static nxge_status_t 2029 nxge_resume(p_nxge_t nxgep) 2030 { 2031 nxge_status_t status = NXGE_OK; 2032 2033 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); 2034 2035 nxgep->suspended = DDI_RESUME; 2036 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 2037 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 2038 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 2039 (void) nxge_rx_mac_enable(nxgep); 2040 (void) nxge_tx_mac_enable(nxgep); 2041 nxge_intrs_enable(nxgep); 2042 nxgep->suspended = 0; 2043 2044 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2045 "<== nxge_resume status = 0x%x", status)); 2046 return (status); 2047 } 2048 2049 static nxge_status_t 2050 nxge_setup_dev(p_nxge_t nxgep) 2051 { 2052 nxge_status_t status = NXGE_OK; 2053 2054 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 2055 nxgep->mac.portnum)); 2056 2057 status = nxge_link_init(nxgep); 2058 2059 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 2060 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2061 "port%d Bad register acc handle", nxgep->mac.portnum)); 2062 status = NXGE_ERROR; 2063 } 2064 2065 if (status != NXGE_OK) { 2066 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2067 " nxge_setup_dev status " 2068 "(xcvr init 0x%08x)", status)); 2069 goto nxge_setup_dev_exit; 2070 } 2071 2072 nxge_setup_dev_exit: 2073 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2074 "<== nxge_setup_dev port %d status = 0x%08x", 2075 nxgep->mac.portnum, status)); 2076 2077 return (status); 2078 } 2079 2080 static void 2081 nxge_destroy_dev(p_nxge_t nxgep) 2082 { 2083 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 2084 2085 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 2086 2087 (void) nxge_hw_stop(nxgep); 2088 2089 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 2090 } 2091 2092 static nxge_status_t 2093 nxge_setup_system_dma_pages(p_nxge_t nxgep) 2094 { 2095 int ddi_status = DDI_SUCCESS; 2096 uint_t count; 2097 ddi_dma_cookie_t cookie; 2098 uint_t iommu_pagesize; 2099 nxge_status_t status = NXGE_OK; 2100 2101 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 2102 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 2103 if (nxgep->niu_type != N2_NIU) { 2104 iommu_pagesize = dvma_pagesize(nxgep->dip); 2105 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2106 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2107 " default_block_size %d iommu_pagesize %d", 2108 nxgep->sys_page_sz, 2109 ddi_ptob(nxgep->dip, (ulong_t)1), 2110 nxgep->rx_default_block_size, 2111 iommu_pagesize)); 2112 2113 if (iommu_pagesize != 0) { 2114 if (nxgep->sys_page_sz == iommu_pagesize) { 2115 if (iommu_pagesize > 0x4000) 2116 nxgep->sys_page_sz = 0x4000; 2117 } else { 2118 if (nxgep->sys_page_sz >
iommu_pagesize) 2119 nxgep->sys_page_sz = iommu_pagesize; 2120 } 2121 } 2122 } 2123 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2124 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2125 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2126 "default_block_size %d page mask %d", 2127 nxgep->sys_page_sz, 2128 ddi_ptob(nxgep->dip, (ulong_t)1), 2129 nxgep->rx_default_block_size, 2130 nxgep->sys_page_mask)); 2131 2132 2133 switch (nxgep->sys_page_sz) { 2134 default: 2135 nxgep->sys_page_sz = 0x1000; 2136 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2137 nxgep->rx_default_block_size = 0x1000; 2138 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2139 break; 2140 case 0x1000: 2141 nxgep->rx_default_block_size = 0x1000; 2142 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2143 break; 2144 case 0x2000: 2145 nxgep->rx_default_block_size = 0x2000; 2146 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2147 break; 2148 case 0x4000: 2149 nxgep->rx_default_block_size = 0x4000; 2150 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 2151 break; 2152 case 0x8000: 2153 nxgep->rx_default_block_size = 0x8000; 2154 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 2155 break; 2156 } 2157 2158 #ifndef USE_RX_BIG_BUF 2159 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 2160 #else 2161 nxgep->rx_default_block_size = 0x2000; 2162 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2163 #endif 2164 /* 2165 * Get the system DMA burst size. 2166 */ 2167 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2168 DDI_DMA_DONTWAIT, 0, 2169 &nxgep->dmasparehandle); 2170 if (ddi_status != DDI_SUCCESS) { 2171 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2172 "ddi_dma_alloc_handle: failed " 2173 " status 0x%x", ddi_status)); 2174 goto nxge_get_soft_properties_exit; 2175 } 2176 2177 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2178 (caddr_t)nxgep->dmasparehandle, 2179 sizeof (nxgep->dmasparehandle), 2180 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2181 DDI_DMA_DONTWAIT, 0, 2182 &cookie, &count); 2183 if (ddi_status != DDI_DMA_MAPPED) { 2184 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2185 "Binding spare handle to find system" 2186 " burstsize failed.")); 2187 ddi_status = DDI_FAILURE; 2188 goto nxge_get_soft_properties_fail1; 2189 } 2190 2191 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2192 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2193 2194 nxge_get_soft_properties_fail1: 2195 ddi_dma_free_handle(&nxgep->dmasparehandle); 2196 2197 nxge_get_soft_properties_exit: 2198 2199 if (ddi_status != DDI_SUCCESS) 2200 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2201 2202 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2203 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2204 return (status); 2205 } 2206 2207 static nxge_status_t 2208 nxge_alloc_mem_pool(p_nxge_t nxgep) 2209 { 2210 nxge_status_t status = NXGE_OK; 2211 2212 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2213 2214 status = nxge_alloc_rx_mem_pool(nxgep); 2215 if (status != NXGE_OK) { 2216 return (NXGE_ERROR); 2217 } 2218 2219 status = nxge_alloc_tx_mem_pool(nxgep); 2220 if (status != NXGE_OK) { 2221 nxge_free_rx_mem_pool(nxgep); 2222 return (NXGE_ERROR); 2223 } 2224 2225 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2226 return (NXGE_OK); 2227 } 2228 2229 static void 2230 nxge_free_mem_pool(p_nxge_t nxgep) 2231 { 2232 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2233 2234 nxge_free_rx_mem_pool(nxgep); 2235 nxge_free_tx_mem_pool(nxgep); 2236 2237 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 2238 } 2239 2240 nxge_status_t 2241 
nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 2242 { 2243 uint32_t rdc_max; 2244 p_nxge_dma_pt_cfg_t p_all_cfgp; 2245 p_nxge_hw_pt_cfg_t p_cfgp; 2246 p_nxge_dma_pool_t dma_poolp; 2247 p_nxge_dma_common_t *dma_buf_p; 2248 p_nxge_dma_pool_t dma_cntl_poolp; 2249 p_nxge_dma_common_t *dma_cntl_p; 2250 uint32_t *num_chunks; /* per dma */ 2251 nxge_status_t status = NXGE_OK; 2252 2253 uint32_t nxge_port_rbr_size; 2254 uint32_t nxge_port_rbr_spare_size; 2255 uint32_t nxge_port_rcr_size; 2256 uint32_t rx_cntl_alloc_size; 2257 2258 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 2259 2260 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2261 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2262 rdc_max = NXGE_MAX_RDCS; 2263 2264 /* 2265 * Allocate memory for the common DMA data structures. 2266 */ 2267 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2268 KM_SLEEP); 2269 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2270 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2271 2272 dma_cntl_poolp = (p_nxge_dma_pool_t) 2273 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2274 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2275 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2276 2277 num_chunks = (uint32_t *)KMEM_ZALLOC( 2278 sizeof (uint32_t) * rdc_max, KM_SLEEP); 2279 2280 /* 2281 * Assume that each DMA channel will be configured with 2282 * the default block size. 2283 * RBR block counts are rounded up to a multiple of the batch count (16). 2284 */ 2285 nxge_port_rbr_size = p_all_cfgp->rbr_size; 2286 nxge_port_rcr_size = p_all_cfgp->rcr_size; 2287 2288 if (!nxge_port_rbr_size) { 2289 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 2290 } 2291 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 2292 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2293 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 2294 } 2295 2296 p_all_cfgp->rbr_size = nxge_port_rbr_size; 2297 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 2298 2299 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 2300 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2301 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 2302 } 2303 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 2304 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2305 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 2306 "set to default %d", 2307 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 2308 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 2309 } 2310 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 2311 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2312 "nxge_alloc_rx_mem_pool: RCR too high %d, " 2313 "set to default %d", 2314 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 2315 nxge_port_rcr_size = RCR_DEFAULT_MAX; 2316 } 2317 2318 /* 2319 * N2/NIU limits the descriptor setup: data buffers must come from 2320 * contiguous memory (contig_mem_alloc(), at most 4M), and control 2321 * buffers must be little endian and allocated with the ddi/dki 2322 * memory allocation functions. 2323 */ 2324 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2325 if (nxgep->niu_type == N2_NIU) { 2326 nxge_port_rbr_spare_size = 0; 2327 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2328 (!ISP2(nxge_port_rbr_size))) { 2329 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2330 } 2331 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2332 (!ISP2(nxge_port_rcr_size))) { 2333 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2334 } 2335 } 2336 #endif 2337 2338 /* 2339 * Addresses of the receive block ring, the receive completion ring 2340 * and the mailbox must all be cache-aligned (64 bytes).
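 *
 * A sketch of the computation that follows (illustrative only; the
 * real operand sizes come from the sizeof () expressions below):
 *
 *	rx_cntl_alloc_size = (rbr + spare) * sizeof (rx_desc_t)
 *	    + rcr * sizeof (rcr_entry_t)
 *	    + sizeof (rxdma_mailbox_t)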
2341 */ 2342 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2343 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2344 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2345 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2346 2347 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2348 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2349 "nxge_port_rcr_size = %d " 2350 "rx_cntl_alloc_size = %d", 2351 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2352 nxge_port_rcr_size, 2353 rx_cntl_alloc_size)); 2354 2355 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2356 if (nxgep->niu_type == N2_NIU) { 2357 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2358 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2359 2360 if (!ISP2(rx_buf_alloc_size)) { 2361 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2362 "==> nxge_alloc_rx_mem_pool: " 2363 " buffer size must be a power of 2")); 2364 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2365 goto nxge_alloc_rx_mem_pool_exit; 2366 } 2367 2368 if (rx_buf_alloc_size > (1 << 22)) { 2369 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2370 "==> nxge_alloc_rx_mem_pool: " 2371 " buffer size exceeds the 4M limit")); 2372 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2373 goto nxge_alloc_rx_mem_pool_exit; 2374 } 2375 2376 if (rx_cntl_alloc_size < 0x2000) { 2377 rx_cntl_alloc_size = 0x2000; 2378 } 2379 } 2380 #endif 2381 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2382 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2383 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2384 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2385 2386 dma_poolp->ndmas = p_cfgp->max_rdcs; 2387 dma_poolp->num_chunks = num_chunks; 2388 dma_poolp->buf_allocated = B_TRUE; 2389 nxgep->rx_buf_pool_p = dma_poolp; 2390 dma_poolp->dma_buf_pool_p = dma_buf_p; 2391 2392 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2393 dma_cntl_poolp->buf_allocated = B_TRUE; 2394 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2395 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2396 2397 /* Allocate the receive rings, too. */ 2398 nxgep->rx_rbr_rings = 2399 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2400 nxgep->rx_rbr_rings->rbr_rings = 2401 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2402 nxgep->rx_rcr_rings = 2403 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2404 nxgep->rx_rcr_rings->rcr_rings = 2405 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2406 nxgep->rx_mbox_areas_p = 2407 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2408 nxgep->rx_mbox_areas_p->rxmbox_areas = 2409 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2410 2411 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2412 p_cfgp->max_rdcs; 2413 2414 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2415 "<== nxge_alloc_rx_mem_pool: status 0x%08x", status)); 2416 2417 nxge_alloc_rx_mem_pool_exit: 2418 return (status); 2419 } 2420 2421 /* 2422 * nxge_alloc_rxb 2423 * 2424 * Allocate buffers for an RDC. 2425 * 2426 * Arguments: 2427 * nxgep 2428 * channel The channel to map into our kernel space.
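 *
 * A hypothetical usage sketch (illustration only; the real callers
 * live in the RDC setup path):
 *
 *	if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
 *		return (NXGE_ERROR);
 *	...
 *	nxge_free_rxb(nxgep, channel);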
2429 * 2430 * Notes: 2431 * 2432 * NPI function calls: 2433 * 2434 * NXGE function calls: 2435 * 2436 * Registers accessed: 2437 * 2438 * Context: 2439 * 2440 * Taking apart: 2441 * 2442 * Open questions: 2443 * 2444 */ 2445 nxge_status_t 2446 nxge_alloc_rxb( 2447 p_nxge_t nxgep, 2448 int channel) 2449 { 2450 size_t rx_buf_alloc_size; 2451 nxge_status_t status = NXGE_OK; 2452 2453 nxge_dma_common_t **data; 2454 nxge_dma_common_t **control; 2455 uint32_t *num_chunks; 2456 2457 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb")); 2458 2459 /* 2460 * Allocate memory for the receive buffers and descriptor rings. 2461 * Replace these allocation functions with the interface functions 2462 * provided by the partition manager if/when they are available. 2463 */ 2464 2465 /* 2466 * Allocate memory for the receive buffer blocks. 2467 */ 2468 rx_buf_alloc_size = (nxgep->rx_default_block_size * 2469 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); 2470 2471 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2472 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; 2473 2474 if ((status = nxge_alloc_rx_buf_dma( 2475 nxgep, channel, data, rx_buf_alloc_size, 2476 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { 2477 return (status); 2478 } 2479 2480 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " 2481 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); 2482 2483 /* 2484 * Allocate memory for descriptor rings and mailbox. 2485 */ 2486 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2487 2488 if ((status = nxge_alloc_rx_cntl_dma( 2489 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 2490 != NXGE_OK) { 2491 nxge_free_rx_cntl_dma(nxgep, *control); 2492 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 2493 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 2494 return (status); 2495 } 2496 2497 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2498 "<== nxge_alloc_rxb: status 0x%08x", status)); 2499 2500 return (status); 2501 } 2502 2503 void 2504 nxge_free_rxb( 2505 p_nxge_t nxgep, 2506 int channel) 2507 { 2508 nxge_dma_common_t *data; 2509 nxge_dma_common_t *control; 2510 uint32_t num_chunks; 2511 2512 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb")); 2513 2514 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2515 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 2516 nxge_free_rx_buf_dma(nxgep, data, num_chunks); 2517 2518 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2519 nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 2520 2521 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2522 nxge_free_rx_cntl_dma(nxgep, control); 2523 2524 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2525 2526 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2527 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2528 2529 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb")); 2530 } 2531 2532 static void 2533 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2534 { 2535 int rdc_max = NXGE_MAX_RDCS; 2536 2537 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2538 2539 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 2540 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2541 "<== nxge_free_rx_mem_pool " 2542 "(null rx buf pool or buf not allocated)")); 2543 return; 2544 } 2545 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 2546 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2547 "<== nxge_free_rx_mem_pool " 2548 "(null rx cntl buf pool or cntl buf not allocated)")); 2549 return; 2550 } 2551 2552
KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2553 sizeof (p_nxge_dma_common_t) * rdc_max); 2554 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2555 2556 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2557 sizeof (uint32_t) * rdc_max); 2558 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2559 sizeof (p_nxge_dma_common_t) * rdc_max); 2560 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2561 2562 nxgep->rx_buf_pool_p = 0; 2563 nxgep->rx_cntl_pool_p = 0; 2564 2565 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2566 sizeof (p_rx_rbr_ring_t) * rdc_max); 2567 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2568 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2569 sizeof (p_rx_rcr_ring_t) * rdc_max); 2570 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2571 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2572 sizeof (p_rx_mbox_t) * rdc_max); 2573 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2574 2575 nxgep->rx_rbr_rings = 0; 2576 nxgep->rx_rcr_rings = 0; 2577 nxgep->rx_mbox_areas_p = 0; 2578 2579 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2580 } 2581 2582 2583 static nxge_status_t 2584 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2585 p_nxge_dma_common_t *dmap, 2586 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2587 { 2588 p_nxge_dma_common_t rx_dmap; 2589 nxge_status_t status = NXGE_OK; 2590 size_t total_alloc_size; 2591 size_t allocated = 0; 2592 int i, size_index, array_size; 2593 boolean_t use_kmem_alloc = B_FALSE; 2594 2595 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2596 2597 rx_dmap = (p_nxge_dma_common_t) 2598 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2599 KM_SLEEP); 2600 2601 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2602 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2603 dma_channel, alloc_size, block_size, dmap)); 2604 2605 total_alloc_size = alloc_size; 2606 2607 #if defined(RX_USE_RECLAIM_POST) 2608 total_alloc_size = alloc_size + alloc_size/4; 2609 #endif 2610 2611 i = 0; 2612 size_index = 0; 2613 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2614 while ((size_index < array_size) && 2615 (alloc_sizes[size_index] < alloc_size)) 2616 size_index++; 2617 if (size_index >= array_size) { 2618 size_index = array_size - 1; 2619 } 2620 2621 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2622 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2623 use_kmem_alloc = B_TRUE; 2624 #if defined(__i386) || defined(__amd64) 2625 size_index = 0; 2626 #endif 2627 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2628 "==> nxge_alloc_rx_buf_dma: " 2629 "Neptune use kmem_alloc() - size_index %d", 2630 size_index)); 2631 } 2632 2633 while ((allocated < total_alloc_size) && 2634 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2635 rx_dmap[i].dma_chunk_index = i; 2636 rx_dmap[i].block_size = block_size; 2637 rx_dmap[i].alength = alloc_sizes[size_index]; 2638 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2639 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2640 rx_dmap[i].dma_channel = dma_channel; 2641 rx_dmap[i].contig_alloc_type = B_FALSE; 2642 rx_dmap[i].kmem_alloc_type = B_FALSE; 2643 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2644 2645 /* 2646 * N2/NIU: data buffers must be contiguous as the driver 2647 * needs to call Hypervisor api to set up 2648 * logical pages. 
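 *
 * Summary of the allocation method chosen below (a reading aid;
 * the controlling flags are set just above):
 *
 *	N2/NIU (NXGE_DMA_BLOCK == 1)	-> contig_mem_alloc()
 *	Neptune, nxge_use_kmem_alloc	-> kmem_alloc()
 *	otherwise			-> ddi_dma_mem_alloc()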
2649 */ 2650 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2651 rx_dmap[i].contig_alloc_type = B_TRUE; 2652 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2653 } else if (use_kmem_alloc) { 2654 /* For Neptune, use kmem_alloc */ 2655 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2656 "==> nxge_alloc_rx_buf_dma: " 2657 "Neptune use kmem_alloc()")); 2658 rx_dmap[i].kmem_alloc_type = B_TRUE; 2659 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2660 } 2661 2662 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2663 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2664 "i %d nblocks %d alength %d", 2665 dma_channel, i, &rx_dmap[i], block_size, 2666 i, rx_dmap[i].nblocks, 2667 rx_dmap[i].alength)); 2668 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2669 &nxge_rx_dma_attr, 2670 rx_dmap[i].alength, 2671 &nxge_dev_buf_dma_acc_attr, 2672 DDI_DMA_READ | DDI_DMA_STREAMING, 2673 (p_nxge_dma_common_t)(&rx_dmap[i])); 2674 if (status != NXGE_OK) { 2675 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2676 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2677 "dma %d size_index %d size requested %d", 2678 dma_channel, 2679 size_index, 2680 rx_dmap[i].alength)); 2681 size_index--; 2682 } else { 2683 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2684 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2685 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2686 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2687 "buf_alloc_state %d alloc_type %d", 2688 dma_channel, 2689 &rx_dmap[i], 2690 rx_dmap[i].kaddrp, 2691 rx_dmap[i].alength, 2692 rx_dmap[i].buf_alloc_state, 2693 rx_dmap[i].buf_alloc_type)); 2694 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2695 " alloc_rx_buf_dma allocated rdc %d " 2696 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2697 dma_channel, i, rx_dmap[i].alength, 2698 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2699 rx_dmap[i].kaddrp)); 2700 i++; 2701 allocated += alloc_sizes[size_index]; 2702 } 2703 } 2704 2705 if (allocated < total_alloc_size) { 2706 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2707 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2708 "allocated 0x%x requested 0x%x", 2709 dma_channel, 2710 allocated, total_alloc_size)); 2711 status = NXGE_ERROR; 2712 goto nxge_alloc_rx_mem_fail1; 2713 } 2714 2715 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2716 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2717 "allocated 0x%x requested 0x%x", 2718 dma_channel, 2719 allocated, total_alloc_size)); 2720 2721 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2722 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2723 dma_channel, i)); 2724 *num_chunks = i; 2725 *dmap = rx_dmap; 2726 2727 goto nxge_alloc_rx_mem_exit; 2728 2729 nxge_alloc_rx_mem_fail1: 2730 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2731 2732 nxge_alloc_rx_mem_exit: 2733 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2734 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2735 2736 return (status); 2737 } 2738 2739 /*ARGSUSED*/ 2740 static void 2741 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2742 uint32_t num_chunks) 2743 { 2744 int i; 2745 2746 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2747 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2748 2749 if (dmap == 0) 2750 return; 2751 2752 for (i = 0; i < num_chunks; i++) { 2753 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2754 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2755 i, dmap)); 2756 nxge_dma_free_rx_data_buf(dmap++); 2757 } 2758 2759 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma")); 2760 } 2761 2762 /*ARGSUSED*/ 2763 static nxge_status_t 2764 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2765 p_nxge_dma_common_t *dmap, size_t
size) 2766 { 2767 p_nxge_dma_common_t rx_dmap; 2768 nxge_status_t status = NXGE_OK; 2769 2770 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2771 2772 rx_dmap = (p_nxge_dma_common_t) 2773 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2774 2775 rx_dmap->contig_alloc_type = B_FALSE; 2776 rx_dmap->kmem_alloc_type = B_FALSE; 2777 2778 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2779 &nxge_desc_dma_attr, 2780 size, 2781 &nxge_dev_desc_dma_acc_attr, 2782 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2783 rx_dmap); 2784 if (status != NXGE_OK) { 2785 goto nxge_alloc_rx_cntl_dma_fail1; 2786 } 2787 2788 *dmap = rx_dmap; 2789 goto nxge_alloc_rx_cntl_dma_exit; 2790 2791 nxge_alloc_rx_cntl_dma_fail1: 2792 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2793 2794 nxge_alloc_rx_cntl_dma_exit: 2795 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2796 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2797 2798 return (status); 2799 } 2800 2801 /*ARGSUSED*/ 2802 static void 2803 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2804 { 2805 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2806 2807 if (dmap == 0) 2808 return; 2809 2810 nxge_dma_mem_free(dmap); 2811 2812 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2813 } 2814 2815 typedef struct { 2816 size_t tx_size; 2817 size_t cr_size; 2818 size_t threshhold; 2819 } nxge_tdc_sizes_t; 2820 2821 static 2822 nxge_status_t 2823 nxge_tdc_sizes( 2824 nxge_t *nxgep, 2825 nxge_tdc_sizes_t *sizes) 2826 { 2827 uint32_t threshhold; /* The bcopy() threshhold */ 2828 size_t tx_size; /* Transmit buffer size */ 2829 size_t cr_size; /* Completion ring size */ 2830 2831 /* 2832 * Assume that each DMA channel will be configured with the 2833 * default transmit buffer size for copying transmit data. 2834 * (If a packet is bigger than this, it will not be copied.) 2835 */ 2836 if (nxgep->niu_type == N2_NIU) { 2837 threshhold = TX_BCOPY_SIZE; 2838 } else { 2839 threshhold = nxge_bcopy_thresh; 2840 } 2841 tx_size = nxge_tx_ring_size * threshhold; 2842 2843 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 2844 cr_size += sizeof (txdma_mailbox_t); 2845 2846 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2847 if (nxgep->niu_type == N2_NIU) { 2848 if (!ISP2(tx_size)) { 2849 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2850 "==> nxge_tdc_sizes: Tx size" 2851 " must be power of 2")); 2852 return (NXGE_ERROR); 2853 } 2854 2855 if (tx_size > (1 << 22)) { 2856 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2857 "==> nxge_tdc_sizes: Tx size" 2858 " limited to 4M")); 2859 return (NXGE_ERROR); 2860 } 2861 2862 if (cr_size < 0x2000) 2863 cr_size = 0x2000; 2864 } 2865 #endif 2866 2867 sizes->threshhold = threshhold; 2868 sizes->tx_size = tx_size; 2869 sizes->cr_size = cr_size; 2870 2871 return (NXGE_OK); 2872 } 2873 /* 2874 * nxge_alloc_txb 2875 * 2876 * Allocate buffers for a TDC. 2877 * 2878 * Arguments: 2879 * nxgep 2880 * channel The channel to map into our kernel space.
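 *
 * For reference, the sizes computed by nxge_tdc_sizes() above reduce
 * to the following (an illustrative restatement, not new behavior):
 *
 *	tx_size = nxge_tx_ring_size * threshhold
 *	cr_size = nxge_tx_ring_size * sizeof (tx_desc_t)
 *	    + sizeof (txdma_mailbox_t)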
2881 * 2882 * Notes: 2883 * 2884 * NPI function calls: 2885 * 2886 * NXGE function calls: 2887 * 2888 * Registers accessed: 2889 * 2890 * Context: 2891 * 2892 * Taking apart: 2893 * 2894 * Open questions: 2895 * 2896 */ 2897 nxge_status_t 2898 nxge_alloc_txb( 2899 p_nxge_t nxgep, 2900 int channel) 2901 { 2902 nxge_dma_common_t **dma_buf_p; 2903 nxge_dma_common_t **dma_cntl_p; 2904 uint32_t *num_chunks; 2905 nxge_status_t status = NXGE_OK; 2906 2907 nxge_tdc_sizes_t sizes; 2908 2909 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb")); 2910 2911 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 2912 return (NXGE_ERROR); 2913 2914 /* 2915 * Allocate memory for transmit buffers and descriptor rings. 2916 * Replace these allocation functions with the interface functions 2917 * provided by the partition manager if/when they are available. 2918 */ 2919 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2920 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 2921 2922 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2923 2924 /* 2925 * Allocate memory for the transmit buffer pool. 2926 */ 2931 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2932 "sizes: tx: %ld, cr:%ld, th:%ld", 2933 sizes.tx_size, sizes.cr_size, sizes.threshhold)); 2934 2935 *num_chunks = 0; 2936 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 2937 sizes.tx_size, sizes.threshhold, num_chunks); 2938 if (status != NXGE_OK) { 2939 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 2940 return (status); 2941 } 2942 2943 /* 2944 * Allocate memory for descriptor rings and mailbox. 2945 */ 2946 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 2947 sizes.cr_size); 2948 if (status != NXGE_OK) { 2949 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 2950 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 2951 return (status); 2952 } 2953 2954 return (NXGE_OK); 2955 } 2956 2957 void 2958 nxge_free_txb( 2959 p_nxge_t nxgep, 2960 int channel) 2961 { 2962 nxge_dma_common_t *data; 2963 nxge_dma_common_t *control; 2964 uint32_t num_chunks; 2965 2966 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 2967 2968 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2969 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2970 nxge_free_tx_buf_dma(nxgep, data, num_chunks); 2971 2972 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2973 nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 2974 2975 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2976 nxge_free_tx_cntl_dma(nxgep, control); 2977 2978 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2979 2980 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2981 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2982 2983 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 2984 } 2985 2986 /* 2987 * nxge_alloc_tx_mem_pool 2988 * 2989 * This function allocates all of the per-port TDC control data structures. 2990 * The per-channel (TDC) data structures are allocated when needed.
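 * The per-channel buffers themselves are allocated later by
 * nxge_alloc_txb() when the channel is brought up.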
2991 * 2992 * Arguments: 2993 * nxgep 2994 * 2995 * Notes: 2996 * 2997 * Context: 2998 * Any domain 2999 */ 3000 nxge_status_t 3001 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 3002 { 3003 nxge_hw_pt_cfg_t *p_cfgp; 3004 nxge_dma_pool_t *dma_poolp; 3005 nxge_dma_common_t **dma_buf_p; 3006 nxge_dma_pool_t *dma_cntl_poolp; 3007 nxge_dma_common_t **dma_cntl_p; 3008 uint32_t *num_chunks; /* per dma */ 3009 int tdc_max; 3010 3011 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 3012 3013 p_cfgp = &nxgep->pt_config.hw_config; 3014 tdc_max = NXGE_MAX_TDCS; 3015 3016 /* 3017 * Allocate memory for each transmit DMA channel. 3018 */ 3019 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 3020 KM_SLEEP); 3021 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 3022 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 3023 3024 dma_cntl_poolp = (p_nxge_dma_pool_t) 3025 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 3026 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 3027 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 3028 3029 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 3030 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3031 "nxge_alloc_tx_mem_pool: Tx ring size too high %d, " 3032 "set to default %d", 3033 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 3034 nxge_tx_ring_size = TDC_DEFAULT_MAX; 3035 } 3036 3037 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3038 /* 3039 * N2/NIU limits the descriptor setup: data buffers must come from 3040 * contiguous memory (contig_mem_alloc(), at most 4M), and control 3041 * buffers must be little endian and allocated with the ddi/dki 3042 * memory allocation functions. The transmit ring is limited to 3043 * 8K entries (which includes the 3044 * mailbox). 3045 */ 3045 if (nxgep->niu_type == N2_NIU) { 3046 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 3047 (!ISP2(nxge_tx_ring_size))) { 3048 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 3049 } 3050 } 3051 #endif 3052 3053 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 3054 3055 num_chunks = (uint32_t *)KMEM_ZALLOC( 3056 sizeof (uint32_t) * tdc_max, KM_SLEEP); 3057 3058 dma_poolp->ndmas = p_cfgp->tdc.owned; 3059 dma_poolp->num_chunks = num_chunks; 3060 dma_poolp->dma_buf_pool_p = dma_buf_p; 3061 nxgep->tx_buf_pool_p = dma_poolp; 3062 3063 dma_poolp->buf_allocated = B_TRUE; 3064 3065 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 3066 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 3067 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 3068 3069 dma_cntl_poolp->buf_allocated = B_TRUE; 3070 3071 nxgep->tx_rings = 3072 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 3073 nxgep->tx_rings->rings = 3074 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 3075 nxgep->tx_mbox_areas_p = 3076 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 3077 nxgep->tx_mbox_areas_p->txmbox_areas_p = 3078 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 3079 3080 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 3081 3082 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3083 "==> nxge_alloc_tx_mem_pool: tdc_max %d poolp->ndmas %d", 3084 tdc_max, dma_poolp->ndmas)); 3085 3086 return (NXGE_OK); 3087 } 3088 3089 nxge_status_t 3090 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 3091 p_nxge_dma_common_t *dmap, size_t alloc_size, 3092 size_t block_size, uint32_t *num_chunks) 3093 { 3094 p_nxge_dma_common_t tx_dmap; 3095 nxge_status_t status = NXGE_OK; 3096 size_t total_alloc_size; 3097 size_t allocated = 0; 3098 int i, size_index, array_size; 3099 3100 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 3101 3102 tx_dmap = (p_nxge_dma_common_t) 3103 KMEM_ZALLOC(sizeof (nxge_dma_common_t) *
NXGE_DMA_BLOCK, 3104 KM_SLEEP); 3105 3106 total_alloc_size = alloc_size; 3107 i = 0; 3108 size_index = 0; 3109 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3110 while ((size_index < array_size) && 3111 (alloc_sizes[size_index] < alloc_size)) 3112 size_index++; 3113 if (size_index >= array_size) { 3114 size_index = array_size - 1; 3115 } 3116 3117 while ((allocated < total_alloc_size) && 3118 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3119 3120 tx_dmap[i].dma_chunk_index = i; 3121 tx_dmap[i].block_size = block_size; 3122 tx_dmap[i].alength = alloc_sizes[size_index]; 3123 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3124 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3125 tx_dmap[i].dma_channel = dma_channel; 3126 tx_dmap[i].contig_alloc_type = B_FALSE; 3127 tx_dmap[i].kmem_alloc_type = B_FALSE; 3128 3129 /* 3130 * N2/NIU: data buffers must be contiguous as the driver 3131 * needs to call the Hypervisor API to set up 3132 * logical pages. 3133 */ 3134 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3135 tx_dmap[i].contig_alloc_type = B_TRUE; 3136 } 3137 3138 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3139 &nxge_tx_dma_attr, 3140 tx_dmap[i].alength, 3141 &nxge_dev_buf_dma_acc_attr, 3142 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3143 (p_nxge_dma_common_t)(&tx_dmap[i])); 3144 if (status != NXGE_OK) { 3145 size_index--; 3146 } else { 3147 i++; 3148 allocated += alloc_sizes[size_index]; 3149 } 3150 } 3151 3152 if (allocated < total_alloc_size) { 3153 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3154 "==> nxge_alloc_tx_buf_dma: not enough for channel %d: " 3155 "allocated 0x%x requested 0x%x", 3156 dma_channel, 3157 allocated, total_alloc_size)); 3158 status = NXGE_ERROR; 3159 goto nxge_alloc_tx_mem_fail1; 3160 } 3161 3162 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3163 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3164 "allocated 0x%x requested 0x%x", 3165 dma_channel, 3166 allocated, total_alloc_size)); 3167 3168 *num_chunks = i; 3169 *dmap = tx_dmap; 3170 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3171 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3172 *dmap, i)); 3173 goto nxge_alloc_tx_mem_exit; 3174 3175 nxge_alloc_tx_mem_fail1: 3176 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3177 3178 nxge_alloc_tx_mem_exit: 3179 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3180 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3181 3182 return (status); 3183 } 3184 3185 /*ARGSUSED*/ 3186 static void 3187 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3188 uint32_t num_chunks) 3189 { 3190 int i; 3191 3192 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3193 3194 if (dmap == 0) 3195 return; 3196 3197 for (i = 0; i < num_chunks; i++) { 3198 nxge_dma_mem_free(dmap++); 3199 } 3200 3201 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3202 } 3203 3204 /*ARGSUSED*/ 3205 nxge_status_t 3206 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3207 p_nxge_dma_common_t *dmap, size_t size) 3208 { 3209 p_nxge_dma_common_t tx_dmap; 3210 nxge_status_t status = NXGE_OK; 3211 3212 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3213 tx_dmap = (p_nxge_dma_common_t) 3214 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3215 3216 tx_dmap->contig_alloc_type = B_FALSE; 3217 tx_dmap->kmem_alloc_type = B_FALSE; 3218 3219 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3220 &nxge_desc_dma_attr, 3221 size, 3222 &nxge_dev_desc_dma_acc_attr, 3223 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3224 tx_dmap); 3225 if (status != NXGE_OK) { 3226
goto nxge_alloc_tx_cntl_dma_fail1; 3227 } 3228 3229 *dmap = tx_dmap; 3230 goto nxge_alloc_tx_cntl_dma_exit; 3231 3232 nxge_alloc_tx_cntl_dma_fail1: 3233 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3234 3235 nxge_alloc_tx_cntl_dma_exit: 3236 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3237 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3238 3239 return (status); 3240 } 3241 3242 /*ARGSUSED*/ 3243 static void 3244 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3245 { 3246 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3247 3248 if (dmap == 0) 3249 return; 3250 3251 nxge_dma_mem_free(dmap); 3252 3253 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3254 } 3255 3256 /* 3257 * nxge_free_tx_mem_pool 3258 * 3259 * This function frees all of the per-port TDC control data structures. 3260 * The per-channel (TDC) data structures are freed when the channel 3261 * is stopped. 3262 * 3263 * Arguments: 3264 * nxgep 3265 * 3266 * Notes: 3267 * 3268 * Context: 3269 * Any domain 3270 */ 3271 static void 3272 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3273 { 3274 int tdc_max = NXGE_MAX_TDCS; 3275 3276 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3277 3278 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3279 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3280 "<== nxge_free_tx_mem_pool " 3281 "(null tx buf pool or buf not allocated)")); 3282 return; 3283 } 3284 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3285 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3286 "<== nxge_free_tx_mem_pool " 3287 "(null tx cntl buf pool or cntl buf not allocated)")); 3288 return; 3289 } 3290 3291 /* 1. Free the mailboxes. */ 3292 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3293 sizeof (p_tx_mbox_t) * tdc_max); 3294 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3295 3296 nxgep->tx_mbox_areas_p = 0; 3297 3298 /* 2. Free the transmit ring arrays. */ 3299 KMEM_FREE(nxgep->tx_rings->rings, 3300 sizeof (p_tx_ring_t) * tdc_max); 3301 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3302 3303 nxgep->tx_rings = 0; 3304 3305 /* 3. Free the completion ring data structures. */ 3306 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3307 sizeof (p_nxge_dma_common_t) * tdc_max); 3308 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3309 3310 nxgep->tx_cntl_pool_p = 0; 3311 3312 /* 4. Free the data ring data structures. */ 3313 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3314 sizeof (uint32_t) * tdc_max); 3315 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3316 sizeof (p_nxge_dma_common_t) * tdc_max); 3317 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3318 3319 nxgep->tx_buf_pool_p = 0; 3320 3321 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3322 } 3323 3324 /*ARGSUSED*/ 3325 static nxge_status_t 3326 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3327 struct ddi_dma_attr *dma_attrp, 3328 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3329 p_nxge_dma_common_t dma_p) 3330 { 3331 caddr_t kaddrp; 3332 int ddi_status = DDI_SUCCESS; 3333 boolean_t contig_alloc_type; 3334 boolean_t kmem_alloc_type; 3335 3336 contig_alloc_type = dma_p->contig_alloc_type; 3337 3338 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3339 /* 3340 * contig_alloc_type (contiguous memory) is allowed 3341 * only for N2/NIU.
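 *
 * Dispatch summary for the switches below (a reading aid, not
 * new behavior):
 *
 *	contig B_FALSE, kmem B_FALSE	-> ddi_dma_mem_alloc() + bind
 *	contig B_FALSE, kmem B_TRUE	-> KMEM_ALLOC() + bind
 *	contig B_TRUE (sun4v only)	-> contig_mem_alloc() + bind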
3342 */ 3343 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3344 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3345 dma_p->contig_alloc_type)); 3346 return (NXGE_ERROR | NXGE_DDI_FAILED); 3347 } 3348 3349 dma_p->dma_handle = NULL; 3350 dma_p->acc_handle = NULL; 3351 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3352 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3353 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3354 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3355 if (ddi_status != DDI_SUCCESS) { 3356 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3357 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3358 return (NXGE_ERROR | NXGE_DDI_FAILED); 3359 } 3360 3361 kmem_alloc_type = dma_p->kmem_alloc_type; 3362 3363 switch (contig_alloc_type) { 3364 case B_FALSE: 3365 switch (kmem_alloc_type) { 3366 case B_FALSE: 3367 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3368 length, 3369 acc_attr_p, 3370 xfer_flags, 3371 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3372 &dma_p->acc_handle); 3373 if (ddi_status != DDI_SUCCESS) { 3374 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3375 "nxge_dma_mem_alloc: " 3376 "ddi_dma_mem_alloc failed")); 3377 ddi_dma_free_handle(&dma_p->dma_handle); 3378 dma_p->dma_handle = NULL; 3379 return (NXGE_ERROR | NXGE_DDI_FAILED); 3380 } 3381 if (dma_p->alength < length) { 3382 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3383 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3384 "< length.")); 3385 ddi_dma_mem_free(&dma_p->acc_handle); 3386 ddi_dma_free_handle(&dma_p->dma_handle); 3387 dma_p->acc_handle = NULL; 3388 dma_p->dma_handle = NULL; 3389 return (NXGE_ERROR); 3390 } 3391 3392 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3393 NULL, 3394 kaddrp, dma_p->alength, xfer_flags, 3395 DDI_DMA_DONTWAIT, 3396 0, &dma_p->dma_cookie, &dma_p->ncookies); 3397 if (ddi_status != DDI_DMA_MAPPED) { 3398 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3399 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3400 "failed " 3401 "(status 0x%x ncookies %d.)", ddi_status, 3402 dma_p->ncookies)); 3403 if (dma_p->acc_handle) { 3404 ddi_dma_mem_free(&dma_p->acc_handle); 3405 dma_p->acc_handle = NULL; 3406 } 3407 ddi_dma_free_handle(&dma_p->dma_handle); 3408 dma_p->dma_handle = NULL; 3409 return (NXGE_ERROR | NXGE_DDI_FAILED); 3410 } 3411 3412 if (dma_p->ncookies != 1) { 3413 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3414 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3415 "> 1 cookie" 3416 "(status 0x%x ncookies %d.)", ddi_status, 3417 dma_p->ncookies)); 3418 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3419 if (dma_p->acc_handle) { 3420 ddi_dma_mem_free(&dma_p->acc_handle); 3421 dma_p->acc_handle = NULL; 3422 } 3423 ddi_dma_free_handle(&dma_p->dma_handle); 3424 dma_p->dma_handle = NULL; 3425 dma_p->acc_handle = NULL; 3426 return (NXGE_ERROR); 3427 } 3428 break; 3429 3430 case B_TRUE: 3431 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3432 if (kaddrp == NULL) { 3433 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3434 "nxge_dma_mem_alloc: " 3435 "kmem_alloc failed")); 3436 return (NXGE_ERROR); 3437 } 3438 3439 dma_p->alength = length; 3440 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3441 NULL, kaddrp, dma_p->alength, xfer_flags, 3442 DDI_DMA_DONTWAIT, 0, 3443 &dma_p->dma_cookie, &dma_p->ncookies); 3444 if (ddi_status != DDI_DMA_MAPPED) { 3445 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3446 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3447 "(kmem_alloc) failed kaddrp $%p length %d " 3448 "(status 0x%x (%d) ncookies %d.)", 3449 kaddrp, length, 3450 ddi_status, ddi_status, dma_p->ncookies)); 3451 KMEM_FREE(kaddrp, length); 3452
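			/*
			 * The bind failed and the buffer has been freed
			 * above; clear the handles and pointers so that
			 * a later nxge_dma_mem_free() has nothing to do.
			 */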
dma_p->acc_handle = NULL; 3453 ddi_dma_free_handle(&dma_p->dma_handle); 3454 dma_p->dma_handle = NULL; 3455 dma_p->kaddrp = NULL; 3456 return (NXGE_ERROR | NXGE_DDI_FAILED); 3457 } 3458 3459 if (dma_p->ncookies != 1) { 3460 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3461 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3462 "(kmem_alloc) > 1 cookie" 3463 "(status 0x%x ncookies %d.)", ddi_status, 3464 dma_p->ncookies)); 3465 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3466 KMEM_FREE(kaddrp, length); 3467 ddi_dma_free_handle(&dma_p->dma_handle); 3468 dma_p->dma_handle = NULL; 3469 dma_p->acc_handle = NULL; 3470 dma_p->kaddrp = NULL; 3471 return (NXGE_ERROR); 3472 } 3473 3474 dma_p->kaddrp = kaddrp; 3475 3476 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3477 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3478 "kaddr $%p alength %d", 3479 dma_p, 3480 kaddrp, 3481 dma_p->alength)); 3482 break; 3483 } 3484 break; 3485 3486 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3487 case B_TRUE: 3488 kaddrp = (caddr_t)contig_mem_alloc(length); 3489 if (kaddrp == NULL) { 3490 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3491 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3492 ddi_dma_free_handle(&dma_p->dma_handle); 3493 return (NXGE_ERROR | NXGE_DDI_FAILED); 3494 } 3495 3496 dma_p->alength = length; 3497 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3498 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3499 &dma_p->dma_cookie, &dma_p->ncookies); 3500 if (ddi_status != DDI_DMA_MAPPED) { 3501 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3502 "nxge_dma_mem_alloc:ddi_dma_addr_bind failed " 3503 "(status 0x%x ncookies %d.)", ddi_status, 3504 dma_p->ncookies)); 3505 3506 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3507 "==> nxge_dma_mem_alloc: (not mapped)" 3508 "length %lu (0x%x) " 3509 "free contig kaddrp $%p " 3510 "va_to_pa $%p", 3511 length, length, 3512 kaddrp, 3513 va_to_pa(kaddrp))); 3514 3515 3516 contig_mem_free((void *)kaddrp, length); 3517 ddi_dma_free_handle(&dma_p->dma_handle); 3518 3519 dma_p->dma_handle = NULL; 3520 dma_p->acc_handle = NULL; 3521 dma_p->alength = 0; 3522 dma_p->kaddrp = NULL; 3523 3524 return (NXGE_ERROR | NXGE_DDI_FAILED); 3525 } 3526 3527 if (dma_p->ncookies != 1 || 3528 (dma_p->dma_cookie.dmac_laddress == NULL)) { 3529 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3530 "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 " 3531 "cookie or " 3532 "dmac_laddress is NULL $%p size %d " 3533 " (status 0x%x ncookies %d.)", 3534 dma_p->dma_cookie.dmac_laddress, 3535 dma_p->dma_cookie.dmac_size, 3536 ddi_status, 3537 dma_p->ncookies)); 3538 3539 contig_mem_free((void *)kaddrp, length); 3540 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3541 ddi_dma_free_handle(&dma_p->dma_handle); 3542 3543 dma_p->alength = 0; 3544 dma_p->dma_handle = NULL; 3545 dma_p->acc_handle = NULL; 3546 dma_p->kaddrp = NULL; 3547 3548 return (NXGE_ERROR | NXGE_DDI_FAILED); 3549 } 3550 break; 3551 3552 #else 3553 case B_TRUE: 3554 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3555 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 3556 return (NXGE_ERROR | NXGE_DDI_FAILED); 3557 #endif 3558 } 3559 3560 dma_p->kaddrp = kaddrp; 3561 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3562 dma_p->alength - RXBUF_64B_ALIGNED; 3563 #if defined(__i386) 3564 dma_p->ioaddr_pp = 3565 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 3566 #else 3567 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3568 #endif 3569 dma_p->last_ioaddr_pp = 3570 #if defined(__i386) 3571 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress + 3572 #else 3573
(unsigned char *)dma_p->dma_cookie.dmac_laddress + 3574 #endif 3575 dma_p->alength - RXBUF_64B_ALIGNED; 3576 3577 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 3578 3579 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3580 dma_p->orig_ioaddr_pp = 3581 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3582 dma_p->orig_alength = length; 3583 dma_p->orig_kaddrp = kaddrp; 3584 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 3585 #endif 3586 3587 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 3588 "dma buffer allocated: dma_p $%p " 3589 "return dmac_laddress from cookie $%p cookie dmac_size %d " 3590 "dma_p->ioaddr_p $%p " 3591 "dma_p->orig_ioaddr_p $%p " 3592 "orig_vatopa $%p " 3593 "alength %d (0x%x) " 3594 "kaddrp $%p " 3595 "length %d (0x%x)", 3596 dma_p, 3597 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 3598 dma_p->ioaddr_pp, 3599 dma_p->orig_ioaddr_pp, 3600 dma_p->orig_vatopa, 3601 dma_p->alength, dma_p->alength, 3602 kaddrp, 3603 length, length)); 3604 3605 return (NXGE_OK); 3606 } 3607 3608 static void 3609 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 3610 { 3611 if (dma_p->dma_handle != NULL) { 3612 if (dma_p->ncookies) { 3613 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3614 dma_p->ncookies = 0; 3615 } 3616 ddi_dma_free_handle(&dma_p->dma_handle); 3617 dma_p->dma_handle = NULL; 3618 } 3619 3620 if (dma_p->acc_handle != NULL) { 3621 ddi_dma_mem_free(&dma_p->acc_handle); 3622 dma_p->acc_handle = NULL; 3623 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3624 } 3625 3626 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3627 if (dma_p->contig_alloc_type && 3628 dma_p->orig_kaddrp && dma_p->orig_alength) { 3629 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3630 "kaddrp $%p (orig_kaddrp $%p)" 3631 "mem type %d " 3632 "orig_alength %d " 3633 "alength 0x%x (%d)", 3634 dma_p->kaddrp, 3635 dma_p->orig_kaddrp, 3636 dma_p->contig_alloc_type, 3637 dma_p->orig_alength, 3638 dma_p->alength, dma_p->alength)); 3639 3640 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 3641 dma_p->orig_alength = 0; 3642 dma_p->orig_kaddrp = NULL; 3643 dma_p->contig_alloc_type = B_FALSE; 3644 } 3645 #endif 3646 dma_p->kaddrp = NULL; 3647 dma_p->alength = 0; 3648 } 3649 3650 static void 3651 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p) 3652 { 3653 uint64_t kaddr; 3654 uint32_t buf_size; 3655 3656 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf")); 3657 3658 if (dma_p->dma_handle != NULL) { 3659 if (dma_p->ncookies) { 3660 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3661 dma_p->ncookies = 0; 3662 } 3663 ddi_dma_free_handle(&dma_p->dma_handle); 3664 dma_p->dma_handle = NULL; 3665 } 3666 3667 if (dma_p->acc_handle != NULL) { 3668 ddi_dma_mem_free(&dma_p->acc_handle); 3669 dma_p->acc_handle = NULL; 3670 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3671 } 3672 3673 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3674 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d", 3675 dma_p, 3676 dma_p->buf_alloc_state)); 3677 3678 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) { 3679 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3680 "<== nxge_dma_free_rx_data_buf: " 3681 "outstanding data buffers")); 3682 return; 3683 } 3684 3685 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3686 if (dma_p->contig_alloc_type && 3687 dma_p->orig_kaddrp && dma_p->orig_alength) { 3688 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: " 3689 "kaddrp $%p (orig_kaddrp $%p)" 3690 "mem type %d " 3691 "orig_alength %d " 3692 "alength 0x%x (%d)", 3693 dma_p->kaddrp, 3694 dma_p->orig_kaddrp,
3695 dma_p->contig_alloc_type, 3696 dma_p->orig_alength, 3697 dma_p->alength, dma_p->alength)); 3698 3699 kaddr = (uint64_t)dma_p->orig_kaddrp; 3700 buf_size = dma_p->orig_alength; 3701 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size); 3702 dma_p->orig_alength = 0; 3703 dma_p->orig_kaddrp = NULL; 3704 dma_p->contig_alloc_type = B_FALSE; 3705 dma_p->kaddrp = NULL; 3706 dma_p->alength = 0; 3707 return; 3708 } 3709 #endif 3710 3711 if (dma_p->kmem_alloc_type) { 3712 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3713 "nxge_dma_free_rx_data_buf: free kmem " 3714 "kaddrp $%p (orig_kaddrp $%p)" 3715 "alloc type %d " 3716 "orig_alength %d " 3717 "alength 0x%x (%d)", 3718 dma_p->kaddrp, 3719 dma_p->orig_kaddrp, 3720 dma_p->kmem_alloc_type, 3721 dma_p->orig_alength, 3722 dma_p->alength, dma_p->alength)); 3723 #if defined(__i386) 3724 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp; 3725 #else 3726 kaddr = (uint64_t)dma_p->kaddrp; 3727 #endif 3728 buf_size = dma_p->orig_alength; 3729 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3730 "nxge_dma_free_rx_data_buf: free dmap $%p " 3731 "kaddr $%p buf_size %d", 3732 dma_p, 3733 kaddr, buf_size)); 3734 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size); 3735 dma_p->alength = 0; 3736 dma_p->orig_alength = 0; 3737 dma_p->kaddrp = NULL; 3738 dma_p->kmem_alloc_type = B_FALSE; 3739 } 3740 3741 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf")); 3742 } 3743 3744 /* 3745 * nxge_m_start() -- start transmitting and receiving. 3746 * 3747 * This function is called by the MAC layer when the first 3748 * stream is opened, to prepare the hardware for sending 3749 * and receiving packets. 3750 */ 3751 static int 3752 nxge_m_start(void *arg) 3753 { 3754 p_nxge_t nxgep = (p_nxge_t)arg; 3755 3756 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 3757 3758 /* 3759 * Are we already started? 3760 */ 3761 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 3762 return (0); 3763 } 3764 3765 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) { 3766 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 3767 } 3768 3769 /* 3770 * Make sure RX MAC is disabled while we initialize. 3771 */ 3772 if (!isLDOMguest(nxgep)) { 3773 (void) nxge_rx_mac_disable(nxgep); 3774 } 3775 3776 /* 3777 * Grab the global lock. 3778 */ 3779 MUTEX_ENTER(nxgep->genlock); 3780 3781 /* 3782 * Initialize the driver and hardware. 3783 */ 3784 if (nxge_init(nxgep) != NXGE_OK) { 3785 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3786 "<== nxge_m_start: initialization failed")); 3787 MUTEX_EXIT(nxgep->genlock); 3788 return (EIO); 3789 } 3790 3791 /* 3792 * Start the timer to check for system errors and tx hangs. 3793 */ 3794 if (!isLDOMguest(nxgep)) 3795 nxgep->nxge_timerid = nxge_start_timer(nxgep, 3796 nxge_check_hw_state, NXGE_CHECK_TIMER); 3797 #if defined(sun4v) 3798 else 3799 nxge_hio_start_timer(nxgep); 3800 #endif 3801 3802 nxgep->link_notify = B_TRUE; 3803 nxgep->link_check_count = 0; 3804 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 3805 3806 /* 3807 * Let the global lock go, since we are initialized. 3808 */ 3809 MUTEX_EXIT(nxgep->genlock); 3810 3811 /* 3812 * Let the MAC start receiving packets, now that 3813 * we are initialized. 3814 */ 3815 if (!isLDOMguest(nxgep)) { 3816 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 3817 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3818 "<== nxge_m_start: enable of RX mac failed")); 3819 return (EIO); 3820 } 3821 3822 /* 3823 * Enable hardware interrupts.
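		 * Interrupts are armed last, after the RX MAC has been
		 * enabled above, so no interrupt should be taken before
		 * the rings are ready.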
3824 */ 3825 nxge_intr_hw_enable(nxgep); 3826 } 3827 #if defined(sun4v) 3828 else { 3829 /* 3830 * In guest domain we enable RDCs and their interrupts as 3831 * the last step. 3832 */ 3833 if (nxge_hio_rdc_enable(nxgep) != NXGE_OK) { 3834 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3835 "<== nxge_m_start: enable of RDCs failed")); 3836 return (EIO); 3837 } 3838 3839 if (nxge_hio_rdc_intr_arm(nxgep, B_TRUE) != NXGE_OK) { 3840 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3841 "<== nxge_m_start: intrs enable for RDCs failed")); 3842 return (EIO); 3843 } 3844 } 3845 #endif 3846 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3847 return (0); 3848 } 3849 3850 static boolean_t 3851 nxge_check_groups_stopped(p_nxge_t nxgep) 3852 { 3853 int i; 3854 3855 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 3856 if (nxgep->rx_hio_groups[i].started) 3857 return (B_FALSE); 3858 } 3859 3860 return (B_TRUE); 3861 } 3862 3863 /* 3864 * nxge_m_stop(): stop transmitting and receiving. 3865 */ 3866 static void 3867 nxge_m_stop(void *arg) 3868 { 3869 p_nxge_t nxgep = (p_nxge_t)arg; 3870 boolean_t groups_stopped; 3871 3872 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3873 3874 /* 3875 * Are the groups stopped? 3876 */ 3877 groups_stopped = nxge_check_groups_stopped(nxgep); 3878 ASSERT(groups_stopped == B_TRUE); 3879 if (!groups_stopped) { 3880 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n", 3881 nxgep->instance); 3882 return; 3883 } 3884 3885 if (!isLDOMguest(nxgep)) { 3886 /* 3887 * Disable the RX mac. 3888 */ 3889 (void) nxge_rx_mac_disable(nxgep); 3890 3891 /* 3892 * Wait for the IPP to drain. 3893 */ 3894 (void) nxge_ipp_drain(nxgep); 3895 3896 /* 3897 * Disable hardware interrupts. 3898 */ 3899 nxge_intr_hw_disable(nxgep); 3900 } 3901 #if defined(sun4v) 3902 else { 3903 (void) nxge_hio_rdc_intr_arm(nxgep, B_FALSE); 3904 } 3905 #endif 3906 3907 /* 3908 * Grab the global lock. 3909 */ 3910 MUTEX_ENTER(nxgep->genlock); 3911 3912 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3913 if (nxgep->nxge_timerid) { 3914 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3915 nxgep->nxge_timerid = 0; 3916 } 3917 3918 /* 3919 * Clean up. 3920 */ 3921 nxge_uninit(nxgep); 3922 3923 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3924 3925 /* 3926 * Let go of the global lock. 
3927 */ 3928 MUTEX_EXIT(nxgep->genlock); 3929 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3930 } 3931 3932 static int 3933 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3934 { 3935 p_nxge_t nxgep = (p_nxge_t)arg; 3936 struct ether_addr addrp; 3937 3938 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3939 "==> nxge_m_multicst: add %d", add)); 3940 3941 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3942 if (add) { 3943 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3944 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3945 "<== nxge_m_multicst: add multicast failed")); 3946 return (EINVAL); 3947 } 3948 } else { 3949 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3950 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3951 "<== nxge_m_multicst: del multicast failed")); 3952 return (EINVAL); 3953 } 3954 } 3955 3956 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3957 3958 return (0); 3959 } 3960 3961 static int 3962 nxge_m_promisc(void *arg, boolean_t on) 3963 { 3964 p_nxge_t nxgep = (p_nxge_t)arg; 3965 3966 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3967 "==> nxge_m_promisc: on %d", on)); 3968 3969 if (nxge_set_promisc(nxgep, on)) { 3970 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3971 "<== nxge_m_promisc: set promisc failed")); 3972 return (EINVAL); 3973 } 3974 3975 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3976 "<== nxge_m_promisc: on %d", on)); 3977 3978 return (0); 3979 } 3980 3981 static void 3982 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3983 { 3984 p_nxge_t nxgep = (p_nxge_t)arg; 3985 struct iocblk *iocp; 3986 boolean_t need_privilege; 3987 int err; 3988 int cmd; 3989 3990 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3991 3992 iocp = (struct iocblk *)mp->b_rptr; 3993 iocp->ioc_error = 0; 3994 need_privilege = B_TRUE; 3995 cmd = iocp->ioc_cmd; 3996 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3997 switch (cmd) { 3998 default: 3999 miocnak(wq, mp, 0, EINVAL); 4000 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 4001 return; 4002 4003 case LB_GET_INFO_SIZE: 4004 case LB_GET_INFO: 4005 case LB_GET_MODE: 4006 need_privilege = B_FALSE; 4007 break; 4008 case LB_SET_MODE: 4009 break; 4010 4011 4012 case NXGE_GET_MII: 4013 case NXGE_PUT_MII: 4014 case NXGE_GET64: 4015 case NXGE_PUT64: 4016 case NXGE_GET_TX_RING_SZ: 4017 case NXGE_GET_TX_DESC: 4018 case NXGE_TX_SIDE_RESET: 4019 case NXGE_RX_SIDE_RESET: 4020 case NXGE_GLOBAL_RESET: 4021 case NXGE_RESET_MAC: 4022 case NXGE_TX_REGS_DUMP: 4023 case NXGE_RX_REGS_DUMP: 4024 case NXGE_INT_REGS_DUMP: 4025 case NXGE_VIR_INT_REGS_DUMP: 4026 case NXGE_PUT_TCAM: 4027 case NXGE_GET_TCAM: 4028 case NXGE_RTRACE: 4029 case NXGE_RDUMP: 4030 case NXGE_RX_CLASS: 4031 case NXGE_RX_HASH: 4032 4033 need_privilege = B_FALSE; 4034 break; 4035 case NXGE_INJECT_ERR: 4036 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 4037 nxge_err_inject(nxgep, wq, mp); 4038 break; 4039 } 4040 4041 if (need_privilege) { 4042 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 4043 if (err != 0) { 4044 miocnak(wq, mp, 0, err); 4045 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4046 "<== nxge_m_ioctl: no priv")); 4047 return; 4048 } 4049 } 4050 4051 switch (cmd) { 4052 4053 case LB_GET_MODE: 4054 case LB_SET_MODE: 4055 case LB_GET_INFO_SIZE: 4056 case LB_GET_INFO: 4057 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 4058 break; 4059 4060 case NXGE_GET_MII: 4061 case NXGE_PUT_MII: 4062 case NXGE_PUT_TCAM: 4063 case NXGE_GET_TCAM: 4064 case NXGE_GET64: 4065 case NXGE_PUT64: 4066 case NXGE_GET_TX_RING_SZ: 4067 case NXGE_GET_TX_DESC: 4068 case NXGE_TX_SIDE_RESET: 4069 case NXGE_RX_SIDE_RESET: 
4070 	case NXGE_GLOBAL_RESET:
4071 	case NXGE_RESET_MAC:
4072 	case NXGE_TX_REGS_DUMP:
4073 	case NXGE_RX_REGS_DUMP:
4074 	case NXGE_INT_REGS_DUMP:
4075 	case NXGE_VIR_INT_REGS_DUMP:
4076 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4077 		    "==> nxge_m_ioctl: cmd 0x%x", cmd));
4078 		nxge_hw_ioctl(nxgep, wq, mp, iocp);
4079 		break;
4080 	case NXGE_RX_CLASS:
4081 		if (nxge_rxclass_ioctl(nxgep, wq, mp->b_cont) < 0)
4082 			miocnak(wq, mp, 0, EINVAL);
4083 		else
4084 			miocack(wq, mp, sizeof (rx_class_cfg_t), 0);
4085 		break;
4086 	case NXGE_RX_HASH:
4087 
4088 		if (nxge_rxhash_ioctl(nxgep, wq, mp->b_cont) < 0)
4089 			miocnak(wq, mp, 0, EINVAL);
4090 		else
4091 			miocack(wq, mp, sizeof (cfg_cmd_t), 0);
4092 		break;
4093 	}
4094 
4095 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
4096 }
4097 
4098 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
4099 
4100 void
4101 nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory)
4102 {
4103 	p_nxge_mmac_stats_t mmac_stats;
4104 	int i;
4105 	nxge_mmac_t *mmac_info;
4106 
4107 	mmac_info = &nxgep->nxge_mmac_info;
4108 
4109 	mmac_stats = &nxgep->statsp->mmac_stats;
4110 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
4111 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
4112 
4113 	for (i = 0; i < ETHERADDRL; i++) {
4114 		if (factory) {
4115 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
4116 			    = mmac_info->factory_mac_pool[slot][
4117 			    (ETHERADDRL-1) - i];
4118 		} else {
4119 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
4120 			    = mmac_info->mac_pool[slot].addr[
4121 			    (ETHERADDRL - 1) - i];
4122 		}
4123 	}
4124 }
4125 
4126 /*
4127  * nxge_altmac_set() -- Set an alternate MAC address
4128  */
4129 static int
4130 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot,
4131     int rdctbl, boolean_t usetbl)
4132 {
4133 	uint8_t addrn;
4134 	uint8_t portn;
4135 	npi_mac_addr_t altmac;
4136 	hostinfo_t mac_rdc;
4137 	p_nxge_class_pt_cfg_t clscfgp;
4138 
4139 
4140 	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
4141 	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
4142 	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
4143 
4144 	portn = nxgep->mac.portnum;
4145 	addrn = (uint8_t)slot - 1;
4146 
4147 	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET,
4148 	    nxgep->function_num, addrn, &altmac) != NPI_SUCCESS)
4149 		return (EIO);
4150 
4151 	/*
4152 	 * Set the rdc table number for the host info entry
4153 	 * for this mac address slot.
4154 	 */
4155 	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
4156 	mac_rdc.value = 0;
4157 	if (usetbl)
4158 		mac_rdc.bits.w0.rdc_tbl_num = rdctbl;
4159 	else
4160 		mac_rdc.bits.w0.rdc_tbl_num =
4161 		    clscfgp->mac_host_info[addrn].rdctbl;
4162 	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;
4163 
4164 	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
4165 	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
4166 		return (EIO);
4167 	}
4168 
4169 	/*
4170 	 * Enable comparison with the alternate MAC address.
4171 	 * While the first alternate addr is enabled by bit 1 of register
4172 	 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
4173 	 * XMAC_ADDR_CMPEN, so the slot number must be converted to addrn
4174 	 * accordingly before calling npi_mac_altaddr_enable().
4175 	 */
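	/*
	 * Editor's note: a minimal sketch of the conversion described
	 * above, factored into a hypothetical helper for illustration
	 * only. It is not compiled into the driver.
	 */
#if 0
	static uint8_t
	slot_to_cmpen_index(uint8_t portn, int slot)
	{
		/* XMAC ports: alternate address n is enabled by bit n - 1. */
		if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
			return ((uint8_t)slot - 1);
		/* BMAC ports: bit 0 is the primary MAC, so use bit "slot". */
		return ((uint8_t)slot);
	}
#endif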
4176 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4177 		addrn = (uint8_t)slot - 1;
4178 	else
4179 		addrn = (uint8_t)slot;
4180 
4181 	if (npi_mac_altaddr_enable(nxgep->npi_handle,
4182 	    nxgep->function_num, addrn) != NPI_SUCCESS) {
4183 		return (EIO);
4184 	}
4185 
4186 	return (0);
4187 }
4188 
4189 /*
4190  * nxge_m_mmac_add_g() -- find an unused address slot, set the address
4191  * value to the one specified, and enable the port to start filtering on
4192  * the new MAC address. Returns 0 on success.
4193  */
4194 int
4195 nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
4196     boolean_t usetbl)
4197 {
4198 	p_nxge_t nxgep = arg;
4199 	int slot;
4200 	nxge_mmac_t *mmac_info;
4201 	int err;
4202 	nxge_status_t status;
4203 
4204 	mutex_enter(nxgep->genlock);
4205 
4206 	/*
4207 	 * Make sure that nxge is initialized, if _start() has
4208 	 * not been called.
4209 	 */
4210 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4211 		status = nxge_init(nxgep);
4212 		if (status != NXGE_OK) {
4213 			mutex_exit(nxgep->genlock);
4214 			return (ENXIO);
4215 		}
4216 	}
4217 
4218 	mmac_info = &nxgep->nxge_mmac_info;
4219 	if (mmac_info->naddrfree == 0) {
4220 		mutex_exit(nxgep->genlock);
4221 		return (ENOSPC);
4222 	}
4223 
4224 	/*
4225 	 * Search for the first available slot. Because naddrfree
4226 	 * is not zero, we are guaranteed to find one.
4227 	 * Each of the first two ports of Neptune has 16 alternate
4228 	 * MAC slots but only the first 7 (of 15) slots have assigned factory
4229 	 * MAC addresses. We first search among the slots without bundled
4230 	 * factory MACs. If we fail to find one in that range, then we
4231 	 * search the slots with bundled factory MACs. A factory MAC
4232 	 * will be wasted while the slot is used with a user MAC address.
4233 	 * But the slot could be used by a factory MAC again after calling
4234 	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
4235 	 */
4236 	for (slot = 0; slot <= mmac_info->num_mmac; slot++) {
4237 		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
4238 			break;
4239 	}
4240 
4241 	ASSERT(slot <= mmac_info->num_mmac);
4242 
4243 	if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl,
4244 	    usetbl)) != 0) {
4245 		mutex_exit(nxgep->genlock);
4246 		return (err);
4247 	}
4248 
4249 	bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
4250 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
4251 	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
4252 	mmac_info->naddrfree--;
4253 	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4254 
4255 	mutex_exit(nxgep->genlock);
4256 	return (0);
4257 }
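/*
 * Editor's note: a hedged usage sketch of the add/remove pair around this
 * point. The MAC address is a hypothetical example, and the slot passed to
 * nxge_m_mmac_remove() assumes the address landed in slot 1 (slot numbers
 * are 1-based). Illustrative only; not compiled into the driver.
 */
#if 0
static int
example_mmac_add_then_remove(p_nxge_t nxgep)
{
	/* A hypothetical alternate unicast address. */
	uint8_t mac[ETHERADDRL] = { 0x00, 0x14, 0x4f, 0x00, 0x00, 0x01 };
	int err;

	/* Program the address into the first free slot (no RDC table). */
	err = nxge_m_mmac_add_g(nxgep, mac, 0, B_FALSE);
	if (err != 0)
		return (err);

	/* Stop filtering on it again, assuming it went into slot 1. */
	return (nxge_m_mmac_remove(nxgep, 1));
}
#endif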
4258 
4259 /*
4260  * Remove the specified MAC address and update the hardware so that it
4261  * no longer filters on that address.
4262  */
4263 int
4264 nxge_m_mmac_remove(void *arg, int slot)
4265 {
4266 	p_nxge_t nxgep = arg;
4267 	nxge_mmac_t *mmac_info;
4268 	uint8_t addrn;
4269 	uint8_t portn;
4270 	int err = 0;
4271 	nxge_status_t status;
4272 
4273 	mutex_enter(nxgep->genlock);
4274 
4275 	/*
4276 	 * Make sure that nxge is initialized, if _start() has
4277 	 * not been called.
4278 	 */
4279 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4280 		status = nxge_init(nxgep);
4281 		if (status != NXGE_OK) {
4282 			mutex_exit(nxgep->genlock);
4283 			return (ENXIO);
4284 		}
4285 	}
4286 
4287 	mmac_info = &nxgep->nxge_mmac_info;
4288 	if (slot < 1 || slot > mmac_info->num_mmac) {
4289 		mutex_exit(nxgep->genlock);
4290 		return (EINVAL);
4291 	}
4292 
4293 	portn = nxgep->mac.portnum;
4294 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4295 		addrn = (uint8_t)slot - 1;
4296 	else
4297 		addrn = (uint8_t)slot;
4298 
4299 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
4300 		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
4301 		    == NPI_SUCCESS) {
4302 			mmac_info->naddrfree++;
4303 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
4304 			/*
4305 			 * Regardless of whether the MAC we just stopped
4306 			 * filtering is a user addr or a factory addr, we
4307 			 * must set the MMAC_VENDOR_ADDR flag if this slot
4308 			 * has an associated factory MAC, to indicate that
4309 			 * a factory MAC is available.
4310 			 */
4311 			if (slot <= mmac_info->num_factory_mmac) {
4312 				mmac_info->mac_pool[slot].flags
4313 				    |= MMAC_VENDOR_ADDR;
4314 			}
4315 			/*
4316 			 * Clear mac_pool[slot].addr so that kstat shows 0
4317 			 * alternate MAC address if the slot is not used.
4318 			 * (But nxge_m_mmac_get returns the factory MAC even
4319 			 * when the slot is not used!)
4320 			 */
4321 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
4322 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4323 		} else {
4324 			err = EIO;
4325 		}
4326 	} else {
4327 		err = EINVAL;
4328 	}
4329 
4330 	mutex_exit(nxgep->genlock);
4331 	return (err);
4332 }
4333 
4334 /*
4335  * The callback to query all the factory addresses. naddr must be the same as
4336  * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and
4337  * mcm_addr is the space allocated to keep all the addresses, whose size is
4338  * naddr * MAXMACADDRLEN.
4339 */ 4340 static void 4341 nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr) 4342 { 4343 nxge_t *nxgep = arg; 4344 nxge_mmac_t *mmac_info; 4345 int i; 4346 4347 mutex_enter(nxgep->genlock); 4348 4349 mmac_info = &nxgep->nxge_mmac_info; 4350 ASSERT(naddr == mmac_info->num_factory_mmac); 4351 4352 for (i = 0; i < naddr; i++) { 4353 bcopy(mmac_info->factory_mac_pool[i + 1], 4354 addr + i * MAXMACADDRLEN, ETHERADDRL); 4355 } 4356 4357 mutex_exit(nxgep->genlock); 4358 } 4359 4360 4361 static boolean_t 4362 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4363 { 4364 nxge_t *nxgep = arg; 4365 uint32_t *txflags = cap_data; 4366 4367 switch (cap) { 4368 case MAC_CAPAB_HCKSUM: 4369 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4370 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload)); 4371 if (nxge_cksum_offload <= 1) { 4372 *txflags = HCKSUM_INET_PARTIAL; 4373 } 4374 break; 4375 4376 case MAC_CAPAB_MULTIFACTADDR: { 4377 mac_capab_multifactaddr_t *mfacp = cap_data; 4378 4379 if (!isLDOMguest(nxgep)) { 4380 mutex_enter(nxgep->genlock); 4381 mfacp->mcm_naddr = 4382 nxgep->nxge_mmac_info.num_factory_mmac; 4383 mfacp->mcm_getaddr = nxge_m_getfactaddr; 4384 mutex_exit(nxgep->genlock); 4385 } 4386 break; 4387 } 4388 4389 case MAC_CAPAB_LSO: { 4390 mac_capab_lso_t *cap_lso = cap_data; 4391 4392 if (nxgep->soft_lso_enable) { 4393 if (nxge_cksum_offload <= 1) { 4394 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4395 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 4396 nxge_lso_max = NXGE_LSO_MAXLEN; 4397 } 4398 cap_lso->lso_basic_tcp_ipv4.lso_max = 4399 nxge_lso_max; 4400 } 4401 break; 4402 } else { 4403 return (B_FALSE); 4404 } 4405 } 4406 4407 case MAC_CAPAB_RINGS: { 4408 mac_capab_rings_t *cap_rings = cap_data; 4409 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 4410 4411 mutex_enter(nxgep->genlock); 4412 if (cap_rings->mr_type == MAC_RING_TYPE_RX) { 4413 if (isLDOMguest(nxgep)) { 4414 cap_rings->mr_group_type = 4415 MAC_GROUP_TYPE_STATIC; 4416 cap_rings->mr_rnum = 4417 NXGE_HIO_SHARE_MAX_CHANNELS; 4418 cap_rings->mr_rget = nxge_fill_ring; 4419 cap_rings->mr_gnum = 1; 4420 cap_rings->mr_gget = nxge_hio_group_get; 4421 cap_rings->mr_gaddring = NULL; 4422 cap_rings->mr_gremring = NULL; 4423 } else { 4424 /* 4425 * Service Domain. 4426 */ 4427 cap_rings->mr_group_type = 4428 MAC_GROUP_TYPE_DYNAMIC; 4429 cap_rings->mr_rnum = p_cfgp->max_rdcs; 4430 cap_rings->mr_rget = nxge_fill_ring; 4431 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids; 4432 cap_rings->mr_gget = nxge_hio_group_get; 4433 cap_rings->mr_gaddring = nxge_group_add_ring; 4434 cap_rings->mr_gremring = nxge_group_rem_ring; 4435 } 4436 4437 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4438 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]", 4439 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids)); 4440 } else { 4441 /* 4442 * TX Rings. 4443 */ 4444 if (isLDOMguest(nxgep)) { 4445 cap_rings->mr_group_type = 4446 MAC_GROUP_TYPE_STATIC; 4447 cap_rings->mr_rnum = 4448 NXGE_HIO_SHARE_MAX_CHANNELS; 4449 cap_rings->mr_rget = nxge_fill_ring; 4450 cap_rings->mr_gnum = 0; 4451 cap_rings->mr_gget = NULL; 4452 cap_rings->mr_gaddring = NULL; 4453 cap_rings->mr_gremring = NULL; 4454 } else { 4455 /* 4456 * Service Domain. 4457 */ 4458 cap_rings->mr_group_type = 4459 MAC_GROUP_TYPE_DYNAMIC; 4460 cap_rings->mr_rnum = p_cfgp->tdc.count; 4461 cap_rings->mr_rget = nxge_fill_ring; 4462 4463 /* 4464 * Share capable. 
4465 * 4466 * Do not report the default group: hence -1 4467 */ 4468 cap_rings->mr_gnum = 4469 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1; 4470 cap_rings->mr_gget = nxge_hio_group_get; 4471 cap_rings->mr_gaddring = nxge_group_add_ring; 4472 cap_rings->mr_gremring = nxge_group_rem_ring; 4473 } 4474 4475 NXGE_DEBUG_MSG((nxgep, TX_CTL, 4476 "==> nxge_m_getcapab: tx rings # of rings %d", 4477 p_cfgp->tdc.count)); 4478 } 4479 mutex_exit(nxgep->genlock); 4480 break; 4481 } 4482 4483 #if defined(sun4v) 4484 case MAC_CAPAB_SHARES: { 4485 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4486 4487 /* 4488 * Only the service domain driver responds to 4489 * this capability request. 4490 */ 4491 mutex_enter(nxgep->genlock); 4492 if (isLDOMservice(nxgep)) { 4493 mshares->ms_snum = 3; 4494 mshares->ms_handle = (void *)nxgep; 4495 mshares->ms_salloc = nxge_hio_share_alloc; 4496 mshares->ms_sfree = nxge_hio_share_free; 4497 mshares->ms_sadd = nxge_hio_share_add_group; 4498 mshares->ms_sremove = nxge_hio_share_rem_group; 4499 mshares->ms_squery = nxge_hio_share_query; 4500 mshares->ms_sbind = nxge_hio_share_bind; 4501 mshares->ms_sunbind = nxge_hio_share_unbind; 4502 mutex_exit(nxgep->genlock); 4503 } else { 4504 mutex_exit(nxgep->genlock); 4505 return (B_FALSE); 4506 } 4507 break; 4508 } 4509 #endif 4510 default: 4511 return (B_FALSE); 4512 } 4513 return (B_TRUE); 4514 } 4515 4516 static boolean_t 4517 nxge_param_locked(mac_prop_id_t pr_num) 4518 { 4519 /* 4520 * All adv_* parameters are locked (read-only) while 4521 * the device is in any sort of loopback mode ... 4522 */ 4523 switch (pr_num) { 4524 case MAC_PROP_ADV_1000FDX_CAP: 4525 case MAC_PROP_EN_1000FDX_CAP: 4526 case MAC_PROP_ADV_1000HDX_CAP: 4527 case MAC_PROP_EN_1000HDX_CAP: 4528 case MAC_PROP_ADV_100FDX_CAP: 4529 case MAC_PROP_EN_100FDX_CAP: 4530 case MAC_PROP_ADV_100HDX_CAP: 4531 case MAC_PROP_EN_100HDX_CAP: 4532 case MAC_PROP_ADV_10FDX_CAP: 4533 case MAC_PROP_EN_10FDX_CAP: 4534 case MAC_PROP_ADV_10HDX_CAP: 4535 case MAC_PROP_EN_10HDX_CAP: 4536 case MAC_PROP_AUTONEG: 4537 case MAC_PROP_FLOWCTRL: 4538 return (B_TRUE); 4539 } 4540 return (B_FALSE); 4541 } 4542 4543 /* 4544 * callback functions for set/get of properties 4545 */ 4546 static int 4547 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4548 uint_t pr_valsize, const void *pr_val) 4549 { 4550 nxge_t *nxgep = barg; 4551 p_nxge_param_t param_arr = nxgep->param_arr; 4552 p_nxge_stats_t statsp = nxgep->statsp; 4553 int err = 0; 4554 4555 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4556 4557 mutex_enter(nxgep->genlock); 4558 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4559 nxge_param_locked(pr_num)) { 4560 /* 4561 * All adv_* parameters are locked (read-only) 4562 * while the device is in any sort of loopback mode. 
4563 */ 4564 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4565 "==> nxge_m_setprop: loopback mode: read only")); 4566 mutex_exit(nxgep->genlock); 4567 return (EBUSY); 4568 } 4569 4570 switch (pr_num) { 4571 case MAC_PROP_EN_1000FDX_CAP: 4572 nxgep->param_en_1000fdx = 4573 param_arr[param_anar_1000fdx].value = *(uint8_t *)pr_val; 4574 goto reprogram; 4575 4576 case MAC_PROP_EN_100FDX_CAP: 4577 nxgep->param_en_100fdx = 4578 param_arr[param_anar_100fdx].value = *(uint8_t *)pr_val; 4579 goto reprogram; 4580 4581 case MAC_PROP_EN_10FDX_CAP: 4582 nxgep->param_en_10fdx = 4583 param_arr[param_anar_10fdx].value = *(uint8_t *)pr_val; 4584 goto reprogram; 4585 4586 case MAC_PROP_AUTONEG: 4587 param_arr[param_autoneg].value = *(uint8_t *)pr_val; 4588 goto reprogram; 4589 4590 case MAC_PROP_MTU: { 4591 uint32_t cur_mtu, new_mtu, old_framesize; 4592 4593 cur_mtu = nxgep->mac.default_mtu; 4594 ASSERT(pr_valsize >= sizeof (new_mtu)); 4595 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4596 4597 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4598 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4599 new_mtu, nxgep->mac.is_jumbo)); 4600 4601 if (new_mtu == cur_mtu) { 4602 err = 0; 4603 break; 4604 } 4605 4606 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4607 err = EBUSY; 4608 break; 4609 } 4610 4611 if ((new_mtu < NXGE_DEFAULT_MTU) || 4612 (new_mtu > NXGE_MAXIMUM_MTU)) { 4613 err = EINVAL; 4614 break; 4615 } 4616 4617 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4618 nxgep->mac.maxframesize = (uint16_t) 4619 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4620 if (nxge_mac_set_framesize(nxgep)) { 4621 nxgep->mac.maxframesize = 4622 (uint16_t)old_framesize; 4623 err = EINVAL; 4624 break; 4625 } 4626 4627 nxgep->mac.default_mtu = new_mtu; 4628 nxgep->mac.is_jumbo = (new_mtu > NXGE_DEFAULT_MTU); 4629 4630 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4631 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4632 new_mtu, nxgep->mac.maxframesize)); 4633 break; 4634 } 4635 4636 case MAC_PROP_FLOWCTRL: { 4637 link_flowctrl_t fl; 4638 4639 ASSERT(pr_valsize >= sizeof (fl)); 4640 bcopy(pr_val, &fl, sizeof (fl)); 4641 4642 switch (fl) { 4643 case LINK_FLOWCTRL_NONE: 4644 param_arr[param_anar_pause].value = 0; 4645 break; 4646 4647 case LINK_FLOWCTRL_RX: 4648 param_arr[param_anar_pause].value = 1; 4649 break; 4650 4651 case LINK_FLOWCTRL_TX: 4652 case LINK_FLOWCTRL_BI: 4653 err = EINVAL; 4654 break; 4655 default: 4656 err = EINVAL; 4657 break; 4658 } 4659 reprogram: 4660 if ((err == 0) && !isLDOMguest(nxgep)) { 4661 if (!nxge_param_link_update(nxgep)) { 4662 err = EINVAL; 4663 } 4664 } else { 4665 err = EINVAL; 4666 } 4667 break; 4668 } 4669 4670 case MAC_PROP_PRIVATE: 4671 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4672 "==> nxge_m_setprop: private property")); 4673 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, pr_val); 4674 break; 4675 4676 default: 4677 err = ENOTSUP; 4678 break; 4679 } 4680 4681 mutex_exit(nxgep->genlock); 4682 4683 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4684 "<== nxge_m_setprop (return %d)", err)); 4685 return (err); 4686 } 4687 4688 static int 4689 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4690 uint_t pr_valsize, void *pr_val) 4691 { 4692 nxge_t *nxgep = barg; 4693 p_nxge_param_t param_arr = nxgep->param_arr; 4694 p_nxge_stats_t statsp = nxgep->statsp; 4695 4696 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4697 "==> nxge_m_getprop: pr_num %d", pr_num)); 4698 4699 switch (pr_num) { 4700 case MAC_PROP_DUPLEX: 4701 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4702 break; 4703 4704 case MAC_PROP_SPEED: { 4705 uint64_t val = 
statsp->mac_stats.link_speed * 1000000ull; 4706 4707 ASSERT(pr_valsize >= sizeof (val)); 4708 bcopy(&val, pr_val, sizeof (val)); 4709 break; 4710 } 4711 4712 case MAC_PROP_STATUS: { 4713 link_state_t state = statsp->mac_stats.link_up ? 4714 LINK_STATE_UP : LINK_STATE_DOWN; 4715 4716 ASSERT(pr_valsize >= sizeof (state)); 4717 bcopy(&state, pr_val, sizeof (state)); 4718 break; 4719 } 4720 4721 case MAC_PROP_AUTONEG: 4722 *(uint8_t *)pr_val = param_arr[param_autoneg].value; 4723 break; 4724 4725 case MAC_PROP_FLOWCTRL: { 4726 link_flowctrl_t fl = param_arr[param_anar_pause].value != 0 ? 4727 LINK_FLOWCTRL_RX : LINK_FLOWCTRL_NONE; 4728 4729 ASSERT(pr_valsize >= sizeof (fl)); 4730 bcopy(&fl, pr_val, sizeof (fl)); 4731 break; 4732 } 4733 4734 case MAC_PROP_ADV_1000FDX_CAP: 4735 *(uint8_t *)pr_val = param_arr[param_anar_1000fdx].value; 4736 break; 4737 4738 case MAC_PROP_EN_1000FDX_CAP: 4739 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4740 break; 4741 4742 case MAC_PROP_ADV_100FDX_CAP: 4743 *(uint8_t *)pr_val = param_arr[param_anar_100fdx].value; 4744 break; 4745 4746 case MAC_PROP_EN_100FDX_CAP: 4747 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4748 break; 4749 4750 case MAC_PROP_ADV_10FDX_CAP: 4751 *(uint8_t *)pr_val = param_arr[param_anar_10fdx].value; 4752 break; 4753 4754 case MAC_PROP_EN_10FDX_CAP: 4755 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4756 break; 4757 4758 case MAC_PROP_PRIVATE: 4759 return (nxge_get_priv_prop(nxgep, pr_name, pr_valsize, 4760 pr_val)); 4761 4762 default: 4763 return (ENOTSUP); 4764 } 4765 4766 return (0); 4767 } 4768 4769 static void 4770 nxge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4771 mac_prop_info_handle_t prh) 4772 { 4773 nxge_t *nxgep = barg; 4774 p_nxge_stats_t statsp = nxgep->statsp; 4775 4776 /* 4777 * By default permissions are read/write unless specified 4778 * otherwise by the driver. 4779 */ 4780 4781 switch (pr_num) { 4782 case MAC_PROP_DUPLEX: 4783 case MAC_PROP_SPEED: 4784 case MAC_PROP_STATUS: 4785 case MAC_PROP_EN_1000HDX_CAP: 4786 case MAC_PROP_EN_100HDX_CAP: 4787 case MAC_PROP_EN_10HDX_CAP: 4788 case MAC_PROP_ADV_1000FDX_CAP: 4789 case MAC_PROP_ADV_1000HDX_CAP: 4790 case MAC_PROP_ADV_100FDX_CAP: 4791 case MAC_PROP_ADV_100HDX_CAP: 4792 case MAC_PROP_ADV_10FDX_CAP: 4793 case MAC_PROP_ADV_10HDX_CAP: 4794 /* 4795 * Note that read-only properties don't need to 4796 * provide default values since they cannot be 4797 * changed by the administrator. 4798 */ 4799 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 4800 break; 4801 4802 case MAC_PROP_EN_1000FDX_CAP: 4803 case MAC_PROP_EN_100FDX_CAP: 4804 case MAC_PROP_EN_10FDX_CAP: 4805 mac_prop_info_set_default_uint8(prh, 1); 4806 break; 4807 4808 case MAC_PROP_AUTONEG: 4809 mac_prop_info_set_default_uint8(prh, 1); 4810 break; 4811 4812 case MAC_PROP_FLOWCTRL: 4813 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_RX); 4814 break; 4815 4816 case MAC_PROP_MTU: 4817 mac_prop_info_set_range_uint32(prh, 4818 NXGE_DEFAULT_MTU, NXGE_MAXIMUM_MTU); 4819 break; 4820 4821 case MAC_PROP_PRIVATE: 4822 nxge_priv_propinfo(pr_name, prh); 4823 break; 4824 } 4825 4826 mutex_enter(nxgep->genlock); 4827 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4828 nxge_param_locked(pr_num)) { 4829 /* 4830 * Some properties are locked (read-only) while the 4831 * device is in any sort of loopback mode. 
4832 	 */
4833 		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4834 	}
4835 	mutex_exit(nxgep->genlock);
4836 }
4837 
4838 static void
4839 nxge_priv_propinfo(const char *pr_name, mac_prop_info_handle_t prh)
4840 {
4841 	char valstr[64];
4842 
4843 	bzero(valstr, sizeof (valstr));
4844 
4845 	if (strcmp(pr_name, "_function_number") == 0 ||
4846 	    strcmp(pr_name, "_fw_version") == 0 ||
4847 	    strcmp(pr_name, "_port_mode") == 0 ||
4848 	    strcmp(pr_name, "_hot_swap_phy") == 0) {
4849 		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4850 
4851 	} else if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4852 		(void) snprintf(valstr, sizeof (valstr),
4853 		    "%d", RXDMA_RCR_TO_DEFAULT);
4854 
4855 	} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4856 		(void) snprintf(valstr, sizeof (valstr),
4857 		    "%d", RXDMA_RCR_PTHRES_DEFAULT);
4858 
4859 	} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
4860 	    strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
4861 	    strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
4862 	    strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
4863 	    strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
4864 	    strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
4865 	    strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
4866 	    strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
4867 		(void) snprintf(valstr, sizeof (valstr), "%x",
4868 		    NXGE_CLASS_FLOW_GEN_SERVER);
4869 
4870 	} else if (strcmp(pr_name, "_soft_lso_enable") == 0) {
4871 		(void) snprintf(valstr, sizeof (valstr), "%d", 0);
4872 
4873 	} else if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
4874 		(void) snprintf(valstr, sizeof (valstr), "%d", 1);
4875 
4876 	} else if (strcmp(pr_name, "_adv_pause_cap") == 0) {
4877 		(void) snprintf(valstr, sizeof (valstr), "%d", 1);
4878 	}
4879 
4880 	if (strlen(valstr) > 0)
4881 		mac_prop_info_set_default_str(prh, valstr);
4882 }
4883 
4884 /* ARGSUSED */
4885 static int
4886 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
4887     const void *pr_val)
4888 {
4889 	p_nxge_param_t param_arr = nxgep->param_arr;
4890 	int err = 0;
4891 	long result = 0;
4892 
4893 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4894 	    "==> nxge_set_priv_prop: name %s", pr_name));
4895 
4896 	/* Receive interrupt blanking */
4897 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4898 		err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
4899 		    (char *)pr_val,
4900 		    (caddr_t)&param_arr[param_rxdma_intr_time]);
4901 		if (err) {
4902 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4903 			    "<== nxge_set_priv_prop: "
4904 			    "unable to set (%s)", pr_name));
4905 			err = EINVAL;
4906 		} else {
4907 			err = 0;
4908 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4909 			    "<== nxge_set_priv_prop: "
4910 			    "set (%s)", pr_name));
4911 		}
4912 
4913 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4914 		    "<== nxge_set_priv_prop: name %s (value %s)",
4915 		    pr_name, (char *)pr_val));
4916 
4917 		return (err);
4918 	}
4919 
4920 	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4921 		err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL,
4922 		    (char *)pr_val,
4923 		    (caddr_t)&param_arr[param_rxdma_intr_pkts]);
4924 		if (err) {
4925 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4926 			    "<== nxge_set_priv_prop: "
4927 			    "unable to set (%s)", pr_name));
4928 			err = EINVAL;
4929 		} else {
4930 			err = 0;
4931 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4932 			    "<== nxge_set_priv_prop: "
4933 			    "set (%s)", pr_name));
4934 		}
4935 
4936 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4937 		    "<== nxge_set_priv_prop: name %s (value %s)",
4938 		    pr_name, (char *)pr_val));
4939 
4940 		return (err);
4941 	}
4942 
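/*
 * Editor's note: each classification handler below follows the same
 * parse-then-apply pattern. A condensed sketch of that pattern, with a
 * hypothetical helper name; illustrative only, not compiled into the
 * driver.
 */
#if 0
static int
parse_priv_prop_value(const char *pr_val, long *resultp)
{
	if (pr_val == NULL)
		return (EINVAL);
	/* Base 0 accepts decimal, octal (0...), and hex (0x...) input. */
	if (ddi_strtol(pr_val, (char **)NULL, 0, resultp) != 0)
		return (EINVAL);
	return (0);
}
#endif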
4943 	/* Classification */
4944 	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
4945 		if (pr_val == NULL) {
4946 			err = EINVAL;
4947 			return (err);
4948 		}
4949 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4950 
4951 		err = nxge_param_set_ip_opt(nxgep, NULL,
4952 		    NULL, (char *)pr_val,
4953 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
4954 
4955 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4956 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4957 		    pr_name, result));
4958 
4959 		return (err);
4960 	}
4961 
4962 	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
4963 		if (pr_val == NULL) {
4964 			err = EINVAL;
4965 			return (err);
4966 		}
4967 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4968 
4969 		err = nxge_param_set_ip_opt(nxgep, NULL,
4970 		    NULL, (char *)pr_val,
4971 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
4972 
4973 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4974 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4975 		    pr_name, result));
4976 
4977 		return (err);
4978 	}
4979 	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
4980 		if (pr_val == NULL) {
4981 			err = EINVAL;
4982 			return (err);
4983 		}
4984 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4985 
4986 		err = nxge_param_set_ip_opt(nxgep, NULL,
4987 		    NULL, (char *)pr_val,
4988 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
4989 
4990 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4991 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
4992 		    pr_name, result));
4993 
4994 		return (err);
4995 	}
4996 	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
4997 		if (pr_val == NULL) {
4998 			err = EINVAL;
4999 			return (err);
5000 		}
5001 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5002 
5003 		err = nxge_param_set_ip_opt(nxgep, NULL,
5004 		    NULL, (char *)pr_val,
5005 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
5006 
5007 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5008 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5009 		    pr_name, result));
5010 
5011 		return (err);
5012 	}
5013 
5014 	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5015 		if (pr_val == NULL) {
5016 			err = EINVAL;
5017 			return (err);
5018 		}
5019 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5020 
5021 		err = nxge_param_set_ip_opt(nxgep, NULL,
5022 		    NULL, (char *)pr_val,
5023 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
5024 
5025 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5026 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5027 		    pr_name, result));
5028 
5029 		return (err);
5030 	}
5031 
5032 	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5033 		if (pr_val == NULL) {
5034 			err = EINVAL;
5035 			return (err);
5036 		}
5037 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5038 
5039 		err = nxge_param_set_ip_opt(nxgep, NULL,
5040 		    NULL, (char *)pr_val,
5041 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
5042 
5043 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5044 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5045 		    pr_name, result));
5046 
5047 		return (err);
5048 	}
5049 	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5050 		if (pr_val == NULL) {
5051 			err = EINVAL;
5052 			return (err);
5053 		}
5054 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5055 
5056 		err = nxge_param_set_ip_opt(nxgep, NULL,
5057 		    NULL, (char *)pr_val,
5058 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
5059 
5060 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5061 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5062 		    pr_name, result));
5063 
5064 		return (err);
5065 	}
5066 	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5067 		if (pr_val == NULL) {
5068 			err = EINVAL;
5069 			return (err);
5070 		}
5071 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5072 
5073 		err = nxge_param_set_ip_opt(nxgep, NULL,
5074 		    NULL, (char *)pr_val,
5075 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
5076 
5077 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5078 		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
5079 		    pr_name, result));
5080 
5081 		return (err);
5082 	}
5083 
5084 	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5085 		if (pr_val == NULL) {
5086 			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5087 			    "==> nxge_set_priv_prop: name %s (null)", pr_name));
5088 			err = EINVAL;
5089 			return (err);
5090 		}
5091 
5092 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5093 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5094 		    "<== nxge_set_priv_prop: name %s "
5095 		    "(lso %d pr_val %s value %d)",
5096 		    pr_name, nxgep->soft_lso_enable, pr_val, result));
5097 
5098 		if (result > 1 || result < 0) {
5099 			err = EINVAL;
5100 		} else {
5101 			if (nxgep->soft_lso_enable == (uint32_t)result) {
5102 				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5103 				    "no change (%d %d)",
5104 				    nxgep->soft_lso_enable, result));
5105 				return (0);
5106 			}
5107 			/* Store the value only after it passes the check. */
5108 			nxgep->soft_lso_enable = (int)result;
5109 		}
5110 
5111 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5112 		    "<== nxge_set_priv_prop: name %s (value %d)",
5113 		    pr_name, result));
5114 
5115 		return (err);
5116 	}
5117 	/*
5118 	 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the
5119 	 * following code to be executed.
5120 	 */
5121 	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5122 		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5123 		    (caddr_t)&param_arr[param_anar_10gfdx]);
5124 		return (err);
5125 	}
5126 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5127 		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5128 		    (caddr_t)&param_arr[param_anar_pause]);
5129 		return (err);
5130 	}
5131 
5132 	return (EINVAL);
5133 }
5134 
5135 static int
5136 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
5137     void *pr_val)
5138 {
5139 	p_nxge_param_t param_arr = nxgep->param_arr;
5140 	char valstr[MAXNAMELEN];
5141 	int err = EINVAL;
5142 	uint_t strsize;
5143 
5144 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5145 	    "==> nxge_get_priv_prop: property %s", pr_name));
5146 
5147 	/* function number */
5148 	if (strcmp(pr_name, "_function_number") == 0) {
5149 		(void) snprintf(valstr, sizeof (valstr), "%d",
5150 		    nxgep->function_num);
5151 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5152 		    "==> nxge_get_priv_prop: name %s "
5153 		    "(value %d valstr %s)",
5154 		    pr_name, nxgep->function_num, valstr));
5155 
5156 		err = 0;
5157 		goto done;
5158 	}
5159 
5160 	/* Neptune firmware version */
5161 	if (strcmp(pr_name, "_fw_version") == 0) {
5162 		(void) snprintf(valstr, sizeof (valstr), "%s",
5163 		    nxgep->vpd_info.ver);
5164 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5165 		    "==> nxge_get_priv_prop: name %s "
5166 		    "(value %s valstr %s)",
5167 		    pr_name, nxgep->vpd_info.ver, valstr));
5168 
5169 		err = 0;
5170 		goto done;
5171 	}
5172 
5173 	/* port PHY mode */
5174 	if (strcmp(pr_name, "_port_mode") == 0) {
5175 		switch (nxgep->mac.portmode) {
5176 		case PORT_1G_COPPER:
5177 			(void) snprintf(valstr, sizeof (valstr), "1G copper %s",
5178 			    nxgep->hot_swappable_phy ?
5179 			    "[hot swappable]" : "");
5180 			break;
5181 		case PORT_1G_FIBER:
5182 			(void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
5183 			    nxgep->hot_swappable_phy ?
5184 			    "[hot swappable]" : "");
5185 			break;
5186 		case PORT_10G_COPPER:
5187 			(void) snprintf(valstr, sizeof (valstr),
5188 			    "10G copper %s",
5189 			    nxgep->hot_swappable_phy ?
5190 			    "[hot swappable]" : "");
5191 			break;
5192 		case PORT_10G_FIBER:
5193 			(void) snprintf(valstr, sizeof (valstr), "10G fiber %s",
5194 			    nxgep->hot_swappable_phy ?
5195 			    "[hot swappable]" : "");
5196 			break;
5197 		case PORT_10G_SERDES:
5198 			(void) snprintf(valstr, sizeof (valstr),
5199 			    "10G serdes %s", nxgep->hot_swappable_phy ?
5200 "[hot swappable]" : ""); 5201 break; 5202 case PORT_1G_SERDES: 5203 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s", 5204 nxgep->hot_swappable_phy ? 5205 "[hot swappable]" : ""); 5206 break; 5207 case PORT_1G_TN1010: 5208 (void) snprintf(valstr, sizeof (valstr), 5209 "1G TN1010 copper %s", nxgep->hot_swappable_phy ? 5210 "[hot swappable]" : ""); 5211 break; 5212 case PORT_10G_TN1010: 5213 (void) snprintf(valstr, sizeof (valstr), 5214 "10G TN1010 copper %s", nxgep->hot_swappable_phy ? 5215 "[hot swappable]" : ""); 5216 break; 5217 case PORT_1G_RGMII_FIBER: 5218 (void) snprintf(valstr, sizeof (valstr), 5219 "1G rgmii fiber %s", nxgep->hot_swappable_phy ? 5220 "[hot swappable]" : ""); 5221 break; 5222 case PORT_HSP_MODE: 5223 (void) snprintf(valstr, sizeof (valstr), 5224 "phy not present[hot swappable]"); 5225 break; 5226 default: 5227 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5228 nxgep->hot_swappable_phy ? 5229 "[hot swappable]" : ""); 5230 break; 5231 } 5232 5233 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5234 "==> nxge_get_priv_prop: name %s (value %s)", 5235 pr_name, valstr)); 5236 5237 err = 0; 5238 goto done; 5239 } 5240 5241 /* Hot swappable PHY */ 5242 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5243 (void) snprintf(valstr, sizeof (valstr), "%s", 5244 nxgep->hot_swappable_phy ? 5245 "yes" : "no"); 5246 5247 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5248 "==> nxge_get_priv_prop: name %s " 5249 "(value %d valstr %s)", 5250 pr_name, nxgep->hot_swappable_phy, valstr)); 5251 5252 err = 0; 5253 goto done; 5254 } 5255 5256 5257 /* Receive Interrupt Blanking Parameters */ 5258 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5259 err = 0; 5260 (void) snprintf(valstr, sizeof (valstr), "%d", 5261 nxgep->intr_timeout); 5262 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5263 "==> nxge_get_priv_prop: name %s (value %d)", 5264 pr_name, 5265 (uint32_t)nxgep->intr_timeout)); 5266 goto done; 5267 } 5268 5269 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5270 err = 0; 5271 (void) snprintf(valstr, sizeof (valstr), "%d", 5272 nxgep->intr_threshold); 5273 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5274 "==> nxge_get_priv_prop: name %s (value %d)", 5275 pr_name, (uint32_t)nxgep->intr_threshold)); 5276 5277 goto done; 5278 } 5279 5280 /* Classification and Load Distribution Configuration */ 5281 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5282 err = nxge_dld_get_ip_opt(nxgep, 5283 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5284 5285 (void) snprintf(valstr, sizeof (valstr), "%x", 5286 (int)param_arr[param_class_opt_ipv4_tcp].value); 5287 5288 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5289 "==> nxge_get_priv_prop: %s", valstr)); 5290 goto done; 5291 } 5292 5293 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5294 err = nxge_dld_get_ip_opt(nxgep, 5295 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5296 5297 (void) snprintf(valstr, sizeof (valstr), "%x", 5298 (int)param_arr[param_class_opt_ipv4_udp].value); 5299 5300 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5301 "==> nxge_get_priv_prop: %s", valstr)); 5302 goto done; 5303 } 5304 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5305 err = nxge_dld_get_ip_opt(nxgep, 5306 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 5307 5308 (void) snprintf(valstr, sizeof (valstr), "%x", 5309 (int)param_arr[param_class_opt_ipv4_ah].value); 5310 5311 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5312 "==> nxge_get_priv_prop: %s", valstr)); 5313 goto done; 5314 } 5315 5316 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5317 err = nxge_dld_get_ip_opt(nxgep, 5318 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 5319 
5320 		(void) snprintf(valstr, sizeof (valstr), "%x",
5321 		    (int)param_arr[param_class_opt_ipv4_sctp].value);
5322 
5323 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5324 		    "==> nxge_get_priv_prop: %s", valstr));
5325 		goto done;
5326 	}
5327 
5328 	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5329 		err = nxge_dld_get_ip_opt(nxgep,
5330 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
5331 
5332 		(void) snprintf(valstr, sizeof (valstr), "%x",
5333 		    (int)param_arr[param_class_opt_ipv6_tcp].value);
5334 
5335 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5336 		    "==> nxge_get_priv_prop: %s", valstr));
5337 		goto done;
5338 	}
5339 
5340 	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5341 		err = nxge_dld_get_ip_opt(nxgep,
5342 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
5343 
5344 		(void) snprintf(valstr, sizeof (valstr), "%x",
5345 		    (int)param_arr[param_class_opt_ipv6_udp].value);
5346 
5347 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5348 		    "==> nxge_get_priv_prop: %s", valstr));
5349 		goto done;
5350 	}
5351 
5352 	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5353 		err = nxge_dld_get_ip_opt(nxgep,
5354 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
5355 
5356 		(void) snprintf(valstr, sizeof (valstr), "%x",
5357 		    (int)param_arr[param_class_opt_ipv6_ah].value);
5358 
5359 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5360 		    "==> nxge_get_priv_prop: %s", valstr));
5361 		goto done;
5362 	}
5363 
5364 	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5365 		err = nxge_dld_get_ip_opt(nxgep,
5366 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
5367 
5368 		(void) snprintf(valstr, sizeof (valstr), "%x",
5369 		    (int)param_arr[param_class_opt_ipv6_sctp].value);
5370 
5371 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5372 		    "==> nxge_get_priv_prop: %s", valstr));
5373 		goto done;
5374 	}
5375 
5376 	/* Software LSO */
5377 	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5378 		(void) snprintf(valstr, sizeof (valstr),
5379 		    "%d", nxgep->soft_lso_enable);
5380 		err = 0;
5381 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5382 		    "==> nxge_get_priv_prop: name %s (value %d)",
5383 		    pr_name, nxgep->soft_lso_enable));
5384 
5385 		goto done;
5386 	}
5387 	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5388 		err = 0;
5389 		if (nxgep->param_arr[param_anar_10gfdx].value != 0) {
5390 			(void) snprintf(valstr, sizeof (valstr), "%d", 1);
5391 			goto done;
5392 		} else {
5393 			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
5394 			goto done;
5395 		}
5396 	}
5397 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5398 		err = 0;
5399 		if (nxgep->param_arr[param_anar_pause].value != 0) {
5400 			(void) snprintf(valstr, sizeof (valstr), "%d", 1);
5401 			goto done;
5402 		} else {
5403 			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
5404 			goto done;
5405 		}
5406 	}
5407 
5408 done:
5409 	if (err == 0) {
5410 		strsize = (uint_t)strlen(valstr);
5411 		/* Leave room for the terminating NUL byte. */
5412 		if (pr_valsize <= strsize) {
5413 			err = ENOBUFS;
5414 		} else {
5415 			(void) strlcpy(pr_val, valstr, pr_valsize);
5416 		}
5417 	}
5418 
5419 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5420 	    "<== nxge_get_priv_prop: return %d", err));
5421 	return (err);
5422 }
5423 
5424 /*
5425  * Module loading and removing entry points.
5426  */
5427 
5428 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach,
5429     nodev, NULL, D_MP, NULL, nxge_quiesce);
5430 
5431 #define	NXGE_DESC_VER	"Sun NIU 10Gb Ethernet"
5432 
5433 /*
5434  * Module linkage information for the kernel.
5434 */ 5435 static struct modldrv nxge_modldrv = { 5436 &mod_driverops, 5437 NXGE_DESC_VER, 5438 &nxge_dev_ops 5439 }; 5440 5441 static struct modlinkage modlinkage = { 5442 MODREV_1, (void *) &nxge_modldrv, NULL 5443 }; 5444 5445 int 5446 _init(void) 5447 { 5448 int status; 5449 5450 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 5451 5452 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5453 5454 mac_init_ops(&nxge_dev_ops, "nxge"); 5455 5456 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5457 if (status != 0) { 5458 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5459 "failed to init device soft state")); 5460 goto _init_exit; 5461 } 5462 5463 status = mod_install(&modlinkage); 5464 if (status != 0) { 5465 ddi_soft_state_fini(&nxge_list); 5466 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5467 goto _init_exit; 5468 } 5469 5470 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5471 5472 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5473 return (status); 5474 5475 _init_exit: 5476 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5477 MUTEX_DESTROY(&nxgedebuglock); 5478 return (status); 5479 } 5480 5481 int 5482 _fini(void) 5483 { 5484 int status; 5485 5486 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5487 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5488 5489 if (nxge_mblks_pending) 5490 return (EBUSY); 5491 5492 status = mod_remove(&modlinkage); 5493 if (status != DDI_SUCCESS) { 5494 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5495 "Module removal failed 0x%08x", 5496 status)); 5497 goto _fini_exit; 5498 } 5499 5500 mac_fini_ops(&nxge_dev_ops); 5501 5502 ddi_soft_state_fini(&nxge_list); 5503 5504 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5505 5506 MUTEX_DESTROY(&nxge_common_lock); 5507 MUTEX_DESTROY(&nxgedebuglock); 5508 return (status); 5509 5510 _fini_exit: 5511 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5512 return (status); 5513 } 5514 5515 int 5516 _info(struct modinfo *modinfop) 5517 { 5518 int status; 5519 5520 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5521 status = mod_info(&modlinkage, modinfop); 5522 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5523 5524 return (status); 5525 } 5526 5527 /*ARGSUSED*/ 5528 static int 5529 nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5530 { 5531 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5532 p_nxge_t nxgep = rhp->nxgep; 5533 uint32_t channel; 5534 p_tx_ring_t ring; 5535 5536 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5537 ring = nxgep->tx_rings->rings[channel]; 5538 5539 MUTEX_ENTER(&ring->lock); 5540 ASSERT(ring->tx_ring_handle == NULL); 5541 ring->tx_ring_handle = rhp->ring_handle; 5542 MUTEX_EXIT(&ring->lock); 5543 5544 return (0); 5545 } 5546 5547 static void 5548 nxge_tx_ring_stop(mac_ring_driver_t rdriver) 5549 { 5550 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5551 p_nxge_t nxgep = rhp->nxgep; 5552 uint32_t channel; 5553 p_tx_ring_t ring; 5554 5555 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5556 ring = nxgep->tx_rings->rings[channel]; 5557 5558 MUTEX_ENTER(&ring->lock); 5559 ASSERT(ring->tx_ring_handle != NULL); 5560 ring->tx_ring_handle = (mac_ring_handle_t)NULL; 5561 MUTEX_EXIT(&ring->lock); 5562 } 5563 5564 int 5565 nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5566 { 5567 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5568 p_nxge_t nxgep = rhp->nxgep; 5569 uint32_t 
channel;
5570 	p_rx_rcr_ring_t ring;
5571 	int i;
5572 
5573 	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
5574 	ring = nxgep->rx_rcr_rings->rcr_rings[channel];
5575 
5576 	MUTEX_ENTER(&ring->lock);
5577 
5578 	if (ring->started) {
5579 		ASSERT(ring->started == B_FALSE);
5580 		MUTEX_EXIT(&ring->lock);
5581 		return (0);
5582 	}
5583 
5584 	/* set rcr_ring */
5585 	for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5586 		if ((nxgep->ldgvp->ldvp[i].is_rxdma) &&
5587 		    (nxgep->ldgvp->ldvp[i].channel == channel)) {
5588 			ring->ldvp = &nxgep->ldgvp->ldvp[i];
5589 			ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp;
5590 		}
5591 	}
5592 
5593 	ring->rcr_mac_handle = rhp->ring_handle;
5594 	ring->rcr_gen_num = mr_gen_num;
5595 	ring->started = B_TRUE;
5596 	rhp->ring_gen_num = mr_gen_num;
5597 	MUTEX_EXIT(&ring->lock);
5598 
5599 	return (0);
5600 }
5601 
5602 static void
5603 nxge_rx_ring_stop(mac_ring_driver_t rdriver)
5604 {
5605 	p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5606 	p_nxge_t nxgep = rhp->nxgep;
5607 	uint32_t channel;
5608 	p_rx_rcr_ring_t ring;
5609 
5610 	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
5611 	ring = nxgep->rx_rcr_rings->rcr_rings[channel];
5612 
5613 	MUTEX_ENTER(&ring->lock);
5614 	ASSERT(ring->started == B_TRUE);
5615 	ring->rcr_mac_handle = NULL;
5616 	ring->ldvp = NULL;
5617 	ring->ldgp = NULL;
5618 	ring->started = B_FALSE;
5619 	MUTEX_EXIT(&ring->lock);
5620 }
5621 
5622 static int
5623 nxge_ring_get_htable_idx(p_nxge_t nxgep, mac_ring_type_t type, uint32_t channel)
5624 {
5625 	int i;
5626 
5627 #if defined(sun4v)
5628 	if (isLDOMguest(nxgep)) {
5629 		return (nxge_hio_get_dc_htable_idx(nxgep,
5630 		    (type == MAC_RING_TYPE_TX) ? VP_BOUND_TX : VP_BOUND_RX,
5631 		    channel));
5632 	}
5633 #endif
5634 
5635 	ASSERT(nxgep->ldgvp != NULL);
5636 
5637 	switch (type) {
5638 	case MAC_RING_TYPE_TX:
5639 		for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5640 			if ((nxgep->ldgvp->ldvp[i].is_txdma) &&
5641 			    (nxgep->ldgvp->ldvp[i].channel == channel)) {
5642 				return ((int)
5643 				    nxgep->ldgvp->ldvp[i].ldgp->htable_idx);
5644 			}
5645 		}
5646 		break;
5647 
5648 	case MAC_RING_TYPE_RX:
5649 		for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5650 			if ((nxgep->ldgvp->ldvp[i].is_rxdma) &&
5651 			    (nxgep->ldgvp->ldvp[i].channel == channel)) {
5652 				return ((int)
5653 				    nxgep->ldgvp->ldvp[i].ldgp->htable_idx);
5654 			}
5655 		}
5656 	}
5657 
5658 	return (-1);
5659 }
5660 
5661 /*
5662  * Callback function for the MAC layer to register all rings.
5663  */
5664 static void
5665 nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
5666     const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
5667 {
5668 	p_nxge_t nxgep = (p_nxge_t)arg;
5669 	p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;
5670 	p_nxge_intr_t intrp;
5671 	uint32_t channel;
5672 	int htable_idx;
5673 	p_nxge_ring_handle_t rhandlep;
5674 
5675 	ASSERT(nxgep != NULL);
5676 	ASSERT(p_cfgp != NULL);
5677 	ASSERT(infop != NULL);
5678 
5679 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
5680 	    "==> nxge_fill_ring 0x%x index %d", rtype, index));
5681 
5682 
5683 	switch (rtype) {
5684 	case MAC_RING_TYPE_TX: {
5685 		mac_intr_t *mintr = &infop->mri_intr;
5686 
5687 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
5688 		    "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d",
5689 		    rtype, index, p_cfgp->tdc.count));
5690 
5691 		ASSERT((index >= 0) && (index < p_cfgp->tdc.count));
5692 		rhandlep = &nxgep->tx_ring_handles[index];
5693 		rhandlep->nxgep = nxgep;
5694 		rhandlep->index = index;
5695 		rhandlep->ring_handle = rh;
5696 
5697 		channel = nxgep->pt_config.hw_config.tdc.start + index;
5698 		rhandlep->channel = channel;
5699 		intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5700 		htable_idx = nxge_ring_get_htable_idx(nxgep, rtype,
5701 		    channel);
5702 		if (htable_idx >= 0)
5703 			mintr->mi_ddi_handle = intrp->htable[htable_idx];
5704 		else
5705 			mintr->mi_ddi_handle = NULL;
5706 
5707 		infop->mri_driver = (mac_ring_driver_t)rhandlep;
5708 		infop->mri_start = nxge_tx_ring_start;
5709 		infop->mri_stop = nxge_tx_ring_stop;
5710 		infop->mri_tx = nxge_tx_ring_send;
5711 		infop->mri_stat = nxge_tx_ring_stat;
5712 		infop->mri_flags = MAC_RING_TX_SERIALIZE;
5713 		break;
5714 	}
5715 
5716 	case MAC_RING_TYPE_RX: {
5717 		mac_intr_t nxge_mac_intr;
5718 		int nxge_rindex;
5719 		p_nxge_intr_t intrp;
5720 
5721 		intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5722 
5723 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
5724 		    "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d",
5725 		    rtype, index, p_cfgp->max_rdcs));
5726 
5727 		/*
5728 		 * 'index' is the ring index within the group.
5729 		 * Find the ring index in the nxge instance.
5730 		 */
5731 		nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index);
5732 		channel = nxgep->pt_config.hw_config.start_rdc + index;
5733 
5734 		ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs));
5735 		rhandlep = &nxgep->rx_ring_handles[nxge_rindex];
5736 		rhandlep->nxgep = nxgep;
5737 		rhandlep->index = nxge_rindex;
5738 		rhandlep->ring_handle = rh;
5739 		rhandlep->channel = channel;
5740 
5741 		/*
5742 		 * Entrypoint to enable the interrupt (disable poll) and
5743 		 * disable the interrupt (enable poll).
5744 		 */
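		/*
		 * Editor's note: the mapping set up below is intentionally
		 * inverted: when the MAC layer "enables the interrupt" it
		 * is leaving polling mode, so the driver must stop polling,
		 * and vice versa. A minimal sketch of that contract, using
		 * a hypothetical mac_intr_t pointer mintr; illustrative
		 * only, not compiled into the driver.
		 */
#if 0
		/* MAC switches the ring to interrupt mode: stop polling. */
		(*mintr->mi_enable)(mintr->mi_handle);	/* nxge_disable_poll */
		/* MAC switches the ring to poll mode: stop interrupts. */
		(*mintr->mi_disable)(mintr->mi_handle);	/* nxge_enable_poll */
#endif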
5746 		bzero(&nxge_mac_intr, sizeof (nxge_mac_intr));
5747 		nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep;
5748 		nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll;
5749 		nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll;
5750 
5751 		htable_idx = nxge_ring_get_htable_idx(nxgep, rtype,
5752 		    channel);
5753 		if (htable_idx >= 0)
5754 			nxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx];
5755 		else
5756 			nxge_mac_intr.mi_ddi_handle = NULL;
5757 
5758 		infop->mri_driver = (mac_ring_driver_t)rhandlep;
5759 		infop->mri_start = nxge_rx_ring_start;
5760 		infop->mri_stop = nxge_rx_ring_stop;
5761 		infop->mri_intr = nxge_mac_intr;
5762 		infop->mri_poll = nxge_rx_poll;
5763 		infop->mri_stat = nxge_rx_ring_stat;
5764 		infop->mri_flags = MAC_RING_RX_ENQUEUE;
5765 		break;
5766 	}
5767 
5768 	default:
5769 		break;
5770 	}
5771 
5772 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x", rtype));
5773 }
5774 
5775 static void
5776 nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
5777     mac_ring_type_t type)
5778 {
5779 	nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
5780 	nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
5781 	nxge_t *nxge;
5782 	nxge_grp_t *grp;
5783 	nxge_rdc_grp_t *rdc_grp;
5784 	uint16_t channel;	/* device-wide ring id */
5785 	int dev_gindex;
5786 	int rv;
5787 
5788 	nxge = rgroup->nxgep;
5789 
5790 	switch (type) {
5791 	case MAC_RING_TYPE_TX:
5792 		/*
5793 		 * nxge_grp_dc_add takes a channel number which is a
5794 		 * "device" ring ID.
5795 		 */
5796 		channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
5797 
5798 		/*
5799 		 * Remove the ring from the default group.
5800 		 */
5801 		if (rgroup->gindex != 0) {
5802 			(void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
5803 		}
5804 
5805 		/*
5806 		 * nxge->tx_set.group[] is an array of groups indexed by
5807 		 * a "port" group ID.
5808 		 */
5809 		grp = nxge->tx_set.group[rgroup->gindex];
5810 		rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
5811 		if (rv != 0) {
5812 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
5813 			    "nxge_group_add_ring: nxge_grp_dc_add failed"));
5814 		}
5815 		break;
5816 
5817 	case MAC_RING_TYPE_RX:
5818 		/*
5819 		 * nxge->rx_set.group[] is an array of groups indexed by
5820 		 * a "port" group ID.
5821 		 */
5822 		grp = nxge->rx_set.group[rgroup->gindex];
5823 
5824 		dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
5825 		    rgroup->gindex;
5826 		rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
5827 
5828 		/*
5829 		 * nxge_grp_dc_add takes a channel number which is a
5830 		 * "device" ring ID.
5831 		 */
5832 		channel = nxge->pt_config.hw_config.start_rdc + rhandle->index;
5833 		rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel);
5834 		if (rv != 0) {
5835 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
5836 			    "nxge_group_add_ring: nxge_grp_dc_add failed"));
5837 		}
5838 
5839 		rdc_grp->map |= (1 << channel);
5840 		rdc_grp->max_rdcs++;
5841 
5842 		(void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
5843 		break;
5844 	}
5845 }
5846 
5847 static void
5848 nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
5849     mac_ring_type_t type)
5850 {
5851 	nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
5852 	nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
5853 	nxge_t *nxge;
5854 	uint16_t channel;	/* device-wide ring id */
5855 	nxge_rdc_grp_t *rdc_grp;
5856 	int dev_gindex;
5857 
5858 	nxge = rgroup->nxgep;
5859 
5860 	switch (type) {
5861 	case MAC_RING_TYPE_TX:
5862 		dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid +
5863 		    rgroup->gindex;
5864 		channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
5865 		nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
5866 
5867 		/*
5868 		 * Add the ring back to the default group.
5869 		 */
5870 		if (rgroup->gindex != 0) {
5871 			nxge_grp_t *grp;
5872 			grp = nxge->tx_set.group[0];
5873 			(void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
5874 		}
5875 		break;
5876 
5877 	case MAC_RING_TYPE_RX:
5878 		dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
5879 		    rgroup->gindex;
5880 		rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
5881 		channel = rdc_grp->start_rdc + rhandle->index;
5882 		nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);
5883 
5884 		rdc_grp->map &= ~(1 << channel);
5885 		rdc_grp->max_rdcs--;
5886 
5887 		(void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
5888 		break;
5889 	}
5890 }
5891 
5892 
5893 /*ARGSUSED*/
5894 static nxge_status_t
5895 nxge_add_intrs(p_nxge_t nxgep)
5896 {
5897 
5898 	int intr_types;
5899 	int type = 0;
5900 	int ddi_status = DDI_SUCCESS;
5901 	nxge_status_t status = NXGE_OK;
5902 
5903 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));
5904 
5905 	nxgep->nxge_intr_type.intr_registered = B_FALSE;
5906 	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
5907 	nxgep->nxge_intr_type.msi_intx_cnt = 0;
5908 	nxgep->nxge_intr_type.intr_added = 0;
5909 	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
5910 	nxgep->nxge_intr_type.intr_type = 0;
5911 
5912 	if (nxgep->niu_type == N2_NIU) {
5913 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
5914 	} else if (nxge_msi_enable) {
5915 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
5916 	}
5917 
5918 	/* Get the supported interrupt types */
5919 	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip,
5920 	    &intr_types)) != DDI_SUCCESS) {
5921 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
5922 		    "ddi_intr_get_supported_types failed: status 0x%08x",
5923 		    ddi_status));
5924 		return (NXGE_ERROR | NXGE_DDI_FAILED);
5925 	}
5926 	nxgep->nxge_intr_type.intr_types = intr_types;
5927 
5928 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5929 	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
5930 
5931 	/*
5932 	 * Solaris MSI-X is not supported yet. Use MSI for now.
5933 	 * nxge_msi_enable:
5934 	 *	1 - MSI		2 - MSI-X	others - FIXED
5935 	 */
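	/*
	 * Editor's note: a minimal sketch of the selection policy encoded
	 * by the switch below, factored into a hypothetical helper.
	 * Illustrative only; not compiled into the driver.
	 */
#if 0
	static int
	pick_intr_type(uint32_t msi_enable, int supported)
	{
		/* 2: prefer MSI-X, then MSI, then fixed interrupts. */
		if (msi_enable == 2) {
			if (supported & DDI_INTR_TYPE_MSIX)
				return (DDI_INTR_TYPE_MSIX);
			if (supported & DDI_INTR_TYPE_MSI)
				return (DDI_INTR_TYPE_MSI);
			if (supported & DDI_INTR_TYPE_FIXED)
				return (DDI_INTR_TYPE_FIXED);
			return (0);	/* nothing usable */
		}
		/* 1: prefer MSI, then MSI-X, then fixed interrupts. */
		if (msi_enable == 1) {
			if (supported & DDI_INTR_TYPE_MSI)
				return (DDI_INTR_TYPE_MSI);
			if (supported & DDI_INTR_TYPE_MSIX)
				return (DDI_INTR_TYPE_MSIX);
			if (supported & DDI_INTR_TYPE_FIXED)
				return (DDI_INTR_TYPE_FIXED);
			return (0);	/* nothing usable */
		}
		/* Anything else falls back to fixed (INTx emulation). */
		return (DDI_INTR_TYPE_FIXED);
	}
#endif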
5936 	switch (nxge_msi_enable) {
5937 	default:
5938 		type = DDI_INTR_TYPE_FIXED;
5939 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5940 		    "use fixed (intx emulation) type %08x",
5941 		    type));
5942 		break;
5943 
5944 	case 2:
5945 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5946 		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
5947 		if (intr_types & DDI_INTR_TYPE_MSIX) {
5948 			type = DDI_INTR_TYPE_MSIX;
5949 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5950 			    "ddi_intr_get_supported_types: MSIX 0x%08x",
5951 			    type));
5952 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
5953 			type = DDI_INTR_TYPE_MSI;
5954 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5955 			    "ddi_intr_get_supported_types: MSI 0x%08x",
5956 			    type));
5957 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
5958 			type = DDI_INTR_TYPE_FIXED;
5959 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5960 			    "ddi_intr_get_supported_types: FIXED 0x%08x",
5961 			    type));
5962 		}
5963 		break;
5964 
5965 	case 1:
5966 		if (intr_types & DDI_INTR_TYPE_MSI) {
5967 			type = DDI_INTR_TYPE_MSI;
5968 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5969 			    "ddi_intr_get_supported_types: MSI 0x%08x",
5970 			    type));
5971 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
5972 			type = DDI_INTR_TYPE_MSIX;
5973 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5974 			    "ddi_intr_get_supported_types: MSIX 0x%08x",
5975 			    type));
5976 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
5977 			type = DDI_INTR_TYPE_FIXED;
5978 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5979 			    "ddi_intr_get_supported_types: FIXED 0x%08x",
5980 			    type));
5981 		}
5982 	}
5983 
5984 	nxgep->nxge_intr_type.intr_type = type;
5985 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
5986 	    type == DDI_INTR_TYPE_FIXED) &&
5987 	    nxgep->nxge_intr_type.niu_msi_enable) {
5988 		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
5989 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5990 			    " nxge_add_intrs: "
5991 			    " nxge_add_intrs_adv failed: status 0x%08x",
5992 			    status));
5993 			return (status);
5994 		} else {
5995 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5996 			    "interrupts registered : type %d", type));
5997 			nxgep->nxge_intr_type.intr_registered = B_TRUE;
5998 
5999 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6000 			    "\nAdded advanced nxge add_intr_adv "
6001 			    "intr type 0x%x\n", type));
6002 
6003 			return (status);
6004 		}
6005 	}
6006 
6007 	if (!nxgep->nxge_intr_type.intr_registered) {
6008 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
6009 		    "failed to register interrupts"));
6010 		return (NXGE_ERROR | NXGE_DDI_FAILED);
6011 	}
6012 
6013 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
6014 	return (status);
6015 }
6016 
6017 static nxge_status_t
6018 nxge_add_intrs_adv(p_nxge_t nxgep)
6019 {
6020 	int intr_type;
6021 	p_nxge_intr_t intrp;
6022 
6023 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
6024 
6025 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6026 	intr_type = intrp->intr_type;
6027 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
6028 	    intr_type));
6029 
6030 	switch (intr_type) {
6031 	case DDI_INTR_TYPE_MSI:		/* 0x2 */
6032 	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
6033 		return (nxge_add_intrs_adv_type(nxgep, intr_type));
6034 
6035 	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
6036 		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
6037 
6038 	default:
6039 		return (NXGE_ERROR);
6040 	}
6041 }
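/*
 * Editor's note: nxge_add_intrs_adv_type() below follows the standard
 * Solaris DDI interrupt allocation sequence. A condensed, hedged sketch of
 * that sequence (error handling trimmed, array size arbitrary); illustrative
 * only, not compiled into the driver.
 */
#if 0
static int
ddi_intr_alloc_sketch(dev_info_t *dip, int type,
    ddi_intr_handler_t *handler, void *arg1, void *arg2)
{
	ddi_intr_handle_t htable[8];
	int navail, nactual, i;
	uint_t pri;

	/* 1. How many vectors of this type can we get? */
	if (ddi_intr_get_navail(dip, type, &navail) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if (navail > 8)
		navail = 8;

	/* 2. Allocate the vectors. */
	if (ddi_intr_alloc(dip, htable, type, 0, navail, &nactual,
	    DDI_INTR_ALLOC_NORMAL) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* 3. Fetch the priority, then attach a handler per vector. */
	(void) ddi_intr_get_pri(htable[0], &pri);
	for (i = 0; i < nactual; i++)
		(void) ddi_intr_add_handler(htable[i], handler, arg1, arg2);

	return (DDI_SUCCESS);
}
#endif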
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail, nrequest;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = nxge_create_msi_property(nxgep);
		if (nrequest < navail) {
			navail = nrequest;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/*
		 * The MSI count must be a power of 2: round navail
		 * down to the largest power of 2 it contains
		 * (e.g. 5 -> 4); only counts up to 16 are considered.
		 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d", ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d", ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2, x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "nldevs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2, ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			/* Remove already added handlers */
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated interrupts */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		ldgp->htable_idx = x;
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	(void) nxge_intr_ldgv_init(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}
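/*
 * Editorial note (sketch, not part of the original driver): the failure
 * paths in nxge_add_intrs_adv_type() above and
 * nxge_add_intrs_adv_type_fix() below unwind in the same canonical DDI
 * order -- remove the handlers that were added, free the vectors that
 * were allocated, then free the handle table. A hypothetical helper
 * capturing that pattern might look like:
 *
 *	static void
 *	nxge_unwind_intrs(p_nxge_intr_t intrp, int nactual)
 *	{
 *		int i;
 *
 *		for (i = 0; i < intrp->intr_added; i++)
 *			(void) ddi_intr_remove_handler(intrp->htable[i]);
 *		for (i = 0; i < nactual; i++)
 *			(void) ddi_intr_free(intrp->htable[i]);
 *		kmem_free(intrp->htable, intrp->intr_size);
 *	}
 */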
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d", ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d", ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		if (nxgep->niu_type != N2_NIU) {
			ldgp->intdata = SID_DATA(ldgp->func, x);
		}

		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "1-1 int handler(%d) ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
			    "arg1 0x%016llx arg2 0x%016llx\n",
			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		}

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type_fix: failed #%d "
			    "status 0x%x", x, ddi_status));
			/* Remove already added handlers */
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated interrupts */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		ldgp->htable_idx = x;
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	status = nxge_intr_ldgv_init(nxgep);
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));

	return (status);
}

static void
nxge_remove_intrs(p_nxge_t nxgep)
{
	int		i, inum;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_remove_intrs: interrupts not registered"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum, intrp->msi_intx_cnt, intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) nxge_ldgv_uninit(nxgep);

	(void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
	    "#msix-request");

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
}
/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;
	int		status;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		/* Record success so the enabled state is tracked here too */
		if (status == DDI_SUCCESS) {
			intrp->intr_enabled = B_TRUE;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t	*macp;
	int		status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	if (!isLDOMguest(nxgep)) {
		macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	} else {
		macp->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
		macp->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
		/* Fill the whole buffer, not just sizeof (int) bytes */
		(void) memset(macp->m_src_addr, 0xff, MAXMACADDRLEN);
	}
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
	    NXGE_EHEADER_VLAN_CRC;
	macp->m_max_sdu = nxgep->mac.default_mtu;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = nxge_priv_props;
	if (isLDOMguest(nxgep))
		macp->m_v12n = MAC_VIRT_LEVEL1;
	else
		macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_mac_register: instance %d "
	    "max_sdu %d margin %d maxframe %d (header %d)",
	    nxgep->instance,
	    macp->m_max_sdu, macp->m_margin,
	    nxgep->mac.maxframesize,
	    NXGE_EHEADER_VLAN_CRC));

	status = mac_register(macp, &nxgep->mach);
	if (isLDOMguest(nxgep)) {
		KMEM_FREE(macp->m_src_addr, MAXMACADDRLEN);
		KMEM_FREE(macp->m_dst_addr, MAXMACADDRLEN);
	}
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}
void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t		size;
	mblk_t		*nmp;
	uint8_t		blk_id;
	uint8_t		chan;
	uint32_t	err_id;
	err_inject_t	*eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;
	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
	switch (blk_id) {
	case MAC_BLK_ID:
		break;
	case TXMAC_BLK_ID:
		break;
	case RXMAC_BLK_ID:
		break;
	case MIF_BLK_ID:
		break;
	case IPP_BLK_ID:
		nxge_ipp_inject_err(nxgep, err_id);
		break;
	case TXC_BLK_ID:
		nxge_txc_inject_err(nxgep, err_id);
		break;
	case TXDMA_BLK_ID:
		nxge_txdma_inject_err(nxgep, err_id, chan);
		break;
	case RXDMA_BLK_ID:
		nxge_rxdma_inject_err(nxgep, err_id, chan);
		break;
	case ZCP_BLK_ID:
		nxge_zcp_inject_err(nxgep, err_id);
		break;
	case ESPC_BLK_ID:
		break;
	case FFLP_BLK_ID:
		break;
	case PHY_BLK_ID:
		break;
	case ETHER_SERDES_BLK_ID:
		break;
	case PCIE_SERDES_BLK_ID:
		break;
	case VIR_BLK_ID:
		break;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

	miocack(wq, mp, (int)size, 0);
}

static int
nxge_init_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	ASSERT(nxgep != NULL);

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

	p_dip = nxgep->p_dip;
	MUTEX_ENTER(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_dev:func # %d",
	    nxgep->function_num));
	/*
	 * Loop through the existing per-Neptune hardware list.
	 */
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "hw_p $%p parent dip $%p",
		    nxgep->function_num,
		    hw_p, p_dip));
		if (hw_p->parent_devp == p_dip) {
			nxgep->nxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->nxge_p[nxgep->function_num] = nxgep;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_init_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p, p_dip, hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		char **prop_val;
		uint_t prop_len;
		int i;

		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num, p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;
		if (nxgep->niu_type == N2_NIU) {
			hw_p->niu_type = N2_NIU;
			hw_p->platform_type = P_NEPTUNE_NIU;
			hw_p->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
		} else {
			hw_p->niu_type = NIU_TYPE_NONE;
			hw_p->platform_type = P_NEPTUNE_NONE;
			hw_p->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
		}

		hw_p->tcam = KMEM_ZALLOC(sizeof (tcam_flow_spec_t) *
		    hw_p->tcam_size, KM_SLEEP);

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;

		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
			for (i = 0; i < prop_len; i++) {
				if ((strcmp((caddr_t)prop_val[i],
				    NXGE_ROCK_COMPATIBLE) == 0)) {
					hw_p->platform_type = P_NEPTUNE_ROCK;
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "ROCK hw_p->platform_type %d",
					    hw_p->platform_type));
					break;
				}
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "nxge_init_common_dev: read compatible"
				    " property[%d] val[%s]",
				    i, (caddr_t)prop_val[i]));
			}

			/*
			 * Free prop_val only when the lookup succeeded;
			 * it is not initialized otherwise.
			 */
			ddi_prop_free(prop_val);
		}

		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
	}

	MUTEX_EXIT(&nxge_common_lock);

	nxgep->platform_type = hw_p->platform_type;
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d",
	    nxgep->platform_type));
	if (nxgep->niu_type != N2_NIU) {
		nxgep->niu_type = hw_p->niu_type;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}
static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p, h_hw_p;
	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	dev_info_t		*p_dip;

	ASSERT(nxgep != NULL);

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p, p_dip, hw_p->ndevs));

			/*
			 * Release the RDC table, a shared resource
			 * of the nxge hardware. The RDC table was
			 * assigned to this instance of nxge in
			 * nxge_use_cfg_dma_config().
			 */
			if (!isLDOMguest(nxgep)) {
				p_dma_cfgp =
				    (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
				p_cfgp =
				    (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
				(void) nxge_fzc_rdc_tbl_unbind(nxgep,
				    p_cfgp->def_mac_rxdma_grpid);

				/* Cleanup any outstanding groups. */
				nxge_grp_cleanup(nxgep);
			}

			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				KMEM_FREE(hw_p->tcam,
				    sizeof (tcam_flow_spec_t) *
				    hw_p->tcam_size);
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num,
				    hw_p, p_dip, hw_p->ndevs));

				nxge_hio_uninit(nxgep);

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num,
					    hw_p, p_dip, hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num,
					    hw_p, p_dip, hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				nxgep->nxge_hw_p = NULL;
				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}
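/*
 * Editorial note (not part of the original source):
 * nxge_init_common_dev() and nxge_uninit_common_dev() above
 * reference-count the per-Neptune nxge_hw_list_t through hw_p->ndevs;
 * the shared TCAM, the locks, and the list node itself are torn down
 * only when the last function of the device detaches (ndevs reaches
 * zero).
 */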
/*
 * Determines the number of ports from the niu_type or the platform type.
 * Returns the number of ports, or returns zero on failure.
 */
int
nxge_get_nports(p_nxge_t nxgep)
{
	int	nports = 0;

	switch (nxgep->niu_type) {
	case N2_NIU:
	case NEPTUNE_2_10GF:
		nports = 2;
		break;
	case NEPTUNE_4_1GC:
	case NEPTUNE_2_10GF_2_1GC:
	case NEPTUNE_1_10GF_3_1GC:
	case NEPTUNE_1_1GC_1_10GF_2_1GC:
	case NEPTUNE_2_10GF_2_1GRF:
		nports = 4;
		break;
	default:
		switch (nxgep->platform_type) {
		case P_NEPTUNE_NIU:
		case P_NEPTUNE_ATLAS_2PORT:
			nports = 2;
			break;
		case P_NEPTUNE_ATLAS_4PORT:
		case P_NEPTUNE_MARAMBA_P0:
		case P_NEPTUNE_MARAMBA_P1:
		case P_NEPTUNE_ROCK:
		case P_NEPTUNE_ALONSO:
			nports = 4;
			break;
		default:
			break;
		}
		break;
	}

	return (nports);
}

/*
 * The following two functions are to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
	int	nmsi;
	extern	int ncpus;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));

	switch (nxgep->mac.portmode) {
	case PORT_10G_COPPER:
	case PORT_10G_FIBER:
	case PORT_10G_TN1010:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		/*
		 * The maximum number of MSI-X vectors requested will be 8.
		 * If the # of CPUs is less than 8, the request will be
		 * based on the # of CPUs (default).
		 */
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (10G): "
		    "nxge_msix_10g_intrs %d",
		    nxge_msix_10g_intrs));
		if ((nxge_msix_10g_intrs == 0) ||
		    (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_10G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): reset to 8"));
		} else {
			nmsi = nxge_msix_10g_intrs;
		}

		/*
		 * Only when the default of 8 interrupts is requested
		 * is the request further limited by the number of CPUs.
		 */
		if ((nmsi == NXGE_MSIX_REQUEST_10G) &&
		    (ncpus < nmsi)) {
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): "
			    "reset to ncpus"));
			nmsi = ncpus;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;

	default:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d",
		    nxge_msix_1g_intrs));
		if ((nxge_msix_1g_intrs == 0) ||
		    (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_1G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (1G): reset to 2"));
		} else {
			nmsi = nxge_msix_1g_intrs;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
	return (nmsi);
}

/*
 * The following is a software workaround for the Neptune hardware's
 * interrupt bugs; the Neptune hardware may generate spurious interrupts
 * after an interrupt handler has been removed.
 */
#define	NXGE_PCI_PORT_LOGIC_OFFSET	0x98
#define	NXGE_PIM_RESET			(1ULL << 29)
#define	NXGE_GLU_RESET			(1ULL << 30)
#define	NXGE_NIU_RESET			(1ULL << 31)
#define	NXGE_PCI_RESET_ALL		(NXGE_PIM_RESET |	\
					NXGE_GLU_RESET |	\
					NXGE_NIU_RESET)

#define	NXGE_WAIT_QUITE_TIME		200000
#define	NXGE_WAIT_QUITE_RETRY		40
#define	NXGE_PCI_RESET_WAIT		1000000 /* one second */

static void
nxge_niu_peu_reset(p_nxge_t nxgep)
{
	uint32_t	rvalue;
	p_nxge_hw_list_t hw_p;
	p_nxge_t	fnxgep;
	int		i, j;

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: NULL hardware pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
	    hw_p->flags, nxgep->nxge_link_poll_timerid,
	    nxgep->nxge_timerid));

	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
	/*
	 * Make sure the other instances on the same hardware have
	 * stopped sending PIOs and are in a quiescent state.
	 */
	for (i = 0; i < NXGE_MAX_PORTS; i++) {
		fnxgep = hw_p->nxge_p[i];
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: checking entry %d "
		    "nxgep $%p", i, fnxgep));
#ifdef NXGE_DEBUG
		if (fnxgep) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: entry %d (function %d) "
			    "link timer id %d hw timer id %d",
			    i, fnxgep->function_num,
			    fnxgep->nxge_link_poll_timerid,
			    fnxgep->nxge_timerid));
		}
#endif
		if (fnxgep && fnxgep != nxgep &&
		    (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: checking $%p "
			    "(function %d) timer ids",
			    fnxgep, fnxgep->function_num));
			for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
				NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
				    "==> nxge_niu_peu_reset: waiting"));
				NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
				if (!fnxgep->nxge_timerid &&
				    !fnxgep->nxge_link_poll_timerid) {
					break;
				}
			}
			NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
			if (fnxgep->nxge_timerid ||
			    fnxgep->nxge_link_poll_timerid) {
				MUTEX_EXIT(&hw_p->nxge_cfg_lock);
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "<== nxge_niu_peu_reset: cannot reset "
				    "hardware (devices are still in use)"));
				return;
			}
		}
	}

	if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
		hw_p->flags |= COMMON_RESET_NIU_PCI;
		rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: read offset 0x%x (%d) "
		    "(data 0x%x)",
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    rvalue));

		rvalue |= NXGE_PCI_RESET_ALL;
		pci_config_put32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
		    rvalue));

		NXGE_DELAY(NXGE_PCI_RESET_WAIT);
	}

	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
}
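/*
 * Editorial note (sketch, not part of the original source):
 * nxge_set_pci_replay_timeout() below ORs the new timeout into the
 * replay timer field (bits 18:14 of the config register at offset
 * PCI_REPLAY_TIMEOUT_CFG_OFFSET) without clearing the field first, so
 * it relies on the preexisting field bits. A field-replacing variant,
 * assuming a 5-bit field, could read:
 *
 *	uint32_t mask = 0x1f << PCI_REPLAY_TIMEOUT_SHIFT;
 *	uint32_t value = pci_config_get32(dev_regs->nxge_pciregh,
 *	    PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	value = (value & ~mask) |
 *	    ((nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT) & mask);
 */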
static void
nxge_set_pci_replay_timeout(p_nxge_t nxgep)
{
	p_dev_regs_t	dev_regs;
	uint32_t	value;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));

	if (!nxge_set_replay_timer) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: will not change "
		    "the timeout"));
		return;
	}

	dev_regs = nxgep->dev_regs;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
	    dev_regs, dev_regs->nxge_pciregh));

	if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
		    "no PCI handle",
		    dev_regs));
		return;
	}
	value = (pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
	    (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value before set "
	    "0x%x (timeout value to set 0x%x at offset 0x%x) value 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));

	pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
	    value);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET)));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
nxge_quiesce(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);

	if (nxgep == NULL)
		return (DDI_FAILURE);

	/* Turn off debugging */
	nxge_debug_level = NO_DEBUG;
	nxgep->nxge_debug_level = NO_DEBUG;
	npi_debug_level = NO_DEBUG;

	/*
	 * Stop the link monitor only when linkchkmode is interrupt based.
	 */
	if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	}

	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	return (DDI_SUCCESS);
}
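/*
 * Editorial usage note (not part of the original source): the interrupt
 * and replay-timer workarounds above are gated by tunables defined
 * earlier in this file and can be adjusted without recompiling, for
 * example in /etc/system:
 *
 *	set nxge:nxge_msi_enable = 1
 *	set nxge:nxge_set_replay_timer = 0
 */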