/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_hio.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */

/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 * and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t nxge_msi_enable = 2;
#else
uint32_t nxge_msi_enable = 1;
#endif

/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed. If this flag is enabled, the
 * driver will reset the hardware when devices are being detached.
 */
uint32_t nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware checksum bugs that affect
 * packet transmission and reception:
 *
 * Usage of nxge_cksum_offload:
 *
 *  (1) nxge_cksum_offload = 0 (default):
 *	- transmitted packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: driver will compute the software checksum
 *	       based on the partial checksum computed
 *	       by the IP layer.
 *	- received packets:
 *	  TCP: marks packets checksum flags based on hardware result.
 *	  UDP: will not mark checksum flags.
 *
 *  (2) nxge_cksum_offload = 1:
 *	- transmitted packets:
 *	  TCP/UDP: uses the hardware checksum feature.
 *	- received packets:
 *	  TCP/UDP: marks packet checksum flags based on hardware result.
 *
 *  (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksum for both TCP and UDP will be computed
 *	  by the stack.
 *	- The software LSO is not allowed in this case.
 *
 *  (4) nxge_cksum_offload > 2:
 *	- Will be treated as if it were set to 2
 *	  (stack will compute the checksum).
 *
 *  (5) If the hardware bug is fixed, this workaround
 *	needs to be updated accordingly to reflect
 *	the new hardware revision.
 */
uint32_t nxge_cksum_offload = 0;
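/*
 * Like the other tunables in this file, nxge_cksum_offload can be set
 * at boot time from /etc/system. An illustrative (not prescriptive)
 * entry forcing full hardware checksumming would be:
 *
 *	set nxge:nxge_cksum_offload = 1
 *
 * The same "set <module>:<variable> = <value>" form applies to the
 * tunables declared below (nxge_rbr_size, nxge_lso_max, and so on).
 */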

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t nxge_jumbo_enable = B_FALSE;
uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/* MAX LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;


/*
 * Add tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if defined(_BIG_ENDIAN)
uint32_t nxge_use_kmem_alloc = 1;
#else
uint32_t nxge_use_kmem_alloc = 0;
#endif

rtrace_t npi_rtracebuf;

/*
 * The hardware sometimes fails to allow enough time for the link
 * partner to send an acknowledgement for packets that the hardware
 * sent to it. The hardware resends the packets earlier than it should
 * in those instances. This behavior caused some switches to
 * acknowledge the wrong packets and it triggered a fatal error.
 * This software workaround sets the replay timer to a value
 * suggested by the hardware team.
 *
 * PCI config space replay timer register:
 * the replay timeout value below (0xc) is written to
 * bits 18:14 of the register.
 */
#define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
#define	PCI_REPLAY_TIMEOUT_SHIFT	14

uint32_t nxge_set_replay_timer = 1;
uint32_t nxge_replay_timeout = 0xc;

/*
 * The transmit serialization sometimes causes the driver to sleep
 * longer than it should before calling the transmit function. The
 * performance group suggests that a time wait tunable can be used to
 * set the maximum wait time when needed, and the default is set to
 * 1 tick.
 */
uint32_t nxge_tx_serial_maxsleep = 1;
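/*
 * A minimal sketch of the read-modify-write this workaround implies
 * (the driver's actual implementation is nxge_set_pci_replay_timeout(),
 * defined later in this file); it assumes a valid PCI config access
 * handle has already been set up:
 *
 *	uint32_t value;
 *
 *	value = pci_config_get32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	pci_config_put32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
 *	    value | (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));
 */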
#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);
static int nxge_quiesce(dev_info_t *);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_remove(void *arg, int slot);
extern void nxge_grp_cleanup(p_nxge_t nxge);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
	char *, caddr_t);
/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	int slot, int rdctbl, boolean_t usetbl);
void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
	boolean_t factory);
#if defined(sun4v)
extern mblk_t *nxge_m_tx(void *arg, mblk_t *mp);
#endif

static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *, uint_t *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
    const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
    void *, uint_t *);
static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);
static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
    mac_ring_info_t *, mac_ring_handle_t);
static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);
static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);

static void nxge_niu_peu_reset(p_nxge_t nxgep);
static void nxge_set_pci_replay_timeout(nxge_t *);

mac_priv_prop_t nxge_priv_props[] = {
	{"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW},
	{"_function_number", MAC_PROP_PERM_READ},
	{"_fw_version", MAC_PROP_PERM_READ},
	{"_port_mode", MAC_PROP_PERM_READ},
	{"_hot_swap_phy", MAC_PROP_PERM_READ},
	{"_accept_jumbo", MAC_PROP_PERM_RW},
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
	{"_soft_lso_enable", MAC_PROP_PERM_RW}
};

#define	NXGE_MAX_PRIV_PROPS	\
	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	NULL,
	NULL,
	nxge_m_ioctl,
	nxge_m_getcapab,
	NULL,
	NULL,
	nxge_m_setprop,
	nxge_m_getprop
};
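/*
 * The NULL entries above are mac_callbacks_t slots this driver leaves
 * unset. In particular, the transmit entry point (mc_tx) is filled in
 * at attach time when running as a sun4v LDoms guest; see the
 * isLDOMguest() check in nxge_attach(), which does:
 *
 *	nxge_m_callbacks.mc_tx = nxge_m_tx;
 */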
void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t, ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *, ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};
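/*
 * These ddi_dma_attr_t structures constrain every DMA handle the
 * driver creates. A minimal sketch of how one of them is consumed
 * (nxge_setup_system_dma_pages() below does exactly this with
 * nxge_tx_dma_attr):
 *
 *	ddi_dma_handle_t handle;
 *
 *	if (ddi_dma_alloc_handle(dip, &nxge_tx_dma_attr,
 *	    DDI_DMA_DONTWAIT, 0, &handle) != DDI_SUCCESS)
 *		return (NXGE_ERROR);
 *
 * Note that nxge_rx_dma_attr.dma_attr_align is overwritten with the
 * system page size in nxge_setup_system_dma_pages().
 */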

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * dma chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes[] = {0x2000};
#else
size_t alloc_sizes[] = {0x1000, 0x2000, 0x4000, 0x8000,
    0x10000, 0x20000, 0x40000, 0x80000,
    0x100000, 0x200000, 0x400000, 0x800000,
    0x1000000, 0x2000000, 0x4000000};
#endif

/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);

static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;
	p_nxge_param_t param_arr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}


	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
	    &nxge_dev_desc_dma_acc_attr,
	    &nxge_rx_dma_attr);

	/* Create & initialize the per-Neptune data structure */
	/* (even if we're a guest). */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	/*
	 * Software workaround: set the replay timer.
	 */
	if (nxgep->niu_type != N2_NIU) {
		nxge_set_pci_replay_timeout(nxgep);
	}
#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		nxge_m_callbacks.mc_tx = nxge_m_tx;
	}
#endif

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail4;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC)
		 * internally, the other 2 ports use BMAC (1G "Big" MAC).
		 * The two types of MACs have different characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);

		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * The address_hi (the first int in the reg property)
		 * contains the config handle, but bits 28-31 are
		 * OBP-specific info and must be removed.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
		ddi_prop_free(regp);
	}
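	/*
	 * For example (a hypothetical value, not taken from hardware):
	 * an address_hi of 0x1A840010 masked with 0xFFFFFFF (bits 0-27)
	 * above yields a config handle of 0x0A840010; only the top
	 * nibble is discarded.
	 */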

	if (isLDOMguest(nxgep)) {
		uchar_t *prop_val;
		uint_t prop_len;
		uint32_t max_frame_size;

		extern void nxge_get_logical_props(p_nxge_t);

		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
		nxgep->mac.portmode = PORT_LOGICAL;
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
		    "phy-type", "virtual transceiver");

		nxgep->nports = 1;
		nxgep->board_ver = 0;	/* XXX What? */

		/*
		 * local-mac-address property gives us info on which
		 * specific MAC address the Hybrid resource is associated
		 * with.
		 */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "local-mac-address", &prop_val,
		    &prop_len) != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		if (prop_len != ETHERADDRL) {
			ddi_prop_free(prop_val);
			goto nxge_attach_fail5;
		}
		ether_copy(prop_val, nxgep->hio_mac_addr);
		ddi_prop_free(prop_val);
		nxge_get_logical_props(nxgep);

		/*
		 * Enable Jumbo property based on the "max-frame-size"
		 * property value.
		 */
		max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
		    nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "max-frame-size", NXGE_MTU_DEFAULT_MAX);
		if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
		    (max_frame_size <= TX_JUMBO_MTU)) {
			param_arr = nxgep->param_arr;

			param_arr[param_accept_jumbo].value = 1;
			nxgep->mac.is_jumbo = B_TRUE;
			nxgep->mac.maxframesize = (uint16_t)max_frame_size;
			nxgep->mac.default_mtu = nxgep->mac.maxframesize -
			    NXGE_EHEADER_VLAN_CRC;
		}
	} else {
		status = nxge_xcvr_find(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
			    " Couldn't determine card type"
			    " .... exit "));
			goto nxge_attach_fail5;
		}

		status = nxge_get_config_properties(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "get_hw create failed"));
			goto nxge_attach_fail;
		}
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

	nxge_hw_id_init(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		/* Find our VR & channel sets. */
		status = nxge_hio_vr_add(nxgep);
		if (status != NXGE_OK) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_hio_vr_add failed"));
			(void) hsvc_unregister(&nxgep->niu_hsvc);
			nxgep->niu_hsvc_available = B_FALSE;
		}
		goto nxge_attach_exit;
	}
#endif

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}
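	/*
	 * nxge_mac_register() (defined later in this file) performs the
	 * GLDv3 registration. A minimal sketch of that pattern, under
	 * the assumption of the usual mac_alloc()/mac_register() API,
	 * looks like:
	 *
	 *	mac_register_t *macp = mac_alloc(MAC_VERSION);
	 *	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	 *	macp->m_driver = nxgep;
	 *	macp->m_dip = nxgep->dip;
	 *	macp->m_callbacks = &nxge_m_callbacks;
	 *	status = mac_register(macp, &nxgep->mach);
	 *	mac_free(macp);
	 */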
	/* If a guest, register with vio_net instead. */
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	/* nxge_link_monitor calls xcvr.check_link recursively */
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int status = DDI_SUCCESS;
	int instance;
	p_nxge_t nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (isLDOMguest(nxgep)) {
		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
			nxge_m_stop((void *)nxgep);
		nxge_hio_unregister(nxgep);
	} else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	/*
	 * If this flag is set, it will affect the Neptune
	 * only.
	 */
	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
		nxge_niu_peu_reset(nxgep);
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
int
nxge_hsvc_register(nxge_t *nxgep)
{
	nxge_status_t status;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			return (DDI_FAILURE);
		}
		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}

	return (DDI_SUCCESS);
}
#endif

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	p_dev_regs_t dev_regs;
	char buf[MAXPATHLEN + 1];
	char *devname;
#ifdef NXGE_DEBUG
	char *sysname;
#endif
	off_t regsize;
	nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	/*
	 * The driver is running on an N2-NIU system if devname is
	 * something like "/niu@80/network@0".
	 */
	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int *prop_val;
		uint_t prop_len;
		uint8_t func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				return (NXGE_OK);
			}
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMPORTANT: workaround for a bit-swapping bug in HW
		 * which ends up setting no-snoop = yes, resulting in
		 * DMA not being synched properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
0x%x", regsize)); 1244 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1245 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1246 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1247 1248 if (ddi_status != DDI_SUCCESS) { 1249 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1250 "ddi_map_regs for nxge vio reg failed")); 1251 goto nxge_map_regs_fail3; 1252 } 1253 nxgep->dev_regs = dev_regs; 1254 1255 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh); 1256 NPI_PCI_ADD_HANDLE_SET(nxgep, 1257 (npi_reg_ptr_t)dev_regs->nxge_pciregp); 1258 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh); 1259 NPI_MSI_ADD_HANDLE_SET(nxgep, 1260 (npi_reg_ptr_t)dev_regs->nxge_msix_regp); 1261 1262 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1263 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1264 1265 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1266 NPI_REG_ADD_HANDLE_SET(nxgep, 1267 (npi_reg_ptr_t)dev_regs->nxge_regp); 1268 1269 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1270 NPI_VREG_ADD_HANDLE_SET(nxgep, 1271 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1272 1273 break; 1274 1275 case N2_NIU: 1276 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU")); 1277 /* 1278 * Set up the device mapped register (FWARC 2006/556) 1279 * (changed back to 1: reg starts at 1!) 1280 */ 1281 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 1282 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1283 "nxge_map_regs: dev size 0x%x", regsize)); 1284 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1285 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1286 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1287 1288 if (ddi_status != DDI_SUCCESS) { 1289 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1290 "ddi_map_regs for N2/NIU, global reg failed ")); 1291 goto nxge_map_regs_fail1; 1292 } 1293 1294 /* set up the first vio region mapped register */ 1295 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1296 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1297 "nxge_map_regs: vio (1) size 0x%x", regsize)); 1298 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1299 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1300 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1301 1302 if (ddi_status != DDI_SUCCESS) { 1303 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1304 "ddi_map_regs for nxge vio reg failed")); 1305 goto nxge_map_regs_fail2; 1306 } 1307 /* set up the second vio region mapped register */ 1308 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1309 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1310 "nxge_map_regs: vio (3) size 0x%x", regsize)); 1311 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1312 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0, 1313 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh); 1314 1315 if (ddi_status != DDI_SUCCESS) { 1316 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1317 "ddi_map_regs for nxge vio2 reg failed")); 1318 goto nxge_map_regs_fail3; 1319 } 1320 nxgep->dev_regs = dev_regs; 1321 1322 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1323 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1324 1325 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1326 NPI_REG_ADD_HANDLE_SET(nxgep, 1327 (npi_reg_ptr_t)dev_regs->nxge_regp); 1328 1329 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1330 NPI_VREG_ADD_HANDLE_SET(nxgep, 1331 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1332 1333 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh); 1334 NPI_V2REG_ADD_HANDLE_SET(nxgep, 1335 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp); 1336 1337 break; 1338 } 1339 1340 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx " 1341 " handle 0x%0llx", 

	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));

	if (isLDOMguest(nxgep)) {
		nxge_guest_regs_map_free(nxgep);
		return;
	}

	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->interrupt_cookie = 0;
	} else {
		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
		    &nxgep->interrupt_cookie);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_setup_mutexes: failed 0x%x",
			    ddi_status));
			goto nxge_setup_mutexes_exit;
		}
	}

	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
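	/*
	 * Passing the iblock cookie ties each mutex's priority to the
	 * device's interrupt level, so the locks below can be acquired
	 * safely from the interrupt handlers. Assuming MUTEX_INIT wraps
	 * the DDI mutex_init(9F), the equivalent raw call for the poll
	 * lock above would be:
	 *
	 *	mutex_init(&nxgep->poll_lock, NULL, MUTEX_DRIVER,
	 *	    (void *)nxgep->interrupt_cookie);
	 */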

	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context, as FFLP
	 * operations can take a very long time to complete and hence
	 * are not suitable to invoke from interrupt handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 * enable the port, configure the dma channel bitmap,
		 * configure the max burst size).
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep); /* XXX What changes do I need to make here? */

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}


timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}
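/*
 * nxge_start_timer() takes its interval in milliseconds and converts
 * it with drv_usectohz(1000 * msec). An illustrative call arming a
 * one-second timer against a hypothetical callback would be:
 *
 *	timerid = nxge_start_timer(nxgep, my_timer_func, 1000);
 *	...
 *	nxge_stop_timer(nxgep, timerid);
 *
 * (my_timer_func is a placeholder, not a function in this driver.)
 */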
void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	/*
	 * Start the timer if the reset flag is not set.
	 * If the reset flag is set, the link monitor
	 * will not be started in order to stop further bus
	 * activity coming from this interface.
	 * The driver will start the monitor function
	 * if the interface is initialized again later.
	 */
	if (!nxge_peu_reset_enable) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t reg;
#else
	uint64_t reg;
#endif
	uint64_t regdata;
	int i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t reg;
#else
	uint64_t reg;
#endif
	uint64_t buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
#if defined(__i386)
	reg = (size_t)buf[0];
#else
	reg = buf[0];
#endif

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}


nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	/* In case a developer has changed nxge_debug_level. */
	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) {
		nxgep->nxge_debug_level = nxge_debug_level;
	}

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);

	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
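/*
 * nxge_dump_packet() formats at most MAX_DUMP_SZ (256) bytes as
 * colon-separated hex, eliding the middle of larger buffers with a
 * run of dots. An illustrative use from a debug path (mp being a
 * received mblk) might be:
 *
 *	NXGE_DEBUG_MSG((nxgep, RX_CTL, "pkt: %s",
 *	    nxge_dump_packet((char *)mp->b_rptr, MBLKL(mp))));
 *
 * Note the returned buffer is a single static area, so the function
 * is not safe for concurrent callers.
 */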
#ifdef NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char *dev_ptr;
	ddi_acc_handle_t pci_config_handle;
	uint32_t regval;
	int i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first 0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(cfg_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}
}

#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

	nxge_intrs_disable(nxgep);
	nxge_destroy_dev(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

	nxgep->suspended = DDI_RESUME;
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);
	(void) nxge_tx_mac_enable(nxgep);
	nxge_intrs_enable(nxgep);
	nxgep->suspended = 0;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_resume status = 0x%x", status));
	return (status);
}

static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
	    nxgep->mac.portnum));

	status = nxge_link_init(nxgep);

	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "port%d Bad register acc handle", nxgep->mac.portnum));
		status = NXGE_ERROR;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_setup_dev status "
		    "(xcvr init 0x%08x)", status));
		goto nxge_setup_dev_exit;
	}

nxge_setup_dev_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_dev port %d status = 0x%08x",
	    nxgep->mac.portnum, status));

	return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	(void) nxge_hw_stop(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}

static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	uint_t count;
	ddi_dma_cookie_t cookie;
	uint_t iommu_pagesize;
	nxge_status_t status = NXGE_OK;

	NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
	if (nxgep->niu_type != N2_NIU) {
		iommu_pagesize = dvma_pagesize(nxgep->dip);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
		    " default_block_size %d iommu_pagesize %d",
		    nxgep->sys_page_sz,
		    ddi_ptob(nxgep->dip, (ulong_t)1),
		    nxgep->rx_default_block_size,
		    iommu_pagesize));

		if (iommu_pagesize != 0) {
iommu_pagesize) { 2051 if (iommu_pagesize > 0x4000) 2052 nxgep->sys_page_sz = 0x4000; 2053 } else { 2054 if (nxgep->sys_page_sz > iommu_pagesize) 2055 nxgep->sys_page_sz = iommu_pagesize; 2056 } 2057 } 2058 } 2059 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2060 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2061 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2062 "default_block_size %d page mask %d", 2063 nxgep->sys_page_sz, 2064 ddi_ptob(nxgep->dip, (ulong_t)1), 2065 nxgep->rx_default_block_size, 2066 nxgep->sys_page_mask)); 2067 2068 2069 switch (nxgep->sys_page_sz) { 2070 default: 2071 nxgep->sys_page_sz = 0x1000; 2072 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2073 nxgep->rx_default_block_size = 0x1000; 2074 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2075 break; 2076 case 0x1000: 2077 nxgep->rx_default_block_size = 0x1000; 2078 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2079 break; 2080 case 0x2000: 2081 nxgep->rx_default_block_size = 0x2000; 2082 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2083 break; 2084 case 0x4000: 2085 nxgep->rx_default_block_size = 0x4000; 2086 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 2087 break; 2088 case 0x8000: 2089 nxgep->rx_default_block_size = 0x8000; 2090 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 2091 break; 2092 } 2093 2094 #ifndef USE_RX_BIG_BUF 2095 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 2096 #else 2097 nxgep->rx_default_block_size = 0x2000; 2098 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2099 #endif 2100 /* 2101 * Get the system DMA burst size. 2102 */ 2103 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2104 DDI_DMA_DONTWAIT, 0, 2105 &nxgep->dmasparehandle); 2106 if (ddi_status != DDI_SUCCESS) { 2107 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2108 "ddi_dma_alloc_handle: failed " 2109 " status 0x%x", ddi_status)); 2110 goto nxge_get_soft_properties_exit; 2111 } 2112 2113 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2114 (caddr_t)nxgep->dmasparehandle, 2115 sizeof (nxgep->dmasparehandle), 2116 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2117 DDI_DMA_DONTWAIT, 0, 2118 &cookie, &count); 2119 if (ddi_status != DDI_DMA_MAPPED) { 2120 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2121 "Binding spare handle to find system" 2122 " burstsize failed.")); 2123 ddi_status = DDI_FAILURE; 2124 goto nxge_get_soft_properties_fail1; 2125 } 2126 2127 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2128 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2129 2130 nxge_get_soft_properties_fail1: 2131 ddi_dma_free_handle(&nxgep->dmasparehandle); 2132 2133 nxge_get_soft_properties_exit: 2134 2135 if (ddi_status != DDI_SUCCESS) 2136 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2137 2138 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2139 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2140 return (status); 2141 } 2142 2143 static nxge_status_t 2144 nxge_alloc_mem_pool(p_nxge_t nxgep) 2145 { 2146 nxge_status_t status = NXGE_OK; 2147 2148 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2149 2150 status = nxge_alloc_rx_mem_pool(nxgep); 2151 if (status != NXGE_OK) { 2152 return (NXGE_ERROR); 2153 } 2154 2155 status = nxge_alloc_tx_mem_pool(nxgep); 2156 if (status != NXGE_OK) { 2157 nxge_free_rx_mem_pool(nxgep); 2158 return (NXGE_ERROR); 2159 } 2160 2161 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2162 return (NXGE_OK); 2163 } 2164 2165 static void 2166 nxge_free_mem_pool(p_nxge_t nxgep) 2167 { 2168 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2169 2170 nxge_free_rx_mem_pool(nxgep); 2171 
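	/*
	 * The transmit pool is torn down next; nxge_alloc_mem_pool()
	 * above allocated both pools, so both are freed here together.
	 */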
nxge_free_tx_mem_pool(nxgep); 2172 2173 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 2174 } 2175 2176 nxge_status_t 2177 nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 2178 { 2179 uint32_t rdc_max; 2180 p_nxge_dma_pt_cfg_t p_all_cfgp; 2181 p_nxge_hw_pt_cfg_t p_cfgp; 2182 p_nxge_dma_pool_t dma_poolp; 2183 p_nxge_dma_common_t *dma_buf_p; 2184 p_nxge_dma_pool_t dma_cntl_poolp; 2185 p_nxge_dma_common_t *dma_cntl_p; 2186 uint32_t *num_chunks; /* per dma */ 2187 nxge_status_t status = NXGE_OK; 2188 2189 uint32_t nxge_port_rbr_size; 2190 uint32_t nxge_port_rbr_spare_size; 2191 uint32_t nxge_port_rcr_size; 2192 uint32_t rx_cntl_alloc_size; 2193 2194 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 2195 2196 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2197 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2198 rdc_max = NXGE_MAX_RDCS; 2199 2200 /* 2201 * Allocate memory for the common DMA data structures. 2202 */ 2203 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2204 KM_SLEEP); 2205 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2206 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2207 2208 dma_cntl_poolp = (p_nxge_dma_pool_t) 2209 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2210 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2211 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2212 2213 num_chunks = (uint32_t *)KMEM_ZALLOC( 2214 sizeof (uint32_t) * rdc_max, KM_SLEEP); 2215 2216 /* 2217 * Assume that each DMA channel will be configured with 2218 * the default block size. 2219 * rbr block counts are modulo the batch count (16). 2220 */ 2221 nxge_port_rbr_size = p_all_cfgp->rbr_size; 2222 nxge_port_rcr_size = p_all_cfgp->rcr_size; 2223 2224 if (!nxge_port_rbr_size) { 2225 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 2226 } 2227 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 2228 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2229 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 2230 } 2231 2232 p_all_cfgp->rbr_size = nxge_port_rbr_size; 2233 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 2234 2235 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 2236 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2237 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 2238 } 2239 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 2240 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2241 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 2242 "set to default %d", 2243 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 2244 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 2245 } 2246 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 2247 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2248 "nxge_alloc_rx_mem_pool: RCR too high %d, " 2249 "set to default %d", 2250 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 2251 nxge_port_rcr_size = RCR_DEFAULT_MAX; 2252 } 2253 2254 /* 2255 * N2/NIU has limitation on the descriptor sizes (contiguous 2256 * memory allocation on data buffers to 4M (contig_mem_alloc) 2257 * and little endian for control buffers (must use the ddi/dki mem alloc 2258 * function). 
2259 */ 2260 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2261 if (nxgep->niu_type == N2_NIU) { 2262 nxge_port_rbr_spare_size = 0; 2263 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2264 (!ISP2(nxge_port_rbr_size))) { 2265 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2266 } 2267 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2268 (!ISP2(nxge_port_rcr_size))) { 2269 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2270 } 2271 } 2272 #endif 2273 2274 /* 2275 * Addresses of receive block ring, receive completion ring and the 2276 * mailbox must be all cache-aligned (64 bytes). 2277 */ 2278 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2279 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2280 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2281 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2282 2283 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2284 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2285 "nxge_port_rcr_size = %d " 2286 "rx_cntl_alloc_size = %d", 2287 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2288 nxge_port_rcr_size, 2289 rx_cntl_alloc_size)); 2290 2291 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2292 if (nxgep->niu_type == N2_NIU) { 2293 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2294 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2295 2296 if (!ISP2(rx_buf_alloc_size)) { 2297 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2298 "==> nxge_alloc_rx_mem_pool: " 2299 " must be power of 2")); 2300 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2301 goto nxge_alloc_rx_mem_pool_exit; 2302 } 2303 2304 if (rx_buf_alloc_size > (1 << 22)) { 2305 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2306 "==> nxge_alloc_rx_mem_pool: " 2307 " limit size to 4M")); 2308 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2309 goto nxge_alloc_rx_mem_pool_exit; 2310 } 2311 2312 if (rx_cntl_alloc_size < 0x2000) { 2313 rx_cntl_alloc_size = 0x2000; 2314 } 2315 } 2316 #endif 2317 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2318 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2319 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2320 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2321 2322 dma_poolp->ndmas = p_cfgp->max_rdcs; 2323 dma_poolp->num_chunks = num_chunks; 2324 dma_poolp->buf_allocated = B_TRUE; 2325 nxgep->rx_buf_pool_p = dma_poolp; 2326 dma_poolp->dma_buf_pool_p = dma_buf_p; 2327 2328 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2329 dma_cntl_poolp->buf_allocated = B_TRUE; 2330 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2331 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2332 2333 /* Allocate the receive rings, too. */ 2334 nxgep->rx_rbr_rings = 2335 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2336 nxgep->rx_rbr_rings->rbr_rings = 2337 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2338 nxgep->rx_rcr_rings = 2339 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2340 nxgep->rx_rcr_rings->rcr_rings = 2341 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2342 nxgep->rx_mbox_areas_p = 2343 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2344 nxgep->rx_mbox_areas_p->rxmbox_areas = 2345 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2346 2347 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2348 p_cfgp->max_rdcs; 2349 2350 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2351 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2352 2353 nxge_alloc_rx_mem_pool_exit: 2354 return (status); 2355 } 2356 2357 /* 2358 * nxge_alloc_rxb 2359 * 2360 * Allocate buffers for an RDC. 
2361  *
2362  * Arguments:
2363  * 	nxgep
2364  * 	channel		The channel to map into our kernel space.
2365  *
2366  * Notes:
2367  *
2368  * NPI function calls:
2369  *
2370  * NXGE function calls:
2371  *
2372  * Registers accessed:
2373  *
2374  * Context:
2375  *
2376  * Taking apart:
2377  *
2378  * Open questions:
2379  *
2380  */
2381 nxge_status_t
2382 nxge_alloc_rxb(
2383 	p_nxge_t nxgep,
2384 	int channel)
2385 {
2386 	size_t rx_buf_alloc_size;
2387 	nxge_status_t status = NXGE_OK;
2388 
2389 	nxge_dma_common_t **data;
2390 	nxge_dma_common_t **control;
2391 	uint32_t *num_chunks;
2392 
2393 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb"));
2394 
2395 	/*
2396 	 * Allocate memory for the receive buffers and descriptor rings.
2397 	 * Replace these allocation functions with the interface functions
2398 	 * provided by the partition manager if/when they are available.
2399 	 */
2400 
2401 	/*
2402 	 * Allocate memory for the receive buffer blocks.
2403 	 */
2404 	rx_buf_alloc_size = (nxgep->rx_default_block_size *
2405 	    (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));
2406 
2407 	data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2408 	num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];
2409 
2410 	if ((status = nxge_alloc_rx_buf_dma(
2411 	    nxgep, channel, data, rx_buf_alloc_size,
2412 	    nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
2413 		return (status);
2414 	}
2415 
2416 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): "
2417 	    "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));
2418 
2419 	/*
2420 	 * Allocate memory for descriptor rings and mailbox.
2421 	 */
2422 	control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2423 
2424 	if ((status = nxge_alloc_rx_cntl_dma(
2425 	    nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size))
2426 	    != NXGE_OK) {
2427 		nxge_free_rx_cntl_dma(nxgep, *control);
2428 		(*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE;
2429 		nxge_free_rx_buf_dma(nxgep, *data, *num_chunks);
2430 		return (status);
2431 	}
2432 
2433 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2434 	    "<== nxge_alloc_rxb: status 0x%08x", status));
2435 
2436 	return (status);
2437 }
2438 
2439 void
2440 nxge_free_rxb(
2441 	p_nxge_t nxgep,
2442 	int channel)
2443 {
2444 	nxge_dma_common_t *data;
2445 	nxge_dma_common_t *control;
2446 	uint32_t num_chunks;
2447 
2448 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb"));
2449 
2450 	data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2451 	num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
2452 	nxge_free_rx_buf_dma(nxgep, data, num_chunks);
2453 
2454 	nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2455 	nxgep->rx_buf_pool_p->num_chunks[channel] = 0;
2456 
2457 	control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2458 	nxge_free_rx_cntl_dma(nxgep, control);
2459 
2460 	nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2461 
2462 	KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2463 	KMEM_FREE(control, sizeof (nxge_dma_common_t));
2464 
2465 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb"));
2466 }
2467 
2468 static void
2469 nxge_free_rx_mem_pool(p_nxge_t nxgep)
2470 {
2471 	int rdc_max = NXGE_MAX_RDCS;
2472 
2473 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
2474 
2475 	if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
2476 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2477 		    "<== nxge_free_rx_mem_pool "
2478 		    "(null rx buf pool or buf not allocated"));
2479 		return;
2480 	}
2481 	if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
2482 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2483 		    "<== nxge_free_rx_mem_pool
" 2484 "(null rx cntl buf pool or cntl buf not allocated")); 2485 return; 2486 } 2487 2488 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2489 sizeof (p_nxge_dma_common_t) * rdc_max); 2490 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2491 2492 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2493 sizeof (uint32_t) * rdc_max); 2494 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2495 sizeof (p_nxge_dma_common_t) * rdc_max); 2496 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2497 2498 nxgep->rx_buf_pool_p = 0; 2499 nxgep->rx_cntl_pool_p = 0; 2500 2501 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2502 sizeof (p_rx_rbr_ring_t) * rdc_max); 2503 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2504 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2505 sizeof (p_rx_rcr_ring_t) * rdc_max); 2506 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2507 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2508 sizeof (p_rx_mbox_t) * rdc_max); 2509 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2510 2511 nxgep->rx_rbr_rings = 0; 2512 nxgep->rx_rcr_rings = 0; 2513 nxgep->rx_mbox_areas_p = 0; 2514 2515 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2516 } 2517 2518 2519 static nxge_status_t 2520 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2521 p_nxge_dma_common_t *dmap, 2522 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2523 { 2524 p_nxge_dma_common_t rx_dmap; 2525 nxge_status_t status = NXGE_OK; 2526 size_t total_alloc_size; 2527 size_t allocated = 0; 2528 int i, size_index, array_size; 2529 boolean_t use_kmem_alloc = B_FALSE; 2530 2531 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2532 2533 rx_dmap = (p_nxge_dma_common_t) 2534 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2535 KM_SLEEP); 2536 2537 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2538 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2539 dma_channel, alloc_size, block_size, dmap)); 2540 2541 total_alloc_size = alloc_size; 2542 2543 #if defined(RX_USE_RECLAIM_POST) 2544 total_alloc_size = alloc_size + alloc_size/4; 2545 #endif 2546 2547 i = 0; 2548 size_index = 0; 2549 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2550 while ((alloc_sizes[size_index] < alloc_size) && 2551 (size_index < array_size)) 2552 size_index++; 2553 if (size_index >= array_size) { 2554 size_index = array_size - 1; 2555 } 2556 2557 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2558 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2559 use_kmem_alloc = B_TRUE; 2560 #if defined(__i386) || defined(__amd64) 2561 size_index = 0; 2562 #endif 2563 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2564 "==> nxge_alloc_rx_buf_dma: " 2565 "Neptune use kmem_alloc() - size_index %d", 2566 size_index)); 2567 } 2568 2569 while ((allocated < total_alloc_size) && 2570 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2571 rx_dmap[i].dma_chunk_index = i; 2572 rx_dmap[i].block_size = block_size; 2573 rx_dmap[i].alength = alloc_sizes[size_index]; 2574 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2575 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2576 rx_dmap[i].dma_channel = dma_channel; 2577 rx_dmap[i].contig_alloc_type = B_FALSE; 2578 rx_dmap[i].kmem_alloc_type = B_FALSE; 2579 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2580 2581 /* 2582 * N2/NIU: data buffers must be contiguous as the driver 2583 * needs to call Hypervisor api to set up 2584 * logical pages. 
2585 */ 2586 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2587 rx_dmap[i].contig_alloc_type = B_TRUE; 2588 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2589 } else if (use_kmem_alloc) { 2590 /* For Neptune, use kmem_alloc */ 2591 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2592 "==> nxge_alloc_rx_buf_dma: " 2593 "Neptune use kmem_alloc()")); 2594 rx_dmap[i].kmem_alloc_type = B_TRUE; 2595 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2596 } 2597 2598 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2599 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2600 "i %d nblocks %d alength %d", 2601 dma_channel, i, &rx_dmap[i], block_size, 2602 i, rx_dmap[i].nblocks, 2603 rx_dmap[i].alength)); 2604 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2605 &nxge_rx_dma_attr, 2606 rx_dmap[i].alength, 2607 &nxge_dev_buf_dma_acc_attr, 2608 DDI_DMA_READ | DDI_DMA_STREAMING, 2609 (p_nxge_dma_common_t)(&rx_dmap[i])); 2610 if (status != NXGE_OK) { 2611 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2612 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2613 "dma %d size_index %d size requested %d", 2614 dma_channel, 2615 size_index, 2616 rx_dmap[i].alength)); 2617 size_index--; 2618 } else { 2619 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2620 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2621 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2622 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2623 "buf_alloc_state %d alloc_type %d", 2624 dma_channel, 2625 &rx_dmap[i], 2626 rx_dmap[i].kaddrp, 2627 rx_dmap[i].alength, 2628 rx_dmap[i].buf_alloc_state, 2629 rx_dmap[i].buf_alloc_type)); 2630 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2631 " alloc_rx_buf_dma allocated rdc %d " 2632 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2633 dma_channel, i, rx_dmap[i].alength, 2634 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2635 rx_dmap[i].kaddrp)); 2636 i++; 2637 allocated += alloc_sizes[size_index]; 2638 } 2639 } 2640 2641 if (allocated < total_alloc_size) { 2642 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2643 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2644 "allocated 0x%x requested 0x%x", 2645 dma_channel, 2646 allocated, total_alloc_size)); 2647 status = NXGE_ERROR; 2648 goto nxge_alloc_rx_mem_fail1; 2649 } 2650 2651 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2652 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2653 "allocated 0x%x requested 0x%x", 2654 dma_channel, 2655 allocated, total_alloc_size)); 2656 2657 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2658 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2659 dma_channel, i)); 2660 *num_chunks = i; 2661 *dmap = rx_dmap; 2662 2663 goto nxge_alloc_rx_mem_exit; 2664 2665 nxge_alloc_rx_mem_fail1: 2666 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2667 2668 nxge_alloc_rx_mem_exit: 2669 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2670 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2671 2672 return (status); 2673 } 2674 2675 /*ARGSUSED*/ 2676 static void 2677 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2678 uint32_t num_chunks) 2679 { 2680 int i; 2681 2682 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2683 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2684 2685 if (dmap == 0) 2686 return; 2687 2688 for (i = 0; i < num_chunks; i++) { 2689 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2690 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2691 i, dmap)); 2692 nxge_dma_free_rx_data_buf(dmap++); 2693 } 2694 2695 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2696 } 2697 2698 /*ARGSUSED*/ 2699 static nxge_status_t 2700 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2701 p_nxge_dma_common_t *dmap, size_t 
size) 2702 { 2703 p_nxge_dma_common_t rx_dmap; 2704 nxge_status_t status = NXGE_OK; 2705 2706 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2707 2708 rx_dmap = (p_nxge_dma_common_t) 2709 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2710 2711 rx_dmap->contig_alloc_type = B_FALSE; 2712 rx_dmap->kmem_alloc_type = B_FALSE; 2713 2714 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2715 &nxge_desc_dma_attr, 2716 size, 2717 &nxge_dev_desc_dma_acc_attr, 2718 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2719 rx_dmap); 2720 if (status != NXGE_OK) { 2721 goto nxge_alloc_rx_cntl_dma_fail1; 2722 } 2723 2724 *dmap = rx_dmap; 2725 goto nxge_alloc_rx_cntl_dma_exit; 2726 2727 nxge_alloc_rx_cntl_dma_fail1: 2728 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2729 2730 nxge_alloc_rx_cntl_dma_exit: 2731 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2732 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2733 2734 return (status); 2735 } 2736 2737 /*ARGSUSED*/ 2738 static void 2739 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2740 { 2741 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2742 2743 if (dmap == 0) 2744 return; 2745 2746 nxge_dma_mem_free(dmap); 2747 2748 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2749 } 2750 2751 typedef struct { 2752 size_t tx_size; 2753 size_t cr_size; 2754 size_t threshhold; 2755 } nxge_tdc_sizes_t; 2756 2757 static 2758 nxge_status_t 2759 nxge_tdc_sizes( 2760 nxge_t *nxgep, 2761 nxge_tdc_sizes_t *sizes) 2762 { 2763 uint32_t threshhold; /* The bcopy() threshhold */ 2764 size_t tx_size; /* Transmit buffer size */ 2765 size_t cr_size; /* Completion ring size */ 2766 2767 /* 2768 * Assume that each DMA channel will be configured with the 2769 * default transmit buffer size for copying transmit data. 2770 * (If a packet is bigger than this, it will not be copied.) 2771 */ 2772 if (nxgep->niu_type == N2_NIU) { 2773 threshhold = TX_BCOPY_SIZE; 2774 } else { 2775 threshhold = nxge_bcopy_thresh; 2776 } 2777 tx_size = nxge_tx_ring_size * threshhold; 2778 2779 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 2780 cr_size += sizeof (txdma_mailbox_t); 2781 2782 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2783 if (nxgep->niu_type == N2_NIU) { 2784 if (!ISP2(tx_size)) { 2785 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2786 "==> nxge_tdc_sizes: Tx size" 2787 " must be power of 2")); 2788 return (NXGE_ERROR); 2789 } 2790 2791 if (tx_size > (1 << 22)) { 2792 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2793 "==> nxge_tdc_sizes: Tx size" 2794 " limited to 4M")); 2795 return (NXGE_ERROR); 2796 } 2797 2798 if (cr_size < 0x2000) 2799 cr_size = 0x2000; 2800 } 2801 #endif 2802 2803 sizes->threshhold = threshhold; 2804 sizes->tx_size = tx_size; 2805 sizes->cr_size = cr_size; 2806 2807 return (NXGE_OK); 2808 } 2809 /* 2810 * nxge_alloc_txb 2811 * 2812 * Allocate buffers for an TDC. 2813 * 2814 * Arguments: 2815 * nxgep 2816 * channel The channel to map into our kernel space. 
2817  *
2818  * Notes:
2819  *
2820  * NPI function calls:
2821  *
2822  * NXGE function calls:
2823  *
2824  * Registers accessed:
2825  *
2826  * Context:
2827  *
2828  * Taking apart:
2829  *
2830  * Open questions:
2831  *
2832  */
2833 nxge_status_t
2834 nxge_alloc_txb(
2835 	p_nxge_t nxgep,
2836 	int channel)
2837 {
2838 	nxge_dma_common_t **dma_buf_p;
2839 	nxge_dma_common_t **dma_cntl_p;
2840 	uint32_t *num_chunks;
2841 	nxge_status_t status = NXGE_OK;
2842 
2843 	nxge_tdc_sizes_t sizes;
2844 
2845 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb"));
2846 
2847 	if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK)
2848 		return (NXGE_ERROR);
2849 
2850 	/*
2851 	 * Allocate memory for transmit buffers and descriptor rings.
2852 	 * Replace these allocation functions with the interface functions
2853 	 * provided by the partition manager Real Soon Now.
2854 	 */
2855 	dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2856 	num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel];
2857 
2858 	dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2859 
2860 	/*
2861 	 * Allocate memory for the transmit buffer pool. (The note
2862 	 * above about the partition manager interfaces applies to
2863 	 * these allocation functions as well.)
2864 	 */
2865 
2866 
2867 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2868 	    "sizes: tx: %ld, cr:%ld, th:%ld",
2869 	    sizes.tx_size, sizes.cr_size, sizes.threshhold));
2870 
2871 	*num_chunks = 0;
2872 	status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p,
2873 	    sizes.tx_size, sizes.threshhold, num_chunks);
2874 	if (status != NXGE_OK) {
2875 		cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!");
2876 		return (status);
2877 	}
2878 
2879 	/*
2880 	 * Allocate memory for descriptor rings and mailbox.
2881 	 */
2882 	status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p,
2883 	    sizes.cr_size);
2884 	if (status != NXGE_OK) {
2885 		nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks);
2886 		cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!");
2887 		return (status);
2888 	}
2889 
2890 	return (NXGE_OK);
2891 }
2892 
2893 void
2894 nxge_free_txb(
2895 	p_nxge_t nxgep,
2896 	int channel)
2897 {
2898 	nxge_dma_common_t *data;
2899 	nxge_dma_common_t *control;
2900 	uint32_t num_chunks;
2901 
2902 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb"));
2903 
2904 	data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2905 	num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2906 	nxge_free_tx_buf_dma(nxgep, data, num_chunks);
2907 
2908 	nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2909 	nxgep->tx_buf_pool_p->num_chunks[channel] = 0;
2910 
2911 	control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2912 	nxge_free_tx_cntl_dma(nxgep, control);
2913 
2914 	nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2915 
2916 	KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2917 	KMEM_FREE(control, sizeof (nxge_dma_common_t));
2918 
2919 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb"));
2920 }
2921 
2922 /*
2923  * nxge_alloc_tx_mem_pool
2924  *
2925  * This function allocates all of the per-port TDC control data structures.
2926  * The per-channel (TDC) data structures are allocated when needed.
2927 * 2928 * Arguments: 2929 * nxgep 2930 * 2931 * Notes: 2932 * 2933 * Context: 2934 * Any domain 2935 */ 2936 nxge_status_t 2937 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2938 { 2939 nxge_hw_pt_cfg_t *p_cfgp; 2940 nxge_dma_pool_t *dma_poolp; 2941 nxge_dma_common_t **dma_buf_p; 2942 nxge_dma_pool_t *dma_cntl_poolp; 2943 nxge_dma_common_t **dma_cntl_p; 2944 uint32_t *num_chunks; /* per dma */ 2945 int tdc_max; 2946 2947 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2948 2949 p_cfgp = &nxgep->pt_config.hw_config; 2950 tdc_max = NXGE_MAX_TDCS; 2951 2952 /* 2953 * Allocate memory for each transmit DMA channel. 2954 */ 2955 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2956 KM_SLEEP); 2957 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2958 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2959 2960 dma_cntl_poolp = (p_nxge_dma_pool_t) 2961 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2962 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2963 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2964 2965 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2966 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2967 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2968 "set to default %d", 2969 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2970 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2971 } 2972 2973 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2974 /* 2975 * N2/NIU has limitation on the descriptor sizes (contiguous 2976 * memory allocation on data buffers to 4M (contig_mem_alloc) 2977 * and little endian for control buffers (must use the ddi/dki mem alloc 2978 * function). The transmit ring is limited to 8K (includes the 2979 * mailbox). 2980 */ 2981 if (nxgep->niu_type == N2_NIU) { 2982 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2983 (!ISP2(nxge_tx_ring_size))) { 2984 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2985 } 2986 } 2987 #endif 2988 2989 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2990 2991 num_chunks = (uint32_t *)KMEM_ZALLOC( 2992 sizeof (uint32_t) * tdc_max, KM_SLEEP); 2993 2994 dma_poolp->ndmas = p_cfgp->tdc.owned; 2995 dma_poolp->num_chunks = num_chunks; 2996 dma_poolp->dma_buf_pool_p = dma_buf_p; 2997 nxgep->tx_buf_pool_p = dma_poolp; 2998 2999 dma_poolp->buf_allocated = B_TRUE; 3000 3001 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 3002 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 3003 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 3004 3005 dma_cntl_poolp->buf_allocated = B_TRUE; 3006 3007 nxgep->tx_rings = 3008 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 3009 nxgep->tx_rings->rings = 3010 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 3011 nxgep->tx_mbox_areas_p = 3012 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 3013 nxgep->tx_mbox_areas_p->txmbox_areas_p = 3014 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 3015 3016 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 3017 3018 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3019 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 3020 tdc_max, dma_poolp->ndmas)); 3021 3022 return (NXGE_OK); 3023 } 3024 3025 nxge_status_t 3026 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 3027 p_nxge_dma_common_t *dmap, size_t alloc_size, 3028 size_t block_size, uint32_t *num_chunks) 3029 { 3030 p_nxge_dma_common_t tx_dmap; 3031 nxge_status_t status = NXGE_OK; 3032 size_t total_alloc_size; 3033 size_t allocated = 0; 3034 int i, size_index, array_size; 3035 3036 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 3037 3038 tx_dmap = (p_nxge_dma_common_t) 3039 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * 
NXGE_DMA_BLOCK, 3040 KM_SLEEP); 3041 3042 total_alloc_size = alloc_size; 3043 i = 0; 3044 size_index = 0; 3045 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3046 while ((alloc_sizes[size_index] < alloc_size) && 3047 (size_index < array_size)) 3048 size_index++; 3049 if (size_index >= array_size) { 3050 size_index = array_size - 1; 3051 } 3052 3053 while ((allocated < total_alloc_size) && 3054 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3055 3056 tx_dmap[i].dma_chunk_index = i; 3057 tx_dmap[i].block_size = block_size; 3058 tx_dmap[i].alength = alloc_sizes[size_index]; 3059 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3060 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3061 tx_dmap[i].dma_channel = dma_channel; 3062 tx_dmap[i].contig_alloc_type = B_FALSE; 3063 tx_dmap[i].kmem_alloc_type = B_FALSE; 3064 3065 /* 3066 * N2/NIU: data buffers must be contiguous as the driver 3067 * needs to call Hypervisor api to set up 3068 * logical pages. 3069 */ 3070 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3071 tx_dmap[i].contig_alloc_type = B_TRUE; 3072 } 3073 3074 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3075 &nxge_tx_dma_attr, 3076 tx_dmap[i].alength, 3077 &nxge_dev_buf_dma_acc_attr, 3078 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3079 (p_nxge_dma_common_t)(&tx_dmap[i])); 3080 if (status != NXGE_OK) { 3081 size_index--; 3082 } else { 3083 i++; 3084 allocated += alloc_sizes[size_index]; 3085 } 3086 } 3087 3088 if (allocated < total_alloc_size) { 3089 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3090 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 3091 "allocated 0x%x requested 0x%x", 3092 dma_channel, 3093 allocated, total_alloc_size)); 3094 status = NXGE_ERROR; 3095 goto nxge_alloc_tx_mem_fail1; 3096 } 3097 3098 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3099 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3100 "allocated 0x%x requested 0x%x", 3101 dma_channel, 3102 allocated, total_alloc_size)); 3103 3104 *num_chunks = i; 3105 *dmap = tx_dmap; 3106 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3107 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3108 *dmap, i)); 3109 goto nxge_alloc_tx_mem_exit; 3110 3111 nxge_alloc_tx_mem_fail1: 3112 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3113 3114 nxge_alloc_tx_mem_exit: 3115 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3116 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3117 3118 return (status); 3119 } 3120 3121 /*ARGSUSED*/ 3122 static void 3123 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3124 uint32_t num_chunks) 3125 { 3126 int i; 3127 3128 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3129 3130 if (dmap == 0) 3131 return; 3132 3133 for (i = 0; i < num_chunks; i++) { 3134 nxge_dma_mem_free(dmap++); 3135 } 3136 3137 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3138 } 3139 3140 /*ARGSUSED*/ 3141 nxge_status_t 3142 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3143 p_nxge_dma_common_t *dmap, size_t size) 3144 { 3145 p_nxge_dma_common_t tx_dmap; 3146 nxge_status_t status = NXGE_OK; 3147 3148 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3149 tx_dmap = (p_nxge_dma_common_t) 3150 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3151 3152 tx_dmap->contig_alloc_type = B_FALSE; 3153 tx_dmap->kmem_alloc_type = B_FALSE; 3154 3155 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3156 &nxge_desc_dma_attr, 3157 size, 3158 &nxge_dev_desc_dma_acc_attr, 3159 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3160 tx_dmap); 3161 if (status != NXGE_OK) { 3162 
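		/*
		 * The descriptor-area allocation failed; release the
		 * bookkeeping structure through the common exit path below.
		 */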
goto nxge_alloc_tx_cntl_dma_fail1; 3163 } 3164 3165 *dmap = tx_dmap; 3166 goto nxge_alloc_tx_cntl_dma_exit; 3167 3168 nxge_alloc_tx_cntl_dma_fail1: 3169 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3170 3171 nxge_alloc_tx_cntl_dma_exit: 3172 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3173 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3174 3175 return (status); 3176 } 3177 3178 /*ARGSUSED*/ 3179 static void 3180 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3181 { 3182 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3183 3184 if (dmap == 0) 3185 return; 3186 3187 nxge_dma_mem_free(dmap); 3188 3189 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3190 } 3191 3192 /* 3193 * nxge_free_tx_mem_pool 3194 * 3195 * This function frees all of the per-port TDC control data structures. 3196 * The per-channel (TDC) data structures are freed when the channel 3197 * is stopped. 3198 * 3199 * Arguments: 3200 * nxgep 3201 * 3202 * Notes: 3203 * 3204 * Context: 3205 * Any domain 3206 */ 3207 static void 3208 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3209 { 3210 int tdc_max = NXGE_MAX_TDCS; 3211 3212 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3213 3214 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3215 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3216 "<== nxge_free_tx_mem_pool " 3217 "(null tx buf pool or buf not allocated")); 3218 return; 3219 } 3220 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3221 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3222 "<== nxge_free_tx_mem_pool " 3223 "(null tx cntl buf pool or cntl buf not allocated")); 3224 return; 3225 } 3226 3227 /* 1. Free the mailboxes. */ 3228 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3229 sizeof (p_tx_mbox_t) * tdc_max); 3230 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3231 3232 nxgep->tx_mbox_areas_p = 0; 3233 3234 /* 2. Free the transmit ring arrays. */ 3235 KMEM_FREE(nxgep->tx_rings->rings, 3236 sizeof (p_tx_ring_t) * tdc_max); 3237 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3238 3239 nxgep->tx_rings = 0; 3240 3241 /* 3. Free the completion ring data structures. */ 3242 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3243 sizeof (p_nxge_dma_common_t) * tdc_max); 3244 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3245 3246 nxgep->tx_cntl_pool_p = 0; 3247 3248 /* 4. Free the data ring data structures. */ 3249 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3250 sizeof (uint32_t) * tdc_max); 3251 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3252 sizeof (p_nxge_dma_common_t) * tdc_max); 3253 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3254 3255 nxgep->tx_buf_pool_p = 0; 3256 3257 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3258 } 3259 3260 /*ARGSUSED*/ 3261 static nxge_status_t 3262 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3263 struct ddi_dma_attr *dma_attrp, 3264 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3265 p_nxge_dma_common_t dma_p) 3266 { 3267 caddr_t kaddrp; 3268 int ddi_status = DDI_SUCCESS; 3269 boolean_t contig_alloc_type; 3270 boolean_t kmem_alloc_type; 3271 3272 contig_alloc_type = dma_p->contig_alloc_type; 3273 3274 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3275 /* 3276 * contig_alloc_type for contiguous memory only allowed 3277 * for N2/NIU. 
3278 */ 3279 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3280 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3281 dma_p->contig_alloc_type)); 3282 return (NXGE_ERROR | NXGE_DDI_FAILED); 3283 } 3284 3285 dma_p->dma_handle = NULL; 3286 dma_p->acc_handle = NULL; 3287 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3288 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3289 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3290 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3291 if (ddi_status != DDI_SUCCESS) { 3292 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3293 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3294 return (NXGE_ERROR | NXGE_DDI_FAILED); 3295 } 3296 3297 kmem_alloc_type = dma_p->kmem_alloc_type; 3298 3299 switch (contig_alloc_type) { 3300 case B_FALSE: 3301 switch (kmem_alloc_type) { 3302 case B_FALSE: 3303 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3304 length, 3305 acc_attr_p, 3306 xfer_flags, 3307 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3308 &dma_p->acc_handle); 3309 if (ddi_status != DDI_SUCCESS) { 3310 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3311 "nxge_dma_mem_alloc: " 3312 "ddi_dma_mem_alloc failed")); 3313 ddi_dma_free_handle(&dma_p->dma_handle); 3314 dma_p->dma_handle = NULL; 3315 return (NXGE_ERROR | NXGE_DDI_FAILED); 3316 } 3317 if (dma_p->alength < length) { 3318 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3319 "nxge_dma_mem_alloc:di_dma_mem_alloc " 3320 "< length.")); 3321 ddi_dma_mem_free(&dma_p->acc_handle); 3322 ddi_dma_free_handle(&dma_p->dma_handle); 3323 dma_p->acc_handle = NULL; 3324 dma_p->dma_handle = NULL; 3325 return (NXGE_ERROR); 3326 } 3327 3328 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3329 NULL, 3330 kaddrp, dma_p->alength, xfer_flags, 3331 DDI_DMA_DONTWAIT, 3332 0, &dma_p->dma_cookie, &dma_p->ncookies); 3333 if (ddi_status != DDI_DMA_MAPPED) { 3334 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3335 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3336 "failed " 3337 "(staus 0x%x ncookies %d.)", ddi_status, 3338 dma_p->ncookies)); 3339 if (dma_p->acc_handle) { 3340 ddi_dma_mem_free(&dma_p->acc_handle); 3341 dma_p->acc_handle = NULL; 3342 } 3343 ddi_dma_free_handle(&dma_p->dma_handle); 3344 dma_p->dma_handle = NULL; 3345 return (NXGE_ERROR | NXGE_DDI_FAILED); 3346 } 3347 3348 if (dma_p->ncookies != 1) { 3349 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3350 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3351 "> 1 cookie" 3352 "(staus 0x%x ncookies %d.)", ddi_status, 3353 dma_p->ncookies)); 3354 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3355 if (dma_p->acc_handle) { 3356 ddi_dma_mem_free(&dma_p->acc_handle); 3357 dma_p->acc_handle = NULL; 3358 } 3359 ddi_dma_free_handle(&dma_p->dma_handle); 3360 dma_p->dma_handle = NULL; 3361 dma_p->acc_handle = NULL; 3362 return (NXGE_ERROR); 3363 } 3364 break; 3365 3366 case B_TRUE: 3367 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3368 if (kaddrp == NULL) { 3369 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3370 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3371 "kmem alloc failed")); 3372 return (NXGE_ERROR); 3373 } 3374 3375 dma_p->alength = length; 3376 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3377 NULL, kaddrp, dma_p->alength, xfer_flags, 3378 DDI_DMA_DONTWAIT, 0, 3379 &dma_p->dma_cookie, &dma_p->ncookies); 3380 if (ddi_status != DDI_DMA_MAPPED) { 3381 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3382 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3383 "(kmem_alloc) failed kaddrp $%p length %d " 3384 "(staus 0x%x (%d) ncookies %d.)", 3385 kaddrp, length, 3386 ddi_status, ddi_status, dma_p->ncookies)); 3387 KMEM_FREE(kaddrp, length); 3388 
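				/*
				 * The data buffer was freed above; now
				 * release the DMA handle and clear the
				 * descriptor fields so the caller sees a
				 * clean state.
				 */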
dma_p->acc_handle = NULL; 3389 ddi_dma_free_handle(&dma_p->dma_handle); 3390 dma_p->dma_handle = NULL; 3391 dma_p->kaddrp = NULL; 3392 return (NXGE_ERROR | NXGE_DDI_FAILED); 3393 } 3394 3395 if (dma_p->ncookies != 1) { 3396 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3397 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3398 "(kmem_alloc) > 1 cookie" 3399 "(staus 0x%x ncookies %d.)", ddi_status, 3400 dma_p->ncookies)); 3401 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3402 KMEM_FREE(kaddrp, length); 3403 ddi_dma_free_handle(&dma_p->dma_handle); 3404 dma_p->dma_handle = NULL; 3405 dma_p->acc_handle = NULL; 3406 dma_p->kaddrp = NULL; 3407 return (NXGE_ERROR); 3408 } 3409 3410 dma_p->kaddrp = kaddrp; 3411 3412 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3413 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3414 "kaddr $%p alength %d", 3415 dma_p, 3416 kaddrp, 3417 dma_p->alength)); 3418 break; 3419 } 3420 break; 3421 3422 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3423 case B_TRUE: 3424 kaddrp = (caddr_t)contig_mem_alloc(length); 3425 if (kaddrp == NULL) { 3426 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3427 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3428 ddi_dma_free_handle(&dma_p->dma_handle); 3429 return (NXGE_ERROR | NXGE_DDI_FAILED); 3430 } 3431 3432 dma_p->alength = length; 3433 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3434 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3435 &dma_p->dma_cookie, &dma_p->ncookies); 3436 if (ddi_status != DDI_DMA_MAPPED) { 3437 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3438 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 3439 "(status 0x%x ncookies %d.)", ddi_status, 3440 dma_p->ncookies)); 3441 3442 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3443 "==> nxge_dma_mem_alloc: (not mapped)" 3444 "length %lu (0x%x) " 3445 "free contig kaddrp $%p " 3446 "va_to_pa $%p", 3447 length, length, 3448 kaddrp, 3449 va_to_pa(kaddrp))); 3450 3451 3452 contig_mem_free((void *)kaddrp, length); 3453 ddi_dma_free_handle(&dma_p->dma_handle); 3454 3455 dma_p->dma_handle = NULL; 3456 dma_p->acc_handle = NULL; 3457 dma_p->alength = NULL; 3458 dma_p->kaddrp = NULL; 3459 3460 return (NXGE_ERROR | NXGE_DDI_FAILED); 3461 } 3462 3463 if (dma_p->ncookies != 1 || 3464 (dma_p->dma_cookie.dmac_laddress == NULL)) { 3465 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3466 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 3467 "cookie or " 3468 "dmac_laddress is NULL $%p size %d " 3469 " (status 0x%x ncookies %d.)", 3470 ddi_status, 3471 dma_p->dma_cookie.dmac_laddress, 3472 dma_p->dma_cookie.dmac_size, 3473 dma_p->ncookies)); 3474 3475 contig_mem_free((void *)kaddrp, length); 3476 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3477 ddi_dma_free_handle(&dma_p->dma_handle); 3478 3479 dma_p->alength = 0; 3480 dma_p->dma_handle = NULL; 3481 dma_p->acc_handle = NULL; 3482 dma_p->kaddrp = NULL; 3483 3484 return (NXGE_ERROR | NXGE_DDI_FAILED); 3485 } 3486 break; 3487 3488 #else 3489 case B_TRUE: 3490 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3491 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 3492 return (NXGE_ERROR | NXGE_DDI_FAILED); 3493 #endif 3494 } 3495 3496 dma_p->kaddrp = kaddrp; 3497 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3498 dma_p->alength - RXBUF_64B_ALIGNED; 3499 #if defined(__i386) 3500 dma_p->ioaddr_pp = 3501 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 3502 #else 3503 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3504 #endif 3505 dma_p->last_ioaddr_pp = 3506 #if defined(__i386) 3507 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress + 3508 #else 3509 
(unsigned char *)dma_p->dma_cookie.dmac_laddress + 3510 #endif 3511 dma_p->alength - RXBUF_64B_ALIGNED; 3512 3513 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 3514 3515 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3516 dma_p->orig_ioaddr_pp = 3517 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3518 dma_p->orig_alength = length; 3519 dma_p->orig_kaddrp = kaddrp; 3520 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 3521 #endif 3522 3523 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 3524 "dma buffer allocated: dma_p $%p " 3525 "return dmac_ladress from cookie $%p cookie dmac_size %d " 3526 "dma_p->ioaddr_p $%p " 3527 "dma_p->orig_ioaddr_p $%p " 3528 "orig_vatopa $%p " 3529 "alength %d (0x%x) " 3530 "kaddrp $%p " 3531 "length %d (0x%x)", 3532 dma_p, 3533 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 3534 dma_p->ioaddr_pp, 3535 dma_p->orig_ioaddr_pp, 3536 dma_p->orig_vatopa, 3537 dma_p->alength, dma_p->alength, 3538 kaddrp, 3539 length, length)); 3540 3541 return (NXGE_OK); 3542 } 3543 3544 static void 3545 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 3546 { 3547 if (dma_p->dma_handle != NULL) { 3548 if (dma_p->ncookies) { 3549 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3550 dma_p->ncookies = 0; 3551 } 3552 ddi_dma_free_handle(&dma_p->dma_handle); 3553 dma_p->dma_handle = NULL; 3554 } 3555 3556 if (dma_p->acc_handle != NULL) { 3557 ddi_dma_mem_free(&dma_p->acc_handle); 3558 dma_p->acc_handle = NULL; 3559 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3560 } 3561 3562 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3563 if (dma_p->contig_alloc_type && 3564 dma_p->orig_kaddrp && dma_p->orig_alength) { 3565 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3566 "kaddrp $%p (orig_kaddrp $%p)" 3567 "mem type %d ", 3568 "orig_alength %d " 3569 "alength 0x%x (%d)", 3570 dma_p->kaddrp, 3571 dma_p->orig_kaddrp, 3572 dma_p->contig_alloc_type, 3573 dma_p->orig_alength, 3574 dma_p->alength, dma_p->alength)); 3575 3576 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 3577 dma_p->orig_alength = NULL; 3578 dma_p->orig_kaddrp = NULL; 3579 dma_p->contig_alloc_type = B_FALSE; 3580 } 3581 #endif 3582 dma_p->kaddrp = NULL; 3583 dma_p->alength = NULL; 3584 } 3585 3586 static void 3587 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p) 3588 { 3589 uint64_t kaddr; 3590 uint32_t buf_size; 3591 3592 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf")); 3593 3594 if (dma_p->dma_handle != NULL) { 3595 if (dma_p->ncookies) { 3596 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3597 dma_p->ncookies = 0; 3598 } 3599 ddi_dma_free_handle(&dma_p->dma_handle); 3600 dma_p->dma_handle = NULL; 3601 } 3602 3603 if (dma_p->acc_handle != NULL) { 3604 ddi_dma_mem_free(&dma_p->acc_handle); 3605 dma_p->acc_handle = NULL; 3606 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3607 } 3608 3609 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3610 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d", 3611 dma_p, 3612 dma_p->buf_alloc_state)); 3613 3614 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) { 3615 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3616 "<== nxge_dma_free_rx_data_buf: " 3617 "outstanding data buffers")); 3618 return; 3619 } 3620 3621 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3622 if (dma_p->contig_alloc_type && 3623 dma_p->orig_kaddrp && dma_p->orig_alength) { 3624 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: " 3625 "kaddrp $%p (orig_kaddrp $%p)" 3626 "mem type %d ", 3627 "orig_alength %d " 3628 "alength 0x%x (%d)", 3629 dma_p->kaddrp, 3630 dma_p->orig_kaddrp, 
3631 		    dma_p->contig_alloc_type,
3632 		    dma_p->orig_alength,
3633 		    dma_p->alength, dma_p->alength));
3634 
3635 		kaddr = (uint64_t)dma_p->orig_kaddrp;
3636 		buf_size = dma_p->orig_alength;
3637 		nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
3638 		dma_p->orig_alength = 0;
3639 		dma_p->orig_kaddrp = NULL;
3640 		dma_p->contig_alloc_type = B_FALSE;
3641 		dma_p->kaddrp = NULL;
3642 		dma_p->alength = 0;
3643 		return;
3644 	}
3645 #endif
3646 
3647 	if (dma_p->kmem_alloc_type) {
3648 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
3649 		    "nxge_dma_free_rx_data_buf: free kmem "
3650 		    "kaddrp $%p (orig_kaddrp $%p)"
3651 		    "alloc type %d "
3652 		    "orig_alength %d "
3653 		    "alength 0x%x (%d)",
3654 		    dma_p->kaddrp,
3655 		    dma_p->orig_kaddrp,
3656 		    dma_p->kmem_alloc_type,
3657 		    dma_p->orig_alength,
3658 		    dma_p->alength, dma_p->alength));
3659 #if defined(__i386)
3660 		kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
3661 #else
3662 		kaddr = (uint64_t)dma_p->kaddrp;
3663 #endif
3664 		buf_size = dma_p->orig_alength;
3665 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
3666 		    "nxge_dma_free_rx_data_buf: free dmap $%p "
3667 		    "kaddr $%p buf_size %d",
3668 		    dma_p,
3669 		    kaddr, buf_size));
3670 		nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
3671 		dma_p->alength = 0;
3672 		dma_p->orig_alength = 0;
3673 		dma_p->kaddrp = NULL;
3674 		dma_p->kmem_alloc_type = B_FALSE;
3675 	}
3676 
3677 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
3678 }
3679 
3680 /*
3681  * nxge_m_start() -- start transmitting and receiving.
3682  *
3683  * This function is called by the MAC layer when the first
3684  * stream is opened, to prepare the hardware for sending
3685  * and receiving packets.
3686  */
3687 static int
3688 nxge_m_start(void *arg)
3689 {
3690 	p_nxge_t nxgep = (p_nxge_t)arg;
3691 
3692 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3693 
3694 	if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) {
3695 		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
3696 	}
3697 
3698 	MUTEX_ENTER(nxgep->genlock);
3699 	if (nxge_init(nxgep) != NXGE_OK) {
3700 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3701 		    "<== nxge_m_start: initialization failed"));
3702 		MUTEX_EXIT(nxgep->genlock);
3703 		return (EIO);
3704 	}
3705 
3706 	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
3707 		goto nxge_m_start_exit;
3708 	/*
3709 	 * Start the timer to check for system errors and tx hangs.
3710 	 */
3711 	if (!isLDOMguest(nxgep))
3712 		nxgep->nxge_timerid = nxge_start_timer(nxgep,
3713 		    nxge_check_hw_state, NXGE_CHECK_TIMER);
3714 #if defined(sun4v)
3715 	else
3716 		nxge_hio_start_timer(nxgep);
3717 #endif
3718 
3719 	nxgep->link_notify = B_TRUE;
3720 
3721 	nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3722 
3723 nxge_m_start_exit:
3724 	MUTEX_EXIT(nxgep->genlock);
3725 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3726 	return (0);
3727 }
3728 
3729 
3730 static boolean_t
3731 nxge_check_groups_stopped(p_nxge_t nxgep)
3732 {
3733 	int i;
3734 
3735 	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
3736 		if (nxgep->rx_hio_groups[i].started)
3737 			return (B_FALSE);
3738 	}
3739 
3740 	return (B_TRUE);
3741 }
3742 
3743 /*
3744  * nxge_m_stop(): stop transmitting and receiving.
3745 */ 3746 static void 3747 nxge_m_stop(void *arg) 3748 { 3749 p_nxge_t nxgep = (p_nxge_t)arg; 3750 boolean_t groups_stopped; 3751 3752 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3753 3754 groups_stopped = nxge_check_groups_stopped(nxgep); 3755 #ifdef later 3756 ASSERT(groups_stopped == B_FALSE); 3757 #endif 3758 3759 if (!groups_stopped) { 3760 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n", 3761 nxgep->instance); 3762 return; 3763 } 3764 3765 MUTEX_ENTER(nxgep->genlock); 3766 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3767 3768 if (nxgep->nxge_timerid) { 3769 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3770 nxgep->nxge_timerid = 0; 3771 } 3772 3773 nxge_uninit(nxgep); 3774 3775 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3776 3777 MUTEX_EXIT(nxgep->genlock); 3778 3779 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3780 } 3781 3782 static int 3783 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3784 { 3785 p_nxge_t nxgep = (p_nxge_t)arg; 3786 struct ether_addr addrp; 3787 3788 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3789 "==> nxge_m_multicst: add %d", add)); 3790 3791 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3792 if (add) { 3793 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3794 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3795 "<== nxge_m_multicst: add multicast failed")); 3796 return (EINVAL); 3797 } 3798 } else { 3799 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3800 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3801 "<== nxge_m_multicst: del multicast failed")); 3802 return (EINVAL); 3803 } 3804 } 3805 3806 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3807 3808 return (0); 3809 } 3810 3811 static int 3812 nxge_m_promisc(void *arg, boolean_t on) 3813 { 3814 p_nxge_t nxgep = (p_nxge_t)arg; 3815 3816 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3817 "==> nxge_m_promisc: on %d", on)); 3818 3819 if (nxge_set_promisc(nxgep, on)) { 3820 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3821 "<== nxge_m_promisc: set promisc failed")); 3822 return (EINVAL); 3823 } 3824 3825 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3826 "<== nxge_m_promisc: on %d", on)); 3827 3828 return (0); 3829 } 3830 3831 static void 3832 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3833 { 3834 p_nxge_t nxgep = (p_nxge_t)arg; 3835 struct iocblk *iocp; 3836 boolean_t need_privilege; 3837 int err; 3838 int cmd; 3839 3840 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3841 3842 iocp = (struct iocblk *)mp->b_rptr; 3843 iocp->ioc_error = 0; 3844 need_privilege = B_TRUE; 3845 cmd = iocp->ioc_cmd; 3846 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3847 switch (cmd) { 3848 default: 3849 miocnak(wq, mp, 0, EINVAL); 3850 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3851 return; 3852 3853 case LB_GET_INFO_SIZE: 3854 case LB_GET_INFO: 3855 case LB_GET_MODE: 3856 need_privilege = B_FALSE; 3857 break; 3858 case LB_SET_MODE: 3859 break; 3860 3861 3862 case NXGE_GET_MII: 3863 case NXGE_PUT_MII: 3864 case NXGE_GET64: 3865 case NXGE_PUT64: 3866 case NXGE_GET_TX_RING_SZ: 3867 case NXGE_GET_TX_DESC: 3868 case NXGE_TX_SIDE_RESET: 3869 case NXGE_RX_SIDE_RESET: 3870 case NXGE_GLOBAL_RESET: 3871 case NXGE_RESET_MAC: 3872 case NXGE_TX_REGS_DUMP: 3873 case NXGE_RX_REGS_DUMP: 3874 case NXGE_INT_REGS_DUMP: 3875 case NXGE_VIR_INT_REGS_DUMP: 3876 case NXGE_PUT_TCAM: 3877 case NXGE_GET_TCAM: 3878 case NXGE_RTRACE: 3879 case NXGE_RDUMP: 3880 3881 need_privilege = B_FALSE; 3882 break; 3883 case NXGE_INJECT_ERR: 3884 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3885 nxge_err_inject(nxgep, wq, mp); 3886 
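		/* Error injection is serviced immediately by nxge_err_inject(). */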
break; 3887 } 3888 3889 if (need_privilege) { 3890 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3891 if (err != 0) { 3892 miocnak(wq, mp, 0, err); 3893 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3894 "<== nxge_m_ioctl: no priv")); 3895 return; 3896 } 3897 } 3898 3899 switch (cmd) { 3900 3901 case LB_GET_MODE: 3902 case LB_SET_MODE: 3903 case LB_GET_INFO_SIZE: 3904 case LB_GET_INFO: 3905 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3906 break; 3907 3908 case NXGE_GET_MII: 3909 case NXGE_PUT_MII: 3910 case NXGE_PUT_TCAM: 3911 case NXGE_GET_TCAM: 3912 case NXGE_GET64: 3913 case NXGE_PUT64: 3914 case NXGE_GET_TX_RING_SZ: 3915 case NXGE_GET_TX_DESC: 3916 case NXGE_TX_SIDE_RESET: 3917 case NXGE_RX_SIDE_RESET: 3918 case NXGE_GLOBAL_RESET: 3919 case NXGE_RESET_MAC: 3920 case NXGE_TX_REGS_DUMP: 3921 case NXGE_RX_REGS_DUMP: 3922 case NXGE_INT_REGS_DUMP: 3923 case NXGE_VIR_INT_REGS_DUMP: 3924 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3925 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3926 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3927 break; 3928 } 3929 3930 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3931 } 3932 3933 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3934 3935 void 3936 nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory) 3937 { 3938 p_nxge_mmac_stats_t mmac_stats; 3939 int i; 3940 nxge_mmac_t *mmac_info; 3941 3942 mmac_info = &nxgep->nxge_mmac_info; 3943 3944 mmac_stats = &nxgep->statsp->mmac_stats; 3945 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 3946 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 3947 3948 for (i = 0; i < ETHERADDRL; i++) { 3949 if (factory) { 3950 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3951 = mmac_info->factory_mac_pool[slot][ 3952 (ETHERADDRL-1) - i]; 3953 } else { 3954 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3955 = mmac_info->mac_pool[slot].addr[ 3956 (ETHERADDRL - 1) - i]; 3957 } 3958 } 3959 } 3960 3961 /* 3962 * nxge_altmac_set() -- Set an alternate MAC address 3963 */ 3964 static int 3965 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot, 3966 int rdctbl, boolean_t usetbl) 3967 { 3968 uint8_t addrn; 3969 uint8_t portn; 3970 npi_mac_addr_t altmac; 3971 hostinfo_t mac_rdc; 3972 p_nxge_class_pt_cfg_t clscfgp; 3973 3974 3975 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 3976 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 3977 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 3978 3979 portn = nxgep->mac.portnum; 3980 addrn = (uint8_t)slot - 1; 3981 3982 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, 3983 nxgep->function_num, addrn, &altmac) != NPI_SUCCESS) 3984 return (EIO); 3985 3986 /* 3987 * Set the rdc table number for the host info entry 3988 * for this mac address slot. 3989 */ 3990 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 3991 mac_rdc.value = 0; 3992 if (usetbl) 3993 mac_rdc.bits.w0.rdc_tbl_num = rdctbl; 3994 else 3995 mac_rdc.bits.w0.rdc_tbl_num = 3996 clscfgp->mac_host_info[addrn].rdctbl; 3997 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 3998 3999 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 4000 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 4001 return (EIO); 4002 } 4003 4004 /* 4005 * Enable comparison with the alternate MAC address. 
4006 * While the first alternate addr is enabled by bit 1 of register
4007 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
4008 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
4009 * accordingly before calling npi_mac_altaddr_entry.
4010 */
4011	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4012		addrn = (uint8_t)slot - 1;
4013	else
4014		addrn = (uint8_t)slot;
4015
4016	if (npi_mac_altaddr_enable(nxgep->npi_handle,
4017	    nxgep->function_num, addrn) != NPI_SUCCESS) {
4018		return (EIO);
4019	}
4020
4021	return (0);
4022 }
4023
4024 /*
4025 * nxge_m_mmac_add_g() - find an unused address slot, set the address
4026 * value to the one specified, enable the port to start filtering on
4027 * the new MAC address. Returns 0 on success.
4028 */
4029 int
4030 nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
4031	boolean_t usetbl)
4032 {
4033	p_nxge_t nxgep = arg;
4034	int slot;
4035	nxge_mmac_t *mmac_info;
4036	int err;
4037	nxge_status_t status;
4038
4039	mutex_enter(nxgep->genlock);
4040
4041	/*
4042	 * Make sure that nxge is initialized if _start() has
4043	 * not been called.
4044	 */
4045	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4046		status = nxge_init(nxgep);
4047		if (status != NXGE_OK) {
4048			mutex_exit(nxgep->genlock);
4049			return (ENXIO);
4050		}
4051	}
4052
4053	mmac_info = &nxgep->nxge_mmac_info;
4054	if (mmac_info->naddrfree == 0) {
4055		mutex_exit(nxgep->genlock);
4056		return (ENOSPC);
4057	}
4058
4059	/*
4060	 * Search for the first available slot. Because naddrfree
4061	 * is not zero, we are guaranteed to find one.
4062	 * Each of the first two ports of Neptune has 16 alternate
4063	 * MAC slots but only the first 7 (of 15) slots have assigned factory
4064	 * MAC addresses. We first search among the slots without bundled
4065	 * factory MACs. If we fail to find one in that range, then we
4066	 * search the slots with bundled factory MACs. A factory MAC
4067	 * will be wasted while the slot is used with a user MAC address.
4068	 * But the slot could be used by a factory MAC again after calling
4069	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
4070	 */
4071	for (slot = 0; slot <= mmac_info->num_mmac; slot++) {
4072		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
4073			break;
4074	}
4075
4076	ASSERT(slot <= mmac_info->num_mmac);
4077
4078	if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl,
4079	    usetbl)) != 0) {
4080		mutex_exit(nxgep->genlock);
4081		return (err);
4082	}
4083
4084	bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
4085	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
4086	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
4087	mmac_info->naddrfree--;
4088	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4089
4090	mutex_exit(nxgep->genlock);
4091	return (0);
4092 }
4093
4094 /*
4095 * Remove the specified mac address and update the HW not to filter
4096 * the mac address anymore.
4097 */
4098 int
4099 nxge_m_mmac_remove(void *arg, int slot)
4100 {
4101	p_nxge_t nxgep = arg;
4102	nxge_mmac_t *mmac_info;
4103	uint8_t addrn;
4104	uint8_t portn;
4105	int err = 0;
4106	nxge_status_t status;
4107
4108	mutex_enter(nxgep->genlock);
4109
4110	/*
4111	 * Make sure that nxge is initialized if _start() has
4112	 * not been called.
4113	 */
4114	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4115		status = nxge_init(nxgep);
4116		if (status != NXGE_OK) {
4117			mutex_exit(nxgep->genlock);
4118			return (ENXIO);
4119		}
4120	}
4121
4122	mmac_info = &nxgep->nxge_mmac_info;
4123	if (slot < 1 || slot > mmac_info->num_mmac) {
4124		mutex_exit(nxgep->genlock);
4125		return (EINVAL);
4126	}
4127
4128	portn = nxgep->mac.portnum;
4129	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4130		addrn = (uint8_t)slot - 1;
4131	else
4132		addrn = (uint8_t)slot;
4133
4134	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
4135		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
4136		    == NPI_SUCCESS) {
4137			mmac_info->naddrfree++;
4138			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
4139			/*
4140			 * Regardless of whether the MAC we just stopped
4141			 * filtering is a user addr or a factory addr, we must
4142			 * set the MMAC_VENDOR_ADDR flag if this slot has an
4143			 * associated factory MAC to indicate that a factory
4144			 * MAC is available.
4145			 */
4146			if (slot <= mmac_info->num_factory_mmac) {
4147				mmac_info->mac_pool[slot].flags
4148				    |= MMAC_VENDOR_ADDR;
4149			}
4150			/*
4151			 * Clear mac_pool[slot].addr so that kstat shows 0
4152			 * alternate MAC address if the slot is not used.
4153			 * (But nxge_m_mmac_get returns the factory MAC even
4154			 * when the slot is not used!)
4155			 */
4156			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
4157			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4158		} else {
4159			err = EIO;
4160		}
4161	} else {
4162		err = EINVAL;
4163	}
4164
4165	mutex_exit(nxgep->genlock);
4166	return (err);
4167 }
4168
4169 /*
4170 * The callback to query all the factory addresses. naddr must be the same as
4171 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and
4172 * mcm_addr is the space allocated to keep all the addresses, whose size is
4173 * naddr * MAXMACADDRLEN.
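 * Note that factory_mac_pool[] is indexed starting at 1, which is why
 * the copy loop below reads entry (i + 1).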
4174 */ 4175 static void 4176 nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr) 4177 { 4178 nxge_t *nxgep = arg; 4179 nxge_mmac_t *mmac_info; 4180 int i; 4181 4182 mutex_enter(nxgep->genlock); 4183 4184 mmac_info = &nxgep->nxge_mmac_info; 4185 ASSERT(naddr == mmac_info->num_factory_mmac); 4186 4187 for (i = 0; i < naddr; i++) { 4188 bcopy(mmac_info->factory_mac_pool[i + 1], 4189 addr + i * MAXMACADDRLEN, ETHERADDRL); 4190 } 4191 4192 mutex_exit(nxgep->genlock); 4193 } 4194 4195 4196 static boolean_t 4197 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4198 { 4199 nxge_t *nxgep = arg; 4200 uint32_t *txflags = cap_data; 4201 4202 switch (cap) { 4203 case MAC_CAPAB_HCKSUM: 4204 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4205 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload)); 4206 if (nxge_cksum_offload <= 1) { 4207 *txflags = HCKSUM_INET_PARTIAL; 4208 } 4209 break; 4210 4211 case MAC_CAPAB_MULTIFACTADDR: { 4212 mac_capab_multifactaddr_t *mfacp = cap_data; 4213 4214 mutex_enter(nxgep->genlock); 4215 mfacp->mcm_naddr = nxgep->nxge_mmac_info.num_factory_mmac; 4216 mfacp->mcm_getaddr = nxge_m_getfactaddr; 4217 mutex_exit(nxgep->genlock); 4218 break; 4219 } 4220 4221 case MAC_CAPAB_LSO: { 4222 mac_capab_lso_t *cap_lso = cap_data; 4223 4224 if (nxgep->soft_lso_enable) { 4225 if (nxge_cksum_offload <= 1) { 4226 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4227 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 4228 nxge_lso_max = NXGE_LSO_MAXLEN; 4229 } 4230 cap_lso->lso_basic_tcp_ipv4.lso_max = 4231 nxge_lso_max; 4232 } 4233 break; 4234 } else { 4235 return (B_FALSE); 4236 } 4237 } 4238 4239 case MAC_CAPAB_RINGS: { 4240 mac_capab_rings_t *cap_rings = cap_data; 4241 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 4242 4243 mutex_enter(nxgep->genlock); 4244 if (cap_rings->mr_type == MAC_RING_TYPE_RX) { 4245 cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC; 4246 cap_rings->mr_rnum = p_cfgp->max_rdcs; 4247 cap_rings->mr_rget = nxge_fill_ring; 4248 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids; 4249 cap_rings->mr_gget = nxge_hio_group_get; 4250 cap_rings->mr_gaddring = nxge_group_add_ring; 4251 cap_rings->mr_gremring = nxge_group_rem_ring; 4252 4253 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4254 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]", 4255 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids)); 4256 } else { 4257 cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC; 4258 cap_rings->mr_rnum = p_cfgp->tdc.count; 4259 cap_rings->mr_rget = nxge_fill_ring; 4260 if (isLDOMservice(nxgep)) { 4261 /* share capable */ 4262 /* Do not report the default ring: hence -1 */ 4263 cap_rings->mr_gnum = 4264 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1; 4265 } else { 4266 cap_rings->mr_gnum = 0; 4267 } 4268 4269 cap_rings->mr_gget = nxge_hio_group_get; 4270 cap_rings->mr_gaddring = nxge_group_add_ring; 4271 cap_rings->mr_gremring = nxge_group_rem_ring; 4272 4273 NXGE_DEBUG_MSG((nxgep, TX_CTL, 4274 "==> nxge_m_getcapab: tx rings # of rings %d", 4275 p_cfgp->tdc.count)); 4276 } 4277 mutex_exit(nxgep->genlock); 4278 break; 4279 } 4280 4281 #if defined(sun4v) 4282 case MAC_CAPAB_SHARES: { 4283 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4284 4285 /* 4286 * Only the service domain driver responds to 4287 * this capability request. 
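 * Non-service domains return B_FALSE here, so the shares capability
 * is simply not advertised to the MAC layer on those domains.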
4288 */ 4289 mutex_enter(nxgep->genlock); 4290 if (isLDOMservice(nxgep)) { 4291 mshares->ms_snum = 3; 4292 mshares->ms_handle = (void *)nxgep; 4293 mshares->ms_salloc = nxge_hio_share_alloc; 4294 mshares->ms_sfree = nxge_hio_share_free; 4295 mshares->ms_sadd = nxge_hio_share_add_group; 4296 mshares->ms_sremove = nxge_hio_share_rem_group; 4297 mshares->ms_squery = nxge_hio_share_query; 4298 mshares->ms_sbind = nxge_hio_share_bind; 4299 mshares->ms_sunbind = nxge_hio_share_unbind; 4300 mutex_exit(nxgep->genlock); 4301 } else { 4302 mutex_exit(nxgep->genlock); 4303 return (B_FALSE); 4304 } 4305 break; 4306 } 4307 #endif 4308 default: 4309 return (B_FALSE); 4310 } 4311 return (B_TRUE); 4312 } 4313 4314 static boolean_t 4315 nxge_param_locked(mac_prop_id_t pr_num) 4316 { 4317 /* 4318 * All adv_* parameters are locked (read-only) while 4319 * the device is in any sort of loopback mode ... 4320 */ 4321 switch (pr_num) { 4322 case MAC_PROP_ADV_1000FDX_CAP: 4323 case MAC_PROP_EN_1000FDX_CAP: 4324 case MAC_PROP_ADV_1000HDX_CAP: 4325 case MAC_PROP_EN_1000HDX_CAP: 4326 case MAC_PROP_ADV_100FDX_CAP: 4327 case MAC_PROP_EN_100FDX_CAP: 4328 case MAC_PROP_ADV_100HDX_CAP: 4329 case MAC_PROP_EN_100HDX_CAP: 4330 case MAC_PROP_ADV_10FDX_CAP: 4331 case MAC_PROP_EN_10FDX_CAP: 4332 case MAC_PROP_ADV_10HDX_CAP: 4333 case MAC_PROP_EN_10HDX_CAP: 4334 case MAC_PROP_AUTONEG: 4335 case MAC_PROP_FLOWCTRL: 4336 return (B_TRUE); 4337 } 4338 return (B_FALSE); 4339 } 4340 4341 /* 4342 * callback functions for set/get of properties 4343 */ 4344 static int 4345 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4346 uint_t pr_valsize, const void *pr_val) 4347 { 4348 nxge_t *nxgep = barg; 4349 p_nxge_param_t param_arr; 4350 p_nxge_stats_t statsp; 4351 int err = 0; 4352 uint8_t val; 4353 uint32_t cur_mtu, new_mtu, old_framesize; 4354 link_flowctrl_t fl; 4355 4356 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4357 param_arr = nxgep->param_arr; 4358 statsp = nxgep->statsp; 4359 mutex_enter(nxgep->genlock); 4360 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4361 nxge_param_locked(pr_num)) { 4362 /* 4363 * All adv_* parameters are locked (read-only) 4364 * while the device is in any sort of loopback mode. 
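 * EBUSY is returned in that case so the caller can retry after
 * loopback mode has been turned off.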
4365 */ 4366 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4367 "==> nxge_m_setprop: loopback mode: read only")); 4368 mutex_exit(nxgep->genlock); 4369 return (EBUSY); 4370 } 4371 4372 val = *(uint8_t *)pr_val; 4373 switch (pr_num) { 4374 case MAC_PROP_EN_1000FDX_CAP: 4375 nxgep->param_en_1000fdx = val; 4376 param_arr[param_anar_1000fdx].value = val; 4377 4378 goto reprogram; 4379 4380 case MAC_PROP_EN_100FDX_CAP: 4381 nxgep->param_en_100fdx = val; 4382 param_arr[param_anar_100fdx].value = val; 4383 4384 goto reprogram; 4385 4386 case MAC_PROP_EN_10FDX_CAP: 4387 nxgep->param_en_10fdx = val; 4388 param_arr[param_anar_10fdx].value = val; 4389 4390 goto reprogram; 4391 4392 case MAC_PROP_EN_1000HDX_CAP: 4393 case MAC_PROP_EN_100HDX_CAP: 4394 case MAC_PROP_EN_10HDX_CAP: 4395 case MAC_PROP_ADV_1000FDX_CAP: 4396 case MAC_PROP_ADV_1000HDX_CAP: 4397 case MAC_PROP_ADV_100FDX_CAP: 4398 case MAC_PROP_ADV_100HDX_CAP: 4399 case MAC_PROP_ADV_10FDX_CAP: 4400 case MAC_PROP_ADV_10HDX_CAP: 4401 case MAC_PROP_STATUS: 4402 case MAC_PROP_SPEED: 4403 case MAC_PROP_DUPLEX: 4404 err = EINVAL; /* cannot set read-only properties */ 4405 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4406 "==> nxge_m_setprop: read only property %d", 4407 pr_num)); 4408 break; 4409 4410 case MAC_PROP_AUTONEG: 4411 param_arr[param_autoneg].value = val; 4412 4413 goto reprogram; 4414 4415 case MAC_PROP_MTU: 4416 cur_mtu = nxgep->mac.default_mtu; 4417 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4418 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4419 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4420 new_mtu, nxgep->mac.is_jumbo)); 4421 4422 if (new_mtu == cur_mtu) { 4423 err = 0; 4424 break; 4425 } 4426 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4427 err = EBUSY; 4428 break; 4429 } 4430 if (new_mtu < NXGE_DEFAULT_MTU || 4431 new_mtu > NXGE_MAXIMUM_MTU) { 4432 err = EINVAL; 4433 break; 4434 } 4435 4436 if ((new_mtu > NXGE_DEFAULT_MTU) && 4437 !nxgep->mac.is_jumbo) { 4438 err = EINVAL; 4439 break; 4440 } 4441 4442 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4443 nxgep->mac.maxframesize = (uint16_t) 4444 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4445 if (nxge_mac_set_framesize(nxgep)) { 4446 nxgep->mac.maxframesize = 4447 (uint16_t)old_framesize; 4448 err = EINVAL; 4449 break; 4450 } 4451 4452 err = mac_maxsdu_update(nxgep->mach, new_mtu); 4453 if (err) { 4454 nxgep->mac.maxframesize = 4455 (uint16_t)old_framesize; 4456 err = EINVAL; 4457 break; 4458 } 4459 4460 nxgep->mac.default_mtu = new_mtu; 4461 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4462 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4463 new_mtu, nxgep->mac.maxframesize)); 4464 break; 4465 4466 case MAC_PROP_FLOWCTRL: 4467 bcopy(pr_val, &fl, sizeof (fl)); 4468 switch (fl) { 4469 default: 4470 err = EINVAL; 4471 break; 4472 4473 case LINK_FLOWCTRL_NONE: 4474 param_arr[param_anar_pause].value = 0; 4475 break; 4476 4477 case LINK_FLOWCTRL_RX: 4478 param_arr[param_anar_pause].value = 1; 4479 break; 4480 4481 case LINK_FLOWCTRL_TX: 4482 case LINK_FLOWCTRL_BI: 4483 err = EINVAL; 4484 break; 4485 } 4486 4487 reprogram: 4488 if (err == 0) { 4489 if (!nxge_param_link_update(nxgep)) { 4490 err = EINVAL; 4491 } 4492 } 4493 break; 4494 case MAC_PROP_PRIVATE: 4495 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4496 "==> nxge_m_setprop: private property")); 4497 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 4498 pr_val); 4499 break; 4500 4501 default: 4502 err = ENOTSUP; 4503 break; 4504 } 4505 4506 mutex_exit(nxgep->genlock); 4507 4508 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4509 "<== nxge_m_setprop (return %d)", err)); 4510 return (err); 4511 } 
4512 4513 static int 4514 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4515 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 4516 { 4517 nxge_t *nxgep = barg; 4518 p_nxge_param_t param_arr = nxgep->param_arr; 4519 p_nxge_stats_t statsp = nxgep->statsp; 4520 int err = 0; 4521 link_flowctrl_t fl; 4522 uint64_t tmp = 0; 4523 link_state_t ls; 4524 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4525 4526 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4527 "==> nxge_m_getprop: pr_num %d", pr_num)); 4528 4529 if (pr_valsize == 0) 4530 return (EINVAL); 4531 4532 *perm = MAC_PROP_PERM_RW; 4533 4534 if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) { 4535 err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4536 return (err); 4537 } 4538 4539 bzero(pr_val, pr_valsize); 4540 switch (pr_num) { 4541 case MAC_PROP_DUPLEX: 4542 *perm = MAC_PROP_PERM_READ; 4543 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4544 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4545 "==> nxge_m_getprop: duplex mode %d", 4546 *(uint8_t *)pr_val)); 4547 break; 4548 4549 case MAC_PROP_SPEED: 4550 if (pr_valsize < sizeof (uint64_t)) 4551 return (EINVAL); 4552 *perm = MAC_PROP_PERM_READ; 4553 tmp = statsp->mac_stats.link_speed * 1000000ull; 4554 bcopy(&tmp, pr_val, sizeof (tmp)); 4555 break; 4556 4557 case MAC_PROP_STATUS: 4558 if (pr_valsize < sizeof (link_state_t)) 4559 return (EINVAL); 4560 *perm = MAC_PROP_PERM_READ; 4561 if (!statsp->mac_stats.link_up) 4562 ls = LINK_STATE_DOWN; 4563 else 4564 ls = LINK_STATE_UP; 4565 bcopy(&ls, pr_val, sizeof (ls)); 4566 break; 4567 4568 case MAC_PROP_AUTONEG: 4569 *(uint8_t *)pr_val = 4570 param_arr[param_autoneg].value; 4571 break; 4572 4573 case MAC_PROP_FLOWCTRL: 4574 if (pr_valsize < sizeof (link_flowctrl_t)) 4575 return (EINVAL); 4576 4577 fl = LINK_FLOWCTRL_NONE; 4578 if (param_arr[param_anar_pause].value) { 4579 fl = LINK_FLOWCTRL_RX; 4580 } 4581 bcopy(&fl, pr_val, sizeof (fl)); 4582 break; 4583 4584 case MAC_PROP_ADV_1000FDX_CAP: 4585 *perm = MAC_PROP_PERM_READ; 4586 *(uint8_t *)pr_val = 4587 param_arr[param_anar_1000fdx].value; 4588 break; 4589 4590 case MAC_PROP_EN_1000FDX_CAP: 4591 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4592 break; 4593 4594 case MAC_PROP_ADV_100FDX_CAP: 4595 *perm = MAC_PROP_PERM_READ; 4596 *(uint8_t *)pr_val = 4597 param_arr[param_anar_100fdx].value; 4598 break; 4599 4600 case MAC_PROP_EN_100FDX_CAP: 4601 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4602 break; 4603 4604 case MAC_PROP_ADV_10FDX_CAP: 4605 *perm = MAC_PROP_PERM_READ; 4606 *(uint8_t *)pr_val = 4607 param_arr[param_anar_10fdx].value; 4608 break; 4609 4610 case MAC_PROP_EN_10FDX_CAP: 4611 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4612 break; 4613 4614 case MAC_PROP_EN_1000HDX_CAP: 4615 case MAC_PROP_EN_100HDX_CAP: 4616 case MAC_PROP_EN_10HDX_CAP: 4617 case MAC_PROP_ADV_1000HDX_CAP: 4618 case MAC_PROP_ADV_100HDX_CAP: 4619 case MAC_PROP_ADV_10HDX_CAP: 4620 err = ENOTSUP; 4621 break; 4622 4623 case MAC_PROP_PRIVATE: 4624 err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4625 pr_valsize, pr_val, perm); 4626 break; 4627 default: 4628 err = EINVAL; 4629 break; 4630 } 4631 4632 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4633 4634 return (err); 4635 } 4636 4637 /* ARGSUSED */ 4638 static int 4639 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4640 const void *pr_val) 4641 { 4642 p_nxge_param_t param_arr = nxgep->param_arr; 4643 int err = 0; 4644 long result; 4645 4646 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4647 "==> 
nxge_set_priv_prop: name %s", pr_name)); 4648 4649 if (strcmp(pr_name, "_accept_jumbo") == 0) { 4650 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4651 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4652 "<== nxge_set_priv_prop: name %s " 4653 "pr_val %s result %d " 4654 "param %d is_jumbo %d", 4655 pr_name, pr_val, result, 4656 param_arr[param_accept_jumbo].value, 4657 nxgep->mac.is_jumbo)); 4658 4659 if (result > 1 || result < 0) { 4660 err = EINVAL; 4661 } else { 4662 if (nxgep->mac.is_jumbo == 4663 (uint32_t)result) { 4664 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4665 "no change (%d %d)", 4666 nxgep->mac.is_jumbo, 4667 result)); 4668 return (0); 4669 } 4670 } 4671 4672 param_arr[param_accept_jumbo].value = result; 4673 nxgep->mac.is_jumbo = B_FALSE; 4674 if (result) { 4675 nxgep->mac.is_jumbo = B_TRUE; 4676 } 4677 4678 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4679 "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d", 4680 pr_name, result, nxgep->mac.is_jumbo)); 4681 4682 return (err); 4683 } 4684 4685 /* Blanking */ 4686 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4687 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4688 (char *)pr_val, 4689 (caddr_t)¶m_arr[param_rxdma_intr_time]); 4690 if (err) { 4691 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4692 "<== nxge_set_priv_prop: " 4693 "unable to set (%s)", pr_name)); 4694 err = EINVAL; 4695 } else { 4696 err = 0; 4697 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4698 "<== nxge_set_priv_prop: " 4699 "set (%s)", pr_name)); 4700 } 4701 4702 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4703 "<== nxge_set_priv_prop: name %s (value %d)", 4704 pr_name, result)); 4705 4706 return (err); 4707 } 4708 4709 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4710 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4711 (char *)pr_val, 4712 (caddr_t)¶m_arr[param_rxdma_intr_pkts]); 4713 if (err) { 4714 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4715 "<== nxge_set_priv_prop: " 4716 "unable to set (%s)", pr_name)); 4717 err = EINVAL; 4718 } else { 4719 err = 0; 4720 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4721 "<== nxge_set_priv_prop: " 4722 "set (%s)", pr_name)); 4723 } 4724 4725 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4726 "<== nxge_set_priv_prop: name %s (value %d)", 4727 pr_name, result)); 4728 4729 return (err); 4730 } 4731 4732 /* Classification */ 4733 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4734 if (pr_val == NULL) { 4735 err = EINVAL; 4736 return (err); 4737 } 4738 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4739 4740 err = nxge_param_set_ip_opt(nxgep, NULL, 4741 NULL, (char *)pr_val, 4742 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 4743 4744 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4745 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4746 pr_name, result)); 4747 4748 return (err); 4749 } 4750 4751 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 4752 if (pr_val == NULL) { 4753 err = EINVAL; 4754 return (err); 4755 } 4756 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4757 4758 err = nxge_param_set_ip_opt(nxgep, NULL, 4759 NULL, (char *)pr_val, 4760 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 4761 4762 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4763 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4764 pr_name, result)); 4765 4766 return (err); 4767 } 4768 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 4769 if (pr_val == NULL) { 4770 err = EINVAL; 4771 return (err); 4772 } 4773 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4774 4775 err = nxge_param_set_ip_opt(nxgep, NULL, 4776 NULL, (char *)pr_val, 4777 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 4778 4779 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4780 "<== 
nxge_set_priv_prop: name %s (value 0x%x)", 4781 pr_name, result)); 4782 4783 return (err); 4784 } 4785 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 4786 if (pr_val == NULL) { 4787 err = EINVAL; 4788 return (err); 4789 } 4790 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4791 4792 err = nxge_param_set_ip_opt(nxgep, NULL, 4793 NULL, (char *)pr_val, 4794 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 4795 4796 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4797 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4798 pr_name, result)); 4799 4800 return (err); 4801 } 4802 4803 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 4804 if (pr_val == NULL) { 4805 err = EINVAL; 4806 return (err); 4807 } 4808 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4809 4810 err = nxge_param_set_ip_opt(nxgep, NULL, 4811 NULL, (char *)pr_val, 4812 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 4813 4814 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4815 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4816 pr_name, result)); 4817 4818 return (err); 4819 } 4820 4821 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 4822 if (pr_val == NULL) { 4823 err = EINVAL; 4824 return (err); 4825 } 4826 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4827 4828 err = nxge_param_set_ip_opt(nxgep, NULL, 4829 NULL, (char *)pr_val, 4830 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 4831 4832 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4833 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4834 pr_name, result)); 4835 4836 return (err); 4837 } 4838 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 4839 if (pr_val == NULL) { 4840 err = EINVAL; 4841 return (err); 4842 } 4843 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4844 4845 err = nxge_param_set_ip_opt(nxgep, NULL, 4846 NULL, (char *)pr_val, 4847 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 4848 4849 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4850 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4851 pr_name, result)); 4852 4853 return (err); 4854 } 4855 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 4856 if (pr_val == NULL) { 4857 err = EINVAL; 4858 return (err); 4859 } 4860 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4861 4862 err = nxge_param_set_ip_opt(nxgep, NULL, 4863 NULL, (char *)pr_val, 4864 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 4865 4866 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4867 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4868 pr_name, result)); 4869 4870 return (err); 4871 } 4872 4873 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 4874 if (pr_val == NULL) { 4875 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4876 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 4877 err = EINVAL; 4878 return (err); 4879 } 4880 4881 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4882 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4883 "<== nxge_set_priv_prop: name %s " 4884 "(lso %d pr_val %s value %d)", 4885 pr_name, nxgep->soft_lso_enable, pr_val, result)); 4886 4887 if (result > 1 || result < 0) { 4888 err = EINVAL; 4889 } else { 4890 if (nxgep->soft_lso_enable == (uint32_t)result) { 4891 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4892 "no change (%d %d)", 4893 nxgep->soft_lso_enable, result)); 4894 return (0); 4895 } 4896 } 4897 4898 nxgep->soft_lso_enable = (int)result; 4899 4900 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4901 "<== nxge_set_priv_prop: name %s (value %d)", 4902 pr_name, result)); 4903 4904 return (err); 4905 } 4906 /* 4907 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the 4908 * following code to be executed. 
4909 */ 4910 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 4911 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 4912 (caddr_t)¶m_arr[param_anar_10gfdx]); 4913 return (err); 4914 } 4915 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 4916 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 4917 (caddr_t)¶m_arr[param_anar_pause]); 4918 return (err); 4919 } 4920 4921 return (EINVAL); 4922 } 4923 4924 static int 4925 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags, 4926 uint_t pr_valsize, void *pr_val, uint_t *perm) 4927 { 4928 p_nxge_param_t param_arr = nxgep->param_arr; 4929 char valstr[MAXNAMELEN]; 4930 int err = EINVAL; 4931 uint_t strsize; 4932 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4933 4934 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4935 "==> nxge_get_priv_prop: property %s", pr_name)); 4936 4937 /* function number */ 4938 if (strcmp(pr_name, "_function_number") == 0) { 4939 if (is_default) 4940 return (ENOTSUP); 4941 *perm = MAC_PROP_PERM_READ; 4942 (void) snprintf(valstr, sizeof (valstr), "%d", 4943 nxgep->function_num); 4944 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4945 "==> nxge_get_priv_prop: name %s " 4946 "(value %d valstr %s)", 4947 pr_name, nxgep->function_num, valstr)); 4948 4949 err = 0; 4950 goto done; 4951 } 4952 4953 /* Neptune firmware version */ 4954 if (strcmp(pr_name, "_fw_version") == 0) { 4955 if (is_default) 4956 return (ENOTSUP); 4957 *perm = MAC_PROP_PERM_READ; 4958 (void) snprintf(valstr, sizeof (valstr), "%s", 4959 nxgep->vpd_info.ver); 4960 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4961 "==> nxge_get_priv_prop: name %s " 4962 "(value %d valstr %s)", 4963 pr_name, nxgep->vpd_info.ver, valstr)); 4964 4965 err = 0; 4966 goto done; 4967 } 4968 4969 /* port PHY mode */ 4970 if (strcmp(pr_name, "_port_mode") == 0) { 4971 if (is_default) 4972 return (ENOTSUP); 4973 *perm = MAC_PROP_PERM_READ; 4974 switch (nxgep->mac.portmode) { 4975 case PORT_1G_COPPER: 4976 (void) snprintf(valstr, sizeof (valstr), "1G copper %s", 4977 nxgep->hot_swappable_phy ? 4978 "[Hot Swappable]" : ""); 4979 break; 4980 case PORT_1G_FIBER: 4981 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s", 4982 nxgep->hot_swappable_phy ? 4983 "[hot swappable]" : ""); 4984 break; 4985 case PORT_10G_COPPER: 4986 (void) snprintf(valstr, sizeof (valstr), 4987 "10G copper %s", 4988 nxgep->hot_swappable_phy ? 4989 "[hot swappable]" : ""); 4990 break; 4991 case PORT_10G_FIBER: 4992 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s", 4993 nxgep->hot_swappable_phy ? 4994 "[hot swappable]" : ""); 4995 break; 4996 case PORT_10G_SERDES: 4997 (void) snprintf(valstr, sizeof (valstr), 4998 "10G serdes %s", nxgep->hot_swappable_phy ? 4999 "[hot swappable]" : ""); 5000 break; 5001 case PORT_1G_SERDES: 5002 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s", 5003 nxgep->hot_swappable_phy ? 5004 "[hot swappable]" : ""); 5005 break; 5006 case PORT_1G_TN1010: 5007 (void) snprintf(valstr, sizeof (valstr), 5008 "1G TN1010 copper %s", nxgep->hot_swappable_phy ? 5009 "[hot swappable]" : ""); 5010 break; 5011 case PORT_10G_TN1010: 5012 (void) snprintf(valstr, sizeof (valstr), 5013 "10G TN1010 copper %s", nxgep->hot_swappable_phy ? 5014 "[hot swappable]" : ""); 5015 break; 5016 case PORT_1G_RGMII_FIBER: 5017 (void) snprintf(valstr, sizeof (valstr), 5018 "1G rgmii fiber %s", nxgep->hot_swappable_phy ? 
5019 "[hot swappable]" : ""); 5020 break; 5021 case PORT_HSP_MODE: 5022 (void) snprintf(valstr, sizeof (valstr), 5023 "phy not present[hot swappable]"); 5024 break; 5025 default: 5026 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5027 nxgep->hot_swappable_phy ? 5028 "[hot swappable]" : ""); 5029 break; 5030 } 5031 5032 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5033 "==> nxge_get_priv_prop: name %s (value %s)", 5034 pr_name, valstr)); 5035 5036 err = 0; 5037 goto done; 5038 } 5039 5040 /* Hot swappable PHY */ 5041 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5042 if (is_default) 5043 return (ENOTSUP); 5044 *perm = MAC_PROP_PERM_READ; 5045 (void) snprintf(valstr, sizeof (valstr), "%s", 5046 nxgep->hot_swappable_phy ? 5047 "yes" : "no"); 5048 5049 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5050 "==> nxge_get_priv_prop: name %s " 5051 "(value %d valstr %s)", 5052 pr_name, nxgep->hot_swappable_phy, valstr)); 5053 5054 err = 0; 5055 goto done; 5056 } 5057 5058 5059 /* accept jumbo */ 5060 if (strcmp(pr_name, "_accept_jumbo") == 0) { 5061 if (is_default) 5062 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5063 else 5064 (void) snprintf(valstr, sizeof (valstr), 5065 "%d", nxgep->mac.is_jumbo); 5066 err = 0; 5067 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5068 "==> nxge_get_priv_prop: name %s (value %d (%d, %d))", 5069 pr_name, 5070 (uint32_t)param_arr[param_accept_jumbo].value, 5071 nxgep->mac.is_jumbo, 5072 nxge_jumbo_enable)); 5073 5074 goto done; 5075 } 5076 5077 /* Receive Interrupt Blanking Parameters */ 5078 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5079 err = 0; 5080 if (is_default) { 5081 (void) snprintf(valstr, sizeof (valstr), 5082 "%d", RXDMA_RCR_TO_DEFAULT); 5083 goto done; 5084 } 5085 5086 (void) snprintf(valstr, sizeof (valstr), "%d", 5087 nxgep->intr_timeout); 5088 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5089 "==> nxge_get_priv_prop: name %s (value %d)", 5090 pr_name, 5091 (uint32_t)nxgep->intr_timeout)); 5092 goto done; 5093 } 5094 5095 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5096 err = 0; 5097 if (is_default) { 5098 (void) snprintf(valstr, sizeof (valstr), 5099 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5100 goto done; 5101 } 5102 (void) snprintf(valstr, sizeof (valstr), "%d", 5103 nxgep->intr_threshold); 5104 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5105 "==> nxge_get_priv_prop: name %s (value %d)", 5106 pr_name, (uint32_t)nxgep->intr_threshold)); 5107 5108 goto done; 5109 } 5110 5111 /* Classification and Load Distribution Configuration */ 5112 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5113 if (is_default) { 5114 (void) snprintf(valstr, sizeof (valstr), "%x", 5115 NXGE_CLASS_FLOW_GEN_SERVER); 5116 err = 0; 5117 goto done; 5118 } 5119 err = nxge_dld_get_ip_opt(nxgep, 5120 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5121 5122 (void) snprintf(valstr, sizeof (valstr), "%x", 5123 (int)param_arr[param_class_opt_ipv4_tcp].value); 5124 5125 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5126 "==> nxge_get_priv_prop: %s", valstr)); 5127 goto done; 5128 } 5129 5130 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5131 if (is_default) { 5132 (void) snprintf(valstr, sizeof (valstr), "%x", 5133 NXGE_CLASS_FLOW_GEN_SERVER); 5134 err = 0; 5135 goto done; 5136 } 5137 err = nxge_dld_get_ip_opt(nxgep, 5138 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5139 5140 (void) snprintf(valstr, sizeof (valstr), "%x", 5141 (int)param_arr[param_class_opt_ipv4_udp].value); 5142 5143 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5144 "==> nxge_get_priv_prop: %s", valstr)); 5145 goto done; 5146 } 5147 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5148 
5148		if (is_default) {
5149			(void) snprintf(valstr, sizeof (valstr), "%x",
5150			    NXGE_CLASS_FLOW_GEN_SERVER);
5151			err = 0;
5152			goto done;
5153		}
5154		err = nxge_dld_get_ip_opt(nxgep,
5155		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
5156
5157		(void) snprintf(valstr, sizeof (valstr), "%x",
5158		    (int)param_arr[param_class_opt_ipv4_ah].value);
5159
5160		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5161		    "==> nxge_get_priv_prop: %s", valstr));
5162		goto done;
5163	}
5164
5165	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
5166		if (is_default) {
5167			(void) snprintf(valstr, sizeof (valstr), "%x",
5168			    NXGE_CLASS_FLOW_GEN_SERVER);
5169			err = 0;
5170			goto done;
5171		}
5172		err = nxge_dld_get_ip_opt(nxgep,
5173		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
5174
5175		(void) snprintf(valstr, sizeof (valstr), "%x",
5176		    (int)param_arr[param_class_opt_ipv4_sctp].value);
5177
5178		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5179		    "==> nxge_get_priv_prop: %s", valstr));
5180		goto done;
5181	}
5182
5183	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5184		if (is_default) {
5185			(void) snprintf(valstr, sizeof (valstr), "%x",
5186			    NXGE_CLASS_FLOW_GEN_SERVER);
5187			err = 0;
5188			goto done;
5189		}
5190		err = nxge_dld_get_ip_opt(nxgep,
5191		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
5192
5193		(void) snprintf(valstr, sizeof (valstr), "%x",
5194		    (int)param_arr[param_class_opt_ipv6_tcp].value);
5195
5196		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5197		    "==> nxge_get_priv_prop: %s", valstr));
5198		goto done;
5199	}
5200
5201	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5202		if (is_default) {
5203			(void) snprintf(valstr, sizeof (valstr), "%x",
5204			    NXGE_CLASS_FLOW_GEN_SERVER);
5205			err = 0;
5206			goto done;
5207		}
5208		err = nxge_dld_get_ip_opt(nxgep,
5209		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
5210
5211		(void) snprintf(valstr, sizeof (valstr), "%x",
5212		    (int)param_arr[param_class_opt_ipv6_udp].value);
5213
5214		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5215		    "==> nxge_get_priv_prop: %s", valstr));
5216		goto done;
5217	}
5218
5219	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5220		if (is_default) {
5221			(void) snprintf(valstr, sizeof (valstr), "%x",
5222			    NXGE_CLASS_FLOW_GEN_SERVER);
5223			err = 0;
5224			goto done;
5225		}
5226		err = nxge_dld_get_ip_opt(nxgep,
5227		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
5228
5229		(void) snprintf(valstr, sizeof (valstr), "%x",
5230		    (int)param_arr[param_class_opt_ipv6_ah].value);
5231
5232		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5233		    "==> nxge_get_priv_prop: %s", valstr));
5234		goto done;
5235	}
5236
5237	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5238		if (is_default) {
5239			(void) snprintf(valstr, sizeof (valstr), "%x",
5240			    NXGE_CLASS_FLOW_GEN_SERVER);
5241			err = 0;
5242			goto done;
5243		}
5244		err = nxge_dld_get_ip_opt(nxgep,
5245		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
5246
5247		(void) snprintf(valstr, sizeof (valstr), "%x",
5248		    (int)param_arr[param_class_opt_ipv6_sctp].value);
5249
5250		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5251		    "==> nxge_get_priv_prop: %s", valstr));
5252		goto done;
5253	}
5254
5255	/* Software LSO */
5256	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5257		if (is_default) {
5258			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
5259			err = 0;
5260			goto done;
5261		}
5262		(void) snprintf(valstr, sizeof (valstr),
5263		    "%d", nxgep->soft_lso_enable);
5264		err = 0;
5265		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5266		    "==> nxge_get_priv_prop: name %s (value %d)",
5267		    pr_name, nxgep->soft_lso_enable));
5268
5269		goto done;
5270	}
5271	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5272		err = 0;
5273
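		/*
		 * The advertised 10G full-duplex capability reads back as 1
		 * both for the default case and whenever param_arr currently
		 * advertises it; only an explicit zero reads back as 0.
		 */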
if (is_default || 5274 nxgep->param_arr[param_anar_10gfdx].value != 0) { 5275 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5276 goto done; 5277 } else { 5278 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5279 goto done; 5280 } 5281 } 5282 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5283 err = 0; 5284 if (is_default || 5285 nxgep->param_arr[param_anar_pause].value != 0) { 5286 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5287 goto done; 5288 } else { 5289 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5290 goto done; 5291 } 5292 } 5293 5294 done: 5295 if (err == 0) { 5296 strsize = (uint_t)strlen(valstr); 5297 if (pr_valsize < strsize) { 5298 err = ENOBUFS; 5299 } else { 5300 (void) strlcpy(pr_val, valstr, pr_valsize); 5301 } 5302 } 5303 5304 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5305 "<== nxge_get_priv_prop: return %d", err)); 5306 return (err); 5307 } 5308 5309 /* 5310 * Module loading and removing entry points. 5311 */ 5312 5313 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach, 5314 nodev, NULL, D_MP, NULL, nxge_quiesce); 5315 5316 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5317 5318 /* 5319 * Module linkage information for the kernel. 5320 */ 5321 static struct modldrv nxge_modldrv = { 5322 &mod_driverops, 5323 NXGE_DESC_VER, 5324 &nxge_dev_ops 5325 }; 5326 5327 static struct modlinkage modlinkage = { 5328 MODREV_1, (void *) &nxge_modldrv, NULL 5329 }; 5330 5331 int 5332 _init(void) 5333 { 5334 int status; 5335 5336 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5337 mac_init_ops(&nxge_dev_ops, "nxge"); 5338 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5339 if (status != 0) { 5340 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5341 "failed to init device soft state")); 5342 goto _init_exit; 5343 } 5344 status = mod_install(&modlinkage); 5345 if (status != 0) { 5346 ddi_soft_state_fini(&nxge_list); 5347 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5348 goto _init_exit; 5349 } 5350 5351 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5352 5353 _init_exit: 5354 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 5355 5356 return (status); 5357 } 5358 5359 int 5360 _fini(void) 5361 { 5362 int status; 5363 5364 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5365 5366 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5367 5368 if (nxge_mblks_pending) 5369 return (EBUSY); 5370 5371 status = mod_remove(&modlinkage); 5372 if (status != DDI_SUCCESS) { 5373 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5374 "Module removal failed 0x%08x", 5375 status)); 5376 goto _fini_exit; 5377 } 5378 5379 mac_fini_ops(&nxge_dev_ops); 5380 5381 ddi_soft_state_fini(&nxge_list); 5382 5383 MUTEX_DESTROY(&nxge_common_lock); 5384 _fini_exit: 5385 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 5386 5387 return (status); 5388 } 5389 5390 int 5391 _info(struct modinfo *modinfop) 5392 { 5393 int status; 5394 5395 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5396 status = mod_info(&modlinkage, modinfop); 5397 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5398 5399 return (status); 5400 } 5401 5402 /*ARGSUSED*/ 5403 static int 5404 nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5405 { 5406 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5407 p_nxge_t nxgep = rhp->nxgep; 5408 uint32_t channel; 5409 p_tx_ring_t ring; 5410 5411 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5412 ring = nxgep->tx_rings->rings[channel]; 5413 5414 MUTEX_ENTER(&ring->lock); 5415 
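	/*
	 * The TX ring start/stop hooks only publish (or retract) the MAC
	 * layer's ring handle under the ring lock; the DMA channel itself
	 * is brought up and torn down elsewhere.
	 */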
	ring->tx_ring_handle = rhp->ring_handle;
5416	MUTEX_EXIT(&ring->lock);
5417
5418	return (0);
5419 }
5420
5421 static void
5422 nxge_tx_ring_stop(mac_ring_driver_t rdriver)
5423 {
5424	p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5425	p_nxge_t nxgep = rhp->nxgep;
5426	uint32_t channel;
5427	p_tx_ring_t ring;
5428
5429	channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
5430	ring = nxgep->tx_rings->rings[channel];
5431
5432	MUTEX_ENTER(&ring->lock);
5433	ring->tx_ring_handle = (mac_ring_handle_t)NULL;
5434	MUTEX_EXIT(&ring->lock);
5435 }
5436
5437 static int
5438 nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
5439 {
5440	p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5441	p_nxge_t nxgep = rhp->nxgep;
5442	uint32_t channel;
5443	p_rx_rcr_ring_t ring;
5444	int i;
5445
5446	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
5447	ring = nxgep->rx_rcr_rings->rcr_rings[channel];
5448
5449	MUTEX_ENTER(&ring->lock);
5450
5451	if (nxgep->rx_channel_started[channel] == B_TRUE) {
5452		MUTEX_EXIT(&ring->lock);
5453		return (0);
5454	}
5455
5456	/* set rcr_ring */
5457	for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5458		if ((nxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
5459		    (nxgep->ldgvp->ldvp[i].channel == channel)) {
5460			ring->ldvp = &nxgep->ldgvp->ldvp[i];
5461			ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp;
5462		}
5463	}
5464
5465	nxgep->rx_channel_started[channel] = B_TRUE;
5466	ring->rcr_mac_handle = rhp->ring_handle;
5467	ring->rcr_gen_num = mr_gen_num;
5468	MUTEX_EXIT(&ring->lock);
5469
5470	return (0);
5471 }
5472
5473 static void
5474 nxge_rx_ring_stop(mac_ring_driver_t rdriver)
5475 {
5476	p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5477	p_nxge_t nxgep = rhp->nxgep;
5478	uint32_t channel;
5479	p_rx_rcr_ring_t ring;
5480
5481	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
5482	ring = nxgep->rx_rcr_rings->rcr_rings[channel];
5483
5484	MUTEX_ENTER(&ring->lock);
5485	nxgep->rx_channel_started[channel] = B_FALSE;
5486	ring->rcr_mac_handle = NULL;
5487	MUTEX_EXIT(&ring->lock);
5488 }
5489
5490 /*
5491 * Callback function for the MAC layer to register all rings.
5492 */
5493 static void
5494 nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
5495	const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
5496 {
5497	p_nxge_t nxgep = (p_nxge_t)arg;
5498	p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;
5499
5500	NXGE_DEBUG_MSG((nxgep, TX_CTL,
5501	    "==> nxge_fill_ring 0x%x index %d", rtype, index));
5502
5503	switch (rtype) {
5504	case MAC_RING_TYPE_TX: {
5505		p_nxge_ring_handle_t rhandlep;
5506
5507		NXGE_DEBUG_MSG((nxgep, TX_CTL,
5508		    "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d",
5509		    rtype, index, p_cfgp->tdc.count));
5510
5511		ASSERT((index >= 0) && (index < p_cfgp->tdc.count));
5512		rhandlep = &nxgep->tx_ring_handles[index];
5513		rhandlep->nxgep = nxgep;
5514		rhandlep->index = index;
5515		rhandlep->ring_handle = rh;
5516
5517		infop->mri_driver = (mac_ring_driver_t)rhandlep;
5518		infop->mri_start = nxge_tx_ring_start;
5519		infop->mri_stop = nxge_tx_ring_stop;
5520		infop->mri_tx = nxge_tx_ring_send;
5521
5522		break;
5523	}
5524	case MAC_RING_TYPE_RX: {
5525		p_nxge_ring_handle_t rhandlep;
5526		int nxge_rindex;
5527		mac_intr_t nxge_mac_intr;
5528
5529		NXGE_DEBUG_MSG((nxgep, RX_CTL,
5530		    "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d",
5531		    rtype, index, p_cfgp->max_rdcs));
5532
5533		/*
5534		 * 'index' is the ring index within the group.
5535		 * Find the ring index in the nxge instance.
5536		 */
5537		nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index);
5538
5539		ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs));
5540		rhandlep = &nxgep->rx_ring_handles[nxge_rindex];
5541		rhandlep->nxgep = nxgep;
5542		rhandlep->index = nxge_rindex;
5543		rhandlep->ring_handle = rh;
5544
5545		/*
5546		 * Entrypoint to enable interrupt (disable poll) and
5547		 * disable interrupt (enable poll).
5548		 */
5549		nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep;
5550		nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll;
5551		nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll;
5552		infop->mri_driver = (mac_ring_driver_t)rhandlep;
5553		infop->mri_start = nxge_rx_ring_start;
5554		infop->mri_stop = nxge_rx_ring_stop;
5555		infop->mri_intr = nxge_mac_intr; /* ??? */
5556		infop->mri_poll = nxge_rx_poll;
5557
5558		break;
5559	}
5560	default:
5561		break;
5562	}
5563
5564	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x",
5565	    rtype));
5566 }
5567
5568 static void
5569 nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
5570	mac_ring_type_t type)
5571 {
5572	nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
5573	nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
5574	nxge_t *nxge;
5575	nxge_grp_t *grp;
5576	nxge_rdc_grp_t *rdc_grp;
5577	uint16_t channel;	/* device-wise ring id */
5578	int dev_gindex;
5579	int rv;
5580
5581	nxge = rgroup->nxgep;
5582
5583	switch (type) {
5584	case MAC_RING_TYPE_TX:
5585		/*
5586		 * nxge_grp_dc_add takes a channel number which is a
5587		 * "device" ring ID.
5588		 */
5589		channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
5590
5591		/*
5592		 * Remove the ring from the default group
5593		 */
5594		if (rgroup->gindex != 0) {
5595			(void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
5596		}
5597
5598		/*
5599		 * nxge->tx_set.group[] is an array of groups indexed by
5600		 * a "port" group ID.
5601		 */
5602		grp = nxge->tx_set.group[rgroup->gindex];
5603		rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
5604		if (rv != 0) {
5605			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
5606			    "nxge_group_add_ring: nxge_grp_dc_add failed"));
5607		}
5608		break;
5609
5610	case MAC_RING_TYPE_RX:
5611		/*
5612		 * nxge->rx_set.group[] is an array of groups indexed by
5613		 * a "port" group ID.
5614		 */
5615		grp = nxge->rx_set.group[rgroup->gindex];
5616
5617		dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
5618		    rgroup->gindex;
5619		rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
5620
5621		/*
5622		 * nxge_grp_dc_add takes a channel number which is a
5623		 * "device" ring ID.
5624 */ 5625 channel = nxge->pt_config.hw_config.start_rdc + rhandle->index; 5626 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel); 5627 if (rv != 0) { 5628 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5629 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5630 } 5631 5632 rdc_grp->map |= (1 << channel); 5633 rdc_grp->max_rdcs++; 5634 5635 (void) nxge_init_fzc_rdc_tbl(nxge, rgroup->rdctbl); 5636 break; 5637 } 5638 } 5639 5640 static void 5641 nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5642 mac_ring_type_t type) 5643 { 5644 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5645 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5646 nxge_t *nxge; 5647 uint16_t channel; /* device-wise ring id */ 5648 nxge_rdc_grp_t *rdc_grp; 5649 int dev_gindex; 5650 5651 nxge = rgroup->nxgep; 5652 5653 switch (type) { 5654 case MAC_RING_TYPE_TX: 5655 dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid + 5656 rgroup->gindex; 5657 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5658 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5659 5660 /* 5661 * Add the ring back to the default group 5662 */ 5663 if (rgroup->gindex != 0) { 5664 nxge_grp_t *grp; 5665 grp = nxge->tx_set.group[0]; 5666 (void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5667 } 5668 break; 5669 5670 case MAC_RING_TYPE_RX: 5671 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5672 rgroup->gindex; 5673 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5674 channel = rdc_grp->start_rdc + rhandle->index; 5675 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 5676 5677 rdc_grp->map &= ~(1 << channel); 5678 rdc_grp->max_rdcs--; 5679 5680 (void) nxge_init_fzc_rdc_tbl(nxge, rgroup->rdctbl); 5681 break; 5682 } 5683 } 5684 5685 5686 /*ARGSUSED*/ 5687 static nxge_status_t 5688 nxge_add_intrs(p_nxge_t nxgep) 5689 { 5690 5691 int intr_types; 5692 int type = 0; 5693 int ddi_status = DDI_SUCCESS; 5694 nxge_status_t status = NXGE_OK; 5695 5696 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 5697 5698 nxgep->nxge_intr_type.intr_registered = B_FALSE; 5699 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 5700 nxgep->nxge_intr_type.msi_intx_cnt = 0; 5701 nxgep->nxge_intr_type.intr_added = 0; 5702 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 5703 nxgep->nxge_intr_type.intr_type = 0; 5704 5705 if (nxgep->niu_type == N2_NIU) { 5706 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5707 } else if (nxge_msi_enable) { 5708 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5709 } 5710 5711 /* Get the supported interrupt types */ 5712 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 5713 != DDI_SUCCESS) { 5714 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 5715 "ddi_intr_get_supported_types failed: status 0x%08x", 5716 ddi_status)); 5717 return (NXGE_ERROR | NXGE_DDI_FAILED); 5718 } 5719 nxgep->nxge_intr_type.intr_types = intr_types; 5720 5721 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5722 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5723 5724 /* 5725 * Solaris MSIX is not supported yet. use MSI for now. 
5726 * nxge_msi_enable (1): 5727 * 1 - MSI 2 - MSI-X others - FIXED 5728 */ 5729 switch (nxge_msi_enable) { 5730 default: 5731 type = DDI_INTR_TYPE_FIXED; 5732 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5733 "use fixed (intx emulation) type %08x", 5734 type)); 5735 break; 5736 5737 case 2: 5738 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5739 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5740 if (intr_types & DDI_INTR_TYPE_MSIX) { 5741 type = DDI_INTR_TYPE_MSIX; 5742 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5743 "ddi_intr_get_supported_types: MSIX 0x%08x", 5744 type)); 5745 } else if (intr_types & DDI_INTR_TYPE_MSI) { 5746 type = DDI_INTR_TYPE_MSI; 5747 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5748 "ddi_intr_get_supported_types: MSI 0x%08x", 5749 type)); 5750 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5751 type = DDI_INTR_TYPE_FIXED; 5752 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5753 "ddi_intr_get_supported_types: MSXED0x%08x", 5754 type)); 5755 } 5756 break; 5757 5758 case 1: 5759 if (intr_types & DDI_INTR_TYPE_MSI) { 5760 type = DDI_INTR_TYPE_MSI; 5761 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5762 "ddi_intr_get_supported_types: MSI 0x%08x", 5763 type)); 5764 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 5765 type = DDI_INTR_TYPE_MSIX; 5766 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5767 "ddi_intr_get_supported_types: MSIX 0x%08x", 5768 type)); 5769 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5770 type = DDI_INTR_TYPE_FIXED; 5771 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5772 "ddi_intr_get_supported_types: MSXED0x%08x", 5773 type)); 5774 } 5775 } 5776 5777 nxgep->nxge_intr_type.intr_type = type; 5778 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 5779 type == DDI_INTR_TYPE_FIXED) && 5780 nxgep->nxge_intr_type.niu_msi_enable) { 5781 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 5782 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5783 " nxge_add_intrs: " 5784 " nxge_add_intrs_adv failed: status 0x%08x", 5785 status)); 5786 return (status); 5787 } else { 5788 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5789 "interrupts registered : type %d", type)); 5790 nxgep->nxge_intr_type.intr_registered = B_TRUE; 5791 5792 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5793 "\nAdded advanced nxge add_intr_adv " 5794 "intr type 0x%x\n", type)); 5795 5796 return (status); 5797 } 5798 } 5799 5800 if (!nxgep->nxge_intr_type.intr_registered) { 5801 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 5802 "failed to register interrupts")); 5803 return (NXGE_ERROR | NXGE_DDI_FAILED); 5804 } 5805 5806 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 5807 return (status); 5808 } 5809 5810 static nxge_status_t 5811 nxge_add_intrs_adv(p_nxge_t nxgep) 5812 { 5813 int intr_type; 5814 p_nxge_intr_t intrp; 5815 5816 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 5817 5818 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5819 intr_type = intrp->intr_type; 5820 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 5821 intr_type)); 5822 5823 switch (intr_type) { 5824 case DDI_INTR_TYPE_MSI: /* 0x2 */ 5825 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 5826 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 5827 5828 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 5829 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 5830 5831 default: 5832 return (NXGE_ERROR); 5833 } 5834 } 5835 5836 5837 /*ARGSUSED*/ 5838 static nxge_status_t 5839 
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 5840 { 5841 dev_info_t *dip = nxgep->dip; 5842 p_nxge_ldg_t ldgp; 5843 p_nxge_intr_t intrp; 5844 uint_t *inthandler; 5845 void *arg1, *arg2; 5846 int behavior; 5847 int nintrs, navail, nrequest; 5848 int nactual, nrequired; 5849 int inum = 0; 5850 int x, y; 5851 int ddi_status = DDI_SUCCESS; 5852 nxge_status_t status = NXGE_OK; 5853 5854 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 5855 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5856 intrp->start_inum = 0; 5857 5858 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 5859 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 5860 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5861 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 5862 "nintrs: %d", ddi_status, nintrs)); 5863 return (NXGE_ERROR | NXGE_DDI_FAILED); 5864 } 5865 5866 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 5867 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 5868 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5869 "ddi_intr_get_navail() failed, status: 0x%x%, " 5870 "nintrs: %d", ddi_status, navail)); 5871 return (NXGE_ERROR | NXGE_DDI_FAILED); 5872 } 5873 5874 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5875 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 5876 nintrs, navail)); 5877 5878 /* PSARC/2007/453 MSI-X interrupt limit override */ 5879 if (int_type == DDI_INTR_TYPE_MSIX) { 5880 nrequest = nxge_create_msi_property(nxgep); 5881 if (nrequest < navail) { 5882 navail = nrequest; 5883 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5884 "nxge_add_intrs_adv_type: nintrs %d " 5885 "navail %d (nrequest %d)", 5886 nintrs, navail, nrequest)); 5887 } 5888 } 5889 5890 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 5891 /* MSI must be power of 2 */ 5892 if ((navail & 16) == 16) { 5893 navail = 16; 5894 } else if ((navail & 8) == 8) { 5895 navail = 8; 5896 } else if ((navail & 4) == 4) { 5897 navail = 4; 5898 } else if ((navail & 2) == 2) { 5899 navail = 2; 5900 } else { 5901 navail = 1; 5902 } 5903 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5904 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 5905 "navail %d", nintrs, navail)); 5906 } 5907 5908 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? 
DDI_INTR_ALLOC_STRICT : 5909 DDI_INTR_ALLOC_NORMAL); 5910 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 5911 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 5912 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 5913 navail, &nactual, behavior); 5914 if (ddi_status != DDI_SUCCESS || nactual == 0) { 5915 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5916 " ddi_intr_alloc() failed: %d", 5917 ddi_status)); 5918 kmem_free(intrp->htable, intrp->intr_size); 5919 return (NXGE_ERROR | NXGE_DDI_FAILED); 5920 } 5921 5922 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 5923 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 5924 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5925 " ddi_intr_get_pri() failed: %d", 5926 ddi_status)); 5927 /* Free already allocated interrupts */ 5928 for (y = 0; y < nactual; y++) { 5929 (void) ddi_intr_free(intrp->htable[y]); 5930 } 5931 5932 kmem_free(intrp->htable, intrp->intr_size); 5933 return (NXGE_ERROR | NXGE_DDI_FAILED); 5934 } 5935 5936 nrequired = 0; 5937 switch (nxgep->niu_type) { 5938 default: 5939 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 5940 break; 5941 5942 case N2_NIU: 5943 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 5944 break; 5945 } 5946 5947 if (status != NXGE_OK) { 5948 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5949 "nxge_add_intrs_adv_typ:nxge_ldgv_init " 5950 "failed: 0x%x", status)); 5951 /* Free already allocated interrupts */ 5952 for (y = 0; y < nactual; y++) { 5953 (void) ddi_intr_free(intrp->htable[y]); 5954 } 5955 5956 kmem_free(intrp->htable, intrp->intr_size); 5957 return (status); 5958 } 5959 5960 ldgp = nxgep->ldgvp->ldgp; 5961 for (x = 0; x < nrequired; x++, ldgp++) { 5962 ldgp->vector = (uint8_t)x; 5963 ldgp->intdata = SID_DATA(ldgp->func, x); 5964 arg1 = ldgp->ldvp; 5965 arg2 = nxgep; 5966 if (ldgp->nldvs == 1) { 5967 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 5968 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5969 "nxge_add_intrs_adv_type: " 5970 "arg1 0x%x arg2 0x%x: " 5971 "1-1 int handler (entry %d intdata 0x%x)\n", 5972 arg1, arg2, 5973 x, ldgp->intdata)); 5974 } else if (ldgp->nldvs > 1) { 5975 inthandler = (uint_t *)ldgp->sys_intr_handler; 5976 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5977 "nxge_add_intrs_adv_type: " 5978 "arg1 0x%x arg2 0x%x: " 5979 "nldevs %d int handler " 5980 "(entry %d intdata 0x%x)\n", 5981 arg1, arg2, 5982 ldgp->nldvs, x, ldgp->intdata)); 5983 } 5984 5985 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5986 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 5987 "htable 0x%llx", x, intrp->htable[x])); 5988 5989 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 5990 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 5991 != DDI_SUCCESS) { 5992 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5993 "==> nxge_add_intrs_adv_type: failed #%d " 5994 "status 0x%x", x, ddi_status)); 5995 for (y = 0; y < intrp->intr_added; y++) { 5996 (void) ddi_intr_remove_handler( 5997 intrp->htable[y]); 5998 } 5999 /* Free already allocated intr */ 6000 for (y = 0; y < nactual; y++) { 6001 (void) ddi_intr_free(intrp->htable[y]); 6002 } 6003 kmem_free(intrp->htable, intrp->intr_size); 6004 6005 (void) nxge_ldgv_uninit(nxgep); 6006 6007 return (NXGE_ERROR | NXGE_DDI_FAILED); 6008 } 6009 intrp->intr_added++; 6010 } 6011 6012 intrp->msi_intx_cnt = nactual; 6013 6014 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6015 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 6016 navail, nactual, 6017 intrp->msi_intx_cnt, 6018 intrp->intr_added)); 6019 6020 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6021 6022 (void) 
    (void) nxge_intr_ldgv_init(nxgep);

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

    return (status);
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
    dev_info_t *dip = nxgep->dip;
    p_nxge_ldg_t ldgp;
    p_nxge_intr_t intrp;
    uint_t *inthandler;
    void *arg1, *arg2;
    int behavior;
    int nintrs, navail;
    int nactual, nrequired;
    int inum = 0;
    int x, y;
    int ddi_status = DDI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
    intrp->start_inum = 0;

    ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
    if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "ddi_intr_get_nintrs() failed, status: 0x%x, "
            "nintrs: %d", ddi_status, nintrs));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
    if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "ddi_intr_get_navail() failed, status: 0x%x, "
            "navail: %d", ddi_status, navail));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL,
        "ddi_intr_get_navail() returned: nintrs %d, navail %d",
        nintrs, navail));

    behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
        DDI_INTR_ALLOC_NORMAL);
    intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
    intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
    ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
        navail, &nactual, behavior);
    if (ddi_status != DDI_SUCCESS || nactual == 0) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_alloc() failed: %d",
            ddi_status));
        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
        (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_get_pri() failed: %d",
            ddi_status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    nrequired = 0;
    switch (nxgep->niu_type) {
    default:
        status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
        break;

    case N2_NIU:
        status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
        break;
    }

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
            "failed: 0x%x", status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (status);
    }

    ldgp = nxgep->ldgvp->ldgp;
    for (x = 0; x < nrequired; x++, ldgp++) {
        ldgp->vector = (uint8_t)x;
        if (nxgep->niu_type != N2_NIU) {
            ldgp->intdata = SID_DATA(ldgp->func, x);
        }

        arg1 = ldgp->ldvp;
        arg2 = nxgep;
        /*
         * A group serving exactly one logical device is wired to
         * that device's own handler; a group serving several devices
         * gets the generic group handler, which services each member
         * device in turn.
         */
        if (ldgp->nldvs == 1) {
            inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
            NXGE_DEBUG_MSG((nxgep, INT_CTL,
                "nxge_add_intrs_adv_type_fix: "
                "1-1 int handler(%d) ldg %d ldv %d "
                "arg1 $%p arg2 $%p\n",
                x, ldgp->ldg, ldgp->ldvp->ldv,
                arg1, arg2));
        } else if (ldgp->nldvs > 1) {
            inthandler = (uint_t *)ldgp->sys_intr_handler;
            NXGE_DEBUG_MSG((nxgep, INT_CTL,
                "nxge_add_intrs_adv_type_fix: "
                "shared int handler(%d) nldvs %d ldg %d ldv %d "
                "arg1 0x%016llx arg2 0x%016llx\n",
                x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
                arg1, arg2));
        }

        if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
            (ddi_intr_handler_t *)inthandler, arg1, arg2))
            != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_add_intrs_adv_type_fix: failed #%d "
                "status 0x%x", x, ddi_status));
            for (y = 0; y < intrp->intr_added; y++) {
                (void) ddi_intr_remove_handler(
                    intrp->htable[y]);
            }
            /* Free already allocated interrupts */
            for (y = 0; y < nactual; y++) {
                (void) ddi_intr_free(intrp->htable[y]);
            }
            kmem_free(intrp->htable, intrp->intr_size);

            (void) nxge_ldgv_uninit(nxgep);

            return (NXGE_ERROR | NXGE_DDI_FAILED);
        }
        intrp->intr_added++;
    }

    intrp->msi_intx_cnt = nactual;

    (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

    status = nxge_intr_ldgv_init(nxgep);
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));

    return (status);
}

static void
nxge_remove_intrs(p_nxge_t nxgep)
{
    int i, inum;
    p_nxge_intr_t intrp;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
    if (!intrp->intr_registered) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "<== nxge_remove_intrs: interrupts not registered"));
        return;
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));

    if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
        (void) ddi_intr_block_disable(intrp->htable,
            intrp->intr_added);
    } else {
        for (i = 0; i < intrp->intr_added; i++) {
            (void) ddi_intr_disable(intrp->htable[i]);
        }
    }

    for (inum = 0; inum < intrp->intr_added; inum++) {
        if (intrp->htable[inum]) {
            (void) ddi_intr_remove_handler(intrp->htable[inum]);
        }
    }

    for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
        if (intrp->htable[inum]) {
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "nxge_remove_intrs: ddi_intr_free inum %d "
                "msi_intx_cnt %d intr_added %d",
                inum,
                intrp->msi_intx_cnt,
                intrp->intr_added));

            (void) ddi_intr_free(intrp->htable[inum]);
        }
    }

    kmem_free(intrp->htable, intrp->intr_size);
    intrp->intr_registered = B_FALSE;
    intrp->intr_enabled = B_FALSE;
    intrp->msi_intx_cnt = 0;
    intrp->intr_added = 0;

    (void) nxge_ldgv_uninit(nxgep);

    (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
        "#msix-request");

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
}
/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
    p_nxge_intr_t intrp;
    int i;
    int status;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

    if (!intrp->intr_registered) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
            "interrupts are not registered"));
        return;
    }

    if (intrp->intr_enabled) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "<== nxge_intrs_enable: already enabled"));
        return;
    }

    if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
        status = ddi_intr_block_enable(intrp->htable,
            intrp->intr_added);
        /* Record success so that a repeat call is a no-op. */
        if (status == DDI_SUCCESS) {
            intrp->intr_enabled = B_TRUE;
        }
        NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
            "block enable - status 0x%x total inums #%d\n",
            status, intrp->intr_added));
    } else {
        for (i = 0; i < intrp->intr_added; i++) {
            status = ddi_intr_enable(intrp->htable[i]);
            NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
                "ddi_intr_enable:enable - status 0x%x "
                "total inums %d enable inum #%d\n",
                status, intrp->intr_added, i));
            if (status == DDI_SUCCESS) {
                intrp->intr_enabled = B_TRUE;
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
    p_nxge_intr_t intrp;
    int i;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

    if (!intrp->intr_registered) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
            "interrupts are not registered"));
        return;
    }

    if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
        (void) ddi_intr_block_disable(intrp->htable,
            intrp->intr_added);
    } else {
        for (i = 0; i < intrp->intr_added; i++) {
            (void) ddi_intr_disable(intrp->htable[i]);
        }
    }

    intrp->intr_enabled = B_FALSE;
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
    mac_register_t *macp;
    int status;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

    if ((macp = mac_alloc(MAC_VERSION)) == NULL)
        return (NXGE_ERROR);

    macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    macp->m_driver = nxgep;
    macp->m_dip = nxgep->dip;
    macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
    macp->m_callbacks = &nxge_m_callbacks;
    macp->m_min_sdu = 0;
    /*
     * The SDU excludes link-level overhead: NXGE_EHEADER_VLAN_CRC
     * covers the Ethernet header, VLAN tag, and CRC, so e.g. a
     * 1522-byte maximum frame yields a default MTU of 1500.
     */
    nxgep->mac.default_mtu = nxgep->mac.maxframesize -
        NXGE_EHEADER_VLAN_CRC;
    macp->m_max_sdu = nxgep->mac.default_mtu;
    macp->m_margin = VLAN_TAGSZ;
    macp->m_priv_props = nxge_priv_props;
    macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS;
    macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1 | MAC_VIRT_SERIALIZE;

    NXGE_DEBUG_MSG((nxgep, MAC_CTL,
        "==> nxge_mac_register: instance %d "
        "max_sdu %d margin %d maxframe %d (header %d)",
        nxgep->instance,
        macp->m_max_sdu, macp->m_margin,
        nxgep->mac.maxframesize,
        NXGE_EHEADER_VLAN_CRC));

    status = mac_register(macp, &nxgep->mach);
    mac_free(macp);

    if (status != 0) {
        cmn_err(CE_WARN,
            "!nxge_mac_register failed (status %d instance %d)",
            status, nxgep->instance);
        return (NXGE_ERROR);
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
        "(instance %d)", nxgep->instance));

    return (NXGE_OK);
}
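/*
 * Debug error-injection entry point: the ioctl payload carries an
 * err_inject_t naming the hardware block, error id, and channel, and
 * the matching block-level inject routine is invoked. Blocks without
 * an inject routine are accepted as no-ops.
 */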
void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
    ssize_t size;
    mblk_t *nmp;
    uint8_t blk_id;
    uint8_t chan;
    uint32_t err_id;
    err_inject_t *eip;

    NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

    size = 1024;
    nmp = mp->b_cont;
    eip = (err_inject_t *)nmp->b_rptr;
    blk_id = eip->blk_id;
    err_id = eip->err_id;
    chan = eip->chan;
    cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
    cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
    cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
    switch (blk_id) {
    case MAC_BLK_ID:
        break;
    case TXMAC_BLK_ID:
        break;
    case RXMAC_BLK_ID:
        break;
    case MIF_BLK_ID:
        break;
    case IPP_BLK_ID:
        nxge_ipp_inject_err(nxgep, err_id);
        break;
    case TXC_BLK_ID:
        nxge_txc_inject_err(nxgep, err_id);
        break;
    case TXDMA_BLK_ID:
        nxge_txdma_inject_err(nxgep, err_id, chan);
        break;
    case RXDMA_BLK_ID:
        nxge_rxdma_inject_err(nxgep, err_id, chan);
        break;
    case ZCP_BLK_ID:
        nxge_zcp_inject_err(nxgep, err_id);
        break;
    case ESPC_BLK_ID:
        break;
    case FFLP_BLK_ID:
        break;
    case PHY_BLK_ID:
        break;
    case ETHER_SERDES_BLK_ID:
        break;
    case PCIE_SERDES_BLK_ID:
        break;
    case VIR_BLK_ID:
        break;
    }

    nmp->b_wptr = nmp->b_rptr + size;
    NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

    miocack(wq, mp, (int)size, 0);
}

static int
nxge_init_common_dev(p_nxge_t nxgep)
{
    p_nxge_hw_list_t hw_p;
    dev_info_t *p_dip;

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

    p_dip = nxgep->p_dip;
    MUTEX_ENTER(&nxge_common_lock);
    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
        "==> nxge_init_common_dev:func # %d",
        nxgep->function_num));
    /*
     * Loop through the existing per-Neptune hardware list.
     */
    for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_init_common_device:func # %d "
            "hw_p $%p parent dip $%p",
            nxgep->function_num,
            hw_p,
            p_dip));
        if (hw_p->parent_devp == p_dip) {
            nxgep->nxge_hw_p = hw_p;
            hw_p->ndevs++;
            hw_p->nxge_p[nxgep->function_num] = nxgep;
            NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                "==> nxge_init_common_device:func # %d "
                "hw_p $%p parent dip $%p "
                "ndevs %d (found)",
                nxgep->function_num,
                hw_p,
                p_dip,
                hw_p->ndevs));
            break;
        }
    }

    if (hw_p == NULL) {

        char **prop_val;
        uint_t prop_len;
        int i;

        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_init_common_device:func # %d "
            "parent dip $%p (new)",
            nxgep->function_num,
            p_dip));
        hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
        hw_p->parent_devp = p_dip;
        hw_p->magic = NXGE_NEPTUNE_MAGIC;
        nxgep->nxge_hw_p = hw_p;
        hw_p->ndevs++;
        hw_p->nxge_p[nxgep->function_num] = nxgep;
        hw_p->next = nxge_hw_list;
        if (nxgep->niu_type == N2_NIU) {
            hw_p->niu_type = N2_NIU;
            hw_p->platform_type = P_NEPTUNE_NIU;
        } else {
            hw_p->niu_type = NIU_TYPE_NONE;
            hw_p->platform_type = P_NEPTUNE_NONE;
        }

        MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

        nxge_hw_list = hw_p;
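        /*
         * First instance on this Neptune: scan the "compatible"
         * property for the ROCK identifier so that platform_type can
         * be overridden; later instances simply inherit it from this
         * list entry.
         */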
        if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
            "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
            for (i = 0; i < prop_len; i++) {
                if ((strcmp((caddr_t)prop_val[i],
                    NXGE_ROCK_COMPATIBLE) == 0)) {
                    hw_p->platform_type = P_NEPTUNE_ROCK;
                    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                        "ROCK hw_p->platform_type %d",
                        hw_p->platform_type));
                    break;
                }
                NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                    "nxge_init_common_dev: read compatible"
                    " property[%d] val[%s]",
                    i, (caddr_t)prop_val[i]));
            }

            /*
             * Free prop_val only when the lookup succeeded;
             * on failure it is never set.
             */
            ddi_prop_free(prop_val);
        }

        (void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
    }

    MUTEX_EXIT(&nxge_common_lock);

    nxgep->platform_type = hw_p->platform_type;
    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d",
        nxgep->platform_type));
    if (nxgep->niu_type != N2_NIU) {
        nxgep->niu_type = hw_p->niu_type;
    }

    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
        "==> nxge_init_common_device (nxge_hw_list) $%p",
        nxge_hw_list));
    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

    return (NXGE_OK);
}

static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
    p_nxge_hw_list_t hw_p, h_hw_p;
    p_nxge_dma_pt_cfg_t p_dma_cfgp;
    p_nxge_hw_pt_cfg_t p_cfgp;
    dev_info_t *p_dip;

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
    if (nxgep->nxge_hw_p == NULL) {
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "<== nxge_uninit_common_device (no common)"));
        return;
    }

    MUTEX_ENTER(&nxge_common_lock);
    h_hw_p = nxge_hw_list;
    for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
        p_dip = hw_p->parent_devp;
        if (nxgep->nxge_hw_p == hw_p &&
            p_dip == nxgep->p_dip &&
            nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
            hw_p->magic == NXGE_NEPTUNE_MAGIC) {

            NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                "==> nxge_uninit_common_device:func # %d "
                "hw_p $%p parent dip $%p "
                "ndevs %d (found)",
                nxgep->function_num,
                hw_p,
                p_dip,
                hw_p->ndevs));

            /*
             * Release the RDC table, a shared resource
             * of the nxge hardware. The RDC table was
             * assigned to this instance of nxge in
             * nxge_use_cfg_dma_config().
             */
            if (!isLDOMguest(nxgep)) {
                p_dma_cfgp =
                    (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
                p_cfgp =
                    (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
                (void) nxge_fzc_rdc_tbl_unbind(nxgep,
                    p_cfgp->def_mac_rxdma_grpid);

                /* Clean up any outstanding groups. */
                nxge_grp_cleanup(nxgep);
            }
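            /*
             * hw_p->ndevs reference-counts the functions sharing
             * this Neptune device; the shared locks and the hw_p
             * entry itself are torn down only when the last
             * function detaches.
             */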
            if (hw_p->ndevs) {
                hw_p->ndevs--;
            }
            hw_p->nxge_p[nxgep->function_num] = NULL;
            if (!hw_p->ndevs) {
                MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
                MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
                MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
                MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
                NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                    "==> nxge_uninit_common_device: "
                    "func # %d "
                    "hw_p $%p parent dip $%p "
                    "ndevs %d (last)",
                    nxgep->function_num,
                    hw_p,
                    p_dip,
                    hw_p->ndevs));

                nxge_hio_uninit(nxgep);

                if (hw_p == nxge_hw_list) {
                    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                        "==> nxge_uninit_common_device:"
                        "remove head func # %d "
                        "hw_p $%p parent dip $%p "
                        "ndevs %d (head)",
                        nxgep->function_num,
                        hw_p,
                        p_dip,
                        hw_p->ndevs));
                    nxge_hw_list = hw_p->next;
                } else {
                    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                        "==> nxge_uninit_common_device:"
                        "remove middle func # %d "
                        "hw_p $%p parent dip $%p "
                        "ndevs %d (middle)",
                        nxgep->function_num,
                        hw_p,
                        p_dip,
                        hw_p->ndevs));
                    h_hw_p->next = hw_p->next;
                }

                nxgep->nxge_hw_p = NULL;
                KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
            }
            break;
        } else {
            h_hw_p = hw_p;
        }
    }

    MUTEX_EXIT(&nxge_common_lock);
    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
        "==> nxge_uninit_common_device (nxge_hw_list) $%p",
        nxge_hw_list));

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}

/*
 * Determines the number of ports from the niu_type or the platform type.
 * Returns the number of ports, or zero on failure.
 */

int
nxge_get_nports(p_nxge_t nxgep)
{
    int nports = 0;

    switch (nxgep->niu_type) {
    case N2_NIU:
    case NEPTUNE_2_10GF:
        nports = 2;
        break;
    case NEPTUNE_4_1GC:
    case NEPTUNE_2_10GF_2_1GC:
    case NEPTUNE_1_10GF_3_1GC:
    case NEPTUNE_1_1GC_1_10GF_2_1GC:
    case NEPTUNE_2_10GF_2_1GRF:
        nports = 4;
        break;
    default:
        switch (nxgep->platform_type) {
        case P_NEPTUNE_NIU:
        case P_NEPTUNE_ATLAS_2PORT:
            nports = 2;
            break;
        case P_NEPTUNE_ATLAS_4PORT:
        case P_NEPTUNE_MARAMBA_P0:
        case P_NEPTUNE_MARAMBA_P1:
        case P_NEPTUNE_ROCK:
        case P_NEPTUNE_ALONSO:
            nports = 4;
            break;
        default:
            break;
        }
        break;
    }

    return (nports);
}

/*
 * The following two functions are to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
    int nmsi;
    extern int ncpus;

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));

    switch (nxgep->mac.portmode) {
    case PORT_10G_COPPER:
    case PORT_10G_FIBER:
    case PORT_10G_TN1010:
        (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
            DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
        /*
         * The maximum number of MSI-X vectors requested is 8.
         * If the system has fewer than 8 CPUs, request one
         * MSI-X vector per CPU.
         */
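        /*
         * For example, assuming NXGE_MSIX_REQUEST_10G is 8: a 4-CPU
         * system requests 4 MSI-X vectors, while systems with 8 or
         * more CPUs request 8.
         */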
        if (ncpus >= NXGE_MSIX_REQUEST_10G) {
            nmsi = NXGE_MSIX_REQUEST_10G;
        } else {
            nmsi = ncpus;
        }
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
            ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
            DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
        break;

    default:
        nmsi = NXGE_MSIX_REQUEST_1G;
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
            ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
            DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
        break;
    }

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
    return (nmsi);
}

/* ARGSUSED */
static int
nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
    void *pr_val)
{
    int err = 0;
    link_flowctrl_t fl;

    switch (pr_num) {
    case MAC_PROP_AUTONEG:
        *(uint8_t *)pr_val = 1;
        break;
    case MAC_PROP_FLOWCTRL:
        if (pr_valsize < sizeof (link_flowctrl_t))
            return (EINVAL);
        fl = LINK_FLOWCTRL_RX;
        bcopy(&fl, pr_val, sizeof (fl));
        break;
    case MAC_PROP_ADV_1000FDX_CAP:
    case MAC_PROP_EN_1000FDX_CAP:
        *(uint8_t *)pr_val = 1;
        break;
    case MAC_PROP_ADV_100FDX_CAP:
    case MAC_PROP_EN_100FDX_CAP:
        *(uint8_t *)pr_val = 1;
        break;
    default:
        err = ENOTSUP;
        break;
    }
    return (err);
}


/*
 * The following is a software workaround for the Neptune hardware's
 * interrupt bugs: the hardware may generate spurious interrupts after
 * an interrupt handler has been removed.
 */
#define NXGE_PCI_PORT_LOGIC_OFFSET	0x98
#define NXGE_PIM_RESET			(1ULL << 29)
#define NXGE_GLU_RESET			(1ULL << 30)
#define NXGE_NIU_RESET			(1ULL << 31)
#define NXGE_PCI_RESET_ALL		(NXGE_PIM_RESET |	\
					NXGE_GLU_RESET |	\
					NXGE_NIU_RESET)

#define NXGE_WAIT_QUITE_TIME		200000
#define NXGE_WAIT_QUITE_RETRY		40
#define NXGE_PCI_RESET_WAIT		1000000 /* one second */

static void
nxge_niu_peu_reset(p_nxge_t nxgep)
{
    uint32_t rvalue;
    p_nxge_hw_list_t hw_p;
    p_nxge_t fnxgep;
    int i, j;

    NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
    if ((hw_p = nxgep->nxge_hw_p) == NULL) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_niu_peu_reset: NULL hardware pointer"));
        return;
    }

    NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
        "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
        hw_p->flags, nxgep->nxge_link_poll_timerid,
        nxgep->nxge_timerid));

    MUTEX_ENTER(&hw_p->nxge_cfg_lock);
    /*
     * Make sure the other instances on the same hardware have
     * stopped sending PIOs and are in a quiescent state.
     */
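    /*
     * Poll each sibling instance for up to NXGE_WAIT_QUITE_RETRY *
     * NXGE_WAIT_QUITE_TIME microseconds, waiting for its link-poll
     * and housekeeping timers to drain; if either timer is still
     * armed after that, give up rather than reset the PEU out from
     * under an active instance.
     */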
    for (i = 0; i < NXGE_MAX_PORTS; i++) {
        fnxgep = hw_p->nxge_p[i];
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_niu_peu_reset: checking entry %d "
            "nxgep $%p", i, fnxgep));
#ifdef NXGE_DEBUG
        if (fnxgep) {
            NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_niu_peu_reset: entry %d (function %d) "
                "link timer id %d hw timer id %d",
                i, fnxgep->function_num,
                fnxgep->nxge_link_poll_timerid,
                fnxgep->nxge_timerid));
        }
#endif
        if (fnxgep && fnxgep != nxgep &&
            (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
            NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_niu_peu_reset: checking $%p "
                "(function %d) timer ids",
                fnxgep, fnxgep->function_num));
            for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
                NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
                    "==> nxge_niu_peu_reset: waiting"));
                NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
                if (!fnxgep->nxge_timerid &&
                    !fnxgep->nxge_link_poll_timerid) {
                    break;
                }
            }
            NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
            if (fnxgep->nxge_timerid ||
                fnxgep->nxge_link_poll_timerid) {
                MUTEX_EXIT(&hw_p->nxge_cfg_lock);
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "<== nxge_niu_peu_reset: cannot reset "
                    "hardware (devices are still in use)"));
                return;
            }
        }
    }

    if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
        hw_p->flags |= COMMON_RESET_NIU_PCI;
        rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
            NXGE_PCI_PORT_LOGIC_OFFSET);
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_niu_peu_reset: read offset 0x%x (%d) "
            "(data 0x%x)",
            NXGE_PCI_PORT_LOGIC_OFFSET,
            NXGE_PCI_PORT_LOGIC_OFFSET,
            rvalue));

        rvalue |= NXGE_PCI_RESET_ALL;
        pci_config_put32(nxgep->dev_regs->nxge_pciregh,
            NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
            rvalue));

        NXGE_DELAY(NXGE_PCI_RESET_WAIT);
    }

    MUTEX_EXIT(&hw_p->nxge_cfg_lock);
    NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
}

static void
nxge_set_pci_replay_timeout(p_nxge_t nxgep)
{
    p_dev_regs_t dev_regs;
    uint32_t value;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));

    if (!nxge_set_replay_timer) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "==> nxge_set_pci_replay_timeout: will not change "
            "the timeout"));
        return;
    }

    /*
     * Check dev_regs before it is dereferenced, including by the
     * debug trace below.
     */
    dev_regs = nxgep->dev_regs;
    if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
            "no PCI handle",
            dev_regs));
        return;
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
        dev_regs, dev_regs->nxge_pciregh));
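    /*
     * OR the tunable timeout into the replay-timer field (bits
     * 18:14). With the default nxge_replay_timeout of 0xc this adds
     * (0xc << 14) == 0x30000. Note the field is OR-ed in rather than
     * masked and replaced, so any bits already set in 18:14 stay set.
     */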
    value = (pci_config_get32(dev_regs->nxge_pciregh,
        PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
        (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
        "(timeout value to set 0x%x at offset 0x%x) value 0x%x",
        pci_config_get32(dev_regs->nxge_pciregh,
        PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
        PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));

    pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
        value);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
        pci_config_get32(dev_regs->nxge_pciregh,
        PCI_REPLAY_TIMEOUT_CFG_OFFSET)));

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not block.
 *
 * It returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
nxge_quiesce(dev_info_t *dip)
{
    int instance = ddi_get_instance(dip);
    p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);

    if (nxgep == NULL)
        return (DDI_FAILURE);

    /* Turn off debugging */
    nxge_debug_level = NO_DEBUG;
    nxgep->nxge_debug_level = NO_DEBUG;
    npi_debug_level = NO_DEBUG;

    /*
     * Stop the link monitor only when link checking is interrupt based.
     */
    if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
        (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
    }

    (void) nxge_intr_hw_disable(nxgep);

    /*
     * Reset the receive MAC side.
     */
    (void) nxge_rx_mac_disable(nxgep);

    /* Disable and soft reset the IPP */
    if (!isLDOMguest(nxgep))
        (void) nxge_ipp_disable(nxgep);

    /*
     * Reset the transmit/receive DMA side.
     */
    (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
    (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

    /*
     * Reset the transmit MAC side.
     */
    (void) nxge_tx_mac_disable(nxgep);

    return (DDI_SUCCESS);
}