1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * SunOs MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver. 28 */ 29 #include <sys/nxge/nxge_impl.h> 30 #include <sys/nxge/nxge_hio.h> 31 #include <sys/nxge/nxge_rxdma.h> 32 #include <sys/pcie.h> 33 34 uint32_t nxge_use_partition = 0; /* debug partition flag */ 35 uint32_t nxge_dma_obp_props_only = 1; /* use obp published props */ 36 uint32_t nxge_use_rdc_intr = 1; /* debug to assign rdc intr */ 37 /* 38 * PSARC/2007/453 MSI-X interrupt limit override 39 * (This PSARC case is limited to MSI-X vectors 40 * and SPARC platforms only). 41 */ 42 #if defined(_BIG_ENDIAN) 43 uint32_t nxge_msi_enable = 2; 44 #else 45 uint32_t nxge_msi_enable = 1; 46 #endif 47 48 /* 49 * Software workaround for a Neptune (PCI-E) 50 * hardware interrupt bug which the hardware 51 * may generate spurious interrupts after the 52 * device interrupt handler was removed. If this flag 53 * is enabled, the driver will reset the 54 * hardware when devices are being detached. 55 */ 56 uint32_t nxge_peu_reset_enable = 0; 57 58 /* 59 * Software workaround for the hardware 60 * checksum bugs that affect packet transmission 61 * and receive: 62 * 63 * Usage of nxge_cksum_offload: 64 * 65 * (1) nxge_cksum_offload = 0 (default): 66 * - transmits packets: 67 * TCP: uses the hardware checksum feature. 68 * UDP: driver will compute the software checksum 69 * based on the partial checksum computed 70 * by the IP layer. 71 * - receives packets 72 * TCP: marks packets checksum flags based on hardware result. 73 * UDP: will not mark checksum flags. 74 * 75 * (2) nxge_cksum_offload = 1: 76 * - transmit packets: 77 * TCP/UDP: uses the hardware checksum feature. 78 * - receives packets 79 * TCP/UDP: marks packet checksum flags based on hardware result. 80 * 81 * (3) nxge_cksum_offload = 2: 82 * - The driver will not register its checksum capability. 83 * Checksum for both TCP and UDP will be computed 84 * by the stack. 85 * - The software LSO is not allowed in this case. 86 * 87 * (4) nxge_cksum_offload > 2: 88 * - Will be treated as it is set to 2 89 * (stack will compute the checksum). 90 * 91 * (5) If the hardware bug is fixed, this workaround 92 * needs to be updated accordingly to reflect 93 * the new hardware revision. 94 */ 95 uint32_t nxge_cksum_offload = 0; 96 97 /* 98 * Globals: tunable parameters (/etc/system or adb) 99 * 100 */ 101 uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT; 102 uint32_t nxge_rbr_spare_size = 0; 103 uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT; 104 uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT; 105 boolean_t nxge_no_msg = B_TRUE; /* control message display */ 106 uint32_t nxge_no_link_notify = 0; /* control DL_NOTIFY */ 107 uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX; 108 uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN; 109 uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN; 110 uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU; 111 boolean_t nxge_jumbo_enable = B_FALSE; 112 uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT; 113 uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD; 114 nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL; 115 116 /* MAX LSO size */ 117 #define NXGE_LSO_MAXLEN 65535 118 uint32_t nxge_lso_max = NXGE_LSO_MAXLEN; 119 120 121 /* 122 * Add tunable to reduce the amount of time spent in the 123 * ISR doing Rx Processing. 124 */ 125 uint32_t nxge_max_rx_pkts = 1024; 126 127 /* 128 * Tunables to manage the receive buffer blocks. 129 * 130 * nxge_rx_threshold_hi: copy all buffers. 131 * nxge_rx_bcopy_size_type: receive buffer block size type. 132 * nxge_rx_threshold_lo: copy only up to tunable block size type. 133 */ 134 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6; 135 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0; 136 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3; 137 138 /* Use kmem_alloc() to allocate data buffers. */ 139 #if defined(_BIG_ENDIAN) 140 uint32_t nxge_use_kmem_alloc = 1; 141 #else 142 uint32_t nxge_use_kmem_alloc = 0; 143 #endif 144 145 rtrace_t npi_rtracebuf; 146 147 /* 148 * The hardware sometimes fails to allow enough time for the link partner 149 * to send an acknowledgement for packets that the hardware sent to it. The 150 * hardware resends the packets earlier than it should be in those instances. 151 * This behavior caused some switches to acknowledge the wrong packets 152 * and it triggered the fatal error. 153 * This software workaround is to set the replay timer to a value 154 * suggested by the hardware team. 155 * 156 * PCI config space replay timer register: 157 * The following replay timeout value is 0xc 158 * for bit 14:18. 159 */ 160 #define PCI_REPLAY_TIMEOUT_CFG_OFFSET 0xb8 161 #define PCI_REPLAY_TIMEOUT_SHIFT 14 162 163 uint32_t nxge_set_replay_timer = 1; 164 uint32_t nxge_replay_timeout = 0xc; 165 166 /* 167 * The transmit serialization sometimes causes 168 * longer sleep before calling the driver transmit 169 * function as it sleeps longer than it should. 170 * The performace group suggests that a time wait tunable 171 * can be used to set the maximum wait time when needed 172 * and the default is set to 1 tick. 173 */ 174 uint32_t nxge_tx_serial_maxsleep = 1; 175 176 #if defined(sun4v) 177 /* 178 * Hypervisor N2/NIU services information. 179 */ 180 static hsvc_info_t niu_hsvc = { 181 HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER, 182 NIU_MINOR_VER, "nxge" 183 }; 184 185 static int nxge_hsvc_register(p_nxge_t); 186 #endif 187 188 /* 189 * Function Prototypes 190 */ 191 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t); 192 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t); 193 static void nxge_unattach(p_nxge_t); 194 static int nxge_quiesce(dev_info_t *); 195 196 #if NXGE_PROPERTY 197 static void nxge_remove_hard_properties(p_nxge_t); 198 #endif 199 200 /* 201 * These two functions are required by nxge_hio.c 202 */ 203 extern int nxge_m_mmac_remove(void *arg, int slot); 204 extern void nxge_grp_cleanup(p_nxge_t nxge); 205 206 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t); 207 208 static nxge_status_t nxge_setup_mutexes(p_nxge_t); 209 static void nxge_destroy_mutexes(p_nxge_t); 210 211 static nxge_status_t nxge_map_regs(p_nxge_t nxgep); 212 static void nxge_unmap_regs(p_nxge_t nxgep); 213 #ifdef NXGE_DEBUG 214 static void nxge_test_map_regs(p_nxge_t nxgep); 215 #endif 216 217 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep); 218 static void nxge_remove_intrs(p_nxge_t nxgep); 219 220 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep); 221 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t); 222 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t); 223 static void nxge_intrs_enable(p_nxge_t nxgep); 224 static void nxge_intrs_disable(p_nxge_t nxgep); 225 226 static void nxge_suspend(p_nxge_t); 227 static nxge_status_t nxge_resume(p_nxge_t); 228 229 static nxge_status_t nxge_setup_dev(p_nxge_t); 230 static void nxge_destroy_dev(p_nxge_t); 231 232 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t); 233 static void nxge_free_mem_pool(p_nxge_t); 234 235 nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t); 236 static void nxge_free_rx_mem_pool(p_nxge_t); 237 238 nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t); 239 static void nxge_free_tx_mem_pool(p_nxge_t); 240 241 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t, 242 struct ddi_dma_attr *, 243 size_t, ddi_device_acc_attr_t *, uint_t, 244 p_nxge_dma_common_t); 245 246 static void nxge_dma_mem_free(p_nxge_dma_common_t); 247 static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t); 248 249 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t, 250 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 251 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 252 253 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t, 254 p_nxge_dma_common_t *, size_t); 255 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 256 257 extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t, 258 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 259 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 260 261 extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t, 262 p_nxge_dma_common_t *, 263 size_t); 264 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 265 266 static int nxge_init_common_dev(p_nxge_t); 267 static void nxge_uninit_common_dev(p_nxge_t); 268 extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *, 269 char *, caddr_t); 270 271 /* 272 * The next declarations are for the GLDv3 interface. 273 */ 274 static int nxge_m_start(void *); 275 static void nxge_m_stop(void *); 276 static int nxge_m_multicst(void *, boolean_t, const uint8_t *); 277 static int nxge_m_promisc(void *, boolean_t); 278 static void nxge_m_ioctl(void *, queue_t *, mblk_t *); 279 static nxge_status_t nxge_mac_register(p_nxge_t); 280 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr, 281 int slot, int rdctbl, boolean_t usetbl); 282 void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, 283 boolean_t factory); 284 #if defined(sun4v) 285 extern mblk_t *nxge_m_tx(void *arg, mblk_t *mp); 286 #endif 287 288 static void nxge_m_getfactaddr(void *, uint_t, uint8_t *); 289 static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *); 290 static int nxge_m_setprop(void *, const char *, mac_prop_id_t, 291 uint_t, const void *); 292 static int nxge_m_getprop(void *, const char *, mac_prop_id_t, 293 uint_t, uint_t, void *, uint_t *); 294 static int nxge_set_priv_prop(nxge_t *, const char *, uint_t, 295 const void *); 296 static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t, 297 void *, uint_t *); 298 static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *); 299 static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int, 300 mac_ring_info_t *, mac_ring_handle_t); 301 static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t, 302 mac_ring_type_t); 303 static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t, 304 mac_ring_type_t); 305 306 static void nxge_niu_peu_reset(p_nxge_t nxgep); 307 static void nxge_set_pci_replay_timeout(nxge_t *); 308 309 mac_priv_prop_t nxge_priv_props[] = { 310 {"_adv_10gfdx_cap", MAC_PROP_PERM_RW}, 311 {"_adv_pause_cap", MAC_PROP_PERM_RW}, 312 {"_function_number", MAC_PROP_PERM_READ}, 313 {"_fw_version", MAC_PROP_PERM_READ}, 314 {"_port_mode", MAC_PROP_PERM_READ}, 315 {"_hot_swap_phy", MAC_PROP_PERM_READ}, 316 {"_accept_jumbo", MAC_PROP_PERM_RW}, 317 {"_rxdma_intr_time", MAC_PROP_PERM_RW}, 318 {"_rxdma_intr_pkts", MAC_PROP_PERM_RW}, 319 {"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW}, 320 {"_class_opt_ipv4_udp", MAC_PROP_PERM_RW}, 321 {"_class_opt_ipv4_ah", MAC_PROP_PERM_RW}, 322 {"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW}, 323 {"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW}, 324 {"_class_opt_ipv6_udp", MAC_PROP_PERM_RW}, 325 {"_class_opt_ipv6_ah", MAC_PROP_PERM_RW}, 326 {"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}, 327 {"_soft_lso_enable", MAC_PROP_PERM_RW} 328 }; 329 330 #define NXGE_MAX_PRIV_PROPS \ 331 (sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t)) 332 333 #define NXGE_NEPTUNE_MAGIC 0x4E584745UL 334 #define MAX_DUMP_SZ 256 335 336 #define NXGE_M_CALLBACK_FLAGS \ 337 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP) 338 339 mac_callbacks_t nxge_m_callbacks = { 340 NXGE_M_CALLBACK_FLAGS, 341 nxge_m_stat, 342 nxge_m_start, 343 nxge_m_stop, 344 nxge_m_promisc, 345 nxge_m_multicst, 346 NULL, 347 NULL, 348 nxge_m_ioctl, 349 nxge_m_getcapab, 350 NULL, 351 NULL, 352 nxge_m_setprop, 353 nxge_m_getprop 354 }; 355 356 void 357 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *); 358 359 /* PSARC/2007/453 MSI-X interrupt limit override. */ 360 #define NXGE_MSIX_REQUEST_10G 8 361 #define NXGE_MSIX_REQUEST_1G 2 362 static int nxge_create_msi_property(p_nxge_t); 363 /* 364 * For applications that care about the 365 * latency, it was requested by PAE and the 366 * customers that the driver has tunables that 367 * allow the user to tune it to a higher number 368 * interrupts to spread the interrupts among 369 * multiple channels. The DDI framework limits 370 * the maximum number of MSI-X resources to allocate 371 * to 8 (ddi_msix_alloc_limit). If more than 8 372 * is set, ddi_msix_alloc_limit must be set accordingly. 373 * The default number of MSI interrupts are set to 374 * 8 for 10G and 2 for 1G link. 375 */ 376 #define NXGE_MSIX_MAX_ALLOWED 32 377 uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G; 378 uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G; 379 380 /* 381 * These global variables control the message 382 * output. 383 */ 384 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG; 385 uint64_t nxge_debug_level; 386 387 /* 388 * This list contains the instance structures for the Neptune 389 * devices present in the system. The lock exists to guarantee 390 * mutually exclusive access to the list. 391 */ 392 void *nxge_list = NULL; 393 394 void *nxge_hw_list = NULL; 395 nxge_os_mutex_t nxge_common_lock; 396 397 extern uint64_t npi_debug_level; 398 399 extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *); 400 extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *); 401 extern nxge_status_t nxge_ldgv_uninit(p_nxge_t); 402 extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t); 403 extern void nxge_fm_init(p_nxge_t, 404 ddi_device_acc_attr_t *, 405 ddi_device_acc_attr_t *, 406 ddi_dma_attr_t *); 407 extern void nxge_fm_fini(p_nxge_t); 408 extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t); 409 410 /* 411 * Count used to maintain the number of buffers being used 412 * by Neptune instances and loaned up to the upper layers. 413 */ 414 uint32_t nxge_mblks_pending = 0; 415 416 /* 417 * Device register access attributes for PIO. 418 */ 419 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = { 420 DDI_DEVICE_ATTR_V0, 421 DDI_STRUCTURE_LE_ACC, 422 DDI_STRICTORDER_ACC, 423 }; 424 425 /* 426 * Device descriptor access attributes for DMA. 427 */ 428 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = { 429 DDI_DEVICE_ATTR_V0, 430 DDI_STRUCTURE_LE_ACC, 431 DDI_STRICTORDER_ACC 432 }; 433 434 /* 435 * Device buffer access attributes for DMA. 436 */ 437 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = { 438 DDI_DEVICE_ATTR_V0, 439 DDI_STRUCTURE_BE_ACC, 440 DDI_STRICTORDER_ACC 441 }; 442 443 ddi_dma_attr_t nxge_desc_dma_attr = { 444 DMA_ATTR_V0, /* version number. */ 445 0, /* low address */ 446 0xffffffffffffffff, /* high address */ 447 0xffffffffffffffff, /* address counter max */ 448 #ifndef NIU_PA_WORKAROUND 449 0x100000, /* alignment */ 450 #else 451 0x2000, 452 #endif 453 0xfc00fc, /* dlim_burstsizes */ 454 0x1, /* minimum transfer size */ 455 0xffffffffffffffff, /* maximum transfer size */ 456 0xffffffffffffffff, /* maximum segment size */ 457 1, /* scatter/gather list length */ 458 (unsigned int) 1, /* granularity */ 459 0 /* attribute flags */ 460 }; 461 462 ddi_dma_attr_t nxge_tx_dma_attr = { 463 DMA_ATTR_V0, /* version number. */ 464 0, /* low address */ 465 0xffffffffffffffff, /* high address */ 466 0xffffffffffffffff, /* address counter max */ 467 #if defined(_BIG_ENDIAN) 468 0x2000, /* alignment */ 469 #else 470 0x1000, /* alignment */ 471 #endif 472 0xfc00fc, /* dlim_burstsizes */ 473 0x1, /* minimum transfer size */ 474 0xffffffffffffffff, /* maximum transfer size */ 475 0xffffffffffffffff, /* maximum segment size */ 476 5, /* scatter/gather list length */ 477 (unsigned int) 1, /* granularity */ 478 0 /* attribute flags */ 479 }; 480 481 ddi_dma_attr_t nxge_rx_dma_attr = { 482 DMA_ATTR_V0, /* version number. */ 483 0, /* low address */ 484 0xffffffffffffffff, /* high address */ 485 0xffffffffffffffff, /* address counter max */ 486 0x2000, /* alignment */ 487 0xfc00fc, /* dlim_burstsizes */ 488 0x1, /* minimum transfer size */ 489 0xffffffffffffffff, /* maximum transfer size */ 490 0xffffffffffffffff, /* maximum segment size */ 491 1, /* scatter/gather list length */ 492 (unsigned int) 1, /* granularity */ 493 DDI_DMA_RELAXED_ORDERING /* attribute flags */ 494 }; 495 496 ddi_dma_lim_t nxge_dma_limits = { 497 (uint_t)0, /* dlim_addr_lo */ 498 (uint_t)0xffffffff, /* dlim_addr_hi */ 499 (uint_t)0xffffffff, /* dlim_cntr_max */ 500 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */ 501 0x1, /* dlim_minxfer */ 502 1024 /* dlim_speed */ 503 }; 504 505 dma_method_t nxge_force_dma = DVMA; 506 507 /* 508 * dma chunk sizes. 509 * 510 * Try to allocate the largest possible size 511 * so that fewer number of dma chunks would be managed 512 */ 513 #ifdef NIU_PA_WORKAROUND 514 size_t alloc_sizes [] = {0x2000}; 515 #else 516 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000, 517 0x10000, 0x20000, 0x40000, 0x80000, 518 0x100000, 0x200000, 0x400000, 0x800000, 519 0x1000000, 0x2000000, 0x4000000}; 520 #endif 521 522 /* 523 * Translate "dev_t" to a pointer to the associated "dev_info_t". 524 */ 525 526 extern void nxge_get_environs(nxge_t *); 527 528 static int 529 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 530 { 531 p_nxge_t nxgep = NULL; 532 int instance; 533 int status = DDI_SUCCESS; 534 uint8_t portn; 535 nxge_mmac_t *mmac_info; 536 p_nxge_param_t param_arr; 537 538 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach")); 539 540 /* 541 * Get the device instance since we'll need to setup 542 * or retrieve a soft state for this instance. 543 */ 544 instance = ddi_get_instance(dip); 545 546 switch (cmd) { 547 case DDI_ATTACH: 548 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH")); 549 break; 550 551 case DDI_RESUME: 552 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME")); 553 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 554 if (nxgep == NULL) { 555 status = DDI_FAILURE; 556 break; 557 } 558 if (nxgep->dip != dip) { 559 status = DDI_FAILURE; 560 break; 561 } 562 if (nxgep->suspended == DDI_PM_SUSPEND) { 563 status = ddi_dev_is_needed(nxgep->dip, 0, 1); 564 } else { 565 status = nxge_resume(nxgep); 566 } 567 goto nxge_attach_exit; 568 569 case DDI_PM_RESUME: 570 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME")); 571 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 572 if (nxgep == NULL) { 573 status = DDI_FAILURE; 574 break; 575 } 576 if (nxgep->dip != dip) { 577 status = DDI_FAILURE; 578 break; 579 } 580 status = nxge_resume(nxgep); 581 goto nxge_attach_exit; 582 583 default: 584 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown")); 585 status = DDI_FAILURE; 586 goto nxge_attach_exit; 587 } 588 589 590 if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) { 591 status = DDI_FAILURE; 592 goto nxge_attach_exit; 593 } 594 595 nxgep = ddi_get_soft_state(nxge_list, instance); 596 if (nxgep == NULL) { 597 status = NXGE_ERROR; 598 goto nxge_attach_fail2; 599 } 600 601 nxgep->nxge_magic = NXGE_MAGIC; 602 603 nxgep->drv_state = 0; 604 nxgep->dip = dip; 605 nxgep->instance = instance; 606 nxgep->p_dip = ddi_get_parent(dip); 607 nxgep->nxge_debug_level = nxge_debug_level; 608 npi_debug_level = nxge_debug_level; 609 610 /* Are we a guest running in a Hybrid I/O environment? */ 611 nxge_get_environs(nxgep); 612 613 status = nxge_map_regs(nxgep); 614 615 if (status != NXGE_OK) { 616 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed")); 617 goto nxge_attach_fail3; 618 } 619 620 nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, 621 &nxge_dev_desc_dma_acc_attr, 622 &nxge_rx_dma_attr); 623 624 /* Create & initialize the per-Neptune data structure */ 625 /* (even if we're a guest). */ 626 status = nxge_init_common_dev(nxgep); 627 if (status != NXGE_OK) { 628 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 629 "nxge_init_common_dev failed")); 630 goto nxge_attach_fail4; 631 } 632 633 /* 634 * Software workaround: set the replay timer. 635 */ 636 if (nxgep->niu_type != N2_NIU) { 637 nxge_set_pci_replay_timeout(nxgep); 638 } 639 #if defined(sun4v) 640 if (isLDOMguest(nxgep)) { 641 nxge_m_callbacks.mc_tx = nxge_m_tx; 642 } 643 #endif 644 645 #if defined(sun4v) 646 /* This is required by nxge_hio_init(), which follows. */ 647 if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS) 648 goto nxge_attach_fail4; 649 #endif 650 651 if ((status = nxge_hio_init(nxgep)) != NXGE_OK) { 652 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 653 "nxge_hio_init failed")); 654 goto nxge_attach_fail4; 655 } 656 657 if (nxgep->niu_type == NEPTUNE_2_10GF) { 658 if (nxgep->function_num > 1) { 659 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported" 660 " function %d. Only functions 0 and 1 are " 661 "supported for this card.", nxgep->function_num)); 662 status = NXGE_ERROR; 663 goto nxge_attach_fail4; 664 } 665 } 666 667 if (isLDOMguest(nxgep)) { 668 /* 669 * Use the function number here. 670 */ 671 nxgep->mac.portnum = nxgep->function_num; 672 nxgep->mac.porttype = PORT_TYPE_LOGICAL; 673 674 /* XXX We'll set the MAC address counts to 1 for now. */ 675 mmac_info = &nxgep->nxge_mmac_info; 676 mmac_info->num_mmac = 1; 677 mmac_info->naddrfree = 1; 678 } else { 679 portn = NXGE_GET_PORT_NUM(nxgep->function_num); 680 nxgep->mac.portnum = portn; 681 if ((portn == 0) || (portn == 1)) 682 nxgep->mac.porttype = PORT_TYPE_XMAC; 683 else 684 nxgep->mac.porttype = PORT_TYPE_BMAC; 685 /* 686 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC) 687 * internally, the rest 2 ports use BMAC (1G "Big" MAC). 688 * The two types of MACs have different characterizations. 689 */ 690 mmac_info = &nxgep->nxge_mmac_info; 691 if (nxgep->function_num < 2) { 692 mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY; 693 mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY; 694 } else { 695 mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY; 696 mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY; 697 } 698 } 699 /* 700 * Setup the Ndd parameters for the this instance. 701 */ 702 nxge_init_param(nxgep); 703 704 /* 705 * Setup Register Tracing Buffer. 706 */ 707 npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf); 708 709 /* init stats ptr */ 710 nxge_init_statsp(nxgep); 711 712 /* 713 * Copy the vpd info from eeprom to a local data 714 * structure, and then check its validity. 715 */ 716 if (!isLDOMguest(nxgep)) { 717 int *regp; 718 uint_t reglen; 719 int rv; 720 721 nxge_vpd_info_get(nxgep); 722 723 /* Find the NIU config handle. */ 724 rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 725 ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS, 726 "reg", ®p, ®len); 727 728 if (rv != DDI_PROP_SUCCESS) { 729 goto nxge_attach_fail5; 730 } 731 /* 732 * The address_hi, that is the first int, in the reg 733 * property consists of config handle, but need to remove 734 * the bits 28-31 which are OBP specific info. 735 */ 736 nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF; 737 ddi_prop_free(regp); 738 } 739 740 if (isLDOMguest(nxgep)) { 741 uchar_t *prop_val; 742 uint_t prop_len; 743 uint32_t max_frame_size; 744 745 extern void nxge_get_logical_props(p_nxge_t); 746 747 nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR; 748 nxgep->mac.portmode = PORT_LOGICAL; 749 (void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip, 750 "phy-type", "virtual transceiver"); 751 752 nxgep->nports = 1; 753 nxgep->board_ver = 0; /* XXX What? */ 754 755 /* 756 * local-mac-address property gives us info on which 757 * specific MAC address the Hybrid resource is associated 758 * with. 759 */ 760 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0, 761 "local-mac-address", &prop_val, 762 &prop_len) != DDI_PROP_SUCCESS) { 763 goto nxge_attach_fail5; 764 } 765 if (prop_len != ETHERADDRL) { 766 ddi_prop_free(prop_val); 767 goto nxge_attach_fail5; 768 } 769 ether_copy(prop_val, nxgep->hio_mac_addr); 770 ddi_prop_free(prop_val); 771 nxge_get_logical_props(nxgep); 772 773 /* 774 * Enable Jumbo property based on the "max-frame-size" 775 * property value. 776 */ 777 max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY, 778 nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 779 "max-frame-size", NXGE_MTU_DEFAULT_MAX); 780 if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) && 781 (max_frame_size <= TX_JUMBO_MTU)) { 782 param_arr = nxgep->param_arr; 783 784 param_arr[param_accept_jumbo].value = 1; 785 nxgep->mac.is_jumbo = B_TRUE; 786 nxgep->mac.maxframesize = (uint16_t)max_frame_size; 787 nxgep->mac.default_mtu = nxgep->mac.maxframesize - 788 NXGE_EHEADER_VLAN_CRC; 789 } 790 } else { 791 status = nxge_xcvr_find(nxgep); 792 793 if (status != NXGE_OK) { 794 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: " 795 " Couldn't determine card type" 796 " .... exit ")); 797 goto nxge_attach_fail5; 798 } 799 800 status = nxge_get_config_properties(nxgep); 801 802 if (status != NXGE_OK) { 803 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 804 "get_hw create failed")); 805 goto nxge_attach_fail; 806 } 807 } 808 809 /* 810 * Setup the Kstats for the driver. 811 */ 812 nxge_setup_kstats(nxgep); 813 814 if (!isLDOMguest(nxgep)) 815 nxge_setup_param(nxgep); 816 817 status = nxge_setup_system_dma_pages(nxgep); 818 if (status != NXGE_OK) { 819 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed")); 820 goto nxge_attach_fail; 821 } 822 823 nxge_hw_id_init(nxgep); 824 825 if (!isLDOMguest(nxgep)) 826 nxge_hw_init_niu_common(nxgep); 827 828 status = nxge_setup_mutexes(nxgep); 829 if (status != NXGE_OK) { 830 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed")); 831 goto nxge_attach_fail; 832 } 833 834 #if defined(sun4v) 835 if (isLDOMguest(nxgep)) { 836 /* Find our VR & channel sets. */ 837 status = nxge_hio_vr_add(nxgep); 838 if (status != NXGE_OK) { 839 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 840 "nxge_hio_vr_add failed")); 841 (void) hsvc_unregister(&nxgep->niu_hsvc); 842 nxgep->niu_hsvc_available = B_FALSE; 843 } 844 goto nxge_attach_exit; 845 } 846 #endif 847 848 status = nxge_setup_dev(nxgep); 849 if (status != DDI_SUCCESS) { 850 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed")); 851 goto nxge_attach_fail; 852 } 853 854 status = nxge_add_intrs(nxgep); 855 if (status != DDI_SUCCESS) { 856 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed")); 857 goto nxge_attach_fail; 858 } 859 860 /* If a guest, register with vio_net instead. */ 861 if ((status = nxge_mac_register(nxgep)) != NXGE_OK) { 862 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 863 "unable to register to mac layer (%d)", status)); 864 goto nxge_attach_fail; 865 } 866 867 mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN); 868 869 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 870 "registered to mac (instance %d)", instance)); 871 872 /* nxge_link_monitor calls xcvr.check_link recursively */ 873 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 874 875 goto nxge_attach_exit; 876 877 nxge_attach_fail: 878 nxge_unattach(nxgep); 879 goto nxge_attach_fail1; 880 881 nxge_attach_fail5: 882 /* 883 * Tear down the ndd parameters setup. 884 */ 885 nxge_destroy_param(nxgep); 886 887 /* 888 * Tear down the kstat setup. 889 */ 890 nxge_destroy_kstats(nxgep); 891 892 nxge_attach_fail4: 893 if (nxgep->nxge_hw_p) { 894 nxge_uninit_common_dev(nxgep); 895 nxgep->nxge_hw_p = NULL; 896 } 897 898 nxge_attach_fail3: 899 /* 900 * Unmap the register setup. 901 */ 902 nxge_unmap_regs(nxgep); 903 904 nxge_fm_fini(nxgep); 905 906 nxge_attach_fail2: 907 ddi_soft_state_free(nxge_list, nxgep->instance); 908 909 nxge_attach_fail1: 910 if (status != NXGE_OK) 911 status = (NXGE_ERROR | NXGE_DDI_FAILED); 912 nxgep = NULL; 913 914 nxge_attach_exit: 915 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x", 916 status)); 917 918 return (status); 919 } 920 921 static int 922 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 923 { 924 int status = DDI_SUCCESS; 925 int instance; 926 p_nxge_t nxgep = NULL; 927 928 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach")); 929 instance = ddi_get_instance(dip); 930 nxgep = ddi_get_soft_state(nxge_list, instance); 931 if (nxgep == NULL) { 932 status = DDI_FAILURE; 933 goto nxge_detach_exit; 934 } 935 936 switch (cmd) { 937 case DDI_DETACH: 938 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH")); 939 break; 940 941 case DDI_PM_SUSPEND: 942 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND")); 943 nxgep->suspended = DDI_PM_SUSPEND; 944 nxge_suspend(nxgep); 945 break; 946 947 case DDI_SUSPEND: 948 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND")); 949 if (nxgep->suspended != DDI_PM_SUSPEND) { 950 nxgep->suspended = DDI_SUSPEND; 951 nxge_suspend(nxgep); 952 } 953 break; 954 955 default: 956 status = DDI_FAILURE; 957 } 958 959 if (cmd != DDI_DETACH) 960 goto nxge_detach_exit; 961 962 /* 963 * Stop the xcvr polling. 964 */ 965 nxgep->suspended = cmd; 966 967 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 968 969 if (isLDOMguest(nxgep)) { 970 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) 971 nxge_m_stop((void *)nxgep); 972 nxge_hio_unregister(nxgep); 973 } else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) { 974 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 975 "<== nxge_detach status = 0x%08X", status)); 976 return (DDI_FAILURE); 977 } 978 979 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 980 "<== nxge_detach (mac_unregister) status = 0x%08X", status)); 981 982 nxge_unattach(nxgep); 983 nxgep = NULL; 984 985 nxge_detach_exit: 986 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X", 987 status)); 988 989 return (status); 990 } 991 992 static void 993 nxge_unattach(p_nxge_t nxgep) 994 { 995 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach")); 996 997 if (nxgep == NULL || nxgep->dev_regs == NULL) { 998 return; 999 } 1000 1001 nxgep->nxge_magic = 0; 1002 1003 if (nxgep->nxge_timerid) { 1004 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 1005 nxgep->nxge_timerid = 0; 1006 } 1007 1008 /* 1009 * If this flag is set, it will affect the Neptune 1010 * only. 1011 */ 1012 if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) { 1013 nxge_niu_peu_reset(nxgep); 1014 } 1015 1016 #if defined(sun4v) 1017 if (isLDOMguest(nxgep)) { 1018 (void) nxge_hio_vr_release(nxgep); 1019 } 1020 #endif 1021 1022 if (nxgep->nxge_hw_p) { 1023 nxge_uninit_common_dev(nxgep); 1024 nxgep->nxge_hw_p = NULL; 1025 } 1026 1027 #if defined(sun4v) 1028 if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) { 1029 (void) hsvc_unregister(&nxgep->niu_hsvc); 1030 nxgep->niu_hsvc_available = B_FALSE; 1031 } 1032 #endif 1033 /* 1034 * Stop any further interrupts. 1035 */ 1036 nxge_remove_intrs(nxgep); 1037 1038 /* 1039 * Stop the device and free resources. 1040 */ 1041 if (!isLDOMguest(nxgep)) { 1042 nxge_destroy_dev(nxgep); 1043 } 1044 1045 /* 1046 * Tear down the ndd parameters setup. 1047 */ 1048 nxge_destroy_param(nxgep); 1049 1050 /* 1051 * Tear down the kstat setup. 1052 */ 1053 nxge_destroy_kstats(nxgep); 1054 1055 /* 1056 * Destroy all mutexes. 1057 */ 1058 nxge_destroy_mutexes(nxgep); 1059 1060 /* 1061 * Remove the list of ndd parameters which 1062 * were setup during attach. 1063 */ 1064 if (nxgep->dip) { 1065 NXGE_DEBUG_MSG((nxgep, OBP_CTL, 1066 " nxge_unattach: remove all properties")); 1067 1068 (void) ddi_prop_remove_all(nxgep->dip); 1069 } 1070 1071 #if NXGE_PROPERTY 1072 nxge_remove_hard_properties(nxgep); 1073 #endif 1074 1075 /* 1076 * Unmap the register setup. 1077 */ 1078 nxge_unmap_regs(nxgep); 1079 1080 nxge_fm_fini(nxgep); 1081 1082 ddi_soft_state_free(nxge_list, nxgep->instance); 1083 1084 NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach")); 1085 } 1086 1087 #if defined(sun4v) 1088 int 1089 nxge_hsvc_register(nxge_t *nxgep) 1090 { 1091 nxge_status_t status; 1092 1093 if (nxgep->niu_type == N2_NIU) { 1094 nxgep->niu_hsvc_available = B_FALSE; 1095 bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t)); 1096 if ((status = hsvc_register(&nxgep->niu_hsvc, 1097 &nxgep->niu_min_ver)) != 0) { 1098 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1099 "nxge_attach: %s: cannot negotiate " 1100 "hypervisor services revision %d group: 0x%lx " 1101 "major: 0x%lx minor: 0x%lx errno: %d", 1102 niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev, 1103 niu_hsvc.hsvc_group, niu_hsvc.hsvc_major, 1104 niu_hsvc.hsvc_minor, status)); 1105 return (DDI_FAILURE); 1106 } 1107 nxgep->niu_hsvc_available = B_TRUE; 1108 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1109 "NIU Hypervisor service enabled")); 1110 } 1111 1112 return (DDI_SUCCESS); 1113 } 1114 #endif 1115 1116 static char n2_siu_name[] = "niu"; 1117 1118 static nxge_status_t 1119 nxge_map_regs(p_nxge_t nxgep) 1120 { 1121 int ddi_status = DDI_SUCCESS; 1122 p_dev_regs_t dev_regs; 1123 char buf[MAXPATHLEN + 1]; 1124 char *devname; 1125 #ifdef NXGE_DEBUG 1126 char *sysname; 1127 #endif 1128 off_t regsize; 1129 nxge_status_t status = NXGE_OK; 1130 #if !defined(_BIG_ENDIAN) 1131 off_t pci_offset; 1132 uint16_t pcie_devctl; 1133 #endif 1134 1135 if (isLDOMguest(nxgep)) { 1136 return (nxge_guest_regs_map(nxgep)); 1137 } 1138 1139 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs")); 1140 nxgep->dev_regs = NULL; 1141 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP); 1142 dev_regs->nxge_regh = NULL; 1143 dev_regs->nxge_pciregh = NULL; 1144 dev_regs->nxge_msix_regh = NULL; 1145 dev_regs->nxge_vir_regh = NULL; 1146 dev_regs->nxge_vir2_regh = NULL; 1147 nxgep->niu_type = NIU_TYPE_NONE; 1148 1149 devname = ddi_pathname(nxgep->dip, buf); 1150 ASSERT(strlen(devname) > 0); 1151 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1152 "nxge_map_regs: pathname devname %s", devname)); 1153 1154 /* 1155 * The driver is running on a N2-NIU system if devname is something 1156 * like "/niu@80/network@0" 1157 */ 1158 if (strstr(devname, n2_siu_name)) { 1159 /* N2/NIU */ 1160 nxgep->niu_type = N2_NIU; 1161 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1162 "nxge_map_regs: N2/NIU devname %s", devname)); 1163 /* get function number */ 1164 nxgep->function_num = 1165 (devname[strlen(devname) -1] == '1' ? 1 : 0); 1166 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1167 "nxge_map_regs: N2/NIU function number %d", 1168 nxgep->function_num)); 1169 } else { 1170 int *prop_val; 1171 uint_t prop_len; 1172 uint8_t func_num; 1173 1174 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 1175 0, "reg", 1176 &prop_val, &prop_len) != DDI_PROP_SUCCESS) { 1177 NXGE_DEBUG_MSG((nxgep, VPD_CTL, 1178 "Reg property not found")); 1179 ddi_status = DDI_FAILURE; 1180 goto nxge_map_regs_fail0; 1181 1182 } else { 1183 func_num = (prop_val[0] >> 8) & 0x7; 1184 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1185 "Reg property found: fun # %d", 1186 func_num)); 1187 nxgep->function_num = func_num; 1188 if (isLDOMguest(nxgep)) { 1189 nxgep->function_num /= 2; 1190 return (NXGE_OK); 1191 } 1192 ddi_prop_free(prop_val); 1193 } 1194 } 1195 1196 switch (nxgep->niu_type) { 1197 default: 1198 (void) ddi_dev_regsize(nxgep->dip, 0, ®size); 1199 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1200 "nxge_map_regs: pci config size 0x%x", regsize)); 1201 1202 ddi_status = ddi_regs_map_setup(nxgep->dip, 0, 1203 (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0, 1204 &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh); 1205 if (ddi_status != DDI_SUCCESS) { 1206 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1207 "ddi_map_regs, nxge bus config regs failed")); 1208 goto nxge_map_regs_fail0; 1209 } 1210 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1211 "nxge_map_reg: PCI config addr 0x%0llx " 1212 " handle 0x%0llx", dev_regs->nxge_pciregp, 1213 dev_regs->nxge_pciregh)); 1214 /* 1215 * IMP IMP 1216 * workaround for bit swapping bug in HW 1217 * which ends up in no-snoop = yes 1218 * resulting, in DMA not synched properly 1219 */ 1220 #if !defined(_BIG_ENDIAN) 1221 /* workarounds for x86 systems */ 1222 pci_offset = 0x80 + PCIE_DEVCTL; 1223 pcie_devctl = 0x0; 1224 pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP; 1225 pcie_devctl |= PCIE_DEVCTL_RO_EN; 1226 pci_config_put16(dev_regs->nxge_pciregh, pci_offset, 1227 pcie_devctl); 1228 #endif 1229 1230 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 1231 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1232 "nxge_map_regs: pio size 0x%x", regsize)); 1233 /* set up the device mapped register */ 1234 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1235 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1236 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1237 if (ddi_status != DDI_SUCCESS) { 1238 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1239 "ddi_map_regs for Neptune global reg failed")); 1240 goto nxge_map_regs_fail1; 1241 } 1242 1243 /* set up the msi/msi-x mapped register */ 1244 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1245 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1246 "nxge_map_regs: msix size 0x%x", regsize)); 1247 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1248 (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0, 1249 &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh); 1250 if (ddi_status != DDI_SUCCESS) { 1251 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1252 "ddi_map_regs for msi reg failed")); 1253 goto nxge_map_regs_fail2; 1254 } 1255 1256 /* set up the vio region mapped register */ 1257 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1258 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1259 "nxge_map_regs: vio size 0x%x", regsize)); 1260 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1261 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1262 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1263 1264 if (ddi_status != DDI_SUCCESS) { 1265 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1266 "ddi_map_regs for nxge vio reg failed")); 1267 goto nxge_map_regs_fail3; 1268 } 1269 nxgep->dev_regs = dev_regs; 1270 1271 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh); 1272 NPI_PCI_ADD_HANDLE_SET(nxgep, 1273 (npi_reg_ptr_t)dev_regs->nxge_pciregp); 1274 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh); 1275 NPI_MSI_ADD_HANDLE_SET(nxgep, 1276 (npi_reg_ptr_t)dev_regs->nxge_msix_regp); 1277 1278 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1279 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1280 1281 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1282 NPI_REG_ADD_HANDLE_SET(nxgep, 1283 (npi_reg_ptr_t)dev_regs->nxge_regp); 1284 1285 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1286 NPI_VREG_ADD_HANDLE_SET(nxgep, 1287 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1288 1289 break; 1290 1291 case N2_NIU: 1292 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU")); 1293 /* 1294 * Set up the device mapped register (FWARC 2006/556) 1295 * (changed back to 1: reg starts at 1!) 1296 */ 1297 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 1298 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1299 "nxge_map_regs: dev size 0x%x", regsize)); 1300 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1301 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1302 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1303 1304 if (ddi_status != DDI_SUCCESS) { 1305 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1306 "ddi_map_regs for N2/NIU, global reg failed ")); 1307 goto nxge_map_regs_fail1; 1308 } 1309 1310 /* set up the first vio region mapped register */ 1311 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1312 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1313 "nxge_map_regs: vio (1) size 0x%x", regsize)); 1314 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1315 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1316 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1317 1318 if (ddi_status != DDI_SUCCESS) { 1319 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1320 "ddi_map_regs for nxge vio reg failed")); 1321 goto nxge_map_regs_fail2; 1322 } 1323 /* set up the second vio region mapped register */ 1324 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1325 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1326 "nxge_map_regs: vio (3) size 0x%x", regsize)); 1327 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1328 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0, 1329 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh); 1330 1331 if (ddi_status != DDI_SUCCESS) { 1332 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1333 "ddi_map_regs for nxge vio2 reg failed")); 1334 goto nxge_map_regs_fail3; 1335 } 1336 nxgep->dev_regs = dev_regs; 1337 1338 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1339 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1340 1341 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1342 NPI_REG_ADD_HANDLE_SET(nxgep, 1343 (npi_reg_ptr_t)dev_regs->nxge_regp); 1344 1345 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1346 NPI_VREG_ADD_HANDLE_SET(nxgep, 1347 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1348 1349 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh); 1350 NPI_V2REG_ADD_HANDLE_SET(nxgep, 1351 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp); 1352 1353 break; 1354 } 1355 1356 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx " 1357 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh)); 1358 1359 goto nxge_map_regs_exit; 1360 nxge_map_regs_fail3: 1361 if (dev_regs->nxge_msix_regh) { 1362 ddi_regs_map_free(&dev_regs->nxge_msix_regh); 1363 } 1364 if (dev_regs->nxge_vir_regh) { 1365 ddi_regs_map_free(&dev_regs->nxge_regh); 1366 } 1367 nxge_map_regs_fail2: 1368 if (dev_regs->nxge_regh) { 1369 ddi_regs_map_free(&dev_regs->nxge_regh); 1370 } 1371 nxge_map_regs_fail1: 1372 if (dev_regs->nxge_pciregh) { 1373 ddi_regs_map_free(&dev_regs->nxge_pciregh); 1374 } 1375 nxge_map_regs_fail0: 1376 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory")); 1377 kmem_free(dev_regs, sizeof (dev_regs_t)); 1378 1379 nxge_map_regs_exit: 1380 if (ddi_status != DDI_SUCCESS) 1381 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1382 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs")); 1383 return (status); 1384 } 1385 1386 static void 1387 nxge_unmap_regs(p_nxge_t nxgep) 1388 { 1389 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs")); 1390 1391 if (isLDOMguest(nxgep)) { 1392 nxge_guest_regs_map_free(nxgep); 1393 return; 1394 } 1395 1396 if (nxgep->dev_regs) { 1397 if (nxgep->dev_regs->nxge_pciregh) { 1398 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1399 "==> nxge_unmap_regs: bus")); 1400 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh); 1401 nxgep->dev_regs->nxge_pciregh = NULL; 1402 } 1403 if (nxgep->dev_regs->nxge_regh) { 1404 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1405 "==> nxge_unmap_regs: device registers")); 1406 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1407 nxgep->dev_regs->nxge_regh = NULL; 1408 } 1409 if (nxgep->dev_regs->nxge_msix_regh) { 1410 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1411 "==> nxge_unmap_regs: device interrupts")); 1412 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); 1413 nxgep->dev_regs->nxge_msix_regh = NULL; 1414 } 1415 if (nxgep->dev_regs->nxge_vir_regh) { 1416 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1417 "==> nxge_unmap_regs: vio region")); 1418 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); 1419 nxgep->dev_regs->nxge_vir_regh = NULL; 1420 } 1421 if (nxgep->dev_regs->nxge_vir2_regh) { 1422 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1423 "==> nxge_unmap_regs: vio2 region")); 1424 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); 1425 nxgep->dev_regs->nxge_vir2_regh = NULL; 1426 } 1427 1428 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); 1429 nxgep->dev_regs = NULL; 1430 } 1431 1432 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); 1433 } 1434 1435 static nxge_status_t 1436 nxge_setup_mutexes(p_nxge_t nxgep) 1437 { 1438 int ddi_status = DDI_SUCCESS; 1439 nxge_status_t status = NXGE_OK; 1440 nxge_classify_t *classify_ptr; 1441 int partition; 1442 1443 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); 1444 1445 /* 1446 * Get the interrupt cookie so the mutexes can be 1447 * Initialized. 1448 */ 1449 if (isLDOMguest(nxgep)) { 1450 nxgep->interrupt_cookie = 0; 1451 } else { 1452 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, 1453 &nxgep->interrupt_cookie); 1454 1455 if (ddi_status != DDI_SUCCESS) { 1456 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1457 "<== nxge_setup_mutexes: failed 0x%x", 1458 ddi_status)); 1459 goto nxge_setup_mutexes_exit; 1460 } 1461 } 1462 1463 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL); 1464 MUTEX_INIT(&nxgep->poll_lock, NULL, 1465 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1466 1467 /* 1468 * Initialize mutexes for this device. 1469 */ 1470 MUTEX_INIT(nxgep->genlock, NULL, 1471 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1472 MUTEX_INIT(&nxgep->ouraddr_lock, NULL, 1473 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1474 MUTEX_INIT(&nxgep->mif_lock, NULL, 1475 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1476 MUTEX_INIT(&nxgep->group_lock, NULL, 1477 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1478 RW_INIT(&nxgep->filter_lock, NULL, 1479 RW_DRIVER, (void *)nxgep->interrupt_cookie); 1480 1481 classify_ptr = &nxgep->classifier; 1482 /* 1483 * FFLP Mutexes are never used in interrupt context 1484 * as fflp operation can take very long time to 1485 * complete and hence not suitable to invoke from interrupt 1486 * handlers. 1487 */ 1488 MUTEX_INIT(&classify_ptr->tcam_lock, NULL, 1489 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1490 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1491 MUTEX_INIT(&classify_ptr->fcram_lock, NULL, 1492 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1493 for (partition = 0; partition < MAX_PARTITION; partition++) { 1494 MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL, 1495 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1496 } 1497 } 1498 1499 nxge_setup_mutexes_exit: 1500 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1501 "<== nxge_setup_mutexes status = %x", status)); 1502 1503 if (ddi_status != DDI_SUCCESS) 1504 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1505 1506 return (status); 1507 } 1508 1509 static void 1510 nxge_destroy_mutexes(p_nxge_t nxgep) 1511 { 1512 int partition; 1513 nxge_classify_t *classify_ptr; 1514 1515 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes")); 1516 RW_DESTROY(&nxgep->filter_lock); 1517 MUTEX_DESTROY(&nxgep->group_lock); 1518 MUTEX_DESTROY(&nxgep->mif_lock); 1519 MUTEX_DESTROY(&nxgep->ouraddr_lock); 1520 MUTEX_DESTROY(nxgep->genlock); 1521 1522 classify_ptr = &nxgep->classifier; 1523 MUTEX_DESTROY(&classify_ptr->tcam_lock); 1524 1525 /* Destroy all polling resources. */ 1526 MUTEX_DESTROY(&nxgep->poll_lock); 1527 cv_destroy(&nxgep->poll_cv); 1528 1529 /* free data structures, based on HW type */ 1530 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1531 MUTEX_DESTROY(&classify_ptr->fcram_lock); 1532 for (partition = 0; partition < MAX_PARTITION; partition++) { 1533 MUTEX_DESTROY(&classify_ptr->hash_lock[partition]); 1534 } 1535 } 1536 1537 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes")); 1538 } 1539 1540 nxge_status_t 1541 nxge_init(p_nxge_t nxgep) 1542 { 1543 nxge_status_t status = NXGE_OK; 1544 1545 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init")); 1546 1547 if (nxgep->drv_state & STATE_HW_INITIALIZED) { 1548 return (status); 1549 } 1550 1551 /* 1552 * Allocate system memory for the receive/transmit buffer blocks 1553 * and receive/transmit descriptor rings. 1554 */ 1555 status = nxge_alloc_mem_pool(nxgep); 1556 if (status != NXGE_OK) { 1557 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n")); 1558 goto nxge_init_fail1; 1559 } 1560 1561 if (!isLDOMguest(nxgep)) { 1562 /* 1563 * Initialize and enable the TXC registers. 1564 * (Globally enable the Tx controller, 1565 * enable the port, configure the dma channel bitmap, 1566 * configure the max burst size). 1567 */ 1568 status = nxge_txc_init(nxgep); 1569 if (status != NXGE_OK) { 1570 NXGE_ERROR_MSG((nxgep, 1571 NXGE_ERR_CTL, "init txc failed\n")); 1572 goto nxge_init_fail2; 1573 } 1574 } 1575 1576 /* 1577 * Initialize and enable TXDMA channels. 1578 */ 1579 status = nxge_init_txdma_channels(nxgep); 1580 if (status != NXGE_OK) { 1581 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n")); 1582 goto nxge_init_fail3; 1583 } 1584 1585 /* 1586 * Initialize and enable RXDMA channels. 1587 */ 1588 status = nxge_init_rxdma_channels(nxgep); 1589 if (status != NXGE_OK) { 1590 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n")); 1591 goto nxge_init_fail4; 1592 } 1593 1594 /* 1595 * The guest domain is now done. 1596 */ 1597 if (isLDOMguest(nxgep)) { 1598 nxgep->drv_state |= STATE_HW_INITIALIZED; 1599 goto nxge_init_exit; 1600 } 1601 1602 /* 1603 * Initialize TCAM and FCRAM (Neptune). 1604 */ 1605 status = nxge_classify_init(nxgep); 1606 if (status != NXGE_OK) { 1607 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n")); 1608 goto nxge_init_fail5; 1609 } 1610 1611 /* 1612 * Initialize ZCP 1613 */ 1614 status = nxge_zcp_init(nxgep); 1615 if (status != NXGE_OK) { 1616 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n")); 1617 goto nxge_init_fail5; 1618 } 1619 1620 /* 1621 * Initialize IPP. 1622 */ 1623 status = nxge_ipp_init(nxgep); 1624 if (status != NXGE_OK) { 1625 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n")); 1626 goto nxge_init_fail5; 1627 } 1628 1629 /* 1630 * Initialize the MAC block. 1631 */ 1632 status = nxge_mac_init(nxgep); 1633 if (status != NXGE_OK) { 1634 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n")); 1635 goto nxge_init_fail5; 1636 } 1637 1638 nxge_intrs_enable(nxgep); /* XXX What changes do I need to make here? */ 1639 1640 /* 1641 * Enable hardware interrupts. 1642 */ 1643 nxge_intr_hw_enable(nxgep); 1644 nxgep->drv_state |= STATE_HW_INITIALIZED; 1645 1646 goto nxge_init_exit; 1647 1648 nxge_init_fail5: 1649 nxge_uninit_rxdma_channels(nxgep); 1650 nxge_init_fail4: 1651 nxge_uninit_txdma_channels(nxgep); 1652 nxge_init_fail3: 1653 if (!isLDOMguest(nxgep)) { 1654 (void) nxge_txc_uninit(nxgep); 1655 } 1656 nxge_init_fail2: 1657 nxge_free_mem_pool(nxgep); 1658 nxge_init_fail1: 1659 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1660 "<== nxge_init status (failed) = 0x%08x", status)); 1661 return (status); 1662 1663 nxge_init_exit: 1664 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x", 1665 status)); 1666 return (status); 1667 } 1668 1669 1670 timeout_id_t 1671 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec) 1672 { 1673 if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) { 1674 return (timeout(func, (caddr_t)nxgep, 1675 drv_usectohz(1000 * msec))); 1676 } 1677 return (NULL); 1678 } 1679 1680 /*ARGSUSED*/ 1681 void 1682 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid) 1683 { 1684 if (timerid) { 1685 (void) untimeout(timerid); 1686 } 1687 } 1688 1689 void 1690 nxge_uninit(p_nxge_t nxgep) 1691 { 1692 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit")); 1693 1694 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1695 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1696 "==> nxge_uninit: not initialized")); 1697 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1698 "<== nxge_uninit")); 1699 return; 1700 } 1701 1702 /* stop timer */ 1703 if (nxgep->nxge_timerid) { 1704 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 1705 nxgep->nxge_timerid = 0; 1706 } 1707 1708 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1709 (void) nxge_intr_hw_disable(nxgep); 1710 1711 /* 1712 * Reset the receive MAC side. 1713 */ 1714 (void) nxge_rx_mac_disable(nxgep); 1715 1716 /* Disable and soft reset the IPP */ 1717 if (!isLDOMguest(nxgep)) 1718 (void) nxge_ipp_disable(nxgep); 1719 1720 /* Free classification resources */ 1721 (void) nxge_classify_uninit(nxgep); 1722 1723 /* 1724 * Reset the transmit/receive DMA side. 1725 */ 1726 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 1727 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1728 1729 nxge_uninit_txdma_channels(nxgep); 1730 nxge_uninit_rxdma_channels(nxgep); 1731 1732 /* 1733 * Reset the transmit MAC side. 1734 */ 1735 (void) nxge_tx_mac_disable(nxgep); 1736 1737 nxge_free_mem_pool(nxgep); 1738 1739 /* 1740 * Start the timer if the reset flag is not set. 1741 * If this reset flag is set, the link monitor 1742 * will not be started in order to stop furthur bus 1743 * activities coming from this interface. 1744 * The driver will start the monitor function 1745 * if the interface was initialized again later. 1746 */ 1747 if (!nxge_peu_reset_enable) { 1748 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1749 } 1750 1751 nxgep->drv_state &= ~STATE_HW_INITIALIZED; 1752 1753 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: " 1754 "nxge_mblks_pending %d", nxge_mblks_pending)); 1755 } 1756 1757 void 1758 nxge_get64(p_nxge_t nxgep, p_mblk_t mp) 1759 { 1760 #if defined(__i386) 1761 size_t reg; 1762 #else 1763 uint64_t reg; 1764 #endif 1765 uint64_t regdata; 1766 int i, retry; 1767 1768 bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t)); 1769 regdata = 0; 1770 retry = 1; 1771 1772 for (i = 0; i < retry; i++) { 1773 NXGE_REG_RD64(nxgep->npi_handle, reg, ®data); 1774 } 1775 bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t)); 1776 } 1777 1778 void 1779 nxge_put64(p_nxge_t nxgep, p_mblk_t mp) 1780 { 1781 #if defined(__i386) 1782 size_t reg; 1783 #else 1784 uint64_t reg; 1785 #endif 1786 uint64_t buf[2]; 1787 1788 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t)); 1789 #if defined(__i386) 1790 reg = (size_t)buf[0]; 1791 #else 1792 reg = buf[0]; 1793 #endif 1794 1795 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]); 1796 } 1797 1798 1799 nxge_os_mutex_t nxgedebuglock; 1800 int nxge_debug_init = 0; 1801 1802 /*ARGSUSED*/ 1803 /*VARARGS*/ 1804 void 1805 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...) 1806 { 1807 char msg_buffer[1048]; 1808 char prefix_buffer[32]; 1809 int instance; 1810 uint64_t debug_level; 1811 int cmn_level = CE_CONT; 1812 va_list ap; 1813 1814 if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) { 1815 /* In case a developer has changed nxge_debug_level. */ 1816 if (nxgep->nxge_debug_level != nxge_debug_level) 1817 nxgep->nxge_debug_level = nxge_debug_level; 1818 } 1819 1820 debug_level = (nxgep == NULL) ? nxge_debug_level : 1821 nxgep->nxge_debug_level; 1822 1823 if ((level & debug_level) || 1824 (level == NXGE_NOTE) || 1825 (level == NXGE_ERR_CTL)) { 1826 /* do the msg processing */ 1827 if (nxge_debug_init == 0) { 1828 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 1829 nxge_debug_init = 1; 1830 } 1831 1832 MUTEX_ENTER(&nxgedebuglock); 1833 1834 if ((level & NXGE_NOTE)) { 1835 cmn_level = CE_NOTE; 1836 } 1837 1838 if (level & NXGE_ERR_CTL) { 1839 cmn_level = CE_WARN; 1840 } 1841 1842 va_start(ap, fmt); 1843 (void) vsprintf(msg_buffer, fmt, ap); 1844 va_end(ap); 1845 if (nxgep == NULL) { 1846 instance = -1; 1847 (void) sprintf(prefix_buffer, "%s :", "nxge"); 1848 } else { 1849 instance = nxgep->instance; 1850 (void) sprintf(prefix_buffer, 1851 "%s%d :", "nxge", instance); 1852 } 1853 1854 MUTEX_EXIT(&nxgedebuglock); 1855 cmn_err(cmn_level, "!%s %s\n", 1856 prefix_buffer, msg_buffer); 1857 1858 } 1859 } 1860 1861 char * 1862 nxge_dump_packet(char *addr, int size) 1863 { 1864 uchar_t *ap = (uchar_t *)addr; 1865 int i; 1866 static char etherbuf[1024]; 1867 char *cp = etherbuf; 1868 char digits[] = "0123456789abcdef"; 1869 1870 if (!size) 1871 size = 60; 1872 1873 if (size > MAX_DUMP_SZ) { 1874 /* Dump the leading bytes */ 1875 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1876 if (*ap > 0x0f) 1877 *cp++ = digits[*ap >> 4]; 1878 *cp++ = digits[*ap++ & 0xf]; 1879 *cp++ = ':'; 1880 } 1881 for (i = 0; i < 20; i++) 1882 *cp++ = '.'; 1883 /* Dump the last MAX_DUMP_SZ/2 bytes */ 1884 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2)); 1885 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1886 if (*ap > 0x0f) 1887 *cp++ = digits[*ap >> 4]; 1888 *cp++ = digits[*ap++ & 0xf]; 1889 *cp++ = ':'; 1890 } 1891 } else { 1892 for (i = 0; i < size; i++) { 1893 if (*ap > 0x0f) 1894 *cp++ = digits[*ap >> 4]; 1895 *cp++ = digits[*ap++ & 0xf]; 1896 *cp++ = ':'; 1897 } 1898 } 1899 *--cp = 0; 1900 return (etherbuf); 1901 } 1902 1903 #ifdef NXGE_DEBUG 1904 static void 1905 nxge_test_map_regs(p_nxge_t nxgep) 1906 { 1907 ddi_acc_handle_t cfg_handle; 1908 p_pci_cfg_t cfg_ptr; 1909 ddi_acc_handle_t dev_handle; 1910 char *dev_ptr; 1911 ddi_acc_handle_t pci_config_handle; 1912 uint32_t regval; 1913 int i; 1914 1915 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs")); 1916 1917 dev_handle = nxgep->dev_regs->nxge_regh; 1918 dev_ptr = (char *)nxgep->dev_regs->nxge_regp; 1919 1920 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1921 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1922 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1923 1924 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1925 "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr)); 1926 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1927 "Neptune PCI cfg_ptr vendor id ptr 0x%llx", 1928 &cfg_ptr->vendorid)); 1929 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1930 "\tvendorid 0x%x devid 0x%x", 1931 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0), 1932 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0))); 1933 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1934 "PCI BAR: base 0x%x base14 0x%x base 18 0x%x " 1935 "bar1c 0x%x", 1936 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0), 1937 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0), 1938 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0), 1939 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0))); 1940 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1941 "\nNeptune PCI BAR: base20 0x%x base24 0x%x " 1942 "base 28 0x%x bar2c 0x%x\n", 1943 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0), 1944 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0), 1945 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0), 1946 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0))); 1947 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1948 "\nNeptune PCI BAR: base30 0x%x\n", 1949 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); 1950 1951 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1952 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1953 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1954 "first 0x%llx second 0x%llx third 0x%llx " 1955 "last 0x%llx ", 1956 NXGE_PIO_READ64(dev_handle, 1957 (uint64_t *)(dev_ptr + 0), 0), 1958 NXGE_PIO_READ64(dev_handle, 1959 (uint64_t *)(dev_ptr + 8), 0), 1960 NXGE_PIO_READ64(dev_handle, 1961 (uint64_t *)(dev_ptr + 16), 0), 1962 NXGE_PIO_READ64(cfg_handle, 1963 (uint64_t *)(dev_ptr + 24), 0))); 1964 } 1965 } 1966 1967 #endif 1968 1969 static void 1970 nxge_suspend(p_nxge_t nxgep) 1971 { 1972 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); 1973 1974 nxge_intrs_disable(nxgep); 1975 nxge_destroy_dev(nxgep); 1976 1977 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); 1978 } 1979 1980 static nxge_status_t 1981 nxge_resume(p_nxge_t nxgep) 1982 { 1983 nxge_status_t status = NXGE_OK; 1984 1985 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); 1986 1987 nxgep->suspended = DDI_RESUME; 1988 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1989 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1990 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 1991 (void) nxge_rx_mac_enable(nxgep); 1992 (void) nxge_tx_mac_enable(nxgep); 1993 nxge_intrs_enable(nxgep); 1994 nxgep->suspended = 0; 1995 1996 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1997 "<== nxge_resume status = 0x%x", status)); 1998 return (status); 1999 } 2000 2001 static nxge_status_t 2002 nxge_setup_dev(p_nxge_t nxgep) 2003 { 2004 nxge_status_t status = NXGE_OK; 2005 2006 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 2007 nxgep->mac.portnum)); 2008 2009 status = nxge_link_init(nxgep); 2010 2011 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 2012 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2013 "port%d Bad register acc handle", nxgep->mac.portnum)); 2014 status = NXGE_ERROR; 2015 } 2016 2017 if (status != NXGE_OK) { 2018 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2019 " nxge_setup_dev status " 2020 "(xcvr init 0x%08x)", status)); 2021 goto nxge_setup_dev_exit; 2022 } 2023 2024 nxge_setup_dev_exit: 2025 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2026 "<== nxge_setup_dev port %d status = 0x%08x", 2027 nxgep->mac.portnum, status)); 2028 2029 return (status); 2030 } 2031 2032 static void 2033 nxge_destroy_dev(p_nxge_t nxgep) 2034 { 2035 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 2036 2037 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 2038 2039 (void) nxge_hw_stop(nxgep); 2040 2041 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 2042 } 2043 2044 static nxge_status_t 2045 nxge_setup_system_dma_pages(p_nxge_t nxgep) 2046 { 2047 int ddi_status = DDI_SUCCESS; 2048 uint_t count; 2049 ddi_dma_cookie_t cookie; 2050 uint_t iommu_pagesize; 2051 nxge_status_t status = NXGE_OK; 2052 2053 NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 2054 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 2055 if (nxgep->niu_type != N2_NIU) { 2056 iommu_pagesize = dvma_pagesize(nxgep->dip); 2057 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2058 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2059 " default_block_size %d iommu_pagesize %d", 2060 nxgep->sys_page_sz, 2061 ddi_ptob(nxgep->dip, (ulong_t)1), 2062 nxgep->rx_default_block_size, 2063 iommu_pagesize)); 2064 2065 if (iommu_pagesize != 0) { 2066 if (nxgep->sys_page_sz == iommu_pagesize) { 2067 if (iommu_pagesize > 0x4000) 2068 nxgep->sys_page_sz = 0x4000; 2069 } else { 2070 if (nxgep->sys_page_sz > iommu_pagesize) 2071 nxgep->sys_page_sz = iommu_pagesize; 2072 } 2073 } 2074 } 2075 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2076 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2077 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2078 "default_block_size %d page mask %d", 2079 nxgep->sys_page_sz, 2080 ddi_ptob(nxgep->dip, (ulong_t)1), 2081 nxgep->rx_default_block_size, 2082 nxgep->sys_page_mask)); 2083 2084 2085 switch (nxgep->sys_page_sz) { 2086 default: 2087 nxgep->sys_page_sz = 0x1000; 2088 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2089 nxgep->rx_default_block_size = 0x1000; 2090 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2091 break; 2092 case 0x1000: 2093 nxgep->rx_default_block_size = 0x1000; 2094 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2095 break; 2096 case 0x2000: 2097 nxgep->rx_default_block_size = 0x2000; 2098 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2099 break; 2100 case 0x4000: 2101 nxgep->rx_default_block_size = 0x4000; 2102 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 2103 break; 2104 case 0x8000: 2105 nxgep->rx_default_block_size = 0x8000; 2106 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 2107 break; 2108 } 2109 2110 #ifndef USE_RX_BIG_BUF 2111 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 2112 #else 2113 nxgep->rx_default_block_size = 0x2000; 2114 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2115 #endif 2116 /* 2117 * Get the system DMA burst size. 2118 */ 2119 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2120 DDI_DMA_DONTWAIT, 0, 2121 &nxgep->dmasparehandle); 2122 if (ddi_status != DDI_SUCCESS) { 2123 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2124 "ddi_dma_alloc_handle: failed " 2125 " status 0x%x", ddi_status)); 2126 goto nxge_get_soft_properties_exit; 2127 } 2128 2129 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2130 (caddr_t)nxgep->dmasparehandle, 2131 sizeof (nxgep->dmasparehandle), 2132 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2133 DDI_DMA_DONTWAIT, 0, 2134 &cookie, &count); 2135 if (ddi_status != DDI_DMA_MAPPED) { 2136 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2137 "Binding spare handle to find system" 2138 " burstsize failed.")); 2139 ddi_status = DDI_FAILURE; 2140 goto nxge_get_soft_properties_fail1; 2141 } 2142 2143 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2144 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2145 2146 nxge_get_soft_properties_fail1: 2147 ddi_dma_free_handle(&nxgep->dmasparehandle); 2148 2149 nxge_get_soft_properties_exit: 2150 2151 if (ddi_status != DDI_SUCCESS) 2152 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2153 2154 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2155 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2156 return (status); 2157 } 2158 2159 static nxge_status_t 2160 nxge_alloc_mem_pool(p_nxge_t nxgep) 2161 { 2162 nxge_status_t status = NXGE_OK; 2163 2164 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2165 2166 status = nxge_alloc_rx_mem_pool(nxgep); 2167 if (status != NXGE_OK) { 2168 return (NXGE_ERROR); 2169 } 2170 2171 status = nxge_alloc_tx_mem_pool(nxgep); 2172 if (status != NXGE_OK) { 2173 nxge_free_rx_mem_pool(nxgep); 2174 return (NXGE_ERROR); 2175 } 2176 2177 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2178 return (NXGE_OK); 2179 } 2180 2181 static void 2182 nxge_free_mem_pool(p_nxge_t nxgep) 2183 { 2184 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2185 2186 nxge_free_rx_mem_pool(nxgep); 2187 nxge_free_tx_mem_pool(nxgep); 2188 2189 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 2190 } 2191 2192 nxge_status_t 2193 nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 2194 { 2195 uint32_t rdc_max; 2196 p_nxge_dma_pt_cfg_t p_all_cfgp; 2197 p_nxge_hw_pt_cfg_t p_cfgp; 2198 p_nxge_dma_pool_t dma_poolp; 2199 p_nxge_dma_common_t *dma_buf_p; 2200 p_nxge_dma_pool_t dma_cntl_poolp; 2201 p_nxge_dma_common_t *dma_cntl_p; 2202 uint32_t *num_chunks; /* per dma */ 2203 nxge_status_t status = NXGE_OK; 2204 2205 uint32_t nxge_port_rbr_size; 2206 uint32_t nxge_port_rbr_spare_size; 2207 uint32_t nxge_port_rcr_size; 2208 uint32_t rx_cntl_alloc_size; 2209 2210 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 2211 2212 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2213 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2214 rdc_max = NXGE_MAX_RDCS; 2215 2216 /* 2217 * Allocate memory for the common DMA data structures. 2218 */ 2219 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2220 KM_SLEEP); 2221 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2222 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2223 2224 dma_cntl_poolp = (p_nxge_dma_pool_t) 2225 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2226 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2227 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2228 2229 num_chunks = (uint32_t *)KMEM_ZALLOC( 2230 sizeof (uint32_t) * rdc_max, KM_SLEEP); 2231 2232 /* 2233 * Assume that each DMA channel will be configured with 2234 * the default block size. 2235 * rbr block counts are modulo the batch count (16). 2236 */ 2237 nxge_port_rbr_size = p_all_cfgp->rbr_size; 2238 nxge_port_rcr_size = p_all_cfgp->rcr_size; 2239 2240 if (!nxge_port_rbr_size) { 2241 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 2242 } 2243 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 2244 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2245 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 2246 } 2247 2248 p_all_cfgp->rbr_size = nxge_port_rbr_size; 2249 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 2250 2251 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 2252 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2253 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 2254 } 2255 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 2256 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2257 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 2258 "set to default %d", 2259 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 2260 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 2261 } 2262 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 2263 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2264 "nxge_alloc_rx_mem_pool: RCR too high %d, " 2265 "set to default %d", 2266 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 2267 nxge_port_rcr_size = RCR_DEFAULT_MAX; 2268 } 2269 2270 /* 2271 * N2/NIU has limitation on the descriptor sizes (contiguous 2272 * memory allocation on data buffers to 4M (contig_mem_alloc) 2273 * and little endian for control buffers (must use the ddi/dki mem alloc 2274 * function). 2275 */ 2276 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2277 if (nxgep->niu_type == N2_NIU) { 2278 nxge_port_rbr_spare_size = 0; 2279 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2280 (!ISP2(nxge_port_rbr_size))) { 2281 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2282 } 2283 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2284 (!ISP2(nxge_port_rcr_size))) { 2285 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2286 } 2287 } 2288 #endif 2289 2290 /* 2291 * Addresses of receive block ring, receive completion ring and the 2292 * mailbox must be all cache-aligned (64 bytes). 2293 */ 2294 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2295 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2296 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2297 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2298 2299 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2300 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2301 "nxge_port_rcr_size = %d " 2302 "rx_cntl_alloc_size = %d", 2303 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2304 nxge_port_rcr_size, 2305 rx_cntl_alloc_size)); 2306 2307 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2308 if (nxgep->niu_type == N2_NIU) { 2309 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2310 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2311 2312 if (!ISP2(rx_buf_alloc_size)) { 2313 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2314 "==> nxge_alloc_rx_mem_pool: " 2315 " must be power of 2")); 2316 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2317 goto nxge_alloc_rx_mem_pool_exit; 2318 } 2319 2320 if (rx_buf_alloc_size > (1 << 22)) { 2321 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2322 "==> nxge_alloc_rx_mem_pool: " 2323 " limit size to 4M")); 2324 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2325 goto nxge_alloc_rx_mem_pool_exit; 2326 } 2327 2328 if (rx_cntl_alloc_size < 0x2000) { 2329 rx_cntl_alloc_size = 0x2000; 2330 } 2331 } 2332 #endif 2333 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2334 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2335 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2336 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2337 2338 dma_poolp->ndmas = p_cfgp->max_rdcs; 2339 dma_poolp->num_chunks = num_chunks; 2340 dma_poolp->buf_allocated = B_TRUE; 2341 nxgep->rx_buf_pool_p = dma_poolp; 2342 dma_poolp->dma_buf_pool_p = dma_buf_p; 2343 2344 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2345 dma_cntl_poolp->buf_allocated = B_TRUE; 2346 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2347 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2348 2349 /* Allocate the receive rings, too. */ 2350 nxgep->rx_rbr_rings = 2351 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2352 nxgep->rx_rbr_rings->rbr_rings = 2353 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2354 nxgep->rx_rcr_rings = 2355 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2356 nxgep->rx_rcr_rings->rcr_rings = 2357 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2358 nxgep->rx_mbox_areas_p = 2359 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2360 nxgep->rx_mbox_areas_p->rxmbox_areas = 2361 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2362 2363 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2364 p_cfgp->max_rdcs; 2365 2366 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2367 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2368 2369 nxge_alloc_rx_mem_pool_exit: 2370 return (status); 2371 } 2372 2373 /* 2374 * nxge_alloc_rxb 2375 * 2376 * Allocate buffers for an RDC. 2377 * 2378 * Arguments: 2379 * nxgep 2380 * channel The channel to map into our kernel space. 2381 * 2382 * Notes: 2383 * 2384 * NPI function calls: 2385 * 2386 * NXGE function calls: 2387 * 2388 * Registers accessed: 2389 * 2390 * Context: 2391 * 2392 * Taking apart: 2393 * 2394 * Open questions: 2395 * 2396 */ 2397 nxge_status_t 2398 nxge_alloc_rxb( 2399 p_nxge_t nxgep, 2400 int channel) 2401 { 2402 size_t rx_buf_alloc_size; 2403 nxge_status_t status = NXGE_OK; 2404 2405 nxge_dma_common_t **data; 2406 nxge_dma_common_t **control; 2407 uint32_t *num_chunks; 2408 2409 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2410 2411 /* 2412 * Allocate memory for the receive buffers and descriptor rings. 2413 * Replace these allocation functions with the interface functions 2414 * provided by the partition manager if/when they are available. 2415 */ 2416 2417 /* 2418 * Allocate memory for the receive buffer blocks. 2419 */ 2420 rx_buf_alloc_size = (nxgep->rx_default_block_size * 2421 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); 2422 2423 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2424 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; 2425 2426 if ((status = nxge_alloc_rx_buf_dma( 2427 nxgep, channel, data, rx_buf_alloc_size, 2428 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { 2429 return (status); 2430 } 2431 2432 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " 2433 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); 2434 2435 /* 2436 * Allocate memory for descriptor rings and mailbox. 2437 */ 2438 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2439 2440 if ((status = nxge_alloc_rx_cntl_dma( 2441 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 2442 != NXGE_OK) { 2443 nxge_free_rx_cntl_dma(nxgep, *control); 2444 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 2445 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 2446 return (status); 2447 } 2448 2449 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2450 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2451 2452 return (status); 2453 } 2454 2455 void 2456 nxge_free_rxb( 2457 p_nxge_t nxgep, 2458 int channel) 2459 { 2460 nxge_dma_common_t *data; 2461 nxge_dma_common_t *control; 2462 uint32_t num_chunks; 2463 2464 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2465 2466 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2467 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 2468 nxge_free_rx_buf_dma(nxgep, data, num_chunks); 2469 2470 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2471 nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 2472 2473 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2474 nxge_free_rx_cntl_dma(nxgep, control); 2475 2476 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2477 2478 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2479 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2480 2481 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_alloc_rbb")); 2482 } 2483 2484 static void 2485 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2486 { 2487 int rdc_max = NXGE_MAX_RDCS; 2488 2489 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2490 2491 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 2492 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2493 "<== nxge_free_rx_mem_pool " 2494 "(null rx buf pool or buf not allocated")); 2495 return; 2496 } 2497 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 2498 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2499 "<== nxge_free_rx_mem_pool " 2500 "(null rx cntl buf pool or cntl buf not allocated")); 2501 return; 2502 } 2503 2504 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2505 sizeof (p_nxge_dma_common_t) * rdc_max); 2506 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2507 2508 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2509 sizeof (uint32_t) * rdc_max); 2510 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2511 sizeof (p_nxge_dma_common_t) * rdc_max); 2512 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2513 2514 nxgep->rx_buf_pool_p = 0; 2515 nxgep->rx_cntl_pool_p = 0; 2516 2517 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2518 sizeof (p_rx_rbr_ring_t) * rdc_max); 2519 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2520 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2521 sizeof (p_rx_rcr_ring_t) * rdc_max); 2522 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2523 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2524 sizeof (p_rx_mbox_t) * rdc_max); 2525 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2526 2527 nxgep->rx_rbr_rings = 0; 2528 nxgep->rx_rcr_rings = 0; 2529 nxgep->rx_mbox_areas_p = 0; 2530 2531 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2532 } 2533 2534 2535 static nxge_status_t 2536 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2537 p_nxge_dma_common_t *dmap, 2538 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2539 { 2540 p_nxge_dma_common_t rx_dmap; 2541 nxge_status_t status = NXGE_OK; 2542 size_t total_alloc_size; 2543 size_t allocated = 0; 2544 int i, size_index, array_size; 2545 boolean_t use_kmem_alloc = B_FALSE; 2546 2547 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2548 2549 rx_dmap = (p_nxge_dma_common_t) 2550 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2551 KM_SLEEP); 2552 2553 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2554 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2555 dma_channel, alloc_size, block_size, dmap)); 2556 2557 total_alloc_size = alloc_size; 2558 2559 #if defined(RX_USE_RECLAIM_POST) 2560 total_alloc_size = alloc_size + alloc_size/4; 2561 #endif 2562 2563 i = 0; 2564 size_index = 0; 2565 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2566 while ((alloc_sizes[size_index] < alloc_size) && 2567 (size_index < array_size)) 2568 size_index++; 2569 if (size_index >= array_size) { 2570 size_index = array_size - 1; 2571 } 2572 2573 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2574 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2575 use_kmem_alloc = B_TRUE; 2576 #if defined(__i386) || defined(__amd64) 2577 size_index = 0; 2578 #endif 2579 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2580 "==> nxge_alloc_rx_buf_dma: " 2581 "Neptune use kmem_alloc() - size_index %d", 2582 size_index)); 2583 } 2584 2585 while ((allocated < total_alloc_size) && 2586 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2587 rx_dmap[i].dma_chunk_index = i; 2588 rx_dmap[i].block_size = block_size; 2589 rx_dmap[i].alength = alloc_sizes[size_index]; 2590 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2591 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2592 rx_dmap[i].dma_channel = dma_channel; 2593 rx_dmap[i].contig_alloc_type = B_FALSE; 2594 rx_dmap[i].kmem_alloc_type = B_FALSE; 2595 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2596 2597 /* 2598 * N2/NIU: data buffers must be contiguous as the driver 2599 * needs to call Hypervisor api to set up 2600 * logical pages. 2601 */ 2602 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2603 rx_dmap[i].contig_alloc_type = B_TRUE; 2604 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2605 } else if (use_kmem_alloc) { 2606 /* For Neptune, use kmem_alloc */ 2607 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2608 "==> nxge_alloc_rx_buf_dma: " 2609 "Neptune use kmem_alloc()")); 2610 rx_dmap[i].kmem_alloc_type = B_TRUE; 2611 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2612 } 2613 2614 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2615 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2616 "i %d nblocks %d alength %d", 2617 dma_channel, i, &rx_dmap[i], block_size, 2618 i, rx_dmap[i].nblocks, 2619 rx_dmap[i].alength)); 2620 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2621 &nxge_rx_dma_attr, 2622 rx_dmap[i].alength, 2623 &nxge_dev_buf_dma_acc_attr, 2624 DDI_DMA_READ | DDI_DMA_STREAMING, 2625 (p_nxge_dma_common_t)(&rx_dmap[i])); 2626 if (status != NXGE_OK) { 2627 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2628 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2629 "dma %d size_index %d size requested %d", 2630 dma_channel, 2631 size_index, 2632 rx_dmap[i].alength)); 2633 size_index--; 2634 } else { 2635 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2636 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2637 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2638 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2639 "buf_alloc_state %d alloc_type %d", 2640 dma_channel, 2641 &rx_dmap[i], 2642 rx_dmap[i].kaddrp, 2643 rx_dmap[i].alength, 2644 rx_dmap[i].buf_alloc_state, 2645 rx_dmap[i].buf_alloc_type)); 2646 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2647 " alloc_rx_buf_dma allocated rdc %d " 2648 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2649 dma_channel, i, rx_dmap[i].alength, 2650 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2651 rx_dmap[i].kaddrp)); 2652 i++; 2653 allocated += alloc_sizes[size_index]; 2654 } 2655 } 2656 2657 if (allocated < total_alloc_size) { 2658 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2659 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2660 "allocated 0x%x requested 0x%x", 2661 dma_channel, 2662 allocated, total_alloc_size)); 2663 status = NXGE_ERROR; 2664 goto nxge_alloc_rx_mem_fail1; 2665 } 2666 2667 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2668 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2669 "allocated 0x%x requested 0x%x", 2670 dma_channel, 2671 allocated, total_alloc_size)); 2672 2673 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2674 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2675 dma_channel, i)); 2676 *num_chunks = i; 2677 *dmap = rx_dmap; 2678 2679 goto nxge_alloc_rx_mem_exit; 2680 2681 nxge_alloc_rx_mem_fail1: 2682 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2683 2684 nxge_alloc_rx_mem_exit: 2685 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2686 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2687 2688 return (status); 2689 } 2690 2691 /*ARGSUSED*/ 2692 static void 2693 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2694 uint32_t num_chunks) 2695 { 2696 int i; 2697 2698 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2699 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2700 2701 if (dmap == 0) 2702 return; 2703 2704 for (i = 0; i < num_chunks; i++) { 2705 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2706 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2707 i, dmap)); 2708 nxge_dma_free_rx_data_buf(dmap++); 2709 } 2710 2711 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2712 } 2713 2714 /*ARGSUSED*/ 2715 static nxge_status_t 2716 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2717 p_nxge_dma_common_t *dmap, size_t size) 2718 { 2719 p_nxge_dma_common_t rx_dmap; 2720 nxge_status_t status = NXGE_OK; 2721 2722 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2723 2724 rx_dmap = (p_nxge_dma_common_t) 2725 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2726 2727 rx_dmap->contig_alloc_type = B_FALSE; 2728 rx_dmap->kmem_alloc_type = B_FALSE; 2729 2730 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2731 &nxge_desc_dma_attr, 2732 size, 2733 &nxge_dev_desc_dma_acc_attr, 2734 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2735 rx_dmap); 2736 if (status != NXGE_OK) { 2737 goto nxge_alloc_rx_cntl_dma_fail1; 2738 } 2739 2740 *dmap = rx_dmap; 2741 goto nxge_alloc_rx_cntl_dma_exit; 2742 2743 nxge_alloc_rx_cntl_dma_fail1: 2744 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2745 2746 nxge_alloc_rx_cntl_dma_exit: 2747 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2748 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2749 2750 return (status); 2751 } 2752 2753 /*ARGSUSED*/ 2754 static void 2755 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2756 { 2757 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2758 2759 if (dmap == 0) 2760 return; 2761 2762 nxge_dma_mem_free(dmap); 2763 2764 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2765 } 2766 2767 typedef struct { 2768 size_t tx_size; 2769 size_t cr_size; 2770 size_t threshhold; 2771 } nxge_tdc_sizes_t; 2772 2773 static 2774 nxge_status_t 2775 nxge_tdc_sizes( 2776 nxge_t *nxgep, 2777 nxge_tdc_sizes_t *sizes) 2778 { 2779 uint32_t threshhold; /* The bcopy() threshhold */ 2780 size_t tx_size; /* Transmit buffer size */ 2781 size_t cr_size; /* Completion ring size */ 2782 2783 /* 2784 * Assume that each DMA channel will be configured with the 2785 * default transmit buffer size for copying transmit data. 2786 * (If a packet is bigger than this, it will not be copied.) 2787 */ 2788 if (nxgep->niu_type == N2_NIU) { 2789 threshhold = TX_BCOPY_SIZE; 2790 } else { 2791 threshhold = nxge_bcopy_thresh; 2792 } 2793 tx_size = nxge_tx_ring_size * threshhold; 2794 2795 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 2796 cr_size += sizeof (txdma_mailbox_t); 2797 2798 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2799 if (nxgep->niu_type == N2_NIU) { 2800 if (!ISP2(tx_size)) { 2801 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2802 "==> nxge_tdc_sizes: Tx size" 2803 " must be power of 2")); 2804 return (NXGE_ERROR); 2805 } 2806 2807 if (tx_size > (1 << 22)) { 2808 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2809 "==> nxge_tdc_sizes: Tx size" 2810 " limited to 4M")); 2811 return (NXGE_ERROR); 2812 } 2813 2814 if (cr_size < 0x2000) 2815 cr_size = 0x2000; 2816 } 2817 #endif 2818 2819 sizes->threshhold = threshhold; 2820 sizes->tx_size = tx_size; 2821 sizes->cr_size = cr_size; 2822 2823 return (NXGE_OK); 2824 } 2825 /* 2826 * nxge_alloc_txb 2827 * 2828 * Allocate buffers for an TDC. 2829 * 2830 * Arguments: 2831 * nxgep 2832 * channel The channel to map into our kernel space. 2833 * 2834 * Notes: 2835 * 2836 * NPI function calls: 2837 * 2838 * NXGE function calls: 2839 * 2840 * Registers accessed: 2841 * 2842 * Context: 2843 * 2844 * Taking apart: 2845 * 2846 * Open questions: 2847 * 2848 */ 2849 nxge_status_t 2850 nxge_alloc_txb( 2851 p_nxge_t nxgep, 2852 int channel) 2853 { 2854 nxge_dma_common_t **dma_buf_p; 2855 nxge_dma_common_t **dma_cntl_p; 2856 uint32_t *num_chunks; 2857 nxge_status_t status = NXGE_OK; 2858 2859 nxge_tdc_sizes_t sizes; 2860 2861 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tbb")); 2862 2863 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 2864 return (NXGE_ERROR); 2865 2866 /* 2867 * Allocate memory for transmit buffers and descriptor rings. 2868 * Replace these allocation functions with the interface functions 2869 * provided by the partition manager Real Soon Now. 2870 */ 2871 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2872 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 2873 2874 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2875 2876 /* 2877 * Allocate memory for transmit buffers and descriptor rings. 2878 * Replace allocation functions with interface functions provided 2879 * by the partition manager when it is available. 2880 * 2881 * Allocate memory for the transmit buffer pool. 2882 */ 2883 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2884 "sizes: tx: %ld, cr:%ld, th:%ld", 2885 sizes.tx_size, sizes.cr_size, sizes.threshhold)); 2886 2887 *num_chunks = 0; 2888 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 2889 sizes.tx_size, sizes.threshhold, num_chunks); 2890 if (status != NXGE_OK) { 2891 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 2892 return (status); 2893 } 2894 2895 /* 2896 * Allocate memory for descriptor rings and mailbox. 2897 */ 2898 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 2899 sizes.cr_size); 2900 if (status != NXGE_OK) { 2901 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 2902 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 2903 return (status); 2904 } 2905 2906 return (NXGE_OK); 2907 } 2908 2909 void 2910 nxge_free_txb( 2911 p_nxge_t nxgep, 2912 int channel) 2913 { 2914 nxge_dma_common_t *data; 2915 nxge_dma_common_t *control; 2916 uint32_t num_chunks; 2917 2918 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 2919 2920 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2921 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2922 nxge_free_tx_buf_dma(nxgep, data, num_chunks); 2923 2924 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2925 nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 2926 2927 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2928 nxge_free_tx_cntl_dma(nxgep, control); 2929 2930 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2931 2932 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2933 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2934 2935 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 2936 } 2937 2938 /* 2939 * nxge_alloc_tx_mem_pool 2940 * 2941 * This function allocates all of the per-port TDC control data structures. 2942 * The per-channel (TDC) data structures are allocated when needed. 2943 * 2944 * Arguments: 2945 * nxgep 2946 * 2947 * Notes: 2948 * 2949 * Context: 2950 * Any domain 2951 */ 2952 nxge_status_t 2953 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2954 { 2955 nxge_hw_pt_cfg_t *p_cfgp; 2956 nxge_dma_pool_t *dma_poolp; 2957 nxge_dma_common_t **dma_buf_p; 2958 nxge_dma_pool_t *dma_cntl_poolp; 2959 nxge_dma_common_t **dma_cntl_p; 2960 uint32_t *num_chunks; /* per dma */ 2961 int tdc_max; 2962 2963 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2964 2965 p_cfgp = &nxgep->pt_config.hw_config; 2966 tdc_max = NXGE_MAX_TDCS; 2967 2968 /* 2969 * Allocate memory for each transmit DMA channel. 2970 */ 2971 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2972 KM_SLEEP); 2973 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2974 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2975 2976 dma_cntl_poolp = (p_nxge_dma_pool_t) 2977 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2978 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2979 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2980 2981 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2982 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2983 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2984 "set to default %d", 2985 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2986 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2987 } 2988 2989 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2990 /* 2991 * N2/NIU has limitation on the descriptor sizes (contiguous 2992 * memory allocation on data buffers to 4M (contig_mem_alloc) 2993 * and little endian for control buffers (must use the ddi/dki mem alloc 2994 * function). The transmit ring is limited to 8K (includes the 2995 * mailbox). 2996 */ 2997 if (nxgep->niu_type == N2_NIU) { 2998 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2999 (!ISP2(nxge_tx_ring_size))) { 3000 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 3001 } 3002 } 3003 #endif 3004 3005 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 3006 3007 num_chunks = (uint32_t *)KMEM_ZALLOC( 3008 sizeof (uint32_t) * tdc_max, KM_SLEEP); 3009 3010 dma_poolp->ndmas = p_cfgp->tdc.owned; 3011 dma_poolp->num_chunks = num_chunks; 3012 dma_poolp->dma_buf_pool_p = dma_buf_p; 3013 nxgep->tx_buf_pool_p = dma_poolp; 3014 3015 dma_poolp->buf_allocated = B_TRUE; 3016 3017 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 3018 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 3019 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 3020 3021 dma_cntl_poolp->buf_allocated = B_TRUE; 3022 3023 nxgep->tx_rings = 3024 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 3025 nxgep->tx_rings->rings = 3026 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 3027 nxgep->tx_mbox_areas_p = 3028 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 3029 nxgep->tx_mbox_areas_p->txmbox_areas_p = 3030 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 3031 3032 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 3033 3034 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3035 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 3036 tdc_max, dma_poolp->ndmas)); 3037 3038 return (NXGE_OK); 3039 } 3040 3041 nxge_status_t 3042 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 3043 p_nxge_dma_common_t *dmap, size_t alloc_size, 3044 size_t block_size, uint32_t *num_chunks) 3045 { 3046 p_nxge_dma_common_t tx_dmap; 3047 nxge_status_t status = NXGE_OK; 3048 size_t total_alloc_size; 3049 size_t allocated = 0; 3050 int i, size_index, array_size; 3051 3052 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 3053 3054 tx_dmap = (p_nxge_dma_common_t) 3055 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 3056 KM_SLEEP); 3057 3058 total_alloc_size = alloc_size; 3059 i = 0; 3060 size_index = 0; 3061 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3062 while ((alloc_sizes[size_index] < alloc_size) && 3063 (size_index < array_size)) 3064 size_index++; 3065 if (size_index >= array_size) { 3066 size_index = array_size - 1; 3067 } 3068 3069 while ((allocated < total_alloc_size) && 3070 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3071 3072 tx_dmap[i].dma_chunk_index = i; 3073 tx_dmap[i].block_size = block_size; 3074 tx_dmap[i].alength = alloc_sizes[size_index]; 3075 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3076 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3077 tx_dmap[i].dma_channel = dma_channel; 3078 tx_dmap[i].contig_alloc_type = B_FALSE; 3079 tx_dmap[i].kmem_alloc_type = B_FALSE; 3080 3081 /* 3082 * N2/NIU: data buffers must be contiguous as the driver 3083 * needs to call Hypervisor api to set up 3084 * logical pages. 3085 */ 3086 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3087 tx_dmap[i].contig_alloc_type = B_TRUE; 3088 } 3089 3090 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3091 &nxge_tx_dma_attr, 3092 tx_dmap[i].alength, 3093 &nxge_dev_buf_dma_acc_attr, 3094 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3095 (p_nxge_dma_common_t)(&tx_dmap[i])); 3096 if (status != NXGE_OK) { 3097 size_index--; 3098 } else { 3099 i++; 3100 allocated += alloc_sizes[size_index]; 3101 } 3102 } 3103 3104 if (allocated < total_alloc_size) { 3105 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3106 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 3107 "allocated 0x%x requested 0x%x", 3108 dma_channel, 3109 allocated, total_alloc_size)); 3110 status = NXGE_ERROR; 3111 goto nxge_alloc_tx_mem_fail1; 3112 } 3113 3114 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3115 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3116 "allocated 0x%x requested 0x%x", 3117 dma_channel, 3118 allocated, total_alloc_size)); 3119 3120 *num_chunks = i; 3121 *dmap = tx_dmap; 3122 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3123 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3124 *dmap, i)); 3125 goto nxge_alloc_tx_mem_exit; 3126 3127 nxge_alloc_tx_mem_fail1: 3128 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3129 3130 nxge_alloc_tx_mem_exit: 3131 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3132 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3133 3134 return (status); 3135 } 3136 3137 /*ARGSUSED*/ 3138 static void 3139 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3140 uint32_t num_chunks) 3141 { 3142 int i; 3143 3144 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3145 3146 if (dmap == 0) 3147 return; 3148 3149 for (i = 0; i < num_chunks; i++) { 3150 nxge_dma_mem_free(dmap++); 3151 } 3152 3153 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3154 } 3155 3156 /*ARGSUSED*/ 3157 nxge_status_t 3158 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3159 p_nxge_dma_common_t *dmap, size_t size) 3160 { 3161 p_nxge_dma_common_t tx_dmap; 3162 nxge_status_t status = NXGE_OK; 3163 3164 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3165 tx_dmap = (p_nxge_dma_common_t) 3166 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3167 3168 tx_dmap->contig_alloc_type = B_FALSE; 3169 tx_dmap->kmem_alloc_type = B_FALSE; 3170 3171 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3172 &nxge_desc_dma_attr, 3173 size, 3174 &nxge_dev_desc_dma_acc_attr, 3175 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3176 tx_dmap); 3177 if (status != NXGE_OK) { 3178 goto nxge_alloc_tx_cntl_dma_fail1; 3179 } 3180 3181 *dmap = tx_dmap; 3182 goto nxge_alloc_tx_cntl_dma_exit; 3183 3184 nxge_alloc_tx_cntl_dma_fail1: 3185 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3186 3187 nxge_alloc_tx_cntl_dma_exit: 3188 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3189 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3190 3191 return (status); 3192 } 3193 3194 /*ARGSUSED*/ 3195 static void 3196 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3197 { 3198 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3199 3200 if (dmap == 0) 3201 return; 3202 3203 nxge_dma_mem_free(dmap); 3204 3205 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3206 } 3207 3208 /* 3209 * nxge_free_tx_mem_pool 3210 * 3211 * This function frees all of the per-port TDC control data structures. 3212 * The per-channel (TDC) data structures are freed when the channel 3213 * is stopped. 3214 * 3215 * Arguments: 3216 * nxgep 3217 * 3218 * Notes: 3219 * 3220 * Context: 3221 * Any domain 3222 */ 3223 static void 3224 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3225 { 3226 int tdc_max = NXGE_MAX_TDCS; 3227 3228 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3229 3230 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3231 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3232 "<== nxge_free_tx_mem_pool " 3233 "(null tx buf pool or buf not allocated")); 3234 return; 3235 } 3236 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3237 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3238 "<== nxge_free_tx_mem_pool " 3239 "(null tx cntl buf pool or cntl buf not allocated")); 3240 return; 3241 } 3242 3243 /* 1. Free the mailboxes. */ 3244 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3245 sizeof (p_tx_mbox_t) * tdc_max); 3246 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3247 3248 nxgep->tx_mbox_areas_p = 0; 3249 3250 /* 2. Free the transmit ring arrays. */ 3251 KMEM_FREE(nxgep->tx_rings->rings, 3252 sizeof (p_tx_ring_t) * tdc_max); 3253 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3254 3255 nxgep->tx_rings = 0; 3256 3257 /* 3. Free the completion ring data structures. */ 3258 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3259 sizeof (p_nxge_dma_common_t) * tdc_max); 3260 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3261 3262 nxgep->tx_cntl_pool_p = 0; 3263 3264 /* 4. Free the data ring data structures. */ 3265 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3266 sizeof (uint32_t) * tdc_max); 3267 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3268 sizeof (p_nxge_dma_common_t) * tdc_max); 3269 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3270 3271 nxgep->tx_buf_pool_p = 0; 3272 3273 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3274 } 3275 3276 /*ARGSUSED*/ 3277 static nxge_status_t 3278 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3279 struct ddi_dma_attr *dma_attrp, 3280 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3281 p_nxge_dma_common_t dma_p) 3282 { 3283 caddr_t kaddrp; 3284 int ddi_status = DDI_SUCCESS; 3285 boolean_t contig_alloc_type; 3286 boolean_t kmem_alloc_type; 3287 3288 contig_alloc_type = dma_p->contig_alloc_type; 3289 3290 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3291 /* 3292 * contig_alloc_type for contiguous memory only allowed 3293 * for N2/NIU. 3294 */ 3295 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3296 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3297 dma_p->contig_alloc_type)); 3298 return (NXGE_ERROR | NXGE_DDI_FAILED); 3299 } 3300 3301 dma_p->dma_handle = NULL; 3302 dma_p->acc_handle = NULL; 3303 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3304 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3305 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3306 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3307 if (ddi_status != DDI_SUCCESS) { 3308 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3309 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3310 return (NXGE_ERROR | NXGE_DDI_FAILED); 3311 } 3312 3313 kmem_alloc_type = dma_p->kmem_alloc_type; 3314 3315 switch (contig_alloc_type) { 3316 case B_FALSE: 3317 switch (kmem_alloc_type) { 3318 case B_FALSE: 3319 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3320 length, 3321 acc_attr_p, 3322 xfer_flags, 3323 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3324 &dma_p->acc_handle); 3325 if (ddi_status != DDI_SUCCESS) { 3326 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3327 "nxge_dma_mem_alloc: " 3328 "ddi_dma_mem_alloc failed")); 3329 ddi_dma_free_handle(&dma_p->dma_handle); 3330 dma_p->dma_handle = NULL; 3331 return (NXGE_ERROR | NXGE_DDI_FAILED); 3332 } 3333 if (dma_p->alength < length) { 3334 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3335 "nxge_dma_mem_alloc:di_dma_mem_alloc " 3336 "< length.")); 3337 ddi_dma_mem_free(&dma_p->acc_handle); 3338 ddi_dma_free_handle(&dma_p->dma_handle); 3339 dma_p->acc_handle = NULL; 3340 dma_p->dma_handle = NULL; 3341 return (NXGE_ERROR); 3342 } 3343 3344 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3345 NULL, 3346 kaddrp, dma_p->alength, xfer_flags, 3347 DDI_DMA_DONTWAIT, 3348 0, &dma_p->dma_cookie, &dma_p->ncookies); 3349 if (ddi_status != DDI_DMA_MAPPED) { 3350 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3351 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3352 "failed " 3353 "(staus 0x%x ncookies %d.)", ddi_status, 3354 dma_p->ncookies)); 3355 if (dma_p->acc_handle) { 3356 ddi_dma_mem_free(&dma_p->acc_handle); 3357 dma_p->acc_handle = NULL; 3358 } 3359 ddi_dma_free_handle(&dma_p->dma_handle); 3360 dma_p->dma_handle = NULL; 3361 return (NXGE_ERROR | NXGE_DDI_FAILED); 3362 } 3363 3364 if (dma_p->ncookies != 1) { 3365 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3366 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3367 "> 1 cookie" 3368 "(staus 0x%x ncookies %d.)", ddi_status, 3369 dma_p->ncookies)); 3370 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3371 if (dma_p->acc_handle) { 3372 ddi_dma_mem_free(&dma_p->acc_handle); 3373 dma_p->acc_handle = NULL; 3374 } 3375 ddi_dma_free_handle(&dma_p->dma_handle); 3376 dma_p->dma_handle = NULL; 3377 dma_p->acc_handle = NULL; 3378 return (NXGE_ERROR); 3379 } 3380 break; 3381 3382 case B_TRUE: 3383 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3384 if (kaddrp == NULL) { 3385 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3386 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3387 "kmem alloc failed")); 3388 return (NXGE_ERROR); 3389 } 3390 3391 dma_p->alength = length; 3392 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3393 NULL, kaddrp, dma_p->alength, xfer_flags, 3394 DDI_DMA_DONTWAIT, 0, 3395 &dma_p->dma_cookie, &dma_p->ncookies); 3396 if (ddi_status != DDI_DMA_MAPPED) { 3397 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3398 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3399 "(kmem_alloc) failed kaddrp $%p length %d " 3400 "(staus 0x%x (%d) ncookies %d.)", 3401 kaddrp, length, 3402 ddi_status, ddi_status, dma_p->ncookies)); 3403 KMEM_FREE(kaddrp, length); 3404 dma_p->acc_handle = NULL; 3405 ddi_dma_free_handle(&dma_p->dma_handle); 3406 dma_p->dma_handle = NULL; 3407 dma_p->kaddrp = NULL; 3408 return (NXGE_ERROR | NXGE_DDI_FAILED); 3409 } 3410 3411 if (dma_p->ncookies != 1) { 3412 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3413 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3414 "(kmem_alloc) > 1 cookie" 3415 "(staus 0x%x ncookies %d.)", ddi_status, 3416 dma_p->ncookies)); 3417 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3418 KMEM_FREE(kaddrp, length); 3419 ddi_dma_free_handle(&dma_p->dma_handle); 3420 dma_p->dma_handle = NULL; 3421 dma_p->acc_handle = NULL; 3422 dma_p->kaddrp = NULL; 3423 return (NXGE_ERROR); 3424 } 3425 3426 dma_p->kaddrp = kaddrp; 3427 3428 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3429 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3430 "kaddr $%p alength %d", 3431 dma_p, 3432 kaddrp, 3433 dma_p->alength)); 3434 break; 3435 } 3436 break; 3437 3438 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3439 case B_TRUE: 3440 kaddrp = (caddr_t)contig_mem_alloc(length); 3441 if (kaddrp == NULL) { 3442 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3443 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3444 ddi_dma_free_handle(&dma_p->dma_handle); 3445 return (NXGE_ERROR | NXGE_DDI_FAILED); 3446 } 3447 3448 dma_p->alength = length; 3449 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3450 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3451 &dma_p->dma_cookie, &dma_p->ncookies); 3452 if (ddi_status != DDI_DMA_MAPPED) { 3453 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3454 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 3455 "(status 0x%x ncookies %d.)", ddi_status, 3456 dma_p->ncookies)); 3457 3458 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3459 "==> nxge_dma_mem_alloc: (not mapped)" 3460 "length %lu (0x%x) " 3461 "free contig kaddrp $%p " 3462 "va_to_pa $%p", 3463 length, length, 3464 kaddrp, 3465 va_to_pa(kaddrp))); 3466 3467 3468 contig_mem_free((void *)kaddrp, length); 3469 ddi_dma_free_handle(&dma_p->dma_handle); 3470 3471 dma_p->dma_handle = NULL; 3472 dma_p->acc_handle = NULL; 3473 dma_p->alength = NULL; 3474 dma_p->kaddrp = NULL; 3475 3476 return (NXGE_ERROR | NXGE_DDI_FAILED); 3477 } 3478 3479 if (dma_p->ncookies != 1 || 3480 (dma_p->dma_cookie.dmac_laddress == NULL)) { 3481 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3482 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 3483 "cookie or " 3484 "dmac_laddress is NULL $%p size %d " 3485 " (status 0x%x ncookies %d.)", 3486 ddi_status, 3487 dma_p->dma_cookie.dmac_laddress, 3488 dma_p->dma_cookie.dmac_size, 3489 dma_p->ncookies)); 3490 3491 contig_mem_free((void *)kaddrp, length); 3492 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3493 ddi_dma_free_handle(&dma_p->dma_handle); 3494 3495 dma_p->alength = 0; 3496 dma_p->dma_handle = NULL; 3497 dma_p->acc_handle = NULL; 3498 dma_p->kaddrp = NULL; 3499 3500 return (NXGE_ERROR | NXGE_DDI_FAILED); 3501 } 3502 break; 3503 3504 #else 3505 case B_TRUE: 3506 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3507 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 3508 return (NXGE_ERROR | NXGE_DDI_FAILED); 3509 #endif 3510 } 3511 3512 dma_p->kaddrp = kaddrp; 3513 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3514 dma_p->alength - RXBUF_64B_ALIGNED; 3515 #if defined(__i386) 3516 dma_p->ioaddr_pp = 3517 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 3518 #else 3519 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3520 #endif 3521 dma_p->last_ioaddr_pp = 3522 #if defined(__i386) 3523 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress + 3524 #else 3525 (unsigned char *)dma_p->dma_cookie.dmac_laddress + 3526 #endif 3527 dma_p->alength - RXBUF_64B_ALIGNED; 3528 3529 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 3530 3531 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3532 dma_p->orig_ioaddr_pp = 3533 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3534 dma_p->orig_alength = length; 3535 dma_p->orig_kaddrp = kaddrp; 3536 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 3537 #endif 3538 3539 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 3540 "dma buffer allocated: dma_p $%p " 3541 "return dmac_ladress from cookie $%p cookie dmac_size %d " 3542 "dma_p->ioaddr_p $%p " 3543 "dma_p->orig_ioaddr_p $%p " 3544 "orig_vatopa $%p " 3545 "alength %d (0x%x) " 3546 "kaddrp $%p " 3547 "length %d (0x%x)", 3548 dma_p, 3549 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 3550 dma_p->ioaddr_pp, 3551 dma_p->orig_ioaddr_pp, 3552 dma_p->orig_vatopa, 3553 dma_p->alength, dma_p->alength, 3554 kaddrp, 3555 length, length)); 3556 3557 return (NXGE_OK); 3558 } 3559 3560 static void 3561 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 3562 { 3563 if (dma_p->dma_handle != NULL) { 3564 if (dma_p->ncookies) { 3565 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3566 dma_p->ncookies = 0; 3567 } 3568 ddi_dma_free_handle(&dma_p->dma_handle); 3569 dma_p->dma_handle = NULL; 3570 } 3571 3572 if (dma_p->acc_handle != NULL) { 3573 ddi_dma_mem_free(&dma_p->acc_handle); 3574 dma_p->acc_handle = NULL; 3575 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3576 } 3577 3578 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3579 if (dma_p->contig_alloc_type && 3580 dma_p->orig_kaddrp && dma_p->orig_alength) { 3581 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3582 "kaddrp $%p (orig_kaddrp $%p)" 3583 "mem type %d ", 3584 "orig_alength %d " 3585 "alength 0x%x (%d)", 3586 dma_p->kaddrp, 3587 dma_p->orig_kaddrp, 3588 dma_p->contig_alloc_type, 3589 dma_p->orig_alength, 3590 dma_p->alength, dma_p->alength)); 3591 3592 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 3593 dma_p->orig_alength = NULL; 3594 dma_p->orig_kaddrp = NULL; 3595 dma_p->contig_alloc_type = B_FALSE; 3596 } 3597 #endif 3598 dma_p->kaddrp = NULL; 3599 dma_p->alength = NULL; 3600 } 3601 3602 static void 3603 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p) 3604 { 3605 uint64_t kaddr; 3606 uint32_t buf_size; 3607 3608 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf")); 3609 3610 if (dma_p->dma_handle != NULL) { 3611 if (dma_p->ncookies) { 3612 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3613 dma_p->ncookies = 0; 3614 } 3615 ddi_dma_free_handle(&dma_p->dma_handle); 3616 dma_p->dma_handle = NULL; 3617 } 3618 3619 if (dma_p->acc_handle != NULL) { 3620 ddi_dma_mem_free(&dma_p->acc_handle); 3621 dma_p->acc_handle = NULL; 3622 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3623 } 3624 3625 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3626 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d", 3627 dma_p, 3628 dma_p->buf_alloc_state)); 3629 3630 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) { 3631 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3632 "<== nxge_dma_free_rx_data_buf: " 3633 "outstanding data buffers")); 3634 return; 3635 } 3636 3637 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3638 if (dma_p->contig_alloc_type && 3639 dma_p->orig_kaddrp && dma_p->orig_alength) { 3640 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: " 3641 "kaddrp $%p (orig_kaddrp $%p)" 3642 "mem type %d ", 3643 "orig_alength %d " 3644 "alength 0x%x (%d)", 3645 dma_p->kaddrp, 3646 dma_p->orig_kaddrp, 3647 dma_p->contig_alloc_type, 3648 dma_p->orig_alength, 3649 dma_p->alength, dma_p->alength)); 3650 3651 kaddr = (uint64_t)dma_p->orig_kaddrp; 3652 buf_size = dma_p->orig_alength; 3653 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size); 3654 dma_p->orig_alength = NULL; 3655 dma_p->orig_kaddrp = NULL; 3656 dma_p->contig_alloc_type = B_FALSE; 3657 dma_p->kaddrp = NULL; 3658 dma_p->alength = NULL; 3659 return; 3660 } 3661 #endif 3662 3663 if (dma_p->kmem_alloc_type) { 3664 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3665 "nxge_dma_free_rx_data_buf: free kmem " 3666 "kaddrp $%p (orig_kaddrp $%p)" 3667 "alloc type %d " 3668 "orig_alength %d " 3669 "alength 0x%x (%d)", 3670 dma_p->kaddrp, 3671 dma_p->orig_kaddrp, 3672 dma_p->kmem_alloc_type, 3673 dma_p->orig_alength, 3674 dma_p->alength, dma_p->alength)); 3675 #if defined(__i386) 3676 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp; 3677 #else 3678 kaddr = (uint64_t)dma_p->kaddrp; 3679 #endif 3680 buf_size = dma_p->orig_alength; 3681 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3682 "nxge_dma_free_rx_data_buf: free dmap $%p " 3683 "kaddr $%p buf_size %d", 3684 dma_p, 3685 kaddr, buf_size)); 3686 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size); 3687 dma_p->alength = 0; 3688 dma_p->orig_alength = 0; 3689 dma_p->kaddrp = NULL; 3690 dma_p->kmem_alloc_type = B_FALSE; 3691 } 3692 3693 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf")); 3694 } 3695 3696 /* 3697 * nxge_m_start() -- start transmitting and receiving. 3698 * 3699 * This function is called by the MAC layer when the first 3700 * stream is open to prepare the hardware ready for sending 3701 * and transmitting packets. 3702 */ 3703 static int 3704 nxge_m_start(void *arg) 3705 { 3706 p_nxge_t nxgep = (p_nxge_t)arg; 3707 3708 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 3709 3710 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) { 3711 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 3712 } 3713 3714 MUTEX_ENTER(nxgep->genlock); 3715 if (nxge_init(nxgep) != NXGE_OK) { 3716 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3717 "<== nxge_m_start: initialization failed")); 3718 MUTEX_EXIT(nxgep->genlock); 3719 return (EIO); 3720 } 3721 3722 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) 3723 goto nxge_m_start_exit; 3724 /* 3725 * Start timer to check the system error and tx hangs 3726 */ 3727 if (!isLDOMguest(nxgep)) 3728 nxgep->nxge_timerid = nxge_start_timer(nxgep, 3729 nxge_check_hw_state, NXGE_CHECK_TIMER); 3730 #if defined(sun4v) 3731 else 3732 nxge_hio_start_timer(nxgep); 3733 #endif 3734 3735 nxgep->link_notify = B_TRUE; 3736 3737 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 3738 3739 nxge_m_start_exit: 3740 MUTEX_EXIT(nxgep->genlock); 3741 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3742 return (0); 3743 } 3744 3745 3746 static boolean_t 3747 nxge_check_groups_stopped(p_nxge_t nxgep) 3748 { 3749 int i; 3750 3751 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 3752 if (nxgep->rx_hio_groups[i].started) 3753 return (B_FALSE); 3754 } 3755 3756 return (B_TRUE); 3757 } 3758 3759 /* 3760 * nxge_m_stop(): stop transmitting and receiving. 3761 */ 3762 static void 3763 nxge_m_stop(void *arg) 3764 { 3765 p_nxge_t nxgep = (p_nxge_t)arg; 3766 boolean_t groups_stopped; 3767 3768 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3769 3770 groups_stopped = nxge_check_groups_stopped(nxgep); 3771 #ifdef later 3772 ASSERT(groups_stopped == B_FALSE); 3773 #endif 3774 3775 if (!groups_stopped) { 3776 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n", 3777 nxgep->instance); 3778 return; 3779 } 3780 3781 MUTEX_ENTER(nxgep->genlock); 3782 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3783 3784 if (nxgep->nxge_timerid) { 3785 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3786 nxgep->nxge_timerid = 0; 3787 } 3788 3789 nxge_uninit(nxgep); 3790 3791 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3792 3793 MUTEX_EXIT(nxgep->genlock); 3794 3795 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3796 } 3797 3798 static int 3799 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3800 { 3801 p_nxge_t nxgep = (p_nxge_t)arg; 3802 struct ether_addr addrp; 3803 3804 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3805 "==> nxge_m_multicst: add %d", add)); 3806 3807 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3808 if (add) { 3809 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3810 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3811 "<== nxge_m_multicst: add multicast failed")); 3812 return (EINVAL); 3813 } 3814 } else { 3815 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3816 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3817 "<== nxge_m_multicst: del multicast failed")); 3818 return (EINVAL); 3819 } 3820 } 3821 3822 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3823 3824 return (0); 3825 } 3826 3827 static int 3828 nxge_m_promisc(void *arg, boolean_t on) 3829 { 3830 p_nxge_t nxgep = (p_nxge_t)arg; 3831 3832 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3833 "==> nxge_m_promisc: on %d", on)); 3834 3835 if (nxge_set_promisc(nxgep, on)) { 3836 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3837 "<== nxge_m_promisc: set promisc failed")); 3838 return (EINVAL); 3839 } 3840 3841 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3842 "<== nxge_m_promisc: on %d", on)); 3843 3844 return (0); 3845 } 3846 3847 static void 3848 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3849 { 3850 p_nxge_t nxgep = (p_nxge_t)arg; 3851 struct iocblk *iocp; 3852 boolean_t need_privilege; 3853 int err; 3854 int cmd; 3855 3856 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3857 3858 iocp = (struct iocblk *)mp->b_rptr; 3859 iocp->ioc_error = 0; 3860 need_privilege = B_TRUE; 3861 cmd = iocp->ioc_cmd; 3862 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3863 switch (cmd) { 3864 default: 3865 miocnak(wq, mp, 0, EINVAL); 3866 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3867 return; 3868 3869 case LB_GET_INFO_SIZE: 3870 case LB_GET_INFO: 3871 case LB_GET_MODE: 3872 need_privilege = B_FALSE; 3873 break; 3874 case LB_SET_MODE: 3875 break; 3876 3877 3878 case NXGE_GET_MII: 3879 case NXGE_PUT_MII: 3880 case NXGE_GET64: 3881 case NXGE_PUT64: 3882 case NXGE_GET_TX_RING_SZ: 3883 case NXGE_GET_TX_DESC: 3884 case NXGE_TX_SIDE_RESET: 3885 case NXGE_RX_SIDE_RESET: 3886 case NXGE_GLOBAL_RESET: 3887 case NXGE_RESET_MAC: 3888 case NXGE_TX_REGS_DUMP: 3889 case NXGE_RX_REGS_DUMP: 3890 case NXGE_INT_REGS_DUMP: 3891 case NXGE_VIR_INT_REGS_DUMP: 3892 case NXGE_PUT_TCAM: 3893 case NXGE_GET_TCAM: 3894 case NXGE_RTRACE: 3895 case NXGE_RDUMP: 3896 3897 need_privilege = B_FALSE; 3898 break; 3899 case NXGE_INJECT_ERR: 3900 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3901 nxge_err_inject(nxgep, wq, mp); 3902 break; 3903 } 3904 3905 if (need_privilege) { 3906 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3907 if (err != 0) { 3908 miocnak(wq, mp, 0, err); 3909 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3910 "<== nxge_m_ioctl: no priv")); 3911 return; 3912 } 3913 } 3914 3915 switch (cmd) { 3916 3917 case LB_GET_MODE: 3918 case LB_SET_MODE: 3919 case LB_GET_INFO_SIZE: 3920 case LB_GET_INFO: 3921 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3922 break; 3923 3924 case NXGE_GET_MII: 3925 case NXGE_PUT_MII: 3926 case NXGE_PUT_TCAM: 3927 case NXGE_GET_TCAM: 3928 case NXGE_GET64: 3929 case NXGE_PUT64: 3930 case NXGE_GET_TX_RING_SZ: 3931 case NXGE_GET_TX_DESC: 3932 case NXGE_TX_SIDE_RESET: 3933 case NXGE_RX_SIDE_RESET: 3934 case NXGE_GLOBAL_RESET: 3935 case NXGE_RESET_MAC: 3936 case NXGE_TX_REGS_DUMP: 3937 case NXGE_RX_REGS_DUMP: 3938 case NXGE_INT_REGS_DUMP: 3939 case NXGE_VIR_INT_REGS_DUMP: 3940 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3941 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3942 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3943 break; 3944 } 3945 3946 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3947 } 3948 3949 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3950 3951 void 3952 nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory) 3953 { 3954 p_nxge_mmac_stats_t mmac_stats; 3955 int i; 3956 nxge_mmac_t *mmac_info; 3957 3958 mmac_info = &nxgep->nxge_mmac_info; 3959 3960 mmac_stats = &nxgep->statsp->mmac_stats; 3961 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 3962 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 3963 3964 for (i = 0; i < ETHERADDRL; i++) { 3965 if (factory) { 3966 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3967 = mmac_info->factory_mac_pool[slot][ 3968 (ETHERADDRL-1) - i]; 3969 } else { 3970 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3971 = mmac_info->mac_pool[slot].addr[ 3972 (ETHERADDRL - 1) - i]; 3973 } 3974 } 3975 } 3976 3977 /* 3978 * nxge_altmac_set() -- Set an alternate MAC address 3979 */ 3980 static int 3981 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot, 3982 int rdctbl, boolean_t usetbl) 3983 { 3984 uint8_t addrn; 3985 uint8_t portn; 3986 npi_mac_addr_t altmac; 3987 hostinfo_t mac_rdc; 3988 p_nxge_class_pt_cfg_t clscfgp; 3989 3990 3991 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 3992 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 3993 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 3994 3995 portn = nxgep->mac.portnum; 3996 addrn = (uint8_t)slot - 1; 3997 3998 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, 3999 nxgep->function_num, addrn, &altmac) != NPI_SUCCESS) 4000 return (EIO); 4001 4002 /* 4003 * Set the rdc table number for the host info entry 4004 * for this mac address slot. 4005 */ 4006 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 4007 mac_rdc.value = 0; 4008 if (usetbl) 4009 mac_rdc.bits.w0.rdc_tbl_num = rdctbl; 4010 else 4011 mac_rdc.bits.w0.rdc_tbl_num = 4012 clscfgp->mac_host_info[addrn].rdctbl; 4013 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 4014 4015 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 4016 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 4017 return (EIO); 4018 } 4019 4020 /* 4021 * Enable comparison with the alternate MAC address. 4022 * While the first alternate addr is enabled by bit 1 of register 4023 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 4024 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 4025 * accordingly before calling npi_mac_altaddr_entry. 4026 */ 4027 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4028 addrn = (uint8_t)slot - 1; 4029 else 4030 addrn = (uint8_t)slot; 4031 4032 if (npi_mac_altaddr_enable(nxgep->npi_handle, 4033 nxgep->function_num, addrn) != NPI_SUCCESS) { 4034 return (EIO); 4035 } 4036 4037 return (0); 4038 } 4039 4040 /* 4041 * nxeg_m_mmac_add_g() - find an unused address slot, set the address 4042 * value to the one specified, enable the port to start filtering on 4043 * the new MAC address. Returns 0 on success. 4044 */ 4045 int 4046 nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl, 4047 boolean_t usetbl) 4048 { 4049 p_nxge_t nxgep = arg; 4050 int slot; 4051 nxge_mmac_t *mmac_info; 4052 int err; 4053 nxge_status_t status; 4054 4055 mutex_enter(nxgep->genlock); 4056 4057 /* 4058 * Make sure that nxge is initialized, if _start() has 4059 * not been called. 4060 */ 4061 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4062 status = nxge_init(nxgep); 4063 if (status != NXGE_OK) { 4064 mutex_exit(nxgep->genlock); 4065 return (ENXIO); 4066 } 4067 } 4068 4069 mmac_info = &nxgep->nxge_mmac_info; 4070 if (mmac_info->naddrfree == 0) { 4071 mutex_exit(nxgep->genlock); 4072 return (ENOSPC); 4073 } 4074 4075 /* 4076 * Search for the first available slot. Because naddrfree 4077 * is not zero, we are guaranteed to find one. 4078 * Each of the first two ports of Neptune has 16 alternate 4079 * MAC slots but only the first 7 (of 15) slots have assigned factory 4080 * MAC addresses. We first search among the slots without bundled 4081 * factory MACs. If we fail to find one in that range, then we 4082 * search the slots with bundled factory MACs. A factory MAC 4083 * will be wasted while the slot is used with a user MAC address. 4084 * But the slot could be used by factory MAC again after calling 4085 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 4086 */ 4087 for (slot = 0; slot <= mmac_info->num_mmac; slot++) { 4088 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4089 break; 4090 } 4091 4092 ASSERT(slot <= mmac_info->num_mmac); 4093 4094 if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl, 4095 usetbl)) != 0) { 4096 mutex_exit(nxgep->genlock); 4097 return (err); 4098 } 4099 4100 bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 4101 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 4102 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4103 mmac_info->naddrfree--; 4104 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4105 4106 mutex_exit(nxgep->genlock); 4107 return (0); 4108 } 4109 4110 /* 4111 * Remove the specified mac address and update the HW not to filter 4112 * the mac address anymore. 4113 */ 4114 int 4115 nxge_m_mmac_remove(void *arg, int slot) 4116 { 4117 p_nxge_t nxgep = arg; 4118 nxge_mmac_t *mmac_info; 4119 uint8_t addrn; 4120 uint8_t portn; 4121 int err = 0; 4122 nxge_status_t status; 4123 4124 mutex_enter(nxgep->genlock); 4125 4126 /* 4127 * Make sure that nxge is initialized, if _start() has 4128 * not been called. 4129 */ 4130 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4131 status = nxge_init(nxgep); 4132 if (status != NXGE_OK) { 4133 mutex_exit(nxgep->genlock); 4134 return (ENXIO); 4135 } 4136 } 4137 4138 mmac_info = &nxgep->nxge_mmac_info; 4139 if (slot < 1 || slot > mmac_info->num_mmac) { 4140 mutex_exit(nxgep->genlock); 4141 return (EINVAL); 4142 } 4143 4144 portn = nxgep->mac.portnum; 4145 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4146 addrn = (uint8_t)slot - 1; 4147 else 4148 addrn = (uint8_t)slot; 4149 4150 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4151 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 4152 == NPI_SUCCESS) { 4153 mmac_info->naddrfree++; 4154 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 4155 /* 4156 * Regardless if the MAC we just stopped filtering 4157 * is a user addr or a facory addr, we must set 4158 * the MMAC_VENDOR_ADDR flag if this slot has an 4159 * associated factory MAC to indicate that a factory 4160 * MAC is available. 4161 */ 4162 if (slot <= mmac_info->num_factory_mmac) { 4163 mmac_info->mac_pool[slot].flags 4164 |= MMAC_VENDOR_ADDR; 4165 } 4166 /* 4167 * Clear mac_pool[slot].addr so that kstat shows 0 4168 * alternate MAC address if the slot is not used. 4169 * (But nxge_m_mmac_get returns the factory MAC even 4170 * when the slot is not used!) 4171 */ 4172 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 4173 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4174 } else { 4175 err = EIO; 4176 } 4177 } else { 4178 err = EINVAL; 4179 } 4180 4181 mutex_exit(nxgep->genlock); 4182 return (err); 4183 } 4184 4185 /* 4186 * The callback to query all the factory addresses. naddr must be the same as 4187 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and 4188 * mcm_addr is the space allocated for keep all the addresses, whose size is 4189 * naddr * MAXMACADDRLEN. 4190 */ 4191 static void 4192 nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr) 4193 { 4194 nxge_t *nxgep = arg; 4195 nxge_mmac_t *mmac_info; 4196 int i; 4197 4198 mutex_enter(nxgep->genlock); 4199 4200 mmac_info = &nxgep->nxge_mmac_info; 4201 ASSERT(naddr == mmac_info->num_factory_mmac); 4202 4203 for (i = 0; i < naddr; i++) { 4204 bcopy(mmac_info->factory_mac_pool[i + 1], 4205 addr + i * MAXMACADDRLEN, ETHERADDRL); 4206 } 4207 4208 mutex_exit(nxgep->genlock); 4209 } 4210 4211 4212 static boolean_t 4213 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4214 { 4215 nxge_t *nxgep = arg; 4216 uint32_t *txflags = cap_data; 4217 4218 switch (cap) { 4219 case MAC_CAPAB_HCKSUM: 4220 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4221 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload)); 4222 if (nxge_cksum_offload <= 1) { 4223 *txflags = HCKSUM_INET_PARTIAL; 4224 } 4225 break; 4226 4227 case MAC_CAPAB_MULTIFACTADDR: { 4228 mac_capab_multifactaddr_t *mfacp = cap_data; 4229 4230 mutex_enter(nxgep->genlock); 4231 mfacp->mcm_naddr = nxgep->nxge_mmac_info.num_factory_mmac; 4232 mfacp->mcm_getaddr = nxge_m_getfactaddr; 4233 mutex_exit(nxgep->genlock); 4234 break; 4235 } 4236 4237 case MAC_CAPAB_LSO: { 4238 mac_capab_lso_t *cap_lso = cap_data; 4239 4240 if (nxgep->soft_lso_enable) { 4241 if (nxge_cksum_offload <= 1) { 4242 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4243 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 4244 nxge_lso_max = NXGE_LSO_MAXLEN; 4245 } 4246 cap_lso->lso_basic_tcp_ipv4.lso_max = 4247 nxge_lso_max; 4248 } 4249 break; 4250 } else { 4251 return (B_FALSE); 4252 } 4253 } 4254 4255 case MAC_CAPAB_RINGS: { 4256 mac_capab_rings_t *cap_rings = cap_data; 4257 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 4258 4259 mutex_enter(nxgep->genlock); 4260 if (cap_rings->mr_type == MAC_RING_TYPE_RX) { 4261 cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC; 4262 cap_rings->mr_rnum = p_cfgp->max_rdcs; 4263 cap_rings->mr_rget = nxge_fill_ring; 4264 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids; 4265 cap_rings->mr_gget = nxge_hio_group_get; 4266 cap_rings->mr_gaddring = nxge_group_add_ring; 4267 cap_rings->mr_gremring = nxge_group_rem_ring; 4268 4269 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4270 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]", 4271 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids)); 4272 } else { 4273 cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC; 4274 cap_rings->mr_rnum = p_cfgp->tdc.count; 4275 cap_rings->mr_rget = nxge_fill_ring; 4276 if (isLDOMservice(nxgep)) { 4277 /* share capable */ 4278 /* Do not report the default ring: hence -1 */ 4279 cap_rings->mr_gnum = 4280 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1; 4281 } else { 4282 cap_rings->mr_gnum = 0; 4283 } 4284 4285 cap_rings->mr_gget = nxge_hio_group_get; 4286 cap_rings->mr_gaddring = nxge_group_add_ring; 4287 cap_rings->mr_gremring = nxge_group_rem_ring; 4288 4289 NXGE_DEBUG_MSG((nxgep, TX_CTL, 4290 "==> nxge_m_getcapab: tx rings # of rings %d", 4291 p_cfgp->tdc.count)); 4292 } 4293 mutex_exit(nxgep->genlock); 4294 break; 4295 } 4296 4297 #if defined(sun4v) 4298 case MAC_CAPAB_SHARES: { 4299 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4300 4301 /* 4302 * Only the service domain driver responds to 4303 * this capability request. 4304 */ 4305 mutex_enter(nxgep->genlock); 4306 if (isLDOMservice(nxgep)) { 4307 mshares->ms_snum = 3; 4308 mshares->ms_handle = (void *)nxgep; 4309 mshares->ms_salloc = nxge_hio_share_alloc; 4310 mshares->ms_sfree = nxge_hio_share_free; 4311 mshares->ms_sadd = nxge_hio_share_add_group; 4312 mshares->ms_sremove = nxge_hio_share_rem_group; 4313 mshares->ms_squery = nxge_hio_share_query; 4314 mshares->ms_sbind = nxge_hio_share_bind; 4315 mshares->ms_sunbind = nxge_hio_share_unbind; 4316 mutex_exit(nxgep->genlock); 4317 } else { 4318 mutex_exit(nxgep->genlock); 4319 return (B_FALSE); 4320 } 4321 break; 4322 } 4323 #endif 4324 default: 4325 return (B_FALSE); 4326 } 4327 return (B_TRUE); 4328 } 4329 4330 static boolean_t 4331 nxge_param_locked(mac_prop_id_t pr_num) 4332 { 4333 /* 4334 * All adv_* parameters are locked (read-only) while 4335 * the device is in any sort of loopback mode ... 4336 */ 4337 switch (pr_num) { 4338 case MAC_PROP_ADV_1000FDX_CAP: 4339 case MAC_PROP_EN_1000FDX_CAP: 4340 case MAC_PROP_ADV_1000HDX_CAP: 4341 case MAC_PROP_EN_1000HDX_CAP: 4342 case MAC_PROP_ADV_100FDX_CAP: 4343 case MAC_PROP_EN_100FDX_CAP: 4344 case MAC_PROP_ADV_100HDX_CAP: 4345 case MAC_PROP_EN_100HDX_CAP: 4346 case MAC_PROP_ADV_10FDX_CAP: 4347 case MAC_PROP_EN_10FDX_CAP: 4348 case MAC_PROP_ADV_10HDX_CAP: 4349 case MAC_PROP_EN_10HDX_CAP: 4350 case MAC_PROP_AUTONEG: 4351 case MAC_PROP_FLOWCTRL: 4352 return (B_TRUE); 4353 } 4354 return (B_FALSE); 4355 } 4356 4357 /* 4358 * callback functions for set/get of properties 4359 */ 4360 static int 4361 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4362 uint_t pr_valsize, const void *pr_val) 4363 { 4364 nxge_t *nxgep = barg; 4365 p_nxge_param_t param_arr; 4366 p_nxge_stats_t statsp; 4367 int err = 0; 4368 uint8_t val; 4369 uint32_t cur_mtu, new_mtu, old_framesize; 4370 link_flowctrl_t fl; 4371 4372 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4373 param_arr = nxgep->param_arr; 4374 statsp = nxgep->statsp; 4375 mutex_enter(nxgep->genlock); 4376 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4377 nxge_param_locked(pr_num)) { 4378 /* 4379 * All adv_* parameters are locked (read-only) 4380 * while the device is in any sort of loopback mode. 4381 */ 4382 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4383 "==> nxge_m_setprop: loopback mode: read only")); 4384 mutex_exit(nxgep->genlock); 4385 return (EBUSY); 4386 } 4387 4388 val = *(uint8_t *)pr_val; 4389 switch (pr_num) { 4390 case MAC_PROP_EN_1000FDX_CAP: 4391 nxgep->param_en_1000fdx = val; 4392 param_arr[param_anar_1000fdx].value = val; 4393 4394 goto reprogram; 4395 4396 case MAC_PROP_EN_100FDX_CAP: 4397 nxgep->param_en_100fdx = val; 4398 param_arr[param_anar_100fdx].value = val; 4399 4400 goto reprogram; 4401 4402 case MAC_PROP_EN_10FDX_CAP: 4403 nxgep->param_en_10fdx = val; 4404 param_arr[param_anar_10fdx].value = val; 4405 4406 goto reprogram; 4407 4408 case MAC_PROP_EN_1000HDX_CAP: 4409 case MAC_PROP_EN_100HDX_CAP: 4410 case MAC_PROP_EN_10HDX_CAP: 4411 case MAC_PROP_ADV_1000FDX_CAP: 4412 case MAC_PROP_ADV_1000HDX_CAP: 4413 case MAC_PROP_ADV_100FDX_CAP: 4414 case MAC_PROP_ADV_100HDX_CAP: 4415 case MAC_PROP_ADV_10FDX_CAP: 4416 case MAC_PROP_ADV_10HDX_CAP: 4417 case MAC_PROP_STATUS: 4418 case MAC_PROP_SPEED: 4419 case MAC_PROP_DUPLEX: 4420 err = EINVAL; /* cannot set read-only properties */ 4421 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4422 "==> nxge_m_setprop: read only property %d", 4423 pr_num)); 4424 break; 4425 4426 case MAC_PROP_AUTONEG: 4427 param_arr[param_autoneg].value = val; 4428 4429 goto reprogram; 4430 4431 case MAC_PROP_MTU: 4432 cur_mtu = nxgep->mac.default_mtu; 4433 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4434 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4435 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4436 new_mtu, nxgep->mac.is_jumbo)); 4437 4438 if (new_mtu == cur_mtu) { 4439 err = 0; 4440 break; 4441 } 4442 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4443 err = EBUSY; 4444 break; 4445 } 4446 if (new_mtu < NXGE_DEFAULT_MTU || 4447 new_mtu > NXGE_MAXIMUM_MTU) { 4448 err = EINVAL; 4449 break; 4450 } 4451 4452 if ((new_mtu > NXGE_DEFAULT_MTU) && 4453 !nxgep->mac.is_jumbo) { 4454 err = EINVAL; 4455 break; 4456 } 4457 4458 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4459 nxgep->mac.maxframesize = (uint16_t) 4460 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4461 if (nxge_mac_set_framesize(nxgep)) { 4462 nxgep->mac.maxframesize = 4463 (uint16_t)old_framesize; 4464 err = EINVAL; 4465 break; 4466 } 4467 4468 err = mac_maxsdu_update(nxgep->mach, new_mtu); 4469 if (err) { 4470 nxgep->mac.maxframesize = 4471 (uint16_t)old_framesize; 4472 err = EINVAL; 4473 break; 4474 } 4475 4476 nxgep->mac.default_mtu = new_mtu; 4477 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4478 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4479 new_mtu, nxgep->mac.maxframesize)); 4480 break; 4481 4482 case MAC_PROP_FLOWCTRL: 4483 bcopy(pr_val, &fl, sizeof (fl)); 4484 switch (fl) { 4485 default: 4486 err = EINVAL; 4487 break; 4488 4489 case LINK_FLOWCTRL_NONE: 4490 param_arr[param_anar_pause].value = 0; 4491 break; 4492 4493 case LINK_FLOWCTRL_RX: 4494 param_arr[param_anar_pause].value = 1; 4495 break; 4496 4497 case LINK_FLOWCTRL_TX: 4498 case LINK_FLOWCTRL_BI: 4499 err = EINVAL; 4500 break; 4501 } 4502 4503 reprogram: 4504 if (err == 0) { 4505 if (!nxge_param_link_update(nxgep)) { 4506 err = EINVAL; 4507 } 4508 } 4509 break; 4510 case MAC_PROP_PRIVATE: 4511 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4512 "==> nxge_m_setprop: private property")); 4513 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 4514 pr_val); 4515 break; 4516 4517 default: 4518 err = ENOTSUP; 4519 break; 4520 } 4521 4522 mutex_exit(nxgep->genlock); 4523 4524 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4525 "<== nxge_m_setprop (return %d)", err)); 4526 return (err); 4527 } 4528 4529 static int 4530 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4531 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 4532 { 4533 nxge_t *nxgep = barg; 4534 p_nxge_param_t param_arr = nxgep->param_arr; 4535 p_nxge_stats_t statsp = nxgep->statsp; 4536 int err = 0; 4537 link_flowctrl_t fl; 4538 uint64_t tmp = 0; 4539 link_state_t ls; 4540 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4541 4542 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4543 "==> nxge_m_getprop: pr_num %d", pr_num)); 4544 4545 if (pr_valsize == 0) 4546 return (EINVAL); 4547 4548 *perm = MAC_PROP_PERM_RW; 4549 4550 if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) { 4551 err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4552 return (err); 4553 } 4554 4555 bzero(pr_val, pr_valsize); 4556 switch (pr_num) { 4557 case MAC_PROP_DUPLEX: 4558 *perm = MAC_PROP_PERM_READ; 4559 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4560 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4561 "==> nxge_m_getprop: duplex mode %d", 4562 *(uint8_t *)pr_val)); 4563 break; 4564 4565 case MAC_PROP_SPEED: 4566 if (pr_valsize < sizeof (uint64_t)) 4567 return (EINVAL); 4568 *perm = MAC_PROP_PERM_READ; 4569 tmp = statsp->mac_stats.link_speed * 1000000ull; 4570 bcopy(&tmp, pr_val, sizeof (tmp)); 4571 break; 4572 4573 case MAC_PROP_STATUS: 4574 if (pr_valsize < sizeof (link_state_t)) 4575 return (EINVAL); 4576 *perm = MAC_PROP_PERM_READ; 4577 if (!statsp->mac_stats.link_up) 4578 ls = LINK_STATE_DOWN; 4579 else 4580 ls = LINK_STATE_UP; 4581 bcopy(&ls, pr_val, sizeof (ls)); 4582 break; 4583 4584 case MAC_PROP_AUTONEG: 4585 *(uint8_t *)pr_val = 4586 param_arr[param_autoneg].value; 4587 break; 4588 4589 case MAC_PROP_FLOWCTRL: 4590 if (pr_valsize < sizeof (link_flowctrl_t)) 4591 return (EINVAL); 4592 4593 fl = LINK_FLOWCTRL_NONE; 4594 if (param_arr[param_anar_pause].value) { 4595 fl = LINK_FLOWCTRL_RX; 4596 } 4597 bcopy(&fl, pr_val, sizeof (fl)); 4598 break; 4599 4600 case MAC_PROP_ADV_1000FDX_CAP: 4601 *perm = MAC_PROP_PERM_READ; 4602 *(uint8_t *)pr_val = 4603 param_arr[param_anar_1000fdx].value; 4604 break; 4605 4606 case MAC_PROP_EN_1000FDX_CAP: 4607 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4608 break; 4609 4610 case MAC_PROP_ADV_100FDX_CAP: 4611 *perm = MAC_PROP_PERM_READ; 4612 *(uint8_t *)pr_val = 4613 param_arr[param_anar_100fdx].value; 4614 break; 4615 4616 case MAC_PROP_EN_100FDX_CAP: 4617 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4618 break; 4619 4620 case MAC_PROP_ADV_10FDX_CAP: 4621 *perm = MAC_PROP_PERM_READ; 4622 *(uint8_t *)pr_val = 4623 param_arr[param_anar_10fdx].value; 4624 break; 4625 4626 case MAC_PROP_EN_10FDX_CAP: 4627 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4628 break; 4629 4630 case MAC_PROP_EN_1000HDX_CAP: 4631 case MAC_PROP_EN_100HDX_CAP: 4632 case MAC_PROP_EN_10HDX_CAP: 4633 case MAC_PROP_ADV_1000HDX_CAP: 4634 case MAC_PROP_ADV_100HDX_CAP: 4635 case MAC_PROP_ADV_10HDX_CAP: 4636 err = ENOTSUP; 4637 break; 4638 4639 case MAC_PROP_PRIVATE: 4640 err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4641 pr_valsize, pr_val, perm); 4642 break; 4643 default: 4644 err = EINVAL; 4645 break; 4646 } 4647 4648 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4649 4650 return (err); 4651 } 4652 4653 /* ARGSUSED */ 4654 static int 4655 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4656 const void *pr_val) 4657 { 4658 p_nxge_param_t param_arr = nxgep->param_arr; 4659 int err = 0; 4660 long result; 4661 4662 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4663 "==> nxge_set_priv_prop: name %s", pr_name)); 4664 4665 if (strcmp(pr_name, "_accept_jumbo") == 0) { 4666 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4667 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4668 "<== nxge_set_priv_prop: name %s " 4669 "pr_val %s result %d " 4670 "param %d is_jumbo %d", 4671 pr_name, pr_val, result, 4672 param_arr[param_accept_jumbo].value, 4673 nxgep->mac.is_jumbo)); 4674 4675 if (result > 1 || result < 0) { 4676 err = EINVAL; 4677 } else { 4678 if (nxgep->mac.is_jumbo == 4679 (uint32_t)result) { 4680 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4681 "no change (%d %d)", 4682 nxgep->mac.is_jumbo, 4683 result)); 4684 return (0); 4685 } 4686 } 4687 4688 param_arr[param_accept_jumbo].value = result; 4689 nxgep->mac.is_jumbo = B_FALSE; 4690 if (result) { 4691 nxgep->mac.is_jumbo = B_TRUE; 4692 } 4693 4694 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4695 "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d", 4696 pr_name, result, nxgep->mac.is_jumbo)); 4697 4698 return (err); 4699 } 4700 4701 /* Blanking */ 4702 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4703 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4704 (char *)pr_val, 4705 (caddr_t)¶m_arr[param_rxdma_intr_time]); 4706 if (err) { 4707 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4708 "<== nxge_set_priv_prop: " 4709 "unable to set (%s)", pr_name)); 4710 err = EINVAL; 4711 } else { 4712 err = 0; 4713 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4714 "<== nxge_set_priv_prop: " 4715 "set (%s)", pr_name)); 4716 } 4717 4718 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4719 "<== nxge_set_priv_prop: name %s (value %d)", 4720 pr_name, result)); 4721 4722 return (err); 4723 } 4724 4725 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4726 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4727 (char *)pr_val, 4728 (caddr_t)¶m_arr[param_rxdma_intr_pkts]); 4729 if (err) { 4730 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4731 "<== nxge_set_priv_prop: " 4732 "unable to set (%s)", pr_name)); 4733 err = EINVAL; 4734 } else { 4735 err = 0; 4736 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4737 "<== nxge_set_priv_prop: " 4738 "set (%s)", pr_name)); 4739 } 4740 4741 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4742 "<== nxge_set_priv_prop: name %s (value %d)", 4743 pr_name, result)); 4744 4745 return (err); 4746 } 4747 4748 /* Classification */ 4749 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4750 if (pr_val == NULL) { 4751 err = EINVAL; 4752 return (err); 4753 } 4754 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4755 4756 err = nxge_param_set_ip_opt(nxgep, NULL, 4757 NULL, (char *)pr_val, 4758 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 4759 4760 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4761 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4762 pr_name, result)); 4763 4764 return (err); 4765 } 4766 4767 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 4768 if (pr_val == NULL) { 4769 err = EINVAL; 4770 return (err); 4771 } 4772 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4773 4774 err = nxge_param_set_ip_opt(nxgep, NULL, 4775 NULL, (char *)pr_val, 4776 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 4777 4778 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4779 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4780 pr_name, result)); 4781 4782 return (err); 4783 } 4784 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 4785 if (pr_val == NULL) { 4786 err = EINVAL; 4787 return (err); 4788 } 4789 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4790 4791 err = nxge_param_set_ip_opt(nxgep, NULL, 4792 NULL, (char *)pr_val, 4793 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 4794 4795 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4796 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4797 pr_name, result)); 4798 4799 return (err); 4800 } 4801 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 4802 if (pr_val == NULL) { 4803 err = EINVAL; 4804 return (err); 4805 } 4806 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4807 4808 err = nxge_param_set_ip_opt(nxgep, NULL, 4809 NULL, (char *)pr_val, 4810 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 4811 4812 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4813 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4814 pr_name, result)); 4815 4816 return (err); 4817 } 4818 4819 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 4820 if (pr_val == NULL) { 4821 err = EINVAL; 4822 return (err); 4823 } 4824 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4825 4826 err = nxge_param_set_ip_opt(nxgep, NULL, 4827 NULL, (char *)pr_val, 4828 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 4829 4830 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4831 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4832 pr_name, result)); 4833 4834 return (err); 4835 } 4836 4837 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 4838 if (pr_val == NULL) { 4839 err = EINVAL; 4840 return (err); 4841 } 4842 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4843 4844 err = nxge_param_set_ip_opt(nxgep, NULL, 4845 NULL, (char *)pr_val, 4846 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 4847 4848 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4849 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4850 pr_name, result)); 4851 4852 return (err); 4853 } 4854 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 4855 if (pr_val == NULL) { 4856 err = EINVAL; 4857 return (err); 4858 } 4859 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4860 4861 err = nxge_param_set_ip_opt(nxgep, NULL, 4862 NULL, (char *)pr_val, 4863 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 4864 4865 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4866 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4867 pr_name, result)); 4868 4869 return (err); 4870 } 4871 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 4872 if (pr_val == NULL) { 4873 err = EINVAL; 4874 return (err); 4875 } 4876 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4877 4878 err = nxge_param_set_ip_opt(nxgep, NULL, 4879 NULL, (char *)pr_val, 4880 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 4881 4882 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4883 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4884 pr_name, result)); 4885 4886 return (err); 4887 } 4888 4889 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 4890 if (pr_val == NULL) { 4891 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4892 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 4893 err = EINVAL; 4894 return (err); 4895 } 4896 4897 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4898 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4899 "<== nxge_set_priv_prop: name %s " 4900 "(lso %d pr_val %s value %d)", 4901 pr_name, nxgep->soft_lso_enable, pr_val, result)); 4902 4903 if (result > 1 || result < 0) { 4904 err = EINVAL; 4905 } else { 4906 if (nxgep->soft_lso_enable == (uint32_t)result) { 4907 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4908 "no change (%d %d)", 4909 nxgep->soft_lso_enable, result)); 4910 return (0); 4911 } 4912 } 4913 4914 nxgep->soft_lso_enable = (int)result; 4915 4916 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4917 "<== nxge_set_priv_prop: name %s (value %d)", 4918 pr_name, result)); 4919 4920 return (err); 4921 } 4922 /* 4923 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the 4924 * following code to be executed. 4925 */ 4926 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 4927 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 4928 (caddr_t)¶m_arr[param_anar_10gfdx]); 4929 return (err); 4930 } 4931 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 4932 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 4933 (caddr_t)¶m_arr[param_anar_pause]); 4934 return (err); 4935 } 4936 4937 return (EINVAL); 4938 } 4939 4940 static int 4941 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags, 4942 uint_t pr_valsize, void *pr_val, uint_t *perm) 4943 { 4944 p_nxge_param_t param_arr = nxgep->param_arr; 4945 char valstr[MAXNAMELEN]; 4946 int err = EINVAL; 4947 uint_t strsize; 4948 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4949 4950 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4951 "==> nxge_get_priv_prop: property %s", pr_name)); 4952 4953 /* function number */ 4954 if (strcmp(pr_name, "_function_number") == 0) { 4955 if (is_default) 4956 return (ENOTSUP); 4957 *perm = MAC_PROP_PERM_READ; 4958 (void) snprintf(valstr, sizeof (valstr), "%d", 4959 nxgep->function_num); 4960 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4961 "==> nxge_get_priv_prop: name %s " 4962 "(value %d valstr %s)", 4963 pr_name, nxgep->function_num, valstr)); 4964 4965 err = 0; 4966 goto done; 4967 } 4968 4969 /* Neptune firmware version */ 4970 if (strcmp(pr_name, "_fw_version") == 0) { 4971 if (is_default) 4972 return (ENOTSUP); 4973 *perm = MAC_PROP_PERM_READ; 4974 (void) snprintf(valstr, sizeof (valstr), "%s", 4975 nxgep->vpd_info.ver); 4976 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4977 "==> nxge_get_priv_prop: name %s " 4978 "(value %d valstr %s)", 4979 pr_name, nxgep->vpd_info.ver, valstr)); 4980 4981 err = 0; 4982 goto done; 4983 } 4984 4985 /* port PHY mode */ 4986 if (strcmp(pr_name, "_port_mode") == 0) { 4987 if (is_default) 4988 return (ENOTSUP); 4989 *perm = MAC_PROP_PERM_READ; 4990 switch (nxgep->mac.portmode) { 4991 case PORT_1G_COPPER: 4992 (void) snprintf(valstr, sizeof (valstr), "1G copper %s", 4993 nxgep->hot_swappable_phy ? 4994 "[Hot Swappable]" : ""); 4995 break; 4996 case PORT_1G_FIBER: 4997 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s", 4998 nxgep->hot_swappable_phy ? 4999 "[hot swappable]" : ""); 5000 break; 5001 case PORT_10G_COPPER: 5002 (void) snprintf(valstr, sizeof (valstr), 5003 "10G copper %s", 5004 nxgep->hot_swappable_phy ? 5005 "[hot swappable]" : ""); 5006 break; 5007 case PORT_10G_FIBER: 5008 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s", 5009 nxgep->hot_swappable_phy ? 5010 "[hot swappable]" : ""); 5011 break; 5012 case PORT_10G_SERDES: 5013 (void) snprintf(valstr, sizeof (valstr), 5014 "10G serdes %s", nxgep->hot_swappable_phy ? 5015 "[hot swappable]" : ""); 5016 break; 5017 case PORT_1G_SERDES: 5018 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s", 5019 nxgep->hot_swappable_phy ? 5020 "[hot swappable]" : ""); 5021 break; 5022 case PORT_1G_TN1010: 5023 (void) snprintf(valstr, sizeof (valstr), 5024 "1G TN1010 copper %s", nxgep->hot_swappable_phy ? 5025 "[hot swappable]" : ""); 5026 break; 5027 case PORT_10G_TN1010: 5028 (void) snprintf(valstr, sizeof (valstr), 5029 "10G TN1010 copper %s", nxgep->hot_swappable_phy ? 5030 "[hot swappable]" : ""); 5031 break; 5032 case PORT_1G_RGMII_FIBER: 5033 (void) snprintf(valstr, sizeof (valstr), 5034 "1G rgmii fiber %s", nxgep->hot_swappable_phy ? 5035 "[hot swappable]" : ""); 5036 break; 5037 case PORT_HSP_MODE: 5038 (void) snprintf(valstr, sizeof (valstr), 5039 "phy not present[hot swappable]"); 5040 break; 5041 default: 5042 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5043 nxgep->hot_swappable_phy ? 5044 "[hot swappable]" : ""); 5045 break; 5046 } 5047 5048 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5049 "==> nxge_get_priv_prop: name %s (value %s)", 5050 pr_name, valstr)); 5051 5052 err = 0; 5053 goto done; 5054 } 5055 5056 /* Hot swappable PHY */ 5057 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5058 if (is_default) 5059 return (ENOTSUP); 5060 *perm = MAC_PROP_PERM_READ; 5061 (void) snprintf(valstr, sizeof (valstr), "%s", 5062 nxgep->hot_swappable_phy ? 5063 "yes" : "no"); 5064 5065 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5066 "==> nxge_get_priv_prop: name %s " 5067 "(value %d valstr %s)", 5068 pr_name, nxgep->hot_swappable_phy, valstr)); 5069 5070 err = 0; 5071 goto done; 5072 } 5073 5074 5075 /* accept jumbo */ 5076 if (strcmp(pr_name, "_accept_jumbo") == 0) { 5077 if (is_default) 5078 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5079 else 5080 (void) snprintf(valstr, sizeof (valstr), 5081 "%d", nxgep->mac.is_jumbo); 5082 err = 0; 5083 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5084 "==> nxge_get_priv_prop: name %s (value %d (%d, %d))", 5085 pr_name, 5086 (uint32_t)param_arr[param_accept_jumbo].value, 5087 nxgep->mac.is_jumbo, 5088 nxge_jumbo_enable)); 5089 5090 goto done; 5091 } 5092 5093 /* Receive Interrupt Blanking Parameters */ 5094 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5095 err = 0; 5096 if (is_default) { 5097 (void) snprintf(valstr, sizeof (valstr), 5098 "%d", RXDMA_RCR_TO_DEFAULT); 5099 goto done; 5100 } 5101 5102 (void) snprintf(valstr, sizeof (valstr), "%d", 5103 nxgep->intr_timeout); 5104 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5105 "==> nxge_get_priv_prop: name %s (value %d)", 5106 pr_name, 5107 (uint32_t)nxgep->intr_timeout)); 5108 goto done; 5109 } 5110 5111 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5112 err = 0; 5113 if (is_default) { 5114 (void) snprintf(valstr, sizeof (valstr), 5115 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5116 goto done; 5117 } 5118 (void) snprintf(valstr, sizeof (valstr), "%d", 5119 nxgep->intr_threshold); 5120 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5121 "==> nxge_get_priv_prop: name %s (value %d)", 5122 pr_name, (uint32_t)nxgep->intr_threshold)); 5123 5124 goto done; 5125 } 5126 5127 /* Classification and Load Distribution Configuration */ 5128 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5129 if (is_default) { 5130 (void) snprintf(valstr, sizeof (valstr), "%x", 5131 NXGE_CLASS_FLOW_GEN_SERVER); 5132 err = 0; 5133 goto done; 5134 } 5135 err = nxge_dld_get_ip_opt(nxgep, 5136 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5137 5138 (void) snprintf(valstr, sizeof (valstr), "%x", 5139 (int)param_arr[param_class_opt_ipv4_tcp].value); 5140 5141 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5142 "==> nxge_get_priv_prop: %s", valstr)); 5143 goto done; 5144 } 5145 5146 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5147 if (is_default) { 5148 (void) snprintf(valstr, sizeof (valstr), "%x", 5149 NXGE_CLASS_FLOW_GEN_SERVER); 5150 err = 0; 5151 goto done; 5152 } 5153 err = nxge_dld_get_ip_opt(nxgep, 5154 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5155 5156 (void) snprintf(valstr, sizeof (valstr), "%x", 5157 (int)param_arr[param_class_opt_ipv4_udp].value); 5158 5159 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5160 "==> nxge_get_priv_prop: %s", valstr)); 5161 goto done; 5162 } 5163 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5164 if (is_default) { 5165 (void) snprintf(valstr, sizeof (valstr), "%x", 5166 NXGE_CLASS_FLOW_GEN_SERVER); 5167 err = 0; 5168 goto done; 5169 } 5170 err = nxge_dld_get_ip_opt(nxgep, 5171 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 5172 5173 (void) snprintf(valstr, sizeof (valstr), "%x", 5174 (int)param_arr[param_class_opt_ipv4_ah].value); 5175 5176 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5177 "==> nxge_get_priv_prop: %s", valstr)); 5178 goto done; 5179 } 5180 5181 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5182 if (is_default) { 5183 (void) snprintf(valstr, sizeof (valstr), "%x", 5184 NXGE_CLASS_FLOW_GEN_SERVER); 5185 err = 0; 5186 goto done; 5187 } 5188 err = nxge_dld_get_ip_opt(nxgep, 5189 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 5190 5191 (void) snprintf(valstr, sizeof (valstr), "%x", 5192 (int)param_arr[param_class_opt_ipv4_sctp].value); 5193 5194 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5195 "==> nxge_get_priv_prop: %s", valstr)); 5196 goto done; 5197 } 5198 5199 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5200 if (is_default) { 5201 (void) snprintf(valstr, sizeof (valstr), "%x", 5202 NXGE_CLASS_FLOW_GEN_SERVER); 5203 err = 0; 5204 goto done; 5205 } 5206 err = nxge_dld_get_ip_opt(nxgep, 5207 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 5208 5209 (void) snprintf(valstr, sizeof (valstr), "%x", 5210 (int)param_arr[param_class_opt_ipv6_tcp].value); 5211 5212 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5213 "==> nxge_get_priv_prop: %s", valstr)); 5214 goto done; 5215 } 5216 5217 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5218 if (is_default) { 5219 (void) snprintf(valstr, sizeof (valstr), "%x", 5220 NXGE_CLASS_FLOW_GEN_SERVER); 5221 err = 0; 5222 goto done; 5223 } 5224 err = nxge_dld_get_ip_opt(nxgep, 5225 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 5226 5227 (void) snprintf(valstr, sizeof (valstr), "%x", 5228 (int)param_arr[param_class_opt_ipv6_udp].value); 5229 5230 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5231 "==> nxge_get_priv_prop: %s", valstr)); 5232 goto done; 5233 } 5234 5235 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5236 if (is_default) { 5237 (void) snprintf(valstr, sizeof (valstr), "%x", 5238 NXGE_CLASS_FLOW_GEN_SERVER); 5239 err = 0; 5240 goto done; 5241 } 5242 err = nxge_dld_get_ip_opt(nxgep, 5243 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 5244 5245 (void) snprintf(valstr, sizeof (valstr), "%x", 5246 (int)param_arr[param_class_opt_ipv6_ah].value); 5247 5248 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5249 "==> nxge_get_priv_prop: %s", valstr)); 5250 goto done; 5251 } 5252 5253 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5254 if (is_default) { 5255 (void) snprintf(valstr, sizeof (valstr), "%x", 5256 NXGE_CLASS_FLOW_GEN_SERVER); 5257 err = 0; 5258 goto done; 5259 } 5260 err = nxge_dld_get_ip_opt(nxgep, 5261 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 5262 5263 (void) snprintf(valstr, sizeof (valstr), "%x", 5264 (int)param_arr[param_class_opt_ipv6_sctp].value); 5265 5266 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5267 "==> nxge_get_priv_prop: %s", valstr)); 5268 goto done; 5269 } 5270 5271 /* Software LSO */ 5272 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5273 if (is_default) { 5274 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5275 err = 0; 5276 goto done; 5277 } 5278 (void) snprintf(valstr, sizeof (valstr), 5279 "%d", nxgep->soft_lso_enable); 5280 err = 0; 5281 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5282 "==> nxge_get_priv_prop: name %s (value %d)", 5283 pr_name, nxgep->soft_lso_enable)); 5284 5285 goto done; 5286 } 5287 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5288 err = 0; 5289 if (is_default || 5290 nxgep->param_arr[param_anar_10gfdx].value != 0) { 5291 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5292 goto done; 5293 } else { 5294 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5295 goto done; 5296 } 5297 } 5298 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5299 err = 0; 5300 if (is_default || 5301 nxgep->param_arr[param_anar_pause].value != 0) { 5302 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5303 goto done; 5304 } else { 5305 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5306 goto done; 5307 } 5308 } 5309 5310 done: 5311 if (err == 0) { 5312 strsize = (uint_t)strlen(valstr); 5313 if (pr_valsize < strsize) { 5314 err = ENOBUFS; 5315 } else { 5316 (void) strlcpy(pr_val, valstr, pr_valsize); 5317 } 5318 } 5319 5320 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5321 "<== nxge_get_priv_prop: return %d", err)); 5322 return (err); 5323 } 5324 5325 /* 5326 * Module loading and removing entry points. 5327 */ 5328 5329 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach, 5330 nodev, NULL, D_MP, NULL, nxge_quiesce); 5331 5332 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5333 5334 /* 5335 * Module linkage information for the kernel. 5336 */ 5337 static struct modldrv nxge_modldrv = { 5338 &mod_driverops, 5339 NXGE_DESC_VER, 5340 &nxge_dev_ops 5341 }; 5342 5343 static struct modlinkage modlinkage = { 5344 MODREV_1, (void *) &nxge_modldrv, NULL 5345 }; 5346 5347 int 5348 _init(void) 5349 { 5350 int status; 5351 5352 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5353 mac_init_ops(&nxge_dev_ops, "nxge"); 5354 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5355 if (status != 0) { 5356 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5357 "failed to init device soft state")); 5358 goto _init_exit; 5359 } 5360 status = mod_install(&modlinkage); 5361 if (status != 0) { 5362 ddi_soft_state_fini(&nxge_list); 5363 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5364 goto _init_exit; 5365 } 5366 5367 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5368 5369 _init_exit: 5370 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 5371 5372 return (status); 5373 } 5374 5375 int 5376 _fini(void) 5377 { 5378 int status; 5379 5380 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5381 5382 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5383 5384 if (nxge_mblks_pending) 5385 return (EBUSY); 5386 5387 status = mod_remove(&modlinkage); 5388 if (status != DDI_SUCCESS) { 5389 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5390 "Module removal failed 0x%08x", 5391 status)); 5392 goto _fini_exit; 5393 } 5394 5395 mac_fini_ops(&nxge_dev_ops); 5396 5397 ddi_soft_state_fini(&nxge_list); 5398 5399 MUTEX_DESTROY(&nxge_common_lock); 5400 _fini_exit: 5401 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 5402 5403 return (status); 5404 } 5405 5406 int 5407 _info(struct modinfo *modinfop) 5408 { 5409 int status; 5410 5411 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5412 status = mod_info(&modlinkage, modinfop); 5413 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5414 5415 return (status); 5416 } 5417 5418 /*ARGSUSED*/ 5419 static int 5420 nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5421 { 5422 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5423 p_nxge_t nxgep = rhp->nxgep; 5424 uint32_t channel; 5425 p_tx_ring_t ring; 5426 5427 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5428 ring = nxgep->tx_rings->rings[channel]; 5429 5430 MUTEX_ENTER(&ring->lock); 5431 ring->tx_ring_handle = rhp->ring_handle; 5432 MUTEX_EXIT(&ring->lock); 5433 5434 return (0); 5435 } 5436 5437 static void 5438 nxge_tx_ring_stop(mac_ring_driver_t rdriver) 5439 { 5440 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5441 p_nxge_t nxgep = rhp->nxgep; 5442 uint32_t channel; 5443 p_tx_ring_t ring; 5444 5445 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5446 ring = nxgep->tx_rings->rings[channel]; 5447 5448 MUTEX_ENTER(&ring->lock); 5449 ring->tx_ring_handle = (mac_ring_handle_t)NULL; 5450 MUTEX_EXIT(&ring->lock); 5451 } 5452 5453 static int 5454 nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5455 { 5456 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5457 p_nxge_t nxgep = rhp->nxgep; 5458 uint32_t channel; 5459 p_rx_rcr_ring_t ring; 5460 int i; 5461 5462 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index; 5463 ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 5464 5465 MUTEX_ENTER(&ring->lock); 5466 5467 if (nxgep->rx_channel_started[channel] == B_TRUE) { 5468 MUTEX_EXIT(&ring->lock); 5469 return (0); 5470 } 5471 5472 /* set rcr_ring */ 5473 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) { 5474 if ((nxgep->ldgvp->ldvp[i].is_rxdma == 1) && 5475 (nxgep->ldgvp->ldvp[i].channel == channel)) { 5476 ring->ldvp = &nxgep->ldgvp->ldvp[i]; 5477 ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp; 5478 } 5479 } 5480 5481 nxgep->rx_channel_started[channel] = B_TRUE; 5482 ring->rcr_mac_handle = rhp->ring_handle; 5483 ring->rcr_gen_num = mr_gen_num; 5484 MUTEX_EXIT(&ring->lock); 5485 5486 return (0); 5487 } 5488 5489 static void 5490 nxge_rx_ring_stop(mac_ring_driver_t rdriver) 5491 { 5492 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5493 p_nxge_t nxgep = rhp->nxgep; 5494 uint32_t channel; 5495 p_rx_rcr_ring_t ring; 5496 5497 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index; 5498 ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 5499 5500 MUTEX_ENTER(&ring->lock); 5501 nxgep->rx_channel_started[channel] = B_FALSE; 5502 ring->rcr_mac_handle = NULL; 5503 MUTEX_EXIT(&ring->lock); 5504 } 5505 5506 /* 5507 * Callback funtion for MAC layer to register all rings. 5508 */ 5509 static void 5510 nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index, 5511 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh) 5512 { 5513 p_nxge_t nxgep = (p_nxge_t)arg; 5514 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 5515 5516 NXGE_DEBUG_MSG((nxgep, TX_CTL, 5517 "==> nxge_fill_ring 0x%x index %d", rtype, index)); 5518 5519 switch (rtype) { 5520 case MAC_RING_TYPE_TX: { 5521 p_nxge_ring_handle_t rhandlep; 5522 5523 NXGE_DEBUG_MSG((nxgep, TX_CTL, 5524 "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d", 5525 rtype, index, p_cfgp->tdc.count)); 5526 5527 ASSERT((index >= 0) && (index < p_cfgp->tdc.count)); 5528 rhandlep = &nxgep->tx_ring_handles[index]; 5529 rhandlep->nxgep = nxgep; 5530 rhandlep->index = index; 5531 rhandlep->ring_handle = rh; 5532 5533 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5534 infop->mri_start = nxge_tx_ring_start; 5535 infop->mri_stop = nxge_tx_ring_stop; 5536 infop->mri_tx = nxge_tx_ring_send; 5537 5538 break; 5539 } 5540 case MAC_RING_TYPE_RX: { 5541 p_nxge_ring_handle_t rhandlep; 5542 int nxge_rindex; 5543 mac_intr_t nxge_mac_intr; 5544 5545 NXGE_DEBUG_MSG((nxgep, RX_CTL, 5546 "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d", 5547 rtype, index, p_cfgp->max_rdcs)); 5548 5549 /* 5550 * 'index' is the ring index within the group. 5551 * Find the ring index in the nxge instance. 5552 */ 5553 nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index); 5554 5555 ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs)); 5556 rhandlep = &nxgep->rx_ring_handles[nxge_rindex]; 5557 rhandlep->nxgep = nxgep; 5558 rhandlep->index = nxge_rindex; 5559 rhandlep->ring_handle = rh; 5560 5561 /* 5562 * Entrypoint to enable interrupt (disable poll) and 5563 * disable interrupt (enable poll). 5564 */ 5565 nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep; 5566 nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll; 5567 nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll; 5568 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5569 infop->mri_start = nxge_rx_ring_start; 5570 infop->mri_stop = nxge_rx_ring_stop; 5571 infop->mri_intr = nxge_mac_intr; /* ??? */ 5572 infop->mri_poll = nxge_rx_poll; 5573 5574 break; 5575 } 5576 default: 5577 break; 5578 } 5579 5580 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x", 5581 rtype)); 5582 } 5583 5584 static void 5585 nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5586 mac_ring_type_t type) 5587 { 5588 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5589 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5590 nxge_t *nxge; 5591 nxge_grp_t *grp; 5592 nxge_rdc_grp_t *rdc_grp; 5593 uint16_t channel; /* device-wise ring id */ 5594 int dev_gindex; 5595 int rv; 5596 5597 nxge = rgroup->nxgep; 5598 5599 switch (type) { 5600 case MAC_RING_TYPE_TX: 5601 /* 5602 * nxge_grp_dc_add takes a channel number which is a 5603 * "devise" ring ID. 5604 */ 5605 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5606 5607 /* 5608 * Remove the ring from the default group 5609 */ 5610 if (rgroup->gindex != 0) { 5611 (void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5612 } 5613 5614 /* 5615 * nxge->tx_set.group[] is an array of groups indexed by 5616 * a "port" group ID. 5617 */ 5618 grp = nxge->tx_set.group[rgroup->gindex]; 5619 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5620 if (rv != 0) { 5621 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5622 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5623 } 5624 break; 5625 5626 case MAC_RING_TYPE_RX: 5627 /* 5628 * nxge->rx_set.group[] is an array of groups indexed by 5629 * a "port" group ID. 5630 */ 5631 grp = nxge->rx_set.group[rgroup->gindex]; 5632 5633 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5634 rgroup->gindex; 5635 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5636 5637 /* 5638 * nxge_grp_dc_add takes a channel number which is a 5639 * "devise" ring ID. 5640 */ 5641 channel = nxge->pt_config.hw_config.start_rdc + rhandle->index; 5642 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel); 5643 if (rv != 0) { 5644 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5645 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5646 } 5647 5648 rdc_grp->map |= (1 << channel); 5649 rdc_grp->max_rdcs++; 5650 5651 (void) nxge_init_fzc_rdc_tbl(nxge, rgroup->rdctbl); 5652 break; 5653 } 5654 } 5655 5656 static void 5657 nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5658 mac_ring_type_t type) 5659 { 5660 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5661 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5662 nxge_t *nxge; 5663 uint16_t channel; /* device-wise ring id */ 5664 nxge_rdc_grp_t *rdc_grp; 5665 int dev_gindex; 5666 5667 nxge = rgroup->nxgep; 5668 5669 switch (type) { 5670 case MAC_RING_TYPE_TX: 5671 dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid + 5672 rgroup->gindex; 5673 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5674 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5675 5676 /* 5677 * Add the ring back to the default group 5678 */ 5679 if (rgroup->gindex != 0) { 5680 nxge_grp_t *grp; 5681 grp = nxge->tx_set.group[0]; 5682 (void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5683 } 5684 break; 5685 5686 case MAC_RING_TYPE_RX: 5687 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5688 rgroup->gindex; 5689 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5690 channel = rdc_grp->start_rdc + rhandle->index; 5691 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 5692 5693 rdc_grp->map &= ~(1 << channel); 5694 rdc_grp->max_rdcs--; 5695 5696 (void) nxge_init_fzc_rdc_tbl(nxge, rgroup->rdctbl); 5697 break; 5698 } 5699 } 5700 5701 5702 /*ARGSUSED*/ 5703 static nxge_status_t 5704 nxge_add_intrs(p_nxge_t nxgep) 5705 { 5706 5707 int intr_types; 5708 int type = 0; 5709 int ddi_status = DDI_SUCCESS; 5710 nxge_status_t status = NXGE_OK; 5711 5712 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 5713 5714 nxgep->nxge_intr_type.intr_registered = B_FALSE; 5715 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 5716 nxgep->nxge_intr_type.msi_intx_cnt = 0; 5717 nxgep->nxge_intr_type.intr_added = 0; 5718 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 5719 nxgep->nxge_intr_type.intr_type = 0; 5720 5721 if (nxgep->niu_type == N2_NIU) { 5722 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5723 } else if (nxge_msi_enable) { 5724 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5725 } 5726 5727 /* Get the supported interrupt types */ 5728 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 5729 != DDI_SUCCESS) { 5730 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 5731 "ddi_intr_get_supported_types failed: status 0x%08x", 5732 ddi_status)); 5733 return (NXGE_ERROR | NXGE_DDI_FAILED); 5734 } 5735 nxgep->nxge_intr_type.intr_types = intr_types; 5736 5737 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5738 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5739 5740 /* 5741 * Solaris MSIX is not supported yet. use MSI for now. 5742 * nxge_msi_enable (1): 5743 * 1 - MSI 2 - MSI-X others - FIXED 5744 */ 5745 switch (nxge_msi_enable) { 5746 default: 5747 type = DDI_INTR_TYPE_FIXED; 5748 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5749 "use fixed (intx emulation) type %08x", 5750 type)); 5751 break; 5752 5753 case 2: 5754 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5755 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5756 if (intr_types & DDI_INTR_TYPE_MSIX) { 5757 type = DDI_INTR_TYPE_MSIX; 5758 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5759 "ddi_intr_get_supported_types: MSIX 0x%08x", 5760 type)); 5761 } else if (intr_types & DDI_INTR_TYPE_MSI) { 5762 type = DDI_INTR_TYPE_MSI; 5763 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5764 "ddi_intr_get_supported_types: MSI 0x%08x", 5765 type)); 5766 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5767 type = DDI_INTR_TYPE_FIXED; 5768 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5769 "ddi_intr_get_supported_types: MSXED0x%08x", 5770 type)); 5771 } 5772 break; 5773 5774 case 1: 5775 if (intr_types & DDI_INTR_TYPE_MSI) { 5776 type = DDI_INTR_TYPE_MSI; 5777 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5778 "ddi_intr_get_supported_types: MSI 0x%08x", 5779 type)); 5780 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 5781 type = DDI_INTR_TYPE_MSIX; 5782 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5783 "ddi_intr_get_supported_types: MSIX 0x%08x", 5784 type)); 5785 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5786 type = DDI_INTR_TYPE_FIXED; 5787 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5788 "ddi_intr_get_supported_types: MSXED0x%08x", 5789 type)); 5790 } 5791 } 5792 5793 nxgep->nxge_intr_type.intr_type = type; 5794 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 5795 type == DDI_INTR_TYPE_FIXED) && 5796 nxgep->nxge_intr_type.niu_msi_enable) { 5797 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 5798 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5799 " nxge_add_intrs: " 5800 " nxge_add_intrs_adv failed: status 0x%08x", 5801 status)); 5802 return (status); 5803 } else { 5804 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5805 "interrupts registered : type %d", type)); 5806 nxgep->nxge_intr_type.intr_registered = B_TRUE; 5807 5808 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5809 "\nAdded advanced nxge add_intr_adv " 5810 "intr type 0x%x\n", type)); 5811 5812 return (status); 5813 } 5814 } 5815 5816 if (!nxgep->nxge_intr_type.intr_registered) { 5817 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 5818 "failed to register interrupts")); 5819 return (NXGE_ERROR | NXGE_DDI_FAILED); 5820 } 5821 5822 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 5823 return (status); 5824 } 5825 5826 static nxge_status_t 5827 nxge_add_intrs_adv(p_nxge_t nxgep) 5828 { 5829 int intr_type; 5830 p_nxge_intr_t intrp; 5831 5832 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 5833 5834 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5835 intr_type = intrp->intr_type; 5836 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 5837 intr_type)); 5838 5839 switch (intr_type) { 5840 case DDI_INTR_TYPE_MSI: /* 0x2 */ 5841 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 5842 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 5843 5844 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 5845 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 5846 5847 default: 5848 return (NXGE_ERROR); 5849 } 5850 } 5851 5852 5853 /*ARGSUSED*/ 5854 static nxge_status_t 5855 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 5856 { 5857 dev_info_t *dip = nxgep->dip; 5858 p_nxge_ldg_t ldgp; 5859 p_nxge_intr_t intrp; 5860 uint_t *inthandler; 5861 void *arg1, *arg2; 5862 int behavior; 5863 int nintrs, navail, nrequest; 5864 int nactual, nrequired; 5865 int inum = 0; 5866 int x, y; 5867 int ddi_status = DDI_SUCCESS; 5868 nxge_status_t status = NXGE_OK; 5869 5870 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 5871 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5872 intrp->start_inum = 0; 5873 5874 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 5875 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 5876 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5877 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 5878 "nintrs: %d", ddi_status, nintrs)); 5879 return (NXGE_ERROR | NXGE_DDI_FAILED); 5880 } 5881 5882 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 5883 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 5884 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5885 "ddi_intr_get_navail() failed, status: 0x%x%, " 5886 "nintrs: %d", ddi_status, navail)); 5887 return (NXGE_ERROR | NXGE_DDI_FAILED); 5888 } 5889 5890 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5891 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 5892 nintrs, navail)); 5893 5894 /* PSARC/2007/453 MSI-X interrupt limit override */ 5895 if (int_type == DDI_INTR_TYPE_MSIX) { 5896 nrequest = nxge_create_msi_property(nxgep); 5897 if (nrequest < navail) { 5898 navail = nrequest; 5899 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5900 "nxge_add_intrs_adv_type: nintrs %d " 5901 "navail %d (nrequest %d)", 5902 nintrs, navail, nrequest)); 5903 } 5904 } 5905 5906 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 5907 /* MSI must be power of 2 */ 5908 if ((navail & 16) == 16) { 5909 navail = 16; 5910 } else if ((navail & 8) == 8) { 5911 navail = 8; 5912 } else if ((navail & 4) == 4) { 5913 navail = 4; 5914 } else if ((navail & 2) == 2) { 5915 navail = 2; 5916 } else { 5917 navail = 1; 5918 } 5919 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5920 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 5921 "navail %d", nintrs, navail)); 5922 } 5923 5924 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 5925 DDI_INTR_ALLOC_NORMAL); 5926 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 5927 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 5928 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 5929 navail, &nactual, behavior); 5930 if (ddi_status != DDI_SUCCESS || nactual == 0) { 5931 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5932 " ddi_intr_alloc() failed: %d", 5933 ddi_status)); 5934 kmem_free(intrp->htable, intrp->intr_size); 5935 return (NXGE_ERROR | NXGE_DDI_FAILED); 5936 } 5937 5938 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 5939 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 5940 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5941 " ddi_intr_get_pri() failed: %d", 5942 ddi_status)); 5943 /* Free already allocated interrupts */ 5944 for (y = 0; y < nactual; y++) { 5945 (void) ddi_intr_free(intrp->htable[y]); 5946 } 5947 5948 kmem_free(intrp->htable, intrp->intr_size); 5949 return (NXGE_ERROR | NXGE_DDI_FAILED); 5950 } 5951 5952 nrequired = 0; 5953 switch (nxgep->niu_type) { 5954 default: 5955 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 5956 break; 5957 5958 case N2_NIU: 5959 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 5960 break; 5961 } 5962 5963 if (status != NXGE_OK) { 5964 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5965 "nxge_add_intrs_adv_typ:nxge_ldgv_init " 5966 "failed: 0x%x", status)); 5967 /* Free already allocated interrupts */ 5968 for (y = 0; y < nactual; y++) { 5969 (void) ddi_intr_free(intrp->htable[y]); 5970 } 5971 5972 kmem_free(intrp->htable, intrp->intr_size); 5973 return (status); 5974 } 5975 5976 ldgp = nxgep->ldgvp->ldgp; 5977 for (x = 0; x < nrequired; x++, ldgp++) { 5978 ldgp->vector = (uint8_t)x; 5979 ldgp->intdata = SID_DATA(ldgp->func, x); 5980 arg1 = ldgp->ldvp; 5981 arg2 = nxgep; 5982 if (ldgp->nldvs == 1) { 5983 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 5984 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5985 "nxge_add_intrs_adv_type: " 5986 "arg1 0x%x arg2 0x%x: " 5987 "1-1 int handler (entry %d intdata 0x%x)\n", 5988 arg1, arg2, 5989 x, ldgp->intdata)); 5990 } else if (ldgp->nldvs > 1) { 5991 inthandler = (uint_t *)ldgp->sys_intr_handler; 5992 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5993 "nxge_add_intrs_adv_type: " 5994 "arg1 0x%x arg2 0x%x: " 5995 "nldevs %d int handler " 5996 "(entry %d intdata 0x%x)\n", 5997 arg1, arg2, 5998 ldgp->nldvs, x, ldgp->intdata)); 5999 } 6000 6001 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6002 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 6003 "htable 0x%llx", x, intrp->htable[x])); 6004 6005 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6006 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6007 != DDI_SUCCESS) { 6008 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6009 "==> nxge_add_intrs_adv_type: failed #%d " 6010 "status 0x%x", x, ddi_status)); 6011 for (y = 0; y < intrp->intr_added; y++) { 6012 (void) ddi_intr_remove_handler( 6013 intrp->htable[y]); 6014 } 6015 /* Free already allocated intr */ 6016 for (y = 0; y < nactual; y++) { 6017 (void) ddi_intr_free(intrp->htable[y]); 6018 } 6019 kmem_free(intrp->htable, intrp->intr_size); 6020 6021 (void) nxge_ldgv_uninit(nxgep); 6022 6023 return (NXGE_ERROR | NXGE_DDI_FAILED); 6024 } 6025 intrp->intr_added++; 6026 } 6027 6028 intrp->msi_intx_cnt = nactual; 6029 6030 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6031 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 6032 navail, nactual, 6033 intrp->msi_intx_cnt, 6034 intrp->intr_added)); 6035 6036 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6037 6038 (void) nxge_intr_ldgv_init(nxgep); 6039 6040 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type")); 6041 6042 return (status); 6043 } 6044 6045 /*ARGSUSED*/ 6046 static nxge_status_t 6047 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type) 6048 { 6049 dev_info_t *dip = nxgep->dip; 6050 p_nxge_ldg_t ldgp; 6051 p_nxge_intr_t intrp; 6052 uint_t *inthandler; 6053 void *arg1, *arg2; 6054 int behavior; 6055 int nintrs, navail; 6056 int nactual, nrequired; 6057 int inum = 0; 6058 int x, y; 6059 int ddi_status = DDI_SUCCESS; 6060 nxge_status_t status = NXGE_OK; 6061 6062 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix")); 6063 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6064 intrp->start_inum = 0; 6065 6066 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 6067 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 6068 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6069 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 6070 "nintrs: %d", status, nintrs)); 6071 return (NXGE_ERROR | NXGE_DDI_FAILED); 6072 } 6073 6074 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 6075 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 6076 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6077 "ddi_intr_get_navail() failed, status: 0x%x%, " 6078 "nintrs: %d", ddi_status, navail)); 6079 return (NXGE_ERROR | NXGE_DDI_FAILED); 6080 } 6081 6082 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6083 "ddi_intr_get_navail() returned: nintrs %d, naavail %d", 6084 nintrs, navail)); 6085 6086 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 6087 DDI_INTR_ALLOC_NORMAL); 6088 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 6089 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 6090 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 6091 navail, &nactual, behavior); 6092 if (ddi_status != DDI_SUCCESS || nactual == 0) { 6093 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6094 " ddi_intr_alloc() failed: %d", 6095 ddi_status)); 6096 kmem_free(intrp->htable, intrp->intr_size); 6097 return (NXGE_ERROR | NXGE_DDI_FAILED); 6098 } 6099 6100 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 6101 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 6102 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6103 " ddi_intr_get_pri() failed: %d", 6104 ddi_status)); 6105 /* Free already allocated interrupts */ 6106 for (y = 0; y < nactual; y++) { 6107 (void) ddi_intr_free(intrp->htable[y]); 6108 } 6109 6110 kmem_free(intrp->htable, intrp->intr_size); 6111 return (NXGE_ERROR | NXGE_DDI_FAILED); 6112 } 6113 6114 nrequired = 0; 6115 switch (nxgep->niu_type) { 6116 default: 6117 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 6118 break; 6119 6120 case N2_NIU: 6121 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 6122 break; 6123 } 6124 6125 if (status != NXGE_OK) { 6126 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6127 "nxge_add_intrs_adv_type_fix:nxge_ldgv_init " 6128 "failed: 0x%x", status)); 6129 /* Free already allocated interrupts */ 6130 for (y = 0; y < nactual; y++) { 6131 (void) ddi_intr_free(intrp->htable[y]); 6132 } 6133 6134 kmem_free(intrp->htable, intrp->intr_size); 6135 return (status); 6136 } 6137 6138 ldgp = nxgep->ldgvp->ldgp; 6139 for (x = 0; x < nrequired; x++, ldgp++) { 6140 ldgp->vector = (uint8_t)x; 6141 if (nxgep->niu_type != N2_NIU) { 6142 ldgp->intdata = SID_DATA(ldgp->func, x); 6143 } 6144 6145 arg1 = ldgp->ldvp; 6146 arg2 = nxgep; 6147 if (ldgp->nldvs == 1) { 6148 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 6149 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6150 "nxge_add_intrs_adv_type_fix: " 6151 "1-1 int handler(%d) ldg %d ldv %d " 6152 "arg1 $%p arg2 $%p\n", 6153 x, ldgp->ldg, ldgp->ldvp->ldv, 6154 arg1, arg2)); 6155 } else if (ldgp->nldvs > 1) { 6156 inthandler = (uint_t *)ldgp->sys_intr_handler; 6157 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6158 "nxge_add_intrs_adv_type_fix: " 6159 "shared ldv %d int handler(%d) ldv %d ldg %d" 6160 "arg1 0x%016llx arg2 0x%016llx\n", 6161 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6162 arg1, arg2)); 6163 } 6164 6165 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6166 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6167 != DDI_SUCCESS) { 6168 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6169 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6170 "status 0x%x", x, ddi_status)); 6171 for (y = 0; y < intrp->intr_added; y++) { 6172 (void) ddi_intr_remove_handler( 6173 intrp->htable[y]); 6174 } 6175 for (y = 0; y < nactual; y++) { 6176 (void) ddi_intr_free(intrp->htable[y]); 6177 } 6178 /* Free already allocated intr */ 6179 kmem_free(intrp->htable, intrp->intr_size); 6180 6181 (void) nxge_ldgv_uninit(nxgep); 6182 6183 return (NXGE_ERROR | NXGE_DDI_FAILED); 6184 } 6185 intrp->intr_added++; 6186 } 6187 6188 intrp->msi_intx_cnt = nactual; 6189 6190 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6191 6192 status = nxge_intr_ldgv_init(nxgep); 6193 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6194 6195 return (status); 6196 } 6197 6198 static void 6199 nxge_remove_intrs(p_nxge_t nxgep) 6200 { 6201 int i, inum; 6202 p_nxge_intr_t intrp; 6203 6204 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6205 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6206 if (!intrp->intr_registered) { 6207 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6208 "<== nxge_remove_intrs: interrupts not registered")); 6209 return; 6210 } 6211 6212 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 6213 6214 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6215 (void) ddi_intr_block_disable(intrp->htable, 6216 intrp->intr_added); 6217 } else { 6218 for (i = 0; i < intrp->intr_added; i++) { 6219 (void) ddi_intr_disable(intrp->htable[i]); 6220 } 6221 } 6222 6223 for (inum = 0; inum < intrp->intr_added; inum++) { 6224 if (intrp->htable[inum]) { 6225 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6226 } 6227 } 6228 6229 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6230 if (intrp->htable[inum]) { 6231 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6232 "nxge_remove_intrs: ddi_intr_free inum %d " 6233 "msi_intx_cnt %d intr_added %d", 6234 inum, 6235 intrp->msi_intx_cnt, 6236 intrp->intr_added)); 6237 6238 (void) ddi_intr_free(intrp->htable[inum]); 6239 } 6240 } 6241 6242 kmem_free(intrp->htable, intrp->intr_size); 6243 intrp->intr_registered = B_FALSE; 6244 intrp->intr_enabled = B_FALSE; 6245 intrp->msi_intx_cnt = 0; 6246 intrp->intr_added = 0; 6247 6248 (void) nxge_ldgv_uninit(nxgep); 6249 6250 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6251 "#msix-request"); 6252 6253 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6254 } 6255 6256 /*ARGSUSED*/ 6257 static void 6258 nxge_intrs_enable(p_nxge_t nxgep) 6259 { 6260 p_nxge_intr_t intrp; 6261 int i; 6262 int status; 6263 6264 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6265 6266 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6267 6268 if (!intrp->intr_registered) { 6269 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6270 "interrupts are not registered")); 6271 return; 6272 } 6273 6274 if (intrp->intr_enabled) { 6275 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6276 "<== nxge_intrs_enable: already enabled")); 6277 return; 6278 } 6279 6280 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6281 status = ddi_intr_block_enable(intrp->htable, 6282 intrp->intr_added); 6283 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6284 "block enable - status 0x%x total inums #%d\n", 6285 status, intrp->intr_added)); 6286 } else { 6287 for (i = 0; i < intrp->intr_added; i++) { 6288 status = ddi_intr_enable(intrp->htable[i]); 6289 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6290 "ddi_intr_enable:enable - status 0x%x " 6291 "total inums %d enable inum #%d\n", 6292 status, intrp->intr_added, i)); 6293 if (status == DDI_SUCCESS) { 6294 intrp->intr_enabled = B_TRUE; 6295 } 6296 } 6297 } 6298 6299 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable")); 6300 } 6301 6302 /*ARGSUSED*/ 6303 static void 6304 nxge_intrs_disable(p_nxge_t nxgep) 6305 { 6306 p_nxge_intr_t intrp; 6307 int i; 6308 6309 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable")); 6310 6311 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6312 6313 if (!intrp->intr_registered) { 6314 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: " 6315 "interrupts are not registered")); 6316 return; 6317 } 6318 6319 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6320 (void) ddi_intr_block_disable(intrp->htable, 6321 intrp->intr_added); 6322 } else { 6323 for (i = 0; i < intrp->intr_added; i++) { 6324 (void) ddi_intr_disable(intrp->htable[i]); 6325 } 6326 } 6327 6328 intrp->intr_enabled = B_FALSE; 6329 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable")); 6330 } 6331 6332 static nxge_status_t 6333 nxge_mac_register(p_nxge_t nxgep) 6334 { 6335 mac_register_t *macp; 6336 int status; 6337 6338 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register")); 6339 6340 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 6341 return (NXGE_ERROR); 6342 6343 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 6344 macp->m_driver = nxgep; 6345 macp->m_dip = nxgep->dip; 6346 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; 6347 macp->m_callbacks = &nxge_m_callbacks; 6348 macp->m_min_sdu = 0; 6349 nxgep->mac.default_mtu = nxgep->mac.maxframesize - 6350 NXGE_EHEADER_VLAN_CRC; 6351 macp->m_max_sdu = nxgep->mac.default_mtu; 6352 macp->m_margin = VLAN_TAGSZ; 6353 macp->m_priv_props = nxge_priv_props; 6354 macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS; 6355 macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1 | MAC_VIRT_SERIALIZE; 6356 6357 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 6358 "==> nxge_mac_register: instance %d " 6359 "max_sdu %d margin %d maxframe %d (header %d)", 6360 nxgep->instance, 6361 macp->m_max_sdu, macp->m_margin, 6362 nxgep->mac.maxframesize, 6363 NXGE_EHEADER_VLAN_CRC)); 6364 6365 status = mac_register(macp, &nxgep->mach); 6366 mac_free(macp); 6367 6368 if (status != 0) { 6369 cmn_err(CE_WARN, 6370 "!nxge_mac_register failed (status %d instance %d)", 6371 status, nxgep->instance); 6372 return (NXGE_ERROR); 6373 } 6374 6375 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success " 6376 "(instance %d)", nxgep->instance)); 6377 6378 return (NXGE_OK); 6379 } 6380 6381 void 6382 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp) 6383 { 6384 ssize_t size; 6385 mblk_t *nmp; 6386 uint8_t blk_id; 6387 uint8_t chan; 6388 uint32_t err_id; 6389 err_inject_t *eip; 6390 6391 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject")); 6392 6393 size = 1024; 6394 nmp = mp->b_cont; 6395 eip = (err_inject_t *)nmp->b_rptr; 6396 blk_id = eip->blk_id; 6397 err_id = eip->err_id; 6398 chan = eip->chan; 6399 cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id); 6400 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id); 6401 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 6402 switch (blk_id) { 6403 case MAC_BLK_ID: 6404 break; 6405 case TXMAC_BLK_ID: 6406 break; 6407 case RXMAC_BLK_ID: 6408 break; 6409 case MIF_BLK_ID: 6410 break; 6411 case IPP_BLK_ID: 6412 nxge_ipp_inject_err(nxgep, err_id); 6413 break; 6414 case TXC_BLK_ID: 6415 nxge_txc_inject_err(nxgep, err_id); 6416 break; 6417 case TXDMA_BLK_ID: 6418 nxge_txdma_inject_err(nxgep, err_id, chan); 6419 break; 6420 case RXDMA_BLK_ID: 6421 nxge_rxdma_inject_err(nxgep, err_id, chan); 6422 break; 6423 case ZCP_BLK_ID: 6424 nxge_zcp_inject_err(nxgep, err_id); 6425 break; 6426 case ESPC_BLK_ID: 6427 break; 6428 case FFLP_BLK_ID: 6429 break; 6430 case PHY_BLK_ID: 6431 break; 6432 case ETHER_SERDES_BLK_ID: 6433 break; 6434 case PCIE_SERDES_BLK_ID: 6435 break; 6436 case VIR_BLK_ID: 6437 break; 6438 } 6439 6440 nmp->b_wptr = nmp->b_rptr + size; 6441 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 6442 6443 miocack(wq, mp, (int)size, 0); 6444 } 6445 6446 static int 6447 nxge_init_common_dev(p_nxge_t nxgep) 6448 { 6449 p_nxge_hw_list_t hw_p; 6450 dev_info_t *p_dip; 6451 6452 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 6453 6454 p_dip = nxgep->p_dip; 6455 MUTEX_ENTER(&nxge_common_lock); 6456 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6457 "==> nxge_init_common_dev:func # %d", 6458 nxgep->function_num)); 6459 /* 6460 * Loop through existing per neptune hardware list. 6461 */ 6462 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6463 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6464 "==> nxge_init_common_device:func # %d " 6465 "hw_p $%p parent dip $%p", 6466 nxgep->function_num, 6467 hw_p, 6468 p_dip)); 6469 if (hw_p->parent_devp == p_dip) { 6470 nxgep->nxge_hw_p = hw_p; 6471 hw_p->ndevs++; 6472 hw_p->nxge_p[nxgep->function_num] = nxgep; 6473 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6474 "==> nxge_init_common_device:func # %d " 6475 "hw_p $%p parent dip $%p " 6476 "ndevs %d (found)", 6477 nxgep->function_num, 6478 hw_p, 6479 p_dip, 6480 hw_p->ndevs)); 6481 break; 6482 } 6483 } 6484 6485 if (hw_p == NULL) { 6486 6487 char **prop_val; 6488 uint_t prop_len; 6489 int i; 6490 6491 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6492 "==> nxge_init_common_device:func # %d " 6493 "parent dip $%p (new)", 6494 nxgep->function_num, 6495 p_dip)); 6496 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 6497 hw_p->parent_devp = p_dip; 6498 hw_p->magic = NXGE_NEPTUNE_MAGIC; 6499 nxgep->nxge_hw_p = hw_p; 6500 hw_p->ndevs++; 6501 hw_p->nxge_p[nxgep->function_num] = nxgep; 6502 hw_p->next = nxge_hw_list; 6503 if (nxgep->niu_type == N2_NIU) { 6504 hw_p->niu_type = N2_NIU; 6505 hw_p->platform_type = P_NEPTUNE_NIU; 6506 } else { 6507 hw_p->niu_type = NIU_TYPE_NONE; 6508 hw_p->platform_type = P_NEPTUNE_NONE; 6509 } 6510 6511 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 6512 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 6513 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 6514 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 6515 6516 nxge_hw_list = hw_p; 6517 6518 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0, 6519 "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) { 6520 for (i = 0; i < prop_len; i++) { 6521 if ((strcmp((caddr_t)prop_val[i], 6522 NXGE_ROCK_COMPATIBLE) == 0)) { 6523 hw_p->platform_type = P_NEPTUNE_ROCK; 6524 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6525 "ROCK hw_p->platform_type %d", 6526 hw_p->platform_type)); 6527 break; 6528 } 6529 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6530 "nxge_init_common_dev: read compatible" 6531 " property[%d] val[%s]", 6532 i, (caddr_t)prop_val[i])); 6533 } 6534 } 6535 6536 ddi_prop_free(prop_val); 6537 6538 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list); 6539 } 6540 6541 MUTEX_EXIT(&nxge_common_lock); 6542 6543 nxgep->platform_type = hw_p->platform_type; 6544 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d", 6545 nxgep->platform_type)); 6546 if (nxgep->niu_type != N2_NIU) { 6547 nxgep->niu_type = hw_p->niu_type; 6548 } 6549 6550 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6551 "==> nxge_init_common_device (nxge_hw_list) $%p", 6552 nxge_hw_list)); 6553 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device")); 6554 6555 return (NXGE_OK); 6556 } 6557 6558 static void 6559 nxge_uninit_common_dev(p_nxge_t nxgep) 6560 { 6561 p_nxge_hw_list_t hw_p, h_hw_p; 6562 p_nxge_dma_pt_cfg_t p_dma_cfgp; 6563 p_nxge_hw_pt_cfg_t p_cfgp; 6564 dev_info_t *p_dip; 6565 6566 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device")); 6567 if (nxgep->nxge_hw_p == NULL) { 6568 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6569 "<== nxge_uninit_common_device (no common)")); 6570 return; 6571 } 6572 6573 MUTEX_ENTER(&nxge_common_lock); 6574 h_hw_p = nxge_hw_list; 6575 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6576 p_dip = hw_p->parent_devp; 6577 if (nxgep->nxge_hw_p == hw_p && 6578 p_dip == nxgep->p_dip && 6579 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC && 6580 hw_p->magic == NXGE_NEPTUNE_MAGIC) { 6581 6582 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6583 "==> nxge_uninit_common_device:func # %d " 6584 "hw_p $%p parent dip $%p " 6585 "ndevs %d (found)", 6586 nxgep->function_num, 6587 hw_p, 6588 p_dip, 6589 hw_p->ndevs)); 6590 6591 /* 6592 * Release the RDC table, a shared resoruce 6593 * of the nxge hardware. The RDC table was 6594 * assigned to this instance of nxge in 6595 * nxge_use_cfg_dma_config(). 6596 */ 6597 if (!isLDOMguest(nxgep)) { 6598 p_dma_cfgp = 6599 (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 6600 p_cfgp = 6601 (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config; 6602 (void) nxge_fzc_rdc_tbl_unbind(nxgep, 6603 p_cfgp->def_mac_rxdma_grpid); 6604 6605 /* Cleanup any outstanding groups. */ 6606 nxge_grp_cleanup(nxgep); 6607 } 6608 6609 if (hw_p->ndevs) { 6610 hw_p->ndevs--; 6611 } 6612 hw_p->nxge_p[nxgep->function_num] = NULL; 6613 if (!hw_p->ndevs) { 6614 MUTEX_DESTROY(&hw_p->nxge_vlan_lock); 6615 MUTEX_DESTROY(&hw_p->nxge_tcam_lock); 6616 MUTEX_DESTROY(&hw_p->nxge_cfg_lock); 6617 MUTEX_DESTROY(&hw_p->nxge_mdio_lock); 6618 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6619 "==> nxge_uninit_common_device: " 6620 "func # %d " 6621 "hw_p $%p parent dip $%p " 6622 "ndevs %d (last)", 6623 nxgep->function_num, 6624 hw_p, 6625 p_dip, 6626 hw_p->ndevs)); 6627 6628 nxge_hio_uninit(nxgep); 6629 6630 if (hw_p == nxge_hw_list) { 6631 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6632 "==> nxge_uninit_common_device:" 6633 "remove head func # %d " 6634 "hw_p $%p parent dip $%p " 6635 "ndevs %d (head)", 6636 nxgep->function_num, 6637 hw_p, 6638 p_dip, 6639 hw_p->ndevs)); 6640 nxge_hw_list = hw_p->next; 6641 } else { 6642 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6643 "==> nxge_uninit_common_device:" 6644 "remove middle func # %d " 6645 "hw_p $%p parent dip $%p " 6646 "ndevs %d (middle)", 6647 nxgep->function_num, 6648 hw_p, 6649 p_dip, 6650 hw_p->ndevs)); 6651 h_hw_p->next = hw_p->next; 6652 } 6653 6654 nxgep->nxge_hw_p = NULL; 6655 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t)); 6656 } 6657 break; 6658 } else { 6659 h_hw_p = hw_p; 6660 } 6661 } 6662 6663 MUTEX_EXIT(&nxge_common_lock); 6664 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6665 "==> nxge_uninit_common_device (nxge_hw_list) $%p", 6666 nxge_hw_list)); 6667 6668 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<= nxge_uninit_common_device")); 6669 } 6670 6671 /* 6672 * Determines the number of ports from the niu_type or the platform type. 6673 * Returns the number of ports, or returns zero on failure. 6674 */ 6675 6676 int 6677 nxge_get_nports(p_nxge_t nxgep) 6678 { 6679 int nports = 0; 6680 6681 switch (nxgep->niu_type) { 6682 case N2_NIU: 6683 case NEPTUNE_2_10GF: 6684 nports = 2; 6685 break; 6686 case NEPTUNE_4_1GC: 6687 case NEPTUNE_2_10GF_2_1GC: 6688 case NEPTUNE_1_10GF_3_1GC: 6689 case NEPTUNE_1_1GC_1_10GF_2_1GC: 6690 case NEPTUNE_2_10GF_2_1GRF: 6691 nports = 4; 6692 break; 6693 default: 6694 switch (nxgep->platform_type) { 6695 case P_NEPTUNE_NIU: 6696 case P_NEPTUNE_ATLAS_2PORT: 6697 nports = 2; 6698 break; 6699 case P_NEPTUNE_ATLAS_4PORT: 6700 case P_NEPTUNE_MARAMBA_P0: 6701 case P_NEPTUNE_MARAMBA_P1: 6702 case P_NEPTUNE_ROCK: 6703 case P_NEPTUNE_ALONSO: 6704 nports = 4; 6705 break; 6706 default: 6707 break; 6708 } 6709 break; 6710 } 6711 6712 return (nports); 6713 } 6714 6715 /* 6716 * The following two functions are to support 6717 * PSARC/2007/453 MSI-X interrupt limit override. 6718 */ 6719 static int 6720 nxge_create_msi_property(p_nxge_t nxgep) 6721 { 6722 int nmsi; 6723 extern int ncpus; 6724 6725 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property")); 6726 6727 switch (nxgep->mac.portmode) { 6728 case PORT_10G_COPPER: 6729 case PORT_10G_FIBER: 6730 case PORT_10G_TN1010: 6731 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 6732 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 6733 /* 6734 * The maximum MSI-X requested will be 8. 6735 * If the # of CPUs is less than 8, we will request 6736 * # MSI-X based on the # of CPUs (default). 6737 */ 6738 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6739 "==>nxge_create_msi_property (10G): nxge_msix_10g_intrs %d", 6740 nxge_msix_10g_intrs)); 6741 if ((nxge_msix_10g_intrs == 0) || 6742 (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) { 6743 nmsi = NXGE_MSIX_REQUEST_10G; 6744 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6745 "==>nxge_create_msi_property (10G): reset to 8")); 6746 } else { 6747 nmsi = nxge_msix_10g_intrs; 6748 } 6749 6750 /* 6751 * If # of interrupts requested is 8 (default), 6752 * the checking of the number of cpus will be 6753 * be maintained. 6754 */ 6755 if ((nmsi == NXGE_MSIX_REQUEST_10G) && 6756 (ncpus < nmsi)) { 6757 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6758 "==>nxge_create_msi_property (10G): reset to 8")); 6759 nmsi = ncpus; 6760 } 6761 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6762 "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)", 6763 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6764 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6765 break; 6766 6767 default: 6768 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 6769 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 6770 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6771 "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d", 6772 nxge_msix_1g_intrs)); 6773 if ((nxge_msix_1g_intrs == 0) || 6774 (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) { 6775 nmsi = NXGE_MSIX_REQUEST_1G; 6776 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6777 "==>nxge_create_msi_property (1G): reset to 2")); 6778 } else { 6779 nmsi = nxge_msix_1g_intrs; 6780 } 6781 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6782 "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)", 6783 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6784 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6785 break; 6786 } 6787 6788 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property")); 6789 return (nmsi); 6790 } 6791 6792 /* ARGSUSED */ 6793 static int 6794 nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize, 6795 void *pr_val) 6796 { 6797 int err = 0; 6798 link_flowctrl_t fl; 6799 6800 switch (pr_num) { 6801 case MAC_PROP_AUTONEG: 6802 *(uint8_t *)pr_val = 1; 6803 break; 6804 case MAC_PROP_FLOWCTRL: 6805 if (pr_valsize < sizeof (link_flowctrl_t)) 6806 return (EINVAL); 6807 fl = LINK_FLOWCTRL_RX; 6808 bcopy(&fl, pr_val, sizeof (fl)); 6809 break; 6810 case MAC_PROP_ADV_1000FDX_CAP: 6811 case MAC_PROP_EN_1000FDX_CAP: 6812 *(uint8_t *)pr_val = 1; 6813 break; 6814 case MAC_PROP_ADV_100FDX_CAP: 6815 case MAC_PROP_EN_100FDX_CAP: 6816 *(uint8_t *)pr_val = 1; 6817 break; 6818 default: 6819 err = ENOTSUP; 6820 break; 6821 } 6822 return (err); 6823 } 6824 6825 6826 /* 6827 * The following is a software around for the Neptune hardware's 6828 * interrupt bugs; The Neptune hardware may generate spurious interrupts when 6829 * an interrupr handler is removed. 6830 */ 6831 #define NXGE_PCI_PORT_LOGIC_OFFSET 0x98 6832 #define NXGE_PIM_RESET (1ULL << 29) 6833 #define NXGE_GLU_RESET (1ULL << 30) 6834 #define NXGE_NIU_RESET (1ULL << 31) 6835 #define NXGE_PCI_RESET_ALL (NXGE_PIM_RESET | \ 6836 NXGE_GLU_RESET | \ 6837 NXGE_NIU_RESET) 6838 6839 #define NXGE_WAIT_QUITE_TIME 200000 6840 #define NXGE_WAIT_QUITE_RETRY 40 6841 #define NXGE_PCI_RESET_WAIT 1000000 /* one second */ 6842 6843 static void 6844 nxge_niu_peu_reset(p_nxge_t nxgep) 6845 { 6846 uint32_t rvalue; 6847 p_nxge_hw_list_t hw_p; 6848 p_nxge_t fnxgep; 6849 int i, j; 6850 6851 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset")); 6852 if ((hw_p = nxgep->nxge_hw_p) == NULL) { 6853 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6854 "==> nxge_niu_peu_reset: NULL hardware pointer")); 6855 return; 6856 } 6857 6858 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6859 "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d", 6860 hw_p->flags, nxgep->nxge_link_poll_timerid, 6861 nxgep->nxge_timerid)); 6862 6863 MUTEX_ENTER(&hw_p->nxge_cfg_lock); 6864 /* 6865 * Make sure other instances from the same hardware 6866 * stop sending PIO and in quiescent state. 6867 */ 6868 for (i = 0; i < NXGE_MAX_PORTS; i++) { 6869 fnxgep = hw_p->nxge_p[i]; 6870 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6871 "==> nxge_niu_peu_reset: checking entry %d " 6872 "nxgep $%p", i, fnxgep)); 6873 #ifdef NXGE_DEBUG 6874 if (fnxgep) { 6875 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6876 "==> nxge_niu_peu_reset: entry %d (function %d) " 6877 "link timer id %d hw timer id %d", 6878 i, fnxgep->function_num, 6879 fnxgep->nxge_link_poll_timerid, 6880 fnxgep->nxge_timerid)); 6881 } 6882 #endif 6883 if (fnxgep && fnxgep != nxgep && 6884 (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) { 6885 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6886 "==> nxge_niu_peu_reset: checking $%p " 6887 "(function %d) timer ids", 6888 fnxgep, fnxgep->function_num)); 6889 for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) { 6890 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6891 "==> nxge_niu_peu_reset: waiting")); 6892 NXGE_DELAY(NXGE_WAIT_QUITE_TIME); 6893 if (!fnxgep->nxge_timerid && 6894 !fnxgep->nxge_link_poll_timerid) { 6895 break; 6896 } 6897 } 6898 NXGE_DELAY(NXGE_WAIT_QUITE_TIME); 6899 if (fnxgep->nxge_timerid || 6900 fnxgep->nxge_link_poll_timerid) { 6901 MUTEX_EXIT(&hw_p->nxge_cfg_lock); 6902 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6903 "<== nxge_niu_peu_reset: cannot reset " 6904 "hardware (devices are still in use)")); 6905 return; 6906 } 6907 } 6908 } 6909 6910 if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) { 6911 hw_p->flags |= COMMON_RESET_NIU_PCI; 6912 rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh, 6913 NXGE_PCI_PORT_LOGIC_OFFSET); 6914 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6915 "nxge_niu_peu_reset: read offset 0x%x (%d) " 6916 "(data 0x%x)", 6917 NXGE_PCI_PORT_LOGIC_OFFSET, 6918 NXGE_PCI_PORT_LOGIC_OFFSET, 6919 rvalue)); 6920 6921 rvalue |= NXGE_PCI_RESET_ALL; 6922 pci_config_put32(nxgep->dev_regs->nxge_pciregh, 6923 NXGE_PCI_PORT_LOGIC_OFFSET, rvalue); 6924 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6925 "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x", 6926 rvalue)); 6927 6928 NXGE_DELAY(NXGE_PCI_RESET_WAIT); 6929 } 6930 6931 MUTEX_EXIT(&hw_p->nxge_cfg_lock); 6932 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset")); 6933 } 6934 6935 static void 6936 nxge_set_pci_replay_timeout(p_nxge_t nxgep) 6937 { 6938 p_dev_regs_t dev_regs; 6939 uint32_t value; 6940 6941 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout")); 6942 6943 if (!nxge_set_replay_timer) { 6944 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6945 "==> nxge_set_pci_replay_timeout: will not change " 6946 "the timeout")); 6947 return; 6948 } 6949 6950 dev_regs = nxgep->dev_regs; 6951 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6952 "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p", 6953 dev_regs, dev_regs->nxge_pciregh)); 6954 6955 if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) { 6956 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6957 "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or " 6958 "no PCI handle", 6959 dev_regs)); 6960 return; 6961 } 6962 value = (pci_config_get32(dev_regs->nxge_pciregh, 6963 PCI_REPLAY_TIMEOUT_CFG_OFFSET) | 6964 (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT)); 6965 6966 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6967 "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x " 6968 "(timeout value to set 0x%x at offset 0x%x) value 0x%x", 6969 pci_config_get32(dev_regs->nxge_pciregh, 6970 PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout, 6971 PCI_REPLAY_TIMEOUT_CFG_OFFSET, value)); 6972 6973 pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET, 6974 value); 6975 6976 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6977 "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x", 6978 pci_config_get32(dev_regs->nxge_pciregh, 6979 PCI_REPLAY_TIMEOUT_CFG_OFFSET))); 6980 6981 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout")); 6982 } 6983 6984 /* 6985 * quiesce(9E) entry point. 6986 * 6987 * This function is called when the system is single-threaded at high 6988 * PIL with preemption disabled. Therefore, this function must not be 6989 * blocked. 6990 * 6991 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 6992 * DDI_FAILURE indicates an error condition and should almost never happen. 6993 */ 6994 static int 6995 nxge_quiesce(dev_info_t *dip) 6996 { 6997 int instance = ddi_get_instance(dip); 6998 p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 6999 7000 if (nxgep == NULL) 7001 return (DDI_FAILURE); 7002 7003 /* Turn off debugging */ 7004 nxge_debug_level = NO_DEBUG; 7005 nxgep->nxge_debug_level = NO_DEBUG; 7006 npi_debug_level = NO_DEBUG; 7007 7008 /* 7009 * Stop link monitor only when linkchkmod is interrupt based 7010 */ 7011 if (nxgep->mac.linkchkmode == LINKCHK_INTR) { 7012 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 7013 } 7014 7015 (void) nxge_intr_hw_disable(nxgep); 7016 7017 /* 7018 * Reset the receive MAC side. 7019 */ 7020 (void) nxge_rx_mac_disable(nxgep); 7021 7022 /* Disable and soft reset the IPP */ 7023 if (!isLDOMguest(nxgep)) 7024 (void) nxge_ipp_disable(nxgep); 7025 7026 /* 7027 * Reset the transmit/receive DMA side. 7028 */ 7029 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 7030 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 7031 7032 /* 7033 * Reset the transmit MAC side. 7034 */ 7035 (void) nxge_tx_mac_disable(nxgep); 7036 7037 return (DDI_SUCCESS); 7038 } 7039