/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_hio.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */

/*
 * PSARC/2007/453 MSI-X interrupt limit override
 */
uint32_t nxge_msi_enable = 2;

/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed. If this flag is enabled, the
 * driver will reset the hardware when devices are being detached.
 */
uint32_t nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware checksum bugs that affect
 * packet transmission and reception:
 *
 * Usage of nxge_cksum_offload:
 *
 *  (1) nxge_cksum_offload = 0 (default):
 *	- transmitted packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: driver will compute the software checksum
 *	       based on the partial checksum computed
 *	       by the IP layer.
 *	- received packets:
 *	  TCP: marks packet checksum flags based on hardware result.
 *	  UDP: will not mark checksum flags.
 *
 *  (2) nxge_cksum_offload = 1:
 *	- transmitted packets:
 *	  TCP/UDP: uses the hardware checksum feature.
 *	- received packets:
 *	  TCP/UDP: marks packet checksum flags based on hardware result.
 *
 *  (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksums for both TCP and UDP will be computed
 *	  by the stack.
 *	- Software LSO is not allowed in this case.
 *
 *  (4) nxge_cksum_offload > 2:
 *	- Treated as if it were set to 2
 *	  (the stack will compute the checksum).
 *
 *  (5) If the hardware bug is fixed, this workaround
 *	needs to be updated accordingly to reflect
 *	the new hardware revision.
 */
uint32_t nxge_cksum_offload = 0;
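
/*
 * Illustrative only (assumed standard /etc/system syntax, not part of
 * the original source): to offload both TCP and UDP checksums to the
 * hardware (mode 1 above), add:
 *
 *	set nxge:nxge_cksum_offload = 1
 */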

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t nxge_jumbo_enable = B_FALSE;
uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/* MAX LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if defined(_BIG_ENDIAN)
uint32_t nxge_use_kmem_alloc = 1;
#else
uint32_t nxge_use_kmem_alloc = 0;
#endif

rtrace_t npi_rtracebuf;

/*
 * The hardware sometimes fails to allow enough time for the link
 * partner to send an acknowledgement for packets that the hardware
 * sent to it. The hardware resends the packets earlier than it should
 * in those instances. This behavior caused some switches to
 * acknowledge the wrong packets and triggered fatal errors.
 * This software workaround sets the replay timer to a value
 * suggested by the hardware team.
 *
 * PCI config space replay timer register:
 * the replay timeout value below is 0xc for bits 18:14.
 */
#define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
#define	PCI_REPLAY_TIMEOUT_SHIFT	14

uint32_t nxge_set_replay_timer = 1;
uint32_t nxge_replay_timeout = 0xc;
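
/*
 * Minimal sketch (not the driver's code path) of how the value above
 * would be written, assuming a 5-bit field at bits 18:14 of the config
 * register and a valid config-space handle "cfg_handle"; see
 * nxge_set_pci_replay_timeout(), declared later in this file, for the
 * actual implementation:
 *
 *	uint32_t val;
 *
 *	val = pci_config_get32(cfg_handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	val &= ~(0x1f << PCI_REPLAY_TIMEOUT_SHIFT);
 *	val |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(cfg_handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, val);
 */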

/*
 * The transmit serialization logic sometimes sleeps longer than it
 * should before calling the driver's transmit function. The
 * performance group suggested a tunable to cap the maximum wait time
 * when needed; the default is set to 1 tick.
 */
uint32_t nxge_tx_serial_maxsleep = 1;

#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);
static int nxge_quiesce(dev_info_t *);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_remove(void *arg, int slot);
extern void nxge_grp_cleanup(p_nxge_t nxge);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
	char *, caddr_t);

/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	int slot, int rdctbl, boolean_t usetbl);
void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
	boolean_t factory);
#if defined(sun4v)
extern mblk_t *nxge_m_tx(void *arg, mblk_t *mp);
#endif

static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
	uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
	uint_t, uint_t, void *, uint_t *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
	const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
	void *, uint_t *);
static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);
static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
	mac_ring_info_t *, mac_ring_handle_t);
static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
	mac_ring_type_t);
static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
	mac_ring_type_t);

static void nxge_niu_peu_reset(p_nxge_t nxgep);
static void nxge_set_pci_replay_timeout(nxge_t *);

mac_priv_prop_t nxge_priv_props[] = {
	{"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW},
	{"_function_number", MAC_PROP_PERM_READ},
	{"_fw_version", MAC_PROP_PERM_READ},
	{"_port_mode", MAC_PROP_PERM_READ},
	{"_hot_swap_phy", MAC_PROP_PERM_READ},
	{"_accept_jumbo", MAC_PROP_PERM_RW},
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
	{"_soft_lso_enable", MAC_PROP_PERM_RW}
};

#define	NXGE_MAX_PRIV_PROPS	\
	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	NULL,
	NULL,
	nxge_m_ioctl,
	nxge_m_getcapab,
	NULL,
	NULL,
	nxge_m_setprop,
	nxge_m_getprop
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);
/*
 * For latency-sensitive applications, PAE and customers requested
 * tunables that let the user raise the number of interrupts so they
 * can be spread among multiple channels. The DDI framework limits the
 * maximum number of MSI-X resources to allocate to 8
 * (ddi_msix_alloc_limit). If more than 8 are requested,
 * ddi_msix_alloc_limit must be raised accordingly. The default number
 * of MSI interrupts is 8 for a 10G link and 2 for a 1G link.
 */
#define	NXGE_MSIX_MAX_ALLOWED	32
uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
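
/*
 * Illustrative /etc/system entries (assumed standard syntax, not part
 * of the original source) requesting 16 MSI-X interrupts on a 10G
 * port; the DDI allocation limit must be raised to match, as described
 * above:
 *
 *	set ddi_msix_alloc_limit = 16
 *	set nxge:nxge_msix_10g_intrs = 16
 */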

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t, ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *, ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size so that fewer
 * DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes [] = {0x2000};
#else
size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
	0x10000, 0x20000, 0x40000, 0x80000,
	0x100000, 0x200000, 0x400000, 0x800000,
	0x1000000, 0x2000000, 0x4000000};
#endif
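
/*
 * Worked example (illustrative arithmetic only): with the sizes above,
 * a 6 MB (0x600000 byte) buffer pool can be covered by one 0x400000
 * chunk plus one 0x200000 chunk, instead of 1536 separate 4 KB chunks.
 */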

/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);

static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;
	p_nxge_param_t param_arr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
	    &nxge_dev_desc_dma_acc_attr, &nxge_rx_dma_attr);

	/*
	 * Create & initialize the per-Neptune data structure
	 * (even if we're a guest).
	 */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	/*
	 * Software workaround: set the replay timer.
	 */
	if (nxgep->niu_type != N2_NIU) {
		nxge_set_pci_replay_timeout(nxgep);
	}
#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		nxge_m_callbacks.mc_tx = nxge_m_tx;
	}
#endif

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail4;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
		 * internally; the other 2 ports use BMAC (1G "Big" MAC).
		 * The two types of MACs have different characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);

		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * The address_hi (the first int in the reg property)
		 * contains the config handle, but bits 28-31, which
		 * hold OBP-specific info, must be masked off.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
		ddi_prop_free(regp);
	}

	if (isLDOMguest(nxgep)) {
		uchar_t *prop_val;
		uint_t prop_len;
		uint32_t max_frame_size;

		extern void nxge_get_logical_props(p_nxge_t);

		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
		nxgep->mac.portmode = PORT_LOGICAL;
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
		    "phy-type", "virtual transceiver");

		nxgep->nports = 1;
		nxgep->board_ver = 0;	/* XXX What? */

		/*
		 * local-mac-address property gives us info on which
		 * specific MAC address the Hybrid resource is associated
		 * with.
		 */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "local-mac-address", &prop_val,
		    &prop_len) != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		if (prop_len != ETHERADDRL) {
			ddi_prop_free(prop_val);
			goto nxge_attach_fail5;
		}
		ether_copy(prop_val, nxgep->hio_mac_addr);
		ddi_prop_free(prop_val);
		nxge_get_logical_props(nxgep);

		/*
		 * Enable the Jumbo property based on the "max-frame-size"
		 * property value.
		 */
		max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
		    nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "max-frame-size", NXGE_MTU_DEFAULT_MAX);
		if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
		    (max_frame_size <= TX_JUMBO_MTU)) {
			param_arr = nxgep->param_arr;

			param_arr[param_accept_jumbo].value = 1;
			nxgep->mac.is_jumbo = B_TRUE;
			nxgep->mac.maxframesize = (uint16_t)max_frame_size;
			nxgep->mac.default_mtu = nxgep->mac.maxframesize -
			    NXGE_EHEADER_VLAN_CRC;
		}
	} else {
		status = nxge_xcvr_find(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
			    " Couldn't determine card type"
			    " .... exit "));
			goto nxge_attach_fail5;
		}

		status = nxge_get_config_properties(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "get_hw create failed"));
			goto nxge_attach_fail;
		}
	}
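
	/*
	 * Worked example (illustrative; assumes NXGE_EHEADER_VLAN_CRC
	 * accounts for the Ethernet header, VLAN tag and CRC): a guest
	 * "max-frame-size" of 9194 bytes sets is_jumbo above and yields
	 * a default MTU of 9194 - NXGE_EHEADER_VLAN_CRC.
	 */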

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

	nxge_hw_id_init(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		/* Find our VR & channel sets. */
		status = nxge_hio_vr_add(nxgep);
		if (status != NXGE_OK) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_hio_vr_add failed"));
			(void) hsvc_unregister(&nxgep->niu_hsvc);
			nxgep->niu_hsvc_available = B_FALSE;
		}
		goto nxge_attach_exit;
	}
#endif

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}

	/* If a guest, register with vio_net instead. */
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	/* nxge_link_monitor calls xcvr.check_link recursively */
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int status = DDI_SUCCESS;
	int instance;
	p_nxge_t nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (isLDOMguest(nxgep)) {
		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
			nxge_m_stop((void *)nxgep);
		nxge_hio_unregister(nxgep);
	} else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	/*
	 * If this flag is set, it will affect the Neptune only.
	 */
	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
		nxge_niu_peu_reset(nxgep);
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
int
nxge_hsvc_register(nxge_t *nxgep)
{
	nxge_status_t status;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			return (DDI_FAILURE);
		}
		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}

	return (DDI_SUCCESS);
}
#endif

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	p_dev_regs_t dev_regs;
	char buf[MAXPATHLEN + 1];
	char *devname;
#ifdef NXGE_DEBUG
	char *sysname;
#endif
	off_t regsize;
	nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	/*
	 * The driver is running on an N2-NIU system if devname is
	 * something like "/niu@80/network@0".
	 */
	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int *prop_val;
		uint_t prop_len;
		uint8_t func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				return (NXGE_OK);
			}
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMPORTANT: workaround for a hardware bit-swapping bug
		 * that ends up setting no-snoop = yes, resulting in DMA
		 * not being synched properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}
0x%x", regsize)); 1254 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1255 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1256 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1257 1258 if (ddi_status != DDI_SUCCESS) { 1259 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1260 "ddi_map_regs for nxge vio reg failed")); 1261 goto nxge_map_regs_fail3; 1262 } 1263 nxgep->dev_regs = dev_regs; 1264 1265 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh); 1266 NPI_PCI_ADD_HANDLE_SET(nxgep, 1267 (npi_reg_ptr_t)dev_regs->nxge_pciregp); 1268 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh); 1269 NPI_MSI_ADD_HANDLE_SET(nxgep, 1270 (npi_reg_ptr_t)dev_regs->nxge_msix_regp); 1271 1272 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1273 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1274 1275 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1276 NPI_REG_ADD_HANDLE_SET(nxgep, 1277 (npi_reg_ptr_t)dev_regs->nxge_regp); 1278 1279 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1280 NPI_VREG_ADD_HANDLE_SET(nxgep, 1281 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1282 1283 break; 1284 1285 case N2_NIU: 1286 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU")); 1287 /* 1288 * Set up the device mapped register (FWARC 2006/556) 1289 * (changed back to 1: reg starts at 1!) 1290 */ 1291 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 1292 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1293 "nxge_map_regs: dev size 0x%x", regsize)); 1294 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1295 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1296 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1297 1298 if (ddi_status != DDI_SUCCESS) { 1299 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1300 "ddi_map_regs for N2/NIU, global reg failed ")); 1301 goto nxge_map_regs_fail1; 1302 } 1303 1304 /* set up the first vio region mapped register */ 1305 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1306 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1307 "nxge_map_regs: vio (1) size 0x%x", regsize)); 1308 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1309 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1310 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1311 1312 if (ddi_status != DDI_SUCCESS) { 1313 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1314 "ddi_map_regs for nxge vio reg failed")); 1315 goto nxge_map_regs_fail2; 1316 } 1317 /* set up the second vio region mapped register */ 1318 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1319 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1320 "nxge_map_regs: vio (3) size 0x%x", regsize)); 1321 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1322 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0, 1323 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh); 1324 1325 if (ddi_status != DDI_SUCCESS) { 1326 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1327 "ddi_map_regs for nxge vio2 reg failed")); 1328 goto nxge_map_regs_fail3; 1329 } 1330 nxgep->dev_regs = dev_regs; 1331 1332 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1333 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1334 1335 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1336 NPI_REG_ADD_HANDLE_SET(nxgep, 1337 (npi_reg_ptr_t)dev_regs->nxge_regp); 1338 1339 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1340 NPI_VREG_ADD_HANDLE_SET(nxgep, 1341 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1342 1343 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh); 1344 NPI_V2REG_ADD_HANDLE_SET(nxgep, 1345 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp); 1346 1347 break; 1348 } 1349 1350 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx " 1351 " handle 0x%0llx", 

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		/* free the vio handle (the original freed nxge_regh here) */
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));

	if (isLDOMguest(nxgep)) {
		nxge_guest_regs_map_free(nxgep);
		return;
	}

	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->interrupt_cookie = 0;
	} else {
		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
		    &nxgep->interrupt_cookie);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_setup_mutexes: failed 0x%x",
			    ddi_status));
			goto nxge_setup_mutexes_exit;
		}
	}

	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);

	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context, as FFLP
	 * operations can take a very long time to complete and hence
	 * are not suitable to invoke from interrupt handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 * enable the port, configure the dma channel bitmap,
		 * configure the max burst size).
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep); /* XXX What changes do I need to make here? */

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}

timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}
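
/*
 * Typical usage (illustrative; nxge_check_hw_state and NXGE_CHECK_TIMER
 * are assumed helpers, not defined in this section):
 *
 *	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
 *	    NXGE_CHECK_TIMER);
 *	...
 *	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
 */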

void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	/*
	 * Start the link monitor only if the reset flag is not set.
	 * If the reset flag is set, the monitor is not started, in
	 * order to stop further bus activity coming from this
	 * interface. The driver will start the monitor again if the
	 * interface is initialized later.
	 */
	if (!nxge_peu_reset_enable) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t regdata;
	int i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
	reg = buf[0];

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}

nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;
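
/*
 * Illustrative only: nxge_debug_msg() below is gated by the
 * nxge_debug_level bitmask declared earlier. Assuming standard
 * /etc/system syntax, every message class could be enabled with:
 *
 *	set nxge:nxge_debug_level = 0xffffffffffffffff
 */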

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	/* In case a developer has changed nxge_debug_level. */
	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level)
		nxgep->nxge_debug_level = nxge_debug_level;

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);

	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
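
/*
 * Example (illustrative): nxge_dump_packet((char *)mp->b_rptr, 6) on a
 * packet addressed to the broadcast address returns
 * "ff:ff:ff:ff:ff:ff". A leading zero nibble is suppressed, so the
 * byte 0x0a prints as "a", not "0a".
 */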
NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
1929 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1930 "\nNeptune PCI BAR: base30 0x%x\n",
1931 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));
1932
1933 cfg_handle = nxgep->dev_regs->nxge_pciregh;
1934 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1935 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1936 "first 0x%llx second 0x%llx third 0x%llx "
1937 "last 0x%llx ",
1938 NXGE_PIO_READ64(dev_handle,
1939 (uint64_t *)(dev_ptr + 0), 0),
1940 NXGE_PIO_READ64(dev_handle,
1941 (uint64_t *)(dev_ptr + 8), 0),
1942 NXGE_PIO_READ64(dev_handle,
1943 (uint64_t *)(dev_ptr + 16), 0),
1944 NXGE_PIO_READ64(dev_handle,
1945 (uint64_t *)(dev_ptr + 24), 0)));
1946 }
1947 }
1948
1949 #endif
1950
1951 static void
1952 nxge_suspend(p_nxge_t nxgep)
1953 {
1954 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));
1955
1956 nxge_intrs_disable(nxgep);
1957 nxge_destroy_dev(nxgep);
1958
1959 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
1960 }
1961
1962 static nxge_status_t
1963 nxge_resume(p_nxge_t nxgep)
1964 {
1965 nxge_status_t status = NXGE_OK;
1966
1967 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
1968
1969 nxgep->suspended = DDI_RESUME;
1970 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1971 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
1972 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
1973 (void) nxge_rx_mac_enable(nxgep);
1974 (void) nxge_tx_mac_enable(nxgep);
1975 nxge_intrs_enable(nxgep);
1976 nxgep->suspended = 0;
1977
1978 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1979 "<== nxge_resume status = 0x%x", status));
1980 return (status);
1981 }
1982
1983 static nxge_status_t
1984 nxge_setup_dev(p_nxge_t nxgep)
1985 {
1986 nxge_status_t status = NXGE_OK;
1987
1988 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
1989 nxgep->mac.portnum));
1990
1991 status = nxge_link_init(nxgep);
1992
1993 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
1994 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1995 "port%d Bad register acc handle", nxgep->mac.portnum));
1996 status = NXGE_ERROR;
1997 }
1998
1999 if (status != NXGE_OK) {
2000 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2001 " nxge_setup_dev status "
2002 "(xcvr init 0x%08x)", status));
2003 goto nxge_setup_dev_exit;
2004 }
2005
2006 nxge_setup_dev_exit:
2007 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2008 "<== nxge_setup_dev port %d status = 0x%08x",
2009 nxgep->mac.portnum, status));
2010
2011 return (status);
2012 }
2013
2014 static void
2015 nxge_destroy_dev(p_nxge_t nxgep)
2016 {
2017 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));
2018
2019 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
2020
2021 (void) nxge_hw_stop(nxgep);
2022
2023 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
2024 }
2025
2026 static nxge_status_t
2027 nxge_setup_system_dma_pages(p_nxge_t nxgep)
2028 {
2029 int ddi_status = DDI_SUCCESS;
2030 uint_t count;
2031 ddi_dma_cookie_t cookie;
2032 uint_t iommu_pagesize;
2033 nxge_status_t status = NXGE_OK;
2034
2035 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
2036 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
2037 if (nxgep->niu_type != N2_NIU) {
2038 iommu_pagesize = dvma_pagesize(nxgep->dip);
2039 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2040 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
2041 " default_block_size %d iommu_pagesize %d",
2042 nxgep->sys_page_sz,
2043 ddi_ptob(nxgep->dip, (ulong_t)1),
2044 nxgep->rx_default_block_size,
2045 iommu_pagesize));
2046
2047 if (iommu_pagesize != 0) {
2048 if (nxgep->sys_page_sz ==
iommu_pagesize) { 2049 if (iommu_pagesize > 0x4000) 2050 nxgep->sys_page_sz = 0x4000; 2051 } else { 2052 if (nxgep->sys_page_sz > iommu_pagesize) 2053 nxgep->sys_page_sz = iommu_pagesize; 2054 } 2055 } 2056 } 2057 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2058 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2059 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2060 "default_block_size %d page mask %d", 2061 nxgep->sys_page_sz, 2062 ddi_ptob(nxgep->dip, (ulong_t)1), 2063 nxgep->rx_default_block_size, 2064 nxgep->sys_page_mask)); 2065 2066 2067 switch (nxgep->sys_page_sz) { 2068 default: 2069 nxgep->sys_page_sz = 0x1000; 2070 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2071 nxgep->rx_default_block_size = 0x1000; 2072 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2073 break; 2074 case 0x1000: 2075 nxgep->rx_default_block_size = 0x1000; 2076 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2077 break; 2078 case 0x2000: 2079 nxgep->rx_default_block_size = 0x2000; 2080 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2081 break; 2082 case 0x4000: 2083 nxgep->rx_default_block_size = 0x4000; 2084 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 2085 break; 2086 case 0x8000: 2087 nxgep->rx_default_block_size = 0x8000; 2088 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 2089 break; 2090 } 2091 2092 #ifndef USE_RX_BIG_BUF 2093 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 2094 #else 2095 nxgep->rx_default_block_size = 0x2000; 2096 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2097 #endif 2098 /* 2099 * Get the system DMA burst size. 2100 */ 2101 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2102 DDI_DMA_DONTWAIT, 0, 2103 &nxgep->dmasparehandle); 2104 if (ddi_status != DDI_SUCCESS) { 2105 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2106 "ddi_dma_alloc_handle: failed " 2107 " status 0x%x", ddi_status)); 2108 goto nxge_get_soft_properties_exit; 2109 } 2110 2111 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2112 (caddr_t)nxgep->dmasparehandle, 2113 sizeof (nxgep->dmasparehandle), 2114 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2115 DDI_DMA_DONTWAIT, 0, 2116 &cookie, &count); 2117 if (ddi_status != DDI_DMA_MAPPED) { 2118 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2119 "Binding spare handle to find system" 2120 " burstsize failed.")); 2121 ddi_status = DDI_FAILURE; 2122 goto nxge_get_soft_properties_fail1; 2123 } 2124 2125 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2126 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2127 2128 nxge_get_soft_properties_fail1: 2129 ddi_dma_free_handle(&nxgep->dmasparehandle); 2130 2131 nxge_get_soft_properties_exit: 2132 2133 if (ddi_status != DDI_SUCCESS) 2134 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2135 2136 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2137 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2138 return (status); 2139 } 2140 2141 static nxge_status_t 2142 nxge_alloc_mem_pool(p_nxge_t nxgep) 2143 { 2144 nxge_status_t status = NXGE_OK; 2145 2146 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2147 2148 status = nxge_alloc_rx_mem_pool(nxgep); 2149 if (status != NXGE_OK) { 2150 return (NXGE_ERROR); 2151 } 2152 2153 status = nxge_alloc_tx_mem_pool(nxgep); 2154 if (status != NXGE_OK) { 2155 nxge_free_rx_mem_pool(nxgep); 2156 return (NXGE_ERROR); 2157 } 2158 2159 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2160 return (NXGE_OK); 2161 } 2162 2163 static void 2164 nxge_free_mem_pool(p_nxge_t nxgep) 2165 { 2166 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2167 2168 nxge_free_rx_mem_pool(nxgep); 2169 
nxge_free_tx_mem_pool(nxgep); 2170 2171 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 2172 } 2173 2174 nxge_status_t 2175 nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 2176 { 2177 uint32_t rdc_max; 2178 p_nxge_dma_pt_cfg_t p_all_cfgp; 2179 p_nxge_hw_pt_cfg_t p_cfgp; 2180 p_nxge_dma_pool_t dma_poolp; 2181 p_nxge_dma_common_t *dma_buf_p; 2182 p_nxge_dma_pool_t dma_cntl_poolp; 2183 p_nxge_dma_common_t *dma_cntl_p; 2184 uint32_t *num_chunks; /* per dma */ 2185 nxge_status_t status = NXGE_OK; 2186 2187 uint32_t nxge_port_rbr_size; 2188 uint32_t nxge_port_rbr_spare_size; 2189 uint32_t nxge_port_rcr_size; 2190 uint32_t rx_cntl_alloc_size; 2191 2192 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 2193 2194 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2195 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2196 rdc_max = NXGE_MAX_RDCS; 2197 2198 /* 2199 * Allocate memory for the common DMA data structures. 2200 */ 2201 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2202 KM_SLEEP); 2203 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2204 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2205 2206 dma_cntl_poolp = (p_nxge_dma_pool_t) 2207 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2208 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2209 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2210 2211 num_chunks = (uint32_t *)KMEM_ZALLOC( 2212 sizeof (uint32_t) * rdc_max, KM_SLEEP); 2213 2214 /* 2215 * Assume that each DMA channel will be configured with 2216 * the default block size. 2217 * rbr block counts are modulo the batch count (16). 2218 */ 2219 nxge_port_rbr_size = p_all_cfgp->rbr_size; 2220 nxge_port_rcr_size = p_all_cfgp->rcr_size; 2221 2222 if (!nxge_port_rbr_size) { 2223 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 2224 } 2225 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 2226 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2227 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 2228 } 2229 2230 p_all_cfgp->rbr_size = nxge_port_rbr_size; 2231 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 2232 2233 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 2234 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2235 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 2236 } 2237 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 2238 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2239 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 2240 "set to default %d", 2241 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 2242 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 2243 } 2244 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 2245 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2246 "nxge_alloc_rx_mem_pool: RCR too high %d, " 2247 "set to default %d", 2248 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 2249 nxge_port_rcr_size = RCR_DEFAULT_MAX; 2250 } 2251 2252 /* 2253 * N2/NIU has limitation on the descriptor sizes (contiguous 2254 * memory allocation on data buffers to 4M (contig_mem_alloc) 2255 * and little endian for control buffers (must use the ddi/dki mem alloc 2256 * function). 
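 *
 * To make the limit concrete (illustrative numbers only): with the
 * default 4 KB receive block size, a ring of 2048 blocks would need
 * 2048 * 4 KB = 8 MB of physically contiguous buffer memory, which
 * is beyond the 4 MB that contig_mem_alloc() can provide.  The code
 * below therefore clamps the ring lengths, roughly:
 *
 *	if ((size > NXGE_NIU_CONTIG_RBR_MAX) || !ISP2(size))
 *		size = NXGE_NIU_CONTIG_RBR_MAX;
 *
 * and similarly for the RCR with NXGE_NIU_CONTIG_RCR_MAX.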
2257 */ 2258 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2259 if (nxgep->niu_type == N2_NIU) { 2260 nxge_port_rbr_spare_size = 0; 2261 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2262 (!ISP2(nxge_port_rbr_size))) { 2263 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2264 } 2265 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2266 (!ISP2(nxge_port_rcr_size))) { 2267 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2268 } 2269 } 2270 #endif 2271 2272 /* 2273 * Addresses of receive block ring, receive completion ring and the 2274 * mailbox must be all cache-aligned (64 bytes). 2275 */ 2276 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2277 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2278 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2279 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2280 2281 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2282 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2283 "nxge_port_rcr_size = %d " 2284 "rx_cntl_alloc_size = %d", 2285 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2286 nxge_port_rcr_size, 2287 rx_cntl_alloc_size)); 2288 2289 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2290 if (nxgep->niu_type == N2_NIU) { 2291 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2292 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2293 2294 if (!ISP2(rx_buf_alloc_size)) { 2295 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2296 "==> nxge_alloc_rx_mem_pool: " 2297 " must be power of 2")); 2298 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2299 goto nxge_alloc_rx_mem_pool_exit; 2300 } 2301 2302 if (rx_buf_alloc_size > (1 << 22)) { 2303 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2304 "==> nxge_alloc_rx_mem_pool: " 2305 " limit size to 4M")); 2306 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2307 goto nxge_alloc_rx_mem_pool_exit; 2308 } 2309 2310 if (rx_cntl_alloc_size < 0x2000) { 2311 rx_cntl_alloc_size = 0x2000; 2312 } 2313 } 2314 #endif 2315 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2316 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2317 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2318 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2319 2320 dma_poolp->ndmas = p_cfgp->max_rdcs; 2321 dma_poolp->num_chunks = num_chunks; 2322 dma_poolp->buf_allocated = B_TRUE; 2323 nxgep->rx_buf_pool_p = dma_poolp; 2324 dma_poolp->dma_buf_pool_p = dma_buf_p; 2325 2326 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2327 dma_cntl_poolp->buf_allocated = B_TRUE; 2328 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2329 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2330 2331 /* Allocate the receive rings, too. */ 2332 nxgep->rx_rbr_rings = 2333 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2334 nxgep->rx_rbr_rings->rbr_rings = 2335 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2336 nxgep->rx_rcr_rings = 2337 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2338 nxgep->rx_rcr_rings->rcr_rings = 2339 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2340 nxgep->rx_mbox_areas_p = 2341 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2342 nxgep->rx_mbox_areas_p->rxmbox_areas = 2343 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2344 2345 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2346 p_cfgp->max_rdcs; 2347 2348 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2349 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2350 2351 nxge_alloc_rx_mem_pool_exit: 2352 return (status); 2353 } 2354 2355 /* 2356 * nxge_alloc_rxb 2357 * 2358 * Allocate buffers for an RDC. 
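 *
 * In outline (a minimal sketch of the flow implemented below, using
 * the per-port sizes computed in nxge_alloc_rx_mem_pool()):
 *
 *	size = block_size * (rbr_size + rbr_spare_size);
 *	nxge_alloc_rx_buf_dma(nxgep, channel, &data, size,
 *	    block_size, &num_chunks);
 *	nxge_alloc_rx_cntl_dma(nxgep, channel, &control,
 *	    rx_cntl_alloc_size);
 *
 * If the control allocation fails, the data buffers are marked
 * BUF_ALLOCATED_WAIT_FREE and released again.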
*
2360 * Arguments:
2361 * nxgep
2362 * channel The channel to map into our kernel space.
2363 *
2364 * Notes:
2365 *
2366 * NPI function calls:
2367 *
2368 * NXGE function calls:
2369 *
2370 * Registers accessed:
2371 *
2372 * Context:
2373 *
2374 * Taking apart:
2375 *
2376 * Open questions:
2377 *
2378 */
2379 nxge_status_t
2380 nxge_alloc_rxb(
2381 p_nxge_t nxgep,
2382 int channel)
2383 {
2384 size_t rx_buf_alloc_size;
2385 nxge_status_t status = NXGE_OK;
2386
2387 nxge_dma_common_t **data;
2388 nxge_dma_common_t **control;
2389 uint32_t *num_chunks;
2390
2391 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb"));
2392
2393 /*
2394 * Allocate memory for the receive buffers and descriptor rings.
2395 * Replace these allocation functions with the interface functions
2396 * provided by the partition manager if/when they are available.
2397 */
2398
2399 /*
2400 * Allocate memory for the receive buffer blocks.
2401 */
2402 rx_buf_alloc_size = (nxgep->rx_default_block_size *
2403 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));
2404
2405 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2406 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];
2407
2408 if ((status = nxge_alloc_rx_buf_dma(
2409 nxgep, channel, data, rx_buf_alloc_size,
2410 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
2411 return (status);
2412 }
2413
2414 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): "
2415 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));
2416
2417 /*
2418 * Allocate memory for descriptor rings and mailbox.
2419 */
2420 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2421
2422 if ((status = nxge_alloc_rx_cntl_dma(
2423 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size))
2424 != NXGE_OK) {
2425 nxge_free_rx_cntl_dma(nxgep, *control);
2426 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE;
2427 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks);
2428 return (status);
2429 }
2430
2431 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2432 "<== nxge_alloc_rxb: status 0x%08x", status));
2433
2434 return (status);
2435 }
2436
2437 void
2438 nxge_free_rxb(
2439 p_nxge_t nxgep,
2440 int channel)
2441 {
2442 nxge_dma_common_t *data;
2443 nxge_dma_common_t *control;
2444 uint32_t num_chunks;
2445
2446 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb"));
2447
2448 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2449 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
2450 nxge_free_rx_buf_dma(nxgep, data, num_chunks);
2451
2452 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2453 nxgep->rx_buf_pool_p->num_chunks[channel] = 0;
2454
2455 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2456 nxge_free_rx_cntl_dma(nxgep, control);
2457
2458 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2459
2460 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2461 KMEM_FREE(control, sizeof (nxge_dma_common_t));
2462
2463 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb"));
2464 }
2465
2466 static void
2467 nxge_free_rx_mem_pool(p_nxge_t nxgep)
2468 {
2469 int rdc_max = NXGE_MAX_RDCS;
2470
2471 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
2472
2473 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
2474 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2475 "<== nxge_free_rx_mem_pool "
2476 "(null rx buf pool or buf not allocated)"));
2477 return;
2478 }
2479 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
2480 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2481 "<== nxge_free_rx_mem_pool
" 2482 "(null rx cntl buf pool or cntl buf not allocated")); 2483 return; 2484 } 2485 2486 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2487 sizeof (p_nxge_dma_common_t) * rdc_max); 2488 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2489 2490 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2491 sizeof (uint32_t) * rdc_max); 2492 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2493 sizeof (p_nxge_dma_common_t) * rdc_max); 2494 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2495 2496 nxgep->rx_buf_pool_p = 0; 2497 nxgep->rx_cntl_pool_p = 0; 2498 2499 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2500 sizeof (p_rx_rbr_ring_t) * rdc_max); 2501 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2502 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2503 sizeof (p_rx_rcr_ring_t) * rdc_max); 2504 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2505 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2506 sizeof (p_rx_mbox_t) * rdc_max); 2507 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2508 2509 nxgep->rx_rbr_rings = 0; 2510 nxgep->rx_rcr_rings = 0; 2511 nxgep->rx_mbox_areas_p = 0; 2512 2513 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2514 } 2515 2516 2517 static nxge_status_t 2518 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2519 p_nxge_dma_common_t *dmap, 2520 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2521 { 2522 p_nxge_dma_common_t rx_dmap; 2523 nxge_status_t status = NXGE_OK; 2524 size_t total_alloc_size; 2525 size_t allocated = 0; 2526 int i, size_index, array_size; 2527 boolean_t use_kmem_alloc = B_FALSE; 2528 2529 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2530 2531 rx_dmap = (p_nxge_dma_common_t) 2532 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2533 KM_SLEEP); 2534 2535 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2536 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2537 dma_channel, alloc_size, block_size, dmap)); 2538 2539 total_alloc_size = alloc_size; 2540 2541 #if defined(RX_USE_RECLAIM_POST) 2542 total_alloc_size = alloc_size + alloc_size/4; 2543 #endif 2544 2545 i = 0; 2546 size_index = 0; 2547 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2548 while ((size_index < array_size) && 2549 (alloc_sizes[size_index] < alloc_size)) 2550 size_index++; 2551 if (size_index >= array_size) { 2552 size_index = array_size - 1; 2553 } 2554 2555 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2556 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2557 use_kmem_alloc = B_TRUE; 2558 #if defined(__i386) || defined(__amd64) 2559 size_index = 0; 2560 #endif 2561 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2562 "==> nxge_alloc_rx_buf_dma: " 2563 "Neptune use kmem_alloc() - size_index %d", 2564 size_index)); 2565 } 2566 2567 while ((allocated < total_alloc_size) && 2568 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2569 rx_dmap[i].dma_chunk_index = i; 2570 rx_dmap[i].block_size = block_size; 2571 rx_dmap[i].alength = alloc_sizes[size_index]; 2572 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2573 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2574 rx_dmap[i].dma_channel = dma_channel; 2575 rx_dmap[i].contig_alloc_type = B_FALSE; 2576 rx_dmap[i].kmem_alloc_type = B_FALSE; 2577 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2578 2579 /* 2580 * N2/NIU: data buffers must be contiguous as the driver 2581 * needs to call Hypervisor api to set up 2582 * logical pages. 
2583 */ 2584 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2585 rx_dmap[i].contig_alloc_type = B_TRUE; 2586 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2587 } else if (use_kmem_alloc) { 2588 /* For Neptune, use kmem_alloc */ 2589 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2590 "==> nxge_alloc_rx_buf_dma: " 2591 "Neptune use kmem_alloc()")); 2592 rx_dmap[i].kmem_alloc_type = B_TRUE; 2593 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2594 } 2595 2596 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2597 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2598 "i %d nblocks %d alength %d", 2599 dma_channel, i, &rx_dmap[i], block_size, 2600 i, rx_dmap[i].nblocks, 2601 rx_dmap[i].alength)); 2602 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2603 &nxge_rx_dma_attr, 2604 rx_dmap[i].alength, 2605 &nxge_dev_buf_dma_acc_attr, 2606 DDI_DMA_READ | DDI_DMA_STREAMING, 2607 (p_nxge_dma_common_t)(&rx_dmap[i])); 2608 if (status != NXGE_OK) { 2609 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2610 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2611 "dma %d size_index %d size requested %d", 2612 dma_channel, 2613 size_index, 2614 rx_dmap[i].alength)); 2615 size_index--; 2616 } else { 2617 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2618 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2619 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2620 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2621 "buf_alloc_state %d alloc_type %d", 2622 dma_channel, 2623 &rx_dmap[i], 2624 rx_dmap[i].kaddrp, 2625 rx_dmap[i].alength, 2626 rx_dmap[i].buf_alloc_state, 2627 rx_dmap[i].buf_alloc_type)); 2628 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2629 " alloc_rx_buf_dma allocated rdc %d " 2630 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2631 dma_channel, i, rx_dmap[i].alength, 2632 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2633 rx_dmap[i].kaddrp)); 2634 i++; 2635 allocated += alloc_sizes[size_index]; 2636 } 2637 } 2638 2639 if (allocated < total_alloc_size) { 2640 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2641 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2642 "allocated 0x%x requested 0x%x", 2643 dma_channel, 2644 allocated, total_alloc_size)); 2645 status = NXGE_ERROR; 2646 goto nxge_alloc_rx_mem_fail1; 2647 } 2648 2649 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2650 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2651 "allocated 0x%x requested 0x%x", 2652 dma_channel, 2653 allocated, total_alloc_size)); 2654 2655 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2656 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2657 dma_channel, i)); 2658 *num_chunks = i; 2659 *dmap = rx_dmap; 2660 2661 goto nxge_alloc_rx_mem_exit; 2662 2663 nxge_alloc_rx_mem_fail1: 2664 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2665 2666 nxge_alloc_rx_mem_exit: 2667 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2668 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2669 2670 return (status); 2671 } 2672 2673 /*ARGSUSED*/ 2674 static void 2675 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2676 uint32_t num_chunks) 2677 { 2678 int i; 2679 2680 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2681 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2682 2683 if (dmap == 0) 2684 return; 2685 2686 for (i = 0; i < num_chunks; i++) { 2687 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2688 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2689 i, dmap)); 2690 nxge_dma_free_rx_data_buf(dmap++); 2691 } 2692 2693 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2694 } 2695 2696 /*ARGSUSED*/ 2697 static nxge_status_t 2698 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2699 p_nxge_dma_common_t *dmap, size_t 
size)
2700 {
2701 p_nxge_dma_common_t rx_dmap;
2702 nxge_status_t status = NXGE_OK;
2703
2704 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
2705
2706 rx_dmap = (p_nxge_dma_common_t)
2707 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2708
2709 rx_dmap->contig_alloc_type = B_FALSE;
2710 rx_dmap->kmem_alloc_type = B_FALSE;
2711
2712 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2713 &nxge_desc_dma_attr,
2714 size,
2715 &nxge_dev_desc_dma_acc_attr,
2716 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2717 rx_dmap);
2718 if (status != NXGE_OK) {
2719 goto nxge_alloc_rx_cntl_dma_fail1;
2720 }
2721
2722 *dmap = rx_dmap;
2723 goto nxge_alloc_rx_cntl_dma_exit;
2724
2725 nxge_alloc_rx_cntl_dma_fail1:
2726 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
2727
2728 nxge_alloc_rx_cntl_dma_exit:
2729 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2730 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
2731
2732 return (status);
2733 }
2734
2735 /*ARGSUSED*/
2736 static void
2737 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2738 {
2739 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
2740
2741 if (dmap == 0)
2742 return;
2743
2744 nxge_dma_mem_free(dmap);
2745
2746 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
2747 }
2748
2749 typedef struct {
2750 size_t tx_size;
2751 size_t cr_size;
2752 size_t threshhold;
2753 } nxge_tdc_sizes_t;
2754
2755 static
2756 nxge_status_t
2757 nxge_tdc_sizes(
2758 nxge_t *nxgep,
2759 nxge_tdc_sizes_t *sizes)
2760 {
2761 uint32_t threshhold; /* The bcopy() threshhold */
2762 size_t tx_size; /* Transmit buffer size */
2763 size_t cr_size; /* Completion ring size */
2764
2765 /*
2766 * Assume that each DMA channel will be configured with the
2767 * default transmit buffer size for copying transmit data.
2768 * (If a packet is bigger than this, it will not be copied.)
2769 */
2770 if (nxgep->niu_type == N2_NIU) {
2771 threshhold = TX_BCOPY_SIZE;
2772 } else {
2773 threshhold = nxge_bcopy_thresh;
2774 }
2775 tx_size = nxge_tx_ring_size * threshhold;
2776
2777 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t);
2778 cr_size += sizeof (txdma_mailbox_t);
2779
2780 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2781 if (nxgep->niu_type == N2_NIU) {
2782 if (!ISP2(tx_size)) {
2783 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2784 "==> nxge_tdc_sizes: Tx size"
2785 " must be power of 2"));
2786 return (NXGE_ERROR);
2787 }
2788
2789 if (tx_size > (1 << 22)) {
2790 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2791 "==> nxge_tdc_sizes: Tx size"
2792 " limited to 4M"));
2793 return (NXGE_ERROR);
2794 }
2795
2796 if (cr_size < 0x2000)
2797 cr_size = 0x2000;
2798 }
2799 #endif
2800
2801 sizes->threshhold = threshhold;
2802 sizes->tx_size = tx_size;
2803 sizes->cr_size = cr_size;
2804
2805 return (NXGE_OK);
2806 }
2807 /*
2808 * nxge_alloc_txb
2809 *
2810 * Allocate buffers for a TDC.
2811 *
2812 * Arguments:
2813 * nxgep
2814 * channel The channel to map into our kernel space.
2815 *
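 *
 * Sizing (computed by nxge_tdc_sizes() above; the numbers here are
 * only an illustration, assuming a 1024-entry ring and a 2 KB bcopy
 * threshold):
 *
 *	tx_size = 1024 * 2 KB		transmit copy buffers
 *	cr_size = 1024 * sizeof (tx_desc_t) + sizeof (txdma_mailbox_t)
 *
 * For N2/NIU, tx_size must be a power of 2 no larger than 4 MB, and
 * cr_size is raised to at least 0x2000 bytes.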
2816 * Notes:
2817 *
2818 * NPI function calls:
2819 *
2820 * NXGE function calls:
2821 *
2822 * Registers accessed:
2823 *
2824 * Context:
2825 *
2826 * Taking apart:
2827 *
2828 * Open questions:
2829 *
2830 */
2831 nxge_status_t
2832 nxge_alloc_txb(
2833 p_nxge_t nxgep,
2834 int channel)
2835 {
2836 nxge_dma_common_t **dma_buf_p;
2837 nxge_dma_common_t **dma_cntl_p;
2838 uint32_t *num_chunks;
2839 nxge_status_t status = NXGE_OK;
2840
2841 nxge_tdc_sizes_t sizes;
2842
2843 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb"));
2844
2845 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK)
2846 return (NXGE_ERROR);
2847
2848 /*
2849 * Allocate memory for transmit buffers and descriptor rings.
2850 * Replace these allocation functions with the interface functions
2851 * provided by the partition manager Real Soon Now.
2852 */
2853 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2854 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel];
2855
2856 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2857
2858 /*
2859 * Allocate memory for the transmit buffer pool.
2860 * (The descriptor ring and mailbox are allocated
2861 * below.)
2862 */
2863
2864
2865 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2866 "sizes: tx: %ld, cr:%ld, th:%ld",
2867 sizes.tx_size, sizes.cr_size, sizes.threshhold));
2868
2869 *num_chunks = 0;
2870 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p,
2871 sizes.tx_size, sizes.threshhold, num_chunks);
2872 if (status != NXGE_OK) {
2873 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!");
2874 return (status);
2875 }
2876
2877 /*
2878 * Allocate memory for descriptor rings and mailbox.
2879 */
2880 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p,
2881 sizes.cr_size);
2882 if (status != NXGE_OK) {
2883 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks);
2884 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!");
2885 return (status);
2886 }
2887
2888 return (NXGE_OK);
2889 }
2890
2891 void
2892 nxge_free_txb(
2893 p_nxge_t nxgep,
2894 int channel)
2895 {
2896 nxge_dma_common_t *data;
2897 nxge_dma_common_t *control;
2898 uint32_t num_chunks;
2899
2900 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb"));
2901
2902 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2903 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2904 nxge_free_tx_buf_dma(nxgep, data, num_chunks);
2905
2906 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2907 nxgep->tx_buf_pool_p->num_chunks[channel] = 0;
2908
2909 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2910 nxge_free_tx_cntl_dma(nxgep, control);
2911
2912 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2913
2914 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2915 KMEM_FREE(control, sizeof (nxge_dma_common_t));
2916
2917 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb"));
2918 }
2919
2920 /*
2921 * nxge_alloc_tx_mem_pool
2922 *
2923 * This function allocates all of the per-port TDC control data structures.
2924 * The per-channel (TDC) data structures are allocated when needed.
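 *
 * That is, this routine only sets up the per-port arrays (sized
 * for NXGE_MAX_TDCS channels); a later call such as
 *
 *	(void) nxge_alloc_txb(nxgep, channel);
 *
 * fills in dma_buf_pool_p[channel] and num_chunks[channel] when a
 * channel is actually brought up.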
2925 * 2926 * Arguments: 2927 * nxgep 2928 * 2929 * Notes: 2930 * 2931 * Context: 2932 * Any domain 2933 */ 2934 nxge_status_t 2935 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2936 { 2937 nxge_hw_pt_cfg_t *p_cfgp; 2938 nxge_dma_pool_t *dma_poolp; 2939 nxge_dma_common_t **dma_buf_p; 2940 nxge_dma_pool_t *dma_cntl_poolp; 2941 nxge_dma_common_t **dma_cntl_p; 2942 uint32_t *num_chunks; /* per dma */ 2943 int tdc_max; 2944 2945 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2946 2947 p_cfgp = &nxgep->pt_config.hw_config; 2948 tdc_max = NXGE_MAX_TDCS; 2949 2950 /* 2951 * Allocate memory for each transmit DMA channel. 2952 */ 2953 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2954 KM_SLEEP); 2955 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2956 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2957 2958 dma_cntl_poolp = (p_nxge_dma_pool_t) 2959 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2960 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2961 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2962 2963 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2964 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2965 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2966 "set to default %d", 2967 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2968 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2969 } 2970 2971 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2972 /* 2973 * N2/NIU has limitation on the descriptor sizes (contiguous 2974 * memory allocation on data buffers to 4M (contig_mem_alloc) 2975 * and little endian for control buffers (must use the ddi/dki mem alloc 2976 * function). The transmit ring is limited to 8K (includes the 2977 * mailbox). 2978 */ 2979 if (nxgep->niu_type == N2_NIU) { 2980 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2981 (!ISP2(nxge_tx_ring_size))) { 2982 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2983 } 2984 } 2985 #endif 2986 2987 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2988 2989 num_chunks = (uint32_t *)KMEM_ZALLOC( 2990 sizeof (uint32_t) * tdc_max, KM_SLEEP); 2991 2992 dma_poolp->ndmas = p_cfgp->tdc.owned; 2993 dma_poolp->num_chunks = num_chunks; 2994 dma_poolp->dma_buf_pool_p = dma_buf_p; 2995 nxgep->tx_buf_pool_p = dma_poolp; 2996 2997 dma_poolp->buf_allocated = B_TRUE; 2998 2999 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 3000 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 3001 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 3002 3003 dma_cntl_poolp->buf_allocated = B_TRUE; 3004 3005 nxgep->tx_rings = 3006 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 3007 nxgep->tx_rings->rings = 3008 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 3009 nxgep->tx_mbox_areas_p = 3010 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 3011 nxgep->tx_mbox_areas_p->txmbox_areas_p = 3012 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 3013 3014 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 3015 3016 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3017 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 3018 tdc_max, dma_poolp->ndmas)); 3019 3020 return (NXGE_OK); 3021 } 3022 3023 nxge_status_t 3024 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 3025 p_nxge_dma_common_t *dmap, size_t alloc_size, 3026 size_t block_size, uint32_t *num_chunks) 3027 { 3028 p_nxge_dma_common_t tx_dmap; 3029 nxge_status_t status = NXGE_OK; 3030 size_t total_alloc_size; 3031 size_t allocated = 0; 3032 int i, size_index, array_size; 3033 3034 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 3035 3036 tx_dmap = (p_nxge_dma_common_t) 3037 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * 
NXGE_DMA_BLOCK, 3038 KM_SLEEP); 3039 3040 total_alloc_size = alloc_size; 3041 i = 0; 3042 size_index = 0; 3043 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3044 while ((size_index < array_size) && 3045 (alloc_sizes[size_index] < alloc_size)) 3046 size_index++; 3047 if (size_index >= array_size) { 3048 size_index = array_size - 1; 3049 } 3050 3051 while ((allocated < total_alloc_size) && 3052 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3053 3054 tx_dmap[i].dma_chunk_index = i; 3055 tx_dmap[i].block_size = block_size; 3056 tx_dmap[i].alength = alloc_sizes[size_index]; 3057 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3058 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3059 tx_dmap[i].dma_channel = dma_channel; 3060 tx_dmap[i].contig_alloc_type = B_FALSE; 3061 tx_dmap[i].kmem_alloc_type = B_FALSE; 3062 3063 /* 3064 * N2/NIU: data buffers must be contiguous as the driver 3065 * needs to call Hypervisor api to set up 3066 * logical pages. 3067 */ 3068 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3069 tx_dmap[i].contig_alloc_type = B_TRUE; 3070 } 3071 3072 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3073 &nxge_tx_dma_attr, 3074 tx_dmap[i].alength, 3075 &nxge_dev_buf_dma_acc_attr, 3076 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3077 (p_nxge_dma_common_t)(&tx_dmap[i])); 3078 if (status != NXGE_OK) { 3079 size_index--; 3080 } else { 3081 i++; 3082 allocated += alloc_sizes[size_index]; 3083 } 3084 } 3085 3086 if (allocated < total_alloc_size) { 3087 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3088 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 3089 "allocated 0x%x requested 0x%x", 3090 dma_channel, 3091 allocated, total_alloc_size)); 3092 status = NXGE_ERROR; 3093 goto nxge_alloc_tx_mem_fail1; 3094 } 3095 3096 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3097 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3098 "allocated 0x%x requested 0x%x", 3099 dma_channel, 3100 allocated, total_alloc_size)); 3101 3102 *num_chunks = i; 3103 *dmap = tx_dmap; 3104 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3105 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3106 *dmap, i)); 3107 goto nxge_alloc_tx_mem_exit; 3108 3109 nxge_alloc_tx_mem_fail1: 3110 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3111 3112 nxge_alloc_tx_mem_exit: 3113 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3114 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3115 3116 return (status); 3117 } 3118 3119 /*ARGSUSED*/ 3120 static void 3121 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3122 uint32_t num_chunks) 3123 { 3124 int i; 3125 3126 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3127 3128 if (dmap == 0) 3129 return; 3130 3131 for (i = 0; i < num_chunks; i++) { 3132 nxge_dma_mem_free(dmap++); 3133 } 3134 3135 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3136 } 3137 3138 /*ARGSUSED*/ 3139 nxge_status_t 3140 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3141 p_nxge_dma_common_t *dmap, size_t size) 3142 { 3143 p_nxge_dma_common_t tx_dmap; 3144 nxge_status_t status = NXGE_OK; 3145 3146 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3147 tx_dmap = (p_nxge_dma_common_t) 3148 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3149 3150 tx_dmap->contig_alloc_type = B_FALSE; 3151 tx_dmap->kmem_alloc_type = B_FALSE; 3152 3153 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3154 &nxge_desc_dma_attr, 3155 size, 3156 &nxge_dev_desc_dma_acc_attr, 3157 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3158 tx_dmap); 3159 if (status != NXGE_OK) { 3160 
goto nxge_alloc_tx_cntl_dma_fail1; 3161 } 3162 3163 *dmap = tx_dmap; 3164 goto nxge_alloc_tx_cntl_dma_exit; 3165 3166 nxge_alloc_tx_cntl_dma_fail1: 3167 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3168 3169 nxge_alloc_tx_cntl_dma_exit: 3170 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3171 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3172 3173 return (status); 3174 } 3175 3176 /*ARGSUSED*/ 3177 static void 3178 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3179 { 3180 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3181 3182 if (dmap == 0) 3183 return; 3184 3185 nxge_dma_mem_free(dmap); 3186 3187 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3188 } 3189 3190 /* 3191 * nxge_free_tx_mem_pool 3192 * 3193 * This function frees all of the per-port TDC control data structures. 3194 * The per-channel (TDC) data structures are freed when the channel 3195 * is stopped. 3196 * 3197 * Arguments: 3198 * nxgep 3199 * 3200 * Notes: 3201 * 3202 * Context: 3203 * Any domain 3204 */ 3205 static void 3206 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3207 { 3208 int tdc_max = NXGE_MAX_TDCS; 3209 3210 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3211 3212 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3213 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3214 "<== nxge_free_tx_mem_pool " 3215 "(null tx buf pool or buf not allocated")); 3216 return; 3217 } 3218 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3219 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3220 "<== nxge_free_tx_mem_pool " 3221 "(null tx cntl buf pool or cntl buf not allocated")); 3222 return; 3223 } 3224 3225 /* 1. Free the mailboxes. */ 3226 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3227 sizeof (p_tx_mbox_t) * tdc_max); 3228 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3229 3230 nxgep->tx_mbox_areas_p = 0; 3231 3232 /* 2. Free the transmit ring arrays. */ 3233 KMEM_FREE(nxgep->tx_rings->rings, 3234 sizeof (p_tx_ring_t) * tdc_max); 3235 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3236 3237 nxgep->tx_rings = 0; 3238 3239 /* 3. Free the completion ring data structures. */ 3240 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3241 sizeof (p_nxge_dma_common_t) * tdc_max); 3242 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3243 3244 nxgep->tx_cntl_pool_p = 0; 3245 3246 /* 4. Free the data ring data structures. */ 3247 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3248 sizeof (uint32_t) * tdc_max); 3249 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3250 sizeof (p_nxge_dma_common_t) * tdc_max); 3251 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3252 3253 nxgep->tx_buf_pool_p = 0; 3254 3255 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3256 } 3257 3258 /*ARGSUSED*/ 3259 static nxge_status_t 3260 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3261 struct ddi_dma_attr *dma_attrp, 3262 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3263 p_nxge_dma_common_t dma_p) 3264 { 3265 caddr_t kaddrp; 3266 int ddi_status = DDI_SUCCESS; 3267 boolean_t contig_alloc_type; 3268 boolean_t kmem_alloc_type; 3269 3270 contig_alloc_type = dma_p->contig_alloc_type; 3271 3272 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3273 /* 3274 * contig_alloc_type for contiguous memory only allowed 3275 * for N2/NIU. 
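 *
 * Neptune (PCI-E) devices can use ordinary DDI DMA bindings, while
 * the N2/NIU hypervisor interface programs logical pages and so
 * requires physically contiguous buffers; the guard below simply
 * rejects the unsupported combination (sketch):
 *
 *	if (contig_alloc_type && niu_type != N2_NIU)
 *		return (NXGE_ERROR | NXGE_DDI_FAILED);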
3276 */
3277 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3278 "nxge_dma_mem_alloc: alloc type not allowed (%d)",
3279 dma_p->contig_alloc_type));
3280 return (NXGE_ERROR | NXGE_DDI_FAILED);
3281 }
3282
3283 dma_p->dma_handle = NULL;
3284 dma_p->acc_handle = NULL;
3285 dma_p->kaddrp = dma_p->last_kaddrp = NULL;
3286 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
3287 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
3288 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
3289 if (ddi_status != DDI_SUCCESS) {
3290 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3291 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
3292 return (NXGE_ERROR | NXGE_DDI_FAILED);
3293 }
3294
3295 kmem_alloc_type = dma_p->kmem_alloc_type;
3296
3297 switch (contig_alloc_type) {
3298 case B_FALSE:
3299 switch (kmem_alloc_type) {
3300 case B_FALSE:
3301 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle,
3302 length,
3303 acc_attr_p,
3304 xfer_flags,
3305 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
3306 &dma_p->acc_handle);
3307 if (ddi_status != DDI_SUCCESS) {
3308 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3309 "nxge_dma_mem_alloc: "
3310 "ddi_dma_mem_alloc failed"));
3311 ddi_dma_free_handle(&dma_p->dma_handle);
3312 dma_p->dma_handle = NULL;
3313 return (NXGE_ERROR | NXGE_DDI_FAILED);
3314 }
3315 if (dma_p->alength < length) {
3316 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3317 "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
3318 "< length."));
3319 ddi_dma_mem_free(&dma_p->acc_handle);
3320 ddi_dma_free_handle(&dma_p->dma_handle);
3321 dma_p->acc_handle = NULL;
3322 dma_p->dma_handle = NULL;
3323 return (NXGE_ERROR);
3324 }
3325
3326 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3327 NULL,
3328 kaddrp, dma_p->alength, xfer_flags,
3329 DDI_DMA_DONTWAIT,
3330 0, &dma_p->dma_cookie, &dma_p->ncookies);
3331 if (ddi_status != DDI_DMA_MAPPED) {
3332 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3333 "nxge_dma_mem_alloc: ddi_dma_addr_bind "
3334 "failed "
3335 "(status 0x%x ncookies %d.)", ddi_status,
3336 dma_p->ncookies));
3337 if (dma_p->acc_handle) {
3338 ddi_dma_mem_free(&dma_p->acc_handle);
3339 dma_p->acc_handle = NULL;
3340 }
3341 ddi_dma_free_handle(&dma_p->dma_handle);
3342 dma_p->dma_handle = NULL;
3343 return (NXGE_ERROR | NXGE_DDI_FAILED);
3344 }
3345
3346 if (dma_p->ncookies != 1) {
3347 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3348 "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3349 "> 1 cookie"
3350 "(status 0x%x ncookies %d.)", ddi_status,
3351 dma_p->ncookies));
3352 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3353 if (dma_p->acc_handle) {
3354 ddi_dma_mem_free(&dma_p->acc_handle);
3355 dma_p->acc_handle = NULL;
3356 }
3357 ddi_dma_free_handle(&dma_p->dma_handle);
3358 dma_p->dma_handle = NULL;
3359 dma_p->acc_handle = NULL;
3360 return (NXGE_ERROR);
3361 }
3362 break;
3363
3364 case B_TRUE:
3365 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP);
3366 if (kaddrp == NULL) {
3367 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3368 "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
3369 "kmem alloc failed"));
3370 return (NXGE_ERROR);
3371 }
3372
3373 dma_p->alength = length;
3374 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3375 NULL, kaddrp, dma_p->alength, xfer_flags,
3376 DDI_DMA_DONTWAIT, 0,
3377 &dma_p->dma_cookie, &dma_p->ncookies);
3378 if (ddi_status != DDI_DMA_MAPPED) {
3379 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3380 "nxge_dma_mem_alloc:ddi_dma_addr_bind: "
3381 "(kmem_alloc) failed kaddrp $%p length %d "
3382 "(status 0x%x (%d) ncookies %d.)",
3383 kaddrp, length,
3384 ddi_status, ddi_status, dma_p->ncookies));
3385 KMEM_FREE(kaddrp, length);
3386 dma_p->acc_handle = NULL;
3387 ddi_dma_free_handle(&dma_p->dma_handle);
3388 dma_p->dma_handle = NULL;
3389 dma_p->kaddrp = NULL;
3390 return (NXGE_ERROR | NXGE_DDI_FAILED);
3391 }
3392
3393 if (dma_p->ncookies != 1) {
3394 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3395 "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3396 "(kmem_alloc) > 1 cookie"
3397 "(status 0x%x ncookies %d.)", ddi_status,
3398 dma_p->ncookies));
3399 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3400 KMEM_FREE(kaddrp, length);
3401 ddi_dma_free_handle(&dma_p->dma_handle);
3402 dma_p->dma_handle = NULL;
3403 dma_p->acc_handle = NULL;
3404 dma_p->kaddrp = NULL;
3405 return (NXGE_ERROR);
3406 }
3407
3408 dma_p->kaddrp = kaddrp;
3409
3410 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
3411 "nxge_dma_mem_alloc: kmem_alloc dmap $%p "
3412 "kaddr $%p alength %d",
3413 dma_p,
3414 kaddrp,
3415 dma_p->alength));
3416 break;
3417 }
3418 break;
3419
3420 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3421 case B_TRUE:
3422 kaddrp = (caddr_t)contig_mem_alloc(length);
3423 if (kaddrp == NULL) {
3424 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3425 "nxge_dma_mem_alloc:contig_mem_alloc failed."));
3426 ddi_dma_free_handle(&dma_p->dma_handle);
3427 return (NXGE_ERROR | NXGE_DDI_FAILED);
3428 }
3429
3430 dma_p->alength = length;
3431 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
3432 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
3433 &dma_p->dma_cookie, &dma_p->ncookies);
3434 if (ddi_status != DDI_DMA_MAPPED) {
3435 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3436 "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
3437 "(status 0x%x ncookies %d.)", ddi_status,
3438 dma_p->ncookies));
3439
3440 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3441 "==> nxge_dma_mem_alloc: (not mapped)"
3442 "length %lu (0x%x) "
3443 "free contig kaddrp $%p "
3444 "va_to_pa $%p",
3445 length, length,
3446 kaddrp,
3447 va_to_pa(kaddrp)));
3448
3449
3450 contig_mem_free((void *)kaddrp, length);
3451 ddi_dma_free_handle(&dma_p->dma_handle);
3452
3453 dma_p->dma_handle = NULL;
3454 dma_p->acc_handle = NULL;
3455 dma_p->alength = 0;
3456 dma_p->kaddrp = NULL;
3457
3458 return (NXGE_ERROR | NXGE_DDI_FAILED);
3459 }
3460
3461 if (dma_p->ncookies != 1 ||
3462 (dma_p->dma_cookie.dmac_laddress == 0)) {
3463 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3464 "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
3465 "cookie or "
3466 "dmac_laddress is NULL $%p size %d "
3467 " (status 0x%x ncookies %d.)",
3468 dma_p->dma_cookie.dmac_laddress,
3469 dma_p->dma_cookie.dmac_size,
3470 ddi_status,
3471 dma_p->ncookies));
3472
3473 contig_mem_free((void *)kaddrp, length);
3474 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3475 ddi_dma_free_handle(&dma_p->dma_handle);
3476
3477 dma_p->alength = 0;
3478 dma_p->dma_handle = NULL;
3479 dma_p->acc_handle = NULL;
3480 dma_p->kaddrp = NULL;
3481
3482 return (NXGE_ERROR | NXGE_DDI_FAILED);
3483 }
3484 break;
3485
3486 #else
3487 case B_TRUE:
3488 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3489 "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
3490 return (NXGE_ERROR | NXGE_DDI_FAILED);
3491 #endif
3492 }
3493
3494 dma_p->kaddrp = kaddrp;
3495 dma_p->last_kaddrp = (unsigned char *)kaddrp +
3496 dma_p->alength - RXBUF_64B_ALIGNED;
3497 #if defined(__i386)
3498 dma_p->ioaddr_pp =
3499 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
3500 #else
3501 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3502 #endif
3503 dma_p->last_ioaddr_pp =
3504 #if defined(__i386)
3505 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
3506 #else
3507 (unsigned char *)dma_p->dma_cookie.dmac_laddress +
3508 #endif
3509 dma_p->alength - RXBUF_64B_ALIGNED;
3510
3511 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
3512
3513 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3514 dma_p->orig_ioaddr_pp =
3515 (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3516 dma_p->orig_alength = length;
3517 dma_p->orig_kaddrp = kaddrp;
3518 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
3519 #endif
3520
3521 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
3522 "dma buffer allocated: dma_p $%p "
3523 "return dmac_laddress from cookie $%p cookie dmac_size %d "
3524 "dma_p->ioaddr_p $%p "
3525 "dma_p->orig_ioaddr_p $%p "
3526 "orig_vatopa $%p "
3527 "alength %d (0x%x) "
3528 "kaddrp $%p "
3529 "length %d (0x%x)",
3530 dma_p,
3531 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
3532 dma_p->ioaddr_pp,
3533 dma_p->orig_ioaddr_pp,
3534 dma_p->orig_vatopa,
3535 dma_p->alength, dma_p->alength,
3536 kaddrp,
3537 length, length));
3538
3539 return (NXGE_OK);
3540 }
3541
3542 static void
3543 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
3544 {
3545 if (dma_p->dma_handle != NULL) {
3546 if (dma_p->ncookies) {
3547 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3548 dma_p->ncookies = 0;
3549 }
3550 ddi_dma_free_handle(&dma_p->dma_handle);
3551 dma_p->dma_handle = NULL;
3552 }
3553
3554 if (dma_p->acc_handle != NULL) {
3555 ddi_dma_mem_free(&dma_p->acc_handle);
3556 dma_p->acc_handle = NULL;
3557 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3558 }
3559
3560 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3561 if (dma_p->contig_alloc_type &&
3562 dma_p->orig_kaddrp && dma_p->orig_alength) {
3563 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
3564 "kaddrp $%p (orig_kaddrp $%p)"
3565 "mem type %d "
3566 "orig_alength %d "
3567 "alength 0x%x (%d)",
3568 dma_p->kaddrp,
3569 dma_p->orig_kaddrp,
3570 dma_p->contig_alloc_type,
3571 dma_p->orig_alength,
3572 dma_p->alength, dma_p->alength));
3573
3574 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
3575 dma_p->orig_alength = 0;
3576 dma_p->orig_kaddrp = NULL;
3577 dma_p->contig_alloc_type = B_FALSE;
3578 }
3579 #endif
3580 dma_p->kaddrp = NULL;
3581 dma_p->alength = 0;
3582 }
3583
3584 static void
3585 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
3586 {
3587 uint64_t kaddr;
3588 uint32_t buf_size;
3589
3590 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));
3591
3592 if (dma_p->dma_handle != NULL) {
3593 if (dma_p->ncookies) {
3594 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3595 dma_p->ncookies = 0;
3596 }
3597 ddi_dma_free_handle(&dma_p->dma_handle);
3598 dma_p->dma_handle = NULL;
3599 }
3600
3601 if (dma_p->acc_handle != NULL) {
3602 ddi_dma_mem_free(&dma_p->acc_handle);
3603 dma_p->acc_handle = NULL;
3604 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3605 }
3606
3607 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3608 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
3609 dma_p,
3610 dma_p->buf_alloc_state));
3611
3612 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
3613 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3614 "<== nxge_dma_free_rx_data_buf: "
3615 "outstanding data buffers"));
3616 return;
3617 }
3618
3619 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3620 if (dma_p->contig_alloc_type &&
3621 dma_p->orig_kaddrp && dma_p->orig_alength) {
3622 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
3623 "kaddrp $%p (orig_kaddrp $%p)"
3624 "mem type %d "
3625 "orig_alength %d "
3626 "alength 0x%x (%d)",
3627 dma_p->kaddrp,
3628 dma_p->orig_kaddrp,
3629 dma_p->contig_alloc_type,
3630 dma_p->orig_alength,
3631 dma_p->alength, dma_p->alength));
3632
3633 kaddr = (uint64_t)dma_p->orig_kaddrp;
3634 buf_size = dma_p->orig_alength;
3635 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
3636 dma_p->orig_alength = 0;
3637 dma_p->orig_kaddrp = NULL;
3638 dma_p->contig_alloc_type = B_FALSE;
3639 dma_p->kaddrp = NULL;
3640 dma_p->alength = 0;
3641 return;
3642 }
3643 #endif
3644
3645 if (dma_p->kmem_alloc_type) {
3646 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3647 "nxge_dma_free_rx_data_buf: free kmem "
3648 "kaddrp $%p (orig_kaddrp $%p)"
3649 "alloc type %d "
3650 "orig_alength %d "
3651 "alength 0x%x (%d)",
3652 dma_p->kaddrp,
3653 dma_p->orig_kaddrp,
3654 dma_p->kmem_alloc_type,
3655 dma_p->orig_alength,
3656 dma_p->alength, dma_p->alength));
3657 #if defined(__i386)
3658 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
3659 #else
3660 kaddr = (uint64_t)dma_p->kaddrp;
3661 #endif
3662 buf_size = dma_p->orig_alength;
3663 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3664 "nxge_dma_free_rx_data_buf: free dmap $%p "
3665 "kaddr $%p buf_size %d",
3666 dma_p,
3667 kaddr, buf_size));
3668 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
3669 dma_p->alength = 0;
3670 dma_p->orig_alength = 0;
3671 dma_p->kaddrp = NULL;
3672 dma_p->kmem_alloc_type = B_FALSE;
3673 }
3674
3675 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
3676 }
3677
3678 /*
3679 * nxge_m_start() -- start transmitting and receiving.
3680 *
3681 * This function is called by the MAC layer when the first
3682 * stream is opened to prepare the hardware for sending
3683 * and receiving packets.
3684 */
3685 static int
3686 nxge_m_start(void *arg)
3687 {
3688 p_nxge_t nxgep = (p_nxge_t)arg;
3689
3690 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3691
3692 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) {
3693 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
3694 }
3695
3696 MUTEX_ENTER(nxgep->genlock);
3697 if (nxge_init(nxgep) != NXGE_OK) {
3698 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3699 "<== nxge_m_start: initialization failed"));
3700 MUTEX_EXIT(nxgep->genlock);
3701 return (EIO);
3702 }
3703
3704 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
3705 goto nxge_m_start_exit;
3706 /*
3707 * Start timer to check the system error and tx hangs
3708 */
3709 if (!isLDOMguest(nxgep))
3710 nxgep->nxge_timerid = nxge_start_timer(nxgep,
3711 nxge_check_hw_state, NXGE_CHECK_TIMER);
3712 #if defined(sun4v)
3713 else
3714 nxge_hio_start_timer(nxgep);
3715 #endif
3716
3717 nxgep->link_notify = B_TRUE;
3718
3719 nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3720
3721 nxge_m_start_exit:
3722 MUTEX_EXIT(nxgep->genlock);
3723 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3724 return (0);
3725 }
3726
3727
3728 static boolean_t
3729 nxge_check_groups_stopped(p_nxge_t nxgep)
3730 {
3731 int i;
3732
3733 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
3734 if (nxgep->rx_hio_groups[i].started)
3735 return (B_FALSE);
3736 }
3737
3738 return (B_TRUE);
3739 }
3740
3741 /*
3742 * nxge_m_stop(): stop transmitting and receiving.
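 *
 * All receive groups must already be stopped when this is called
 * (see nxge_check_groups_stopped() above); the sequence below is,
 * in outline:
 *
 *	if (!nxge_check_groups_stopped(nxgep))
 *		return;				warn and bail out
 *	nxgep->nxge_mac_state = NXGE_MAC_STOPPING;
 *	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
 *	nxge_uninit(nxgep);
 *	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;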
3743 */ 3744 static void 3745 nxge_m_stop(void *arg) 3746 { 3747 p_nxge_t nxgep = (p_nxge_t)arg; 3748 boolean_t groups_stopped; 3749 3750 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3751 3752 groups_stopped = nxge_check_groups_stopped(nxgep); 3753 #ifdef later 3754 ASSERT(groups_stopped == B_FALSE); 3755 #endif 3756 3757 if (!groups_stopped) { 3758 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n", 3759 nxgep->instance); 3760 return; 3761 } 3762 3763 MUTEX_ENTER(nxgep->genlock); 3764 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3765 3766 if (nxgep->nxge_timerid) { 3767 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3768 nxgep->nxge_timerid = 0; 3769 } 3770 3771 nxge_uninit(nxgep); 3772 3773 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3774 3775 MUTEX_EXIT(nxgep->genlock); 3776 3777 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3778 } 3779 3780 static int 3781 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3782 { 3783 p_nxge_t nxgep = (p_nxge_t)arg; 3784 struct ether_addr addrp; 3785 3786 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3787 "==> nxge_m_multicst: add %d", add)); 3788 3789 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3790 if (add) { 3791 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3792 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3793 "<== nxge_m_multicst: add multicast failed")); 3794 return (EINVAL); 3795 } 3796 } else { 3797 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3798 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3799 "<== nxge_m_multicst: del multicast failed")); 3800 return (EINVAL); 3801 } 3802 } 3803 3804 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3805 3806 return (0); 3807 } 3808 3809 static int 3810 nxge_m_promisc(void *arg, boolean_t on) 3811 { 3812 p_nxge_t nxgep = (p_nxge_t)arg; 3813 3814 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3815 "==> nxge_m_promisc: on %d", on)); 3816 3817 if (nxge_set_promisc(nxgep, on)) { 3818 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3819 "<== nxge_m_promisc: set promisc failed")); 3820 return (EINVAL); 3821 } 3822 3823 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3824 "<== nxge_m_promisc: on %d", on)); 3825 3826 return (0); 3827 } 3828 3829 static void 3830 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3831 { 3832 p_nxge_t nxgep = (p_nxge_t)arg; 3833 struct iocblk *iocp; 3834 boolean_t need_privilege; 3835 int err; 3836 int cmd; 3837 3838 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3839 3840 iocp = (struct iocblk *)mp->b_rptr; 3841 iocp->ioc_error = 0; 3842 need_privilege = B_TRUE; 3843 cmd = iocp->ioc_cmd; 3844 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3845 switch (cmd) { 3846 default: 3847 miocnak(wq, mp, 0, EINVAL); 3848 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3849 return; 3850 3851 case LB_GET_INFO_SIZE: 3852 case LB_GET_INFO: 3853 case LB_GET_MODE: 3854 need_privilege = B_FALSE; 3855 break; 3856 case LB_SET_MODE: 3857 break; 3858 3859 3860 case NXGE_GET_MII: 3861 case NXGE_PUT_MII: 3862 case NXGE_GET64: 3863 case NXGE_PUT64: 3864 case NXGE_GET_TX_RING_SZ: 3865 case NXGE_GET_TX_DESC: 3866 case NXGE_TX_SIDE_RESET: 3867 case NXGE_RX_SIDE_RESET: 3868 case NXGE_GLOBAL_RESET: 3869 case NXGE_RESET_MAC: 3870 case NXGE_TX_REGS_DUMP: 3871 case NXGE_RX_REGS_DUMP: 3872 case NXGE_INT_REGS_DUMP: 3873 case NXGE_VIR_INT_REGS_DUMP: 3874 case NXGE_PUT_TCAM: 3875 case NXGE_GET_TCAM: 3876 case NXGE_RTRACE: 3877 case NXGE_RDUMP: 3878 3879 need_privilege = B_FALSE; 3880 break; 3881 case NXGE_INJECT_ERR: 3882 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3883 nxge_err_inject(nxgep, wq, mp); 3884 
break; 3885 } 3886 3887 if (need_privilege) { 3888 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3889 if (err != 0) { 3890 miocnak(wq, mp, 0, err); 3891 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3892 "<== nxge_m_ioctl: no priv")); 3893 return; 3894 } 3895 } 3896 3897 switch (cmd) { 3898 3899 case LB_GET_MODE: 3900 case LB_SET_MODE: 3901 case LB_GET_INFO_SIZE: 3902 case LB_GET_INFO: 3903 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3904 break; 3905 3906 case NXGE_GET_MII: 3907 case NXGE_PUT_MII: 3908 case NXGE_PUT_TCAM: 3909 case NXGE_GET_TCAM: 3910 case NXGE_GET64: 3911 case NXGE_PUT64: 3912 case NXGE_GET_TX_RING_SZ: 3913 case NXGE_GET_TX_DESC: 3914 case NXGE_TX_SIDE_RESET: 3915 case NXGE_RX_SIDE_RESET: 3916 case NXGE_GLOBAL_RESET: 3917 case NXGE_RESET_MAC: 3918 case NXGE_TX_REGS_DUMP: 3919 case NXGE_RX_REGS_DUMP: 3920 case NXGE_INT_REGS_DUMP: 3921 case NXGE_VIR_INT_REGS_DUMP: 3922 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3923 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3924 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3925 break; 3926 } 3927 3928 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3929 } 3930 3931 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3932 3933 void 3934 nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory) 3935 { 3936 p_nxge_mmac_stats_t mmac_stats; 3937 int i; 3938 nxge_mmac_t *mmac_info; 3939 3940 mmac_info = &nxgep->nxge_mmac_info; 3941 3942 mmac_stats = &nxgep->statsp->mmac_stats; 3943 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 3944 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 3945 3946 for (i = 0; i < ETHERADDRL; i++) { 3947 if (factory) { 3948 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3949 = mmac_info->factory_mac_pool[slot][ 3950 (ETHERADDRL-1) - i]; 3951 } else { 3952 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3953 = mmac_info->mac_pool[slot].addr[ 3954 (ETHERADDRL - 1) - i]; 3955 } 3956 } 3957 } 3958 3959 /* 3960 * nxge_altmac_set() -- Set an alternate MAC address 3961 */ 3962 static int 3963 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot, 3964 int rdctbl, boolean_t usetbl) 3965 { 3966 uint8_t addrn; 3967 uint8_t portn; 3968 npi_mac_addr_t altmac; 3969 hostinfo_t mac_rdc; 3970 p_nxge_class_pt_cfg_t clscfgp; 3971 3972 3973 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 3974 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 3975 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 3976 3977 portn = nxgep->mac.portnum; 3978 addrn = (uint8_t)slot - 1; 3979 3980 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, 3981 nxgep->function_num, addrn, &altmac) != NPI_SUCCESS) 3982 return (EIO); 3983 3984 /* 3985 * Set the rdc table number for the host info entry 3986 * for this mac address slot. 3987 */ 3988 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 3989 mac_rdc.value = 0; 3990 if (usetbl) 3991 mac_rdc.bits.w0.rdc_tbl_num = rdctbl; 3992 else 3993 mac_rdc.bits.w0.rdc_tbl_num = 3994 clscfgp->mac_host_info[addrn].rdctbl; 3995 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 3996 3997 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 3998 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 3999 return (EIO); 4000 } 4001 4002 /* 4003 * Enable comparison with the alternate MAC address. 
	 * While the first alternate MAC address is enabled by bit 1 of the
	 * BMAC_ALTAD_CMPEN register, it is enabled by bit 0 of the
	 * XMAC_ADDR_CMPEN register, so slot needs to be converted to addrn
	 * accordingly before calling npi_mac_altaddr_enable.
	 */
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (npi_mac_altaddr_enable(nxgep->npi_handle,
	    nxgep->function_num, addrn) != NPI_SUCCESS) {
		return (EIO);
	}

	return (0);
}
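/*
 * Illustrative sketch (not compiled into the driver): the slot-to-addrn
 * conversion used by nxge_altmac_set() above and nxge_m_mmac_remove()
 * below.  XMAC ports number their alternate addresses from 0, BMAC
 * ports from 1, so the 1-based slot is adjusted per port type.
 */
#if 0
static uint8_t
nxge_example_slot_to_addrn(uint8_t portn, int slot)
{
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		return ((uint8_t)slot - 1);	/* XMAC: 0-based */
	return ((uint8_t)slot);			/* BMAC: 1-based */
}
#endif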

/*
 * nxge_m_mmac_add_g() - find an unused address slot, set the address
 * value to the one specified, enable the port to start filtering on
 * the new MAC address.  Returns 0 on success.
 */
int
nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
    boolean_t usetbl)
{
	p_nxge_t nxgep = arg;
	int slot;
	nxge_mmac_t *mmac_info;
	int err;
	nxge_status_t status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure nxge is initialized if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (mmac_info->naddrfree == 0) {
		mutex_exit(nxgep->genlock);
		return (ENOSPC);
	}

	/*
	 * Search for the first available slot. Because naddrfree
	 * is not zero, we are guaranteed to find one.
	 * Each of the first two ports of Neptune has 16 alternate
	 * MAC slots but only the first 7 (of 15) slots have assigned factory
	 * MAC addresses. We first search among the slots without bundled
	 * factory MACs. If we fail to find one in that range, then we
	 * search the slots with bundled factory MACs. A factory MAC
	 * will be wasted while the slot is used with a user MAC address.
	 * But the slot could be used by factory MAC again after calling
	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
	 */
	for (slot = 0; slot <= mmac_info->num_mmac; slot++) {
		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
			break;
	}

	ASSERT(slot <= mmac_info->num_mmac);

	if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl,
	    usetbl)) != 0) {
		mutex_exit(nxgep->genlock);
		return (err);
	}

	bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
	mmac_info->naddrfree--;
	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);

	mutex_exit(nxgep->genlock);
	return (0);
}

/*
 * Remove the specified mac address and update the HW not to filter
 * the mac address anymore.
 */
int
nxge_m_mmac_remove(void *arg, int slot)
{
	p_nxge_t nxgep = arg;
	nxge_mmac_t *mmac_info;
	uint8_t addrn;
	uint8_t portn;
	int err = 0;
	nxge_status_t status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure nxge is initialized if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}

	portn = nxgep->mac.portnum;
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
		    == NPI_SUCCESS) {
			mmac_info->naddrfree++;
			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
			/*
			 * Regardless of whether the MAC we just stopped
			 * filtering is a user addr or a factory addr, we
			 * must set the MMAC_VENDOR_ADDR flag if this slot
			 * has an associated factory MAC to indicate that
			 * a factory MAC is available.
			 */
			if (slot <= mmac_info->num_factory_mmac) {
				mmac_info->mac_pool[slot].flags
				    |= MMAC_VENDOR_ADDR;
			}
			/*
			 * Clear mac_pool[slot].addr so that kstat shows 0
			 * alternate MAC address if the slot is not used.
			 * (But nxge_m_mmac_get returns the factory MAC even
			 * when the slot is not used!)
			 */
			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
		} else {
			err = EIO;
		}
	} else {
		err = EINVAL;
	}

	mutex_exit(nxgep->genlock);
	return (err);
}

/*
 * The callback to query all the factory addresses. naddr must be the same as
 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and
 * mcm_addr is the space allocated for keeping all the addresses, whose size is
 * naddr * MAXMACADDRLEN.
4172 */ 4173 static void 4174 nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr) 4175 { 4176 nxge_t *nxgep = arg; 4177 nxge_mmac_t *mmac_info; 4178 int i; 4179 4180 mutex_enter(nxgep->genlock); 4181 4182 mmac_info = &nxgep->nxge_mmac_info; 4183 ASSERT(naddr == mmac_info->num_factory_mmac); 4184 4185 for (i = 0; i < naddr; i++) { 4186 bcopy(mmac_info->factory_mac_pool[i + 1], 4187 addr + i * MAXMACADDRLEN, ETHERADDRL); 4188 } 4189 4190 mutex_exit(nxgep->genlock); 4191 } 4192 4193 4194 static boolean_t 4195 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4196 { 4197 nxge_t *nxgep = arg; 4198 uint32_t *txflags = cap_data; 4199 4200 switch (cap) { 4201 case MAC_CAPAB_HCKSUM: 4202 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4203 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload)); 4204 if (nxge_cksum_offload <= 1) { 4205 *txflags = HCKSUM_INET_PARTIAL; 4206 } 4207 break; 4208 4209 case MAC_CAPAB_MULTIFACTADDR: { 4210 mac_capab_multifactaddr_t *mfacp = cap_data; 4211 4212 mutex_enter(nxgep->genlock); 4213 mfacp->mcm_naddr = nxgep->nxge_mmac_info.num_factory_mmac; 4214 mfacp->mcm_getaddr = nxge_m_getfactaddr; 4215 mutex_exit(nxgep->genlock); 4216 break; 4217 } 4218 4219 case MAC_CAPAB_LSO: { 4220 mac_capab_lso_t *cap_lso = cap_data; 4221 4222 if (nxgep->soft_lso_enable) { 4223 if (nxge_cksum_offload <= 1) { 4224 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4225 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 4226 nxge_lso_max = NXGE_LSO_MAXLEN; 4227 } 4228 cap_lso->lso_basic_tcp_ipv4.lso_max = 4229 nxge_lso_max; 4230 } 4231 break; 4232 } else { 4233 return (B_FALSE); 4234 } 4235 } 4236 4237 case MAC_CAPAB_RINGS: { 4238 mac_capab_rings_t *cap_rings = cap_data; 4239 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 4240 4241 mutex_enter(nxgep->genlock); 4242 if (cap_rings->mr_type == MAC_RING_TYPE_RX) { 4243 cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC; 4244 cap_rings->mr_rnum = p_cfgp->max_rdcs; 4245 cap_rings->mr_rget = nxge_fill_ring; 4246 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids; 4247 cap_rings->mr_gget = nxge_hio_group_get; 4248 cap_rings->mr_gaddring = nxge_group_add_ring; 4249 cap_rings->mr_gremring = nxge_group_rem_ring; 4250 4251 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4252 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]", 4253 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids)); 4254 } else { 4255 cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC; 4256 cap_rings->mr_rnum = p_cfgp->tdc.count; 4257 cap_rings->mr_rget = nxge_fill_ring; 4258 if (isLDOMservice(nxgep)) { 4259 /* share capable */ 4260 /* Do not report the default ring: hence -1 */ 4261 cap_rings->mr_gnum = 4262 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1; 4263 } else { 4264 cap_rings->mr_gnum = 0; 4265 } 4266 4267 cap_rings->mr_gget = nxge_hio_group_get; 4268 cap_rings->mr_gaddring = nxge_group_add_ring; 4269 cap_rings->mr_gremring = nxge_group_rem_ring; 4270 4271 NXGE_DEBUG_MSG((nxgep, TX_CTL, 4272 "==> nxge_m_getcapab: tx rings # of rings %d", 4273 p_cfgp->tdc.count)); 4274 } 4275 mutex_exit(nxgep->genlock); 4276 break; 4277 } 4278 4279 #if defined(sun4v) 4280 case MAC_CAPAB_SHARES: { 4281 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4282 4283 /* 4284 * Only the service domain driver responds to 4285 * this capability request. 
4286 */ 4287 mutex_enter(nxgep->genlock); 4288 if (isLDOMservice(nxgep)) { 4289 mshares->ms_snum = 3; 4290 mshares->ms_handle = (void *)nxgep; 4291 mshares->ms_salloc = nxge_hio_share_alloc; 4292 mshares->ms_sfree = nxge_hio_share_free; 4293 mshares->ms_sadd = nxge_hio_share_add_group; 4294 mshares->ms_sremove = nxge_hio_share_rem_group; 4295 mshares->ms_squery = nxge_hio_share_query; 4296 mshares->ms_sbind = nxge_hio_share_bind; 4297 mshares->ms_sunbind = nxge_hio_share_unbind; 4298 mutex_exit(nxgep->genlock); 4299 } else { 4300 mutex_exit(nxgep->genlock); 4301 return (B_FALSE); 4302 } 4303 break; 4304 } 4305 #endif 4306 default: 4307 return (B_FALSE); 4308 } 4309 return (B_TRUE); 4310 } 4311 4312 static boolean_t 4313 nxge_param_locked(mac_prop_id_t pr_num) 4314 { 4315 /* 4316 * All adv_* parameters are locked (read-only) while 4317 * the device is in any sort of loopback mode ... 4318 */ 4319 switch (pr_num) { 4320 case MAC_PROP_ADV_1000FDX_CAP: 4321 case MAC_PROP_EN_1000FDX_CAP: 4322 case MAC_PROP_ADV_1000HDX_CAP: 4323 case MAC_PROP_EN_1000HDX_CAP: 4324 case MAC_PROP_ADV_100FDX_CAP: 4325 case MAC_PROP_EN_100FDX_CAP: 4326 case MAC_PROP_ADV_100HDX_CAP: 4327 case MAC_PROP_EN_100HDX_CAP: 4328 case MAC_PROP_ADV_10FDX_CAP: 4329 case MAC_PROP_EN_10FDX_CAP: 4330 case MAC_PROP_ADV_10HDX_CAP: 4331 case MAC_PROP_EN_10HDX_CAP: 4332 case MAC_PROP_AUTONEG: 4333 case MAC_PROP_FLOWCTRL: 4334 return (B_TRUE); 4335 } 4336 return (B_FALSE); 4337 } 4338 4339 /* 4340 * callback functions for set/get of properties 4341 */ 4342 static int 4343 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4344 uint_t pr_valsize, const void *pr_val) 4345 { 4346 nxge_t *nxgep = barg; 4347 p_nxge_param_t param_arr; 4348 p_nxge_stats_t statsp; 4349 int err = 0; 4350 uint8_t val; 4351 uint32_t cur_mtu, new_mtu, old_framesize; 4352 link_flowctrl_t fl; 4353 4354 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4355 param_arr = nxgep->param_arr; 4356 statsp = nxgep->statsp; 4357 mutex_enter(nxgep->genlock); 4358 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4359 nxge_param_locked(pr_num)) { 4360 /* 4361 * All adv_* parameters are locked (read-only) 4362 * while the device is in any sort of loopback mode. 
4363 */ 4364 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4365 "==> nxge_m_setprop: loopback mode: read only")); 4366 mutex_exit(nxgep->genlock); 4367 return (EBUSY); 4368 } 4369 4370 val = *(uint8_t *)pr_val; 4371 switch (pr_num) { 4372 case MAC_PROP_EN_1000FDX_CAP: 4373 nxgep->param_en_1000fdx = val; 4374 param_arr[param_anar_1000fdx].value = val; 4375 4376 goto reprogram; 4377 4378 case MAC_PROP_EN_100FDX_CAP: 4379 nxgep->param_en_100fdx = val; 4380 param_arr[param_anar_100fdx].value = val; 4381 4382 goto reprogram; 4383 4384 case MAC_PROP_EN_10FDX_CAP: 4385 nxgep->param_en_10fdx = val; 4386 param_arr[param_anar_10fdx].value = val; 4387 4388 goto reprogram; 4389 4390 case MAC_PROP_EN_1000HDX_CAP: 4391 case MAC_PROP_EN_100HDX_CAP: 4392 case MAC_PROP_EN_10HDX_CAP: 4393 case MAC_PROP_ADV_1000FDX_CAP: 4394 case MAC_PROP_ADV_1000HDX_CAP: 4395 case MAC_PROP_ADV_100FDX_CAP: 4396 case MAC_PROP_ADV_100HDX_CAP: 4397 case MAC_PROP_ADV_10FDX_CAP: 4398 case MAC_PROP_ADV_10HDX_CAP: 4399 case MAC_PROP_STATUS: 4400 case MAC_PROP_SPEED: 4401 case MAC_PROP_DUPLEX: 4402 err = EINVAL; /* cannot set read-only properties */ 4403 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4404 "==> nxge_m_setprop: read only property %d", 4405 pr_num)); 4406 break; 4407 4408 case MAC_PROP_AUTONEG: 4409 param_arr[param_autoneg].value = val; 4410 4411 goto reprogram; 4412 4413 case MAC_PROP_MTU: 4414 cur_mtu = nxgep->mac.default_mtu; 4415 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4416 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4417 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4418 new_mtu, nxgep->mac.is_jumbo)); 4419 4420 if (new_mtu == cur_mtu) { 4421 err = 0; 4422 break; 4423 } 4424 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4425 err = EBUSY; 4426 break; 4427 } 4428 if (new_mtu < NXGE_DEFAULT_MTU || 4429 new_mtu > NXGE_MAXIMUM_MTU) { 4430 err = EINVAL; 4431 break; 4432 } 4433 4434 if ((new_mtu > NXGE_DEFAULT_MTU) && 4435 !nxgep->mac.is_jumbo) { 4436 err = EINVAL; 4437 break; 4438 } 4439 4440 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4441 nxgep->mac.maxframesize = (uint16_t) 4442 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4443 if (nxge_mac_set_framesize(nxgep)) { 4444 nxgep->mac.maxframesize = 4445 (uint16_t)old_framesize; 4446 err = EINVAL; 4447 break; 4448 } 4449 4450 err = mac_maxsdu_update(nxgep->mach, new_mtu); 4451 if (err) { 4452 nxgep->mac.maxframesize = 4453 (uint16_t)old_framesize; 4454 err = EINVAL; 4455 break; 4456 } 4457 4458 nxgep->mac.default_mtu = new_mtu; 4459 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4460 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4461 new_mtu, nxgep->mac.maxframesize)); 4462 break; 4463 4464 case MAC_PROP_FLOWCTRL: 4465 bcopy(pr_val, &fl, sizeof (fl)); 4466 switch (fl) { 4467 default: 4468 err = EINVAL; 4469 break; 4470 4471 case LINK_FLOWCTRL_NONE: 4472 param_arr[param_anar_pause].value = 0; 4473 break; 4474 4475 case LINK_FLOWCTRL_RX: 4476 param_arr[param_anar_pause].value = 1; 4477 break; 4478 4479 case LINK_FLOWCTRL_TX: 4480 case LINK_FLOWCTRL_BI: 4481 err = EINVAL; 4482 break; 4483 } 4484 4485 reprogram: 4486 if (err == 0) { 4487 if (!nxge_param_link_update(nxgep)) { 4488 err = EINVAL; 4489 } 4490 } 4491 break; 4492 case MAC_PROP_PRIVATE: 4493 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4494 "==> nxge_m_setprop: private property")); 4495 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 4496 pr_val); 4497 break; 4498 4499 default: 4500 err = ENOTSUP; 4501 break; 4502 } 4503 4504 mutex_exit(nxgep->genlock); 4505 4506 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4507 "<== nxge_m_setprop (return %d)", err)); 4508 return (err); 4509 } 
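/*
 * Illustrative sketch (not compiled into the driver): the MTU to frame
 * size arithmetic used by the MAC_PROP_MTU case above.  The driver adds
 * NXGE_EHEADER_VLAN_CRC (Ethernet header, VLAN tag and CRC overhead) to
 * the requested MTU before programming the MAC, and restores the old
 * frame size if either the hardware or the MAC layer rejects the update.
 */
#if 0
static uint16_t
nxge_example_mtu_to_framesize(uint32_t new_mtu)
{
	return ((uint16_t)(new_mtu + NXGE_EHEADER_VLAN_CRC));
}
#endif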
4510 4511 static int 4512 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4513 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 4514 { 4515 nxge_t *nxgep = barg; 4516 p_nxge_param_t param_arr = nxgep->param_arr; 4517 p_nxge_stats_t statsp = nxgep->statsp; 4518 int err = 0; 4519 link_flowctrl_t fl; 4520 uint64_t tmp = 0; 4521 link_state_t ls; 4522 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4523 4524 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4525 "==> nxge_m_getprop: pr_num %d", pr_num)); 4526 4527 if (pr_valsize == 0) 4528 return (EINVAL); 4529 4530 *perm = MAC_PROP_PERM_RW; 4531 4532 if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) { 4533 err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4534 return (err); 4535 } 4536 4537 bzero(pr_val, pr_valsize); 4538 switch (pr_num) { 4539 case MAC_PROP_DUPLEX: 4540 *perm = MAC_PROP_PERM_READ; 4541 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4542 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4543 "==> nxge_m_getprop: duplex mode %d", 4544 *(uint8_t *)pr_val)); 4545 break; 4546 4547 case MAC_PROP_SPEED: 4548 if (pr_valsize < sizeof (uint64_t)) 4549 return (EINVAL); 4550 *perm = MAC_PROP_PERM_READ; 4551 tmp = statsp->mac_stats.link_speed * 1000000ull; 4552 bcopy(&tmp, pr_val, sizeof (tmp)); 4553 break; 4554 4555 case MAC_PROP_STATUS: 4556 if (pr_valsize < sizeof (link_state_t)) 4557 return (EINVAL); 4558 *perm = MAC_PROP_PERM_READ; 4559 if (!statsp->mac_stats.link_up) 4560 ls = LINK_STATE_DOWN; 4561 else 4562 ls = LINK_STATE_UP; 4563 bcopy(&ls, pr_val, sizeof (ls)); 4564 break; 4565 4566 case MAC_PROP_AUTONEG: 4567 *(uint8_t *)pr_val = 4568 param_arr[param_autoneg].value; 4569 break; 4570 4571 case MAC_PROP_FLOWCTRL: 4572 if (pr_valsize < sizeof (link_flowctrl_t)) 4573 return (EINVAL); 4574 4575 fl = LINK_FLOWCTRL_NONE; 4576 if (param_arr[param_anar_pause].value) { 4577 fl = LINK_FLOWCTRL_RX; 4578 } 4579 bcopy(&fl, pr_val, sizeof (fl)); 4580 break; 4581 4582 case MAC_PROP_ADV_1000FDX_CAP: 4583 *perm = MAC_PROP_PERM_READ; 4584 *(uint8_t *)pr_val = 4585 param_arr[param_anar_1000fdx].value; 4586 break; 4587 4588 case MAC_PROP_EN_1000FDX_CAP: 4589 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4590 break; 4591 4592 case MAC_PROP_ADV_100FDX_CAP: 4593 *perm = MAC_PROP_PERM_READ; 4594 *(uint8_t *)pr_val = 4595 param_arr[param_anar_100fdx].value; 4596 break; 4597 4598 case MAC_PROP_EN_100FDX_CAP: 4599 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4600 break; 4601 4602 case MAC_PROP_ADV_10FDX_CAP: 4603 *perm = MAC_PROP_PERM_READ; 4604 *(uint8_t *)pr_val = 4605 param_arr[param_anar_10fdx].value; 4606 break; 4607 4608 case MAC_PROP_EN_10FDX_CAP: 4609 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4610 break; 4611 4612 case MAC_PROP_EN_1000HDX_CAP: 4613 case MAC_PROP_EN_100HDX_CAP: 4614 case MAC_PROP_EN_10HDX_CAP: 4615 case MAC_PROP_ADV_1000HDX_CAP: 4616 case MAC_PROP_ADV_100HDX_CAP: 4617 case MAC_PROP_ADV_10HDX_CAP: 4618 err = ENOTSUP; 4619 break; 4620 4621 case MAC_PROP_PRIVATE: 4622 err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4623 pr_valsize, pr_val, perm); 4624 break; 4625 default: 4626 err = EINVAL; 4627 break; 4628 } 4629 4630 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4631 4632 return (err); 4633 } 4634 4635 /* ARGSUSED */ 4636 static int 4637 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4638 const void *pr_val) 4639 { 4640 p_nxge_param_t param_arr = nxgep->param_arr; 4641 int err = 0; 4642 long result; 4643 4644 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4645 "==> 
nxge_set_priv_prop: name %s", pr_name));

	if (strcmp(pr_name, "_accept_jumbo") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s "
		    "pr_val %s result %d "
		    "param %d is_jumbo %d",
		    pr_name, pr_val, result,
		    param_arr[param_accept_jumbo].value,
		    nxgep->mac.is_jumbo));

		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			if (nxgep->mac.is_jumbo ==
			    (uint32_t)result) {
				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
				    "no change (%d %d)",
				    nxgep->mac.is_jumbo,
				    result));
				return (0);
			}
		}

		param_arr[param_accept_jumbo].value = result;
		nxgep->mac.is_jumbo = B_FALSE;
		if (result) {
			nxgep->mac.is_jumbo = B_TRUE;
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d",
		    pr_name, result, nxgep->mac.is_jumbo));

		return (err);
	}

	/* Blanking */
	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
		err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
		    (char *)pr_val,
		    (caddr_t)&param_arr[param_rxdma_intr_time]);
		if (err) {
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "unable to set (%s)", pr_name));
			err = EINVAL;
		} else {
			err = 0;
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "set (%s)", pr_name));
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
		err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL,
		    (char *)pr_val,
		    (caddr_t)&param_arr[param_rxdma_intr_pkts]);
		if (err) {
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "unable to set (%s)", pr_name));
			err = EINVAL;
		} else {
			err = 0;
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "set (%s)", pr_name));
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d)",
		    pr_name, result));

		return (err);
	}

	/* Classification */
	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}
	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}
	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}
	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}
	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
		if (pr_val == NULL) {
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "==> nxge_set_priv_prop: name %s (null)", pr_name));
			err = EINVAL;
			return (err);
		}

		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s "
		    "(lso %d pr_val %s value %d)",
		    pr_name, nxgep->soft_lso_enable, pr_val, result));

		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			if (nxgep->soft_lso_enable == (uint32_t)result) {
				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
				    "no change (%d %d)",
				    nxgep->soft_lso_enable, result));
				return (0);
			}
		}

		nxgep->soft_lso_enable = (int)result;

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d)",
		    pr_name, result));

		return (err);
	}
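	/*
	 * Usage sketch (illustrative, not from the original source): the
	 * private properties handled above are normally set from userland
	 * with dladm(1M), e.g.
	 *
	 *	# dladm set-linkprop -p _soft_lso_enable=1 nxge0
	 *
	 * or with the legacy ndd(1M) interface shown in the comment below.
	 */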
	/*
	 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the
	 * following code to be executed.
	 */
	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_anar_10gfdx]);
		return (err);
	}
	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_anar_pause]);
		return (err);
	}

	return (EINVAL);
}

static int
nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val, uint_t *perm)
{
	p_nxge_param_t param_arr = nxgep->param_arr;
	char valstr[MAXNAMELEN];
	int err = EINVAL;
	uint_t strsize;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "==> nxge_get_priv_prop: property %s", pr_name));

	/* function number */
	if (strcmp(pr_name, "_function_number") == 0) {
		if (is_default)
			return (ENOTSUP);
		*perm = MAC_PROP_PERM_READ;
		(void) snprintf(valstr, sizeof (valstr), "%d",
		    nxgep->function_num);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %d valstr %s)",
		    pr_name, nxgep->function_num, valstr));

		err = 0;
		goto done;
	}

	/* Neptune firmware version */
	if (strcmp(pr_name, "_fw_version") == 0) {
		if (is_default)
			return (ENOTSUP);
		*perm = MAC_PROP_PERM_READ;
		(void) snprintf(valstr, sizeof (valstr), "%s",
		    nxgep->vpd_info.ver);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %s valstr %s)",
		    pr_name, nxgep->vpd_info.ver, valstr));

		err = 0;
		goto done;
	}

	/* port PHY mode */
	if (strcmp(pr_name, "_port_mode") == 0) {
		if (is_default)
			return (ENOTSUP);
		*perm = MAC_PROP_PERM_READ;
		switch (nxgep->mac.portmode) {
		case PORT_1G_COPPER:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G copper %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_FIBER:
			(void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_COPPER:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G copper %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_FIBER:
			(void) snprintf(valstr, sizeof (valstr), "10G fiber %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_SERDES:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G serdes %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_SERDES:
			(void) snprintf(valstr, sizeof (valstr), "1G serdes %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_TN1010:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G TN1010 copper %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_TN1010:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G TN1010 copper %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_RGMII_FIBER:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
5017 "[hot swappable]" : ""); 5018 break; 5019 case PORT_HSP_MODE: 5020 (void) snprintf(valstr, sizeof (valstr), 5021 "phy not present[hot swappable]"); 5022 break; 5023 default: 5024 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5025 nxgep->hot_swappable_phy ? 5026 "[hot swappable]" : ""); 5027 break; 5028 } 5029 5030 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5031 "==> nxge_get_priv_prop: name %s (value %s)", 5032 pr_name, valstr)); 5033 5034 err = 0; 5035 goto done; 5036 } 5037 5038 /* Hot swappable PHY */ 5039 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5040 if (is_default) 5041 return (ENOTSUP); 5042 *perm = MAC_PROP_PERM_READ; 5043 (void) snprintf(valstr, sizeof (valstr), "%s", 5044 nxgep->hot_swappable_phy ? 5045 "yes" : "no"); 5046 5047 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5048 "==> nxge_get_priv_prop: name %s " 5049 "(value %d valstr %s)", 5050 pr_name, nxgep->hot_swappable_phy, valstr)); 5051 5052 err = 0; 5053 goto done; 5054 } 5055 5056 5057 /* accept jumbo */ 5058 if (strcmp(pr_name, "_accept_jumbo") == 0) { 5059 if (is_default) 5060 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5061 else 5062 (void) snprintf(valstr, sizeof (valstr), 5063 "%d", nxgep->mac.is_jumbo); 5064 err = 0; 5065 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5066 "==> nxge_get_priv_prop: name %s (value %d (%d, %d))", 5067 pr_name, 5068 (uint32_t)param_arr[param_accept_jumbo].value, 5069 nxgep->mac.is_jumbo, 5070 nxge_jumbo_enable)); 5071 5072 goto done; 5073 } 5074 5075 /* Receive Interrupt Blanking Parameters */ 5076 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5077 err = 0; 5078 if (is_default) { 5079 (void) snprintf(valstr, sizeof (valstr), 5080 "%d", RXDMA_RCR_TO_DEFAULT); 5081 goto done; 5082 } 5083 5084 (void) snprintf(valstr, sizeof (valstr), "%d", 5085 nxgep->intr_timeout); 5086 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5087 "==> nxge_get_priv_prop: name %s (value %d)", 5088 pr_name, 5089 (uint32_t)nxgep->intr_timeout)); 5090 goto done; 5091 } 5092 5093 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5094 err = 0; 5095 if (is_default) { 5096 (void) snprintf(valstr, sizeof (valstr), 5097 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5098 goto done; 5099 } 5100 (void) snprintf(valstr, sizeof (valstr), "%d", 5101 nxgep->intr_threshold); 5102 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5103 "==> nxge_get_priv_prop: name %s (value %d)", 5104 pr_name, (uint32_t)nxgep->intr_threshold)); 5105 5106 goto done; 5107 } 5108 5109 /* Classification and Load Distribution Configuration */ 5110 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5111 if (is_default) { 5112 (void) snprintf(valstr, sizeof (valstr), "%x", 5113 NXGE_CLASS_FLOW_GEN_SERVER); 5114 err = 0; 5115 goto done; 5116 } 5117 err = nxge_dld_get_ip_opt(nxgep, 5118 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5119 5120 (void) snprintf(valstr, sizeof (valstr), "%x", 5121 (int)param_arr[param_class_opt_ipv4_tcp].value); 5122 5123 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5124 "==> nxge_get_priv_prop: %s", valstr)); 5125 goto done; 5126 } 5127 5128 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5129 if (is_default) { 5130 (void) snprintf(valstr, sizeof (valstr), "%x", 5131 NXGE_CLASS_FLOW_GEN_SERVER); 5132 err = 0; 5133 goto done; 5134 } 5135 err = nxge_dld_get_ip_opt(nxgep, 5136 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5137 5138 (void) snprintf(valstr, sizeof (valstr), "%x", 5139 (int)param_arr[param_class_opt_ipv4_udp].value); 5140 5141 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5142 "==> nxge_get_priv_prop: %s", valstr)); 5143 goto done; 5144 } 5145 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5146 
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_ah].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_sctp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_tcp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_udp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_ah].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_sctp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	/* Software LSO */
	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
			err = 0;
			goto done;
		}
		(void) snprintf(valstr, sizeof (valstr),
		    "%d", nxgep->soft_lso_enable);
		err = 0;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d)",
		    pr_name, nxgep->soft_lso_enable));

		goto done;
	}
	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
		err = 0;
if (is_default || 5272 nxgep->param_arr[param_anar_10gfdx].value != 0) { 5273 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5274 goto done; 5275 } else { 5276 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5277 goto done; 5278 } 5279 } 5280 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5281 err = 0; 5282 if (is_default || 5283 nxgep->param_arr[param_anar_pause].value != 0) { 5284 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5285 goto done; 5286 } else { 5287 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5288 goto done; 5289 } 5290 } 5291 5292 done: 5293 if (err == 0) { 5294 strsize = (uint_t)strlen(valstr); 5295 if (pr_valsize < strsize) { 5296 err = ENOBUFS; 5297 } else { 5298 (void) strlcpy(pr_val, valstr, pr_valsize); 5299 } 5300 } 5301 5302 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5303 "<== nxge_get_priv_prop: return %d", err)); 5304 return (err); 5305 } 5306 5307 /* 5308 * Module loading and removing entry points. 5309 */ 5310 5311 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach, 5312 nodev, NULL, D_MP, NULL, nxge_quiesce); 5313 5314 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5315 5316 /* 5317 * Module linkage information for the kernel. 5318 */ 5319 static struct modldrv nxge_modldrv = { 5320 &mod_driverops, 5321 NXGE_DESC_VER, 5322 &nxge_dev_ops 5323 }; 5324 5325 static struct modlinkage modlinkage = { 5326 MODREV_1, (void *) &nxge_modldrv, NULL 5327 }; 5328 5329 int 5330 _init(void) 5331 { 5332 int status; 5333 5334 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5335 mac_init_ops(&nxge_dev_ops, "nxge"); 5336 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5337 if (status != 0) { 5338 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5339 "failed to init device soft state")); 5340 goto _init_exit; 5341 } 5342 status = mod_install(&modlinkage); 5343 if (status != 0) { 5344 ddi_soft_state_fini(&nxge_list); 5345 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5346 goto _init_exit; 5347 } 5348 5349 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5350 5351 _init_exit: 5352 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 5353 5354 return (status); 5355 } 5356 5357 int 5358 _fini(void) 5359 { 5360 int status; 5361 5362 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5363 5364 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5365 5366 if (nxge_mblks_pending) 5367 return (EBUSY); 5368 5369 status = mod_remove(&modlinkage); 5370 if (status != DDI_SUCCESS) { 5371 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5372 "Module removal failed 0x%08x", 5373 status)); 5374 goto _fini_exit; 5375 } 5376 5377 mac_fini_ops(&nxge_dev_ops); 5378 5379 ddi_soft_state_fini(&nxge_list); 5380 5381 MUTEX_DESTROY(&nxge_common_lock); 5382 _fini_exit: 5383 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 5384 5385 return (status); 5386 } 5387 5388 int 5389 _info(struct modinfo *modinfop) 5390 { 5391 int status; 5392 5393 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5394 status = mod_info(&modlinkage, modinfop); 5395 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5396 5397 return (status); 5398 } 5399 5400 /*ARGSUSED*/ 5401 static int 5402 nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5403 { 5404 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5405 p_nxge_t nxgep = rhp->nxgep; 5406 uint32_t channel; 5407 p_tx_ring_t ring; 5408 5409 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5410 ring = nxgep->tx_rings->rings[channel]; 5411 5412 MUTEX_ENTER(&ring->lock); 5413 
	ring->tx_ring_handle = rhp->ring_handle;
	MUTEX_EXIT(&ring->lock);

	return (0);
}

static void
nxge_tx_ring_stop(mac_ring_driver_t rdriver)
{
	p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t nxgep = rhp->nxgep;
	uint32_t channel;
	p_tx_ring_t ring;

	channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
	ring = nxgep->tx_rings->rings[channel];

	MUTEX_ENTER(&ring->lock);
	ring->tx_ring_handle = (mac_ring_handle_t)NULL;
	MUTEX_EXIT(&ring->lock);
}

static int
nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
{
	p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t nxgep = rhp->nxgep;
	uint32_t channel;
	p_rx_rcr_ring_t ring;
	int i;

	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
	ring = nxgep->rx_rcr_rings->rcr_rings[channel];

	MUTEX_ENTER(&ring->lock);

	if (nxgep->rx_channel_started[channel] == B_TRUE) {
		MUTEX_EXIT(&ring->lock);
		return (0);
	}

	/* set rcr_ring */
	for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
		if ((nxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
		    (nxgep->ldgvp->ldvp[i].channel == channel)) {
			ring->ldvp = &nxgep->ldgvp->ldvp[i];
			ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp;
		}
	}

	nxgep->rx_channel_started[channel] = B_TRUE;
	ring->rcr_mac_handle = rhp->ring_handle;
	ring->rcr_gen_num = mr_gen_num;
	MUTEX_EXIT(&ring->lock);

	return (0);
}

static void
nxge_rx_ring_stop(mac_ring_driver_t rdriver)
{
	p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t nxgep = rhp->nxgep;
	uint32_t channel;
	p_rx_rcr_ring_t ring;

	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
	ring = nxgep->rx_rcr_rings->rcr_rings[channel];

	MUTEX_ENTER(&ring->lock);
	nxgep->rx_channel_started[channel] = B_FALSE;
	ring->rcr_mac_handle = NULL;
	MUTEX_EXIT(&ring->lock);
}

/*
 * Callback function for the MAC layer to register all rings.
 */
static void
nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_fill_ring 0x%x index %d", rtype, index));

	switch (rtype) {
	case MAC_RING_TYPE_TX: {
		p_nxge_ring_handle_t rhandlep;

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d",
		    rtype, index, p_cfgp->tdc.count));

		ASSERT((index >= 0) && (index < p_cfgp->tdc.count));
		rhandlep = &nxgep->tx_ring_handles[index];
		rhandlep->nxgep = nxgep;
		rhandlep->index = index;
		rhandlep->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rhandlep;
		infop->mri_start = nxge_tx_ring_start;
		infop->mri_stop = nxge_tx_ring_stop;
		infop->mri_tx = nxge_tx_ring_send;

		break;
	}
	case MAC_RING_TYPE_RX: {
		p_nxge_ring_handle_t rhandlep;
		int nxge_rindex;
		mac_intr_t nxge_mac_intr;

		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d",
		    rtype, index, p_cfgp->max_rdcs));

		/*
		 * 'index' is the ring index within the group.
		 * Find the ring index in the nxge instance.
		 */
		nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index);

		ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs));
		rhandlep = &nxgep->rx_ring_handles[nxge_rindex];
		rhandlep->nxgep = nxgep;
		rhandlep->index = nxge_rindex;
		rhandlep->ring_handle = rh;

		/*
		 * Entry points to enable interrupts (disable poll) and
		 * disable interrupts (enable poll).
		 */
		nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep;
		nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll;
		nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll;
		infop->mri_driver = (mac_ring_driver_t)rhandlep;
		infop->mri_start = nxge_rx_ring_start;
		infop->mri_stop = nxge_rx_ring_stop;
		infop->mri_intr = nxge_mac_intr;
		infop->mri_poll = nxge_rx_poll;

		break;
	}
	default:
		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x",
	    rtype));
}
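/*
 * Illustrative sketch (not compiled into the driver): how a
 * group-relative RX ring index from the MAC layer becomes a device-wide
 * RDC channel, combining nxge_get_rxring_index() with the instance's
 * first RDC as done in nxge_rx_ring_start() above.
 */
#if 0
static uint32_t
nxge_example_rx_channel(p_nxge_t nxgep, int rg_index, int index)
{
	int rindex = nxge_get_rxring_index(nxgep, rg_index, index);

	return (nxgep->pt_config.hw_config.start_rdc + rindex);
}
#endif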
static void
nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
    mac_ring_type_t type)
{
	nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
	nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
	nxge_t *nxge;
	nxge_grp_t *grp;
	nxge_rdc_grp_t *rdc_grp;
	uint16_t channel;	/* device-wide ring id */
	int dev_gindex;
	int rv;

	nxge = rgroup->nxgep;

	switch (type) {
	case MAC_RING_TYPE_TX:
		/*
		 * nxge_grp_dc_add takes a channel number which is a
		 * "device" ring ID.
		 */
		channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;

		/*
		 * Remove the ring from the default group
		 */
		if (rgroup->gindex != 0) {
			(void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
		}

		/*
		 * nxge->tx_set.group[] is an array of groups indexed by
		 * a "port" group ID.
		 */
		grp = nxge->tx_set.group[rgroup->gindex];
		rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
		if (rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_group_add_ring: nxge_grp_dc_add failed"));
		}
		break;

	case MAC_RING_TYPE_RX:
		/*
		 * nxge->rx_set.group[] is an array of groups indexed by
		 * a "port" group ID.
		 */
		grp = nxge->rx_set.group[rgroup->gindex];

		dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
		    rgroup->gindex;
		rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];

		/*
		 * nxge_grp_dc_add takes a channel number which is a
		 * "device" ring ID.
		 */
		channel = nxge->pt_config.hw_config.start_rdc + rhandle->index;
		rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel);
		if (rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_group_add_ring: nxge_grp_dc_add failed"));
		}

		rdc_grp->map |= (1 << channel);
		rdc_grp->max_rdcs++;

		(void) nxge_init_fzc_rdc_tbl(nxge, rgroup->rdctbl);
		break;
	}
}

static void
nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
    mac_ring_type_t type)
{
	nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
	nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
	nxge_t *nxge;
	uint16_t channel;	/* device-wide ring id */
	nxge_rdc_grp_t *rdc_grp;
	int dev_gindex;

	nxge = rgroup->nxgep;

	switch (type) {
	case MAC_RING_TYPE_TX:
		dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid +
		    rgroup->gindex;
		channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
		nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

		/*
		 * Add the ring back to the default group
		 */
		if (rgroup->gindex != 0) {
			nxge_grp_t *grp;
			grp = nxge->tx_set.group[0];
			(void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
		}
		break;

	case MAC_RING_TYPE_RX:
		dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
		    rgroup->gindex;
		rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
		channel = rdc_grp->start_rdc + rhandle->index;
		nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);

		rdc_grp->map &= ~(1 << channel);
		rdc_grp->max_rdcs--;

		(void) nxge_init_fzc_rdc_tbl(nxge, rgroup->rdctbl);
		break;
	}
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs(p_nxge_t nxgep)
{
	int intr_types;
	int type = 0;
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));

	nxgep->nxge_intr_type.intr_registered = B_FALSE;
	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
	nxgep->nxge_intr_type.msi_intx_cnt = 0;
	nxgep->nxge_intr_type.intr_added = 0;
	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
	nxgep->nxge_intr_type.intr_type = 0;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	} else if (nxge_msi_enable) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
	    != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}
	nxgep->nxge_intr_type.intr_types = intr_types;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * Solaris MSI-X is not supported yet.  Use MSI for now.
	 * nxge_msi_enable:
	 *	1 - MSI, 2 - MSI-X, others - FIXED
	 */
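	/*
	 * Summary of the selection below: with nxge_msi_enable == 2 the
	 * driver prefers MSI-X, then MSI, then FIXED; with 1 it prefers
	 * MSI, then MSI-X, then FIXED; any other value forces FIXED.
	 * Each preference is granted only if the type is present in the
	 * mask returned by ddi_intr_get_supported_types() above.
	 */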
	switch (nxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "use fixed (intx emulation) type %08x",
		    type));
		break;

	case 2:
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI-X 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
		break;

	case 1:
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI-X 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
	}

	nxgep->nxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    nxgep->nxge_intr_type.niu_msi_enable) {
		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_add_intrs: "
			    " nxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "interrupts registered : type %d", type));
			nxgep->nxge_intr_type.intr_registered = B_TRUE;

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "\nAdded advanced nxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!nxgep->nxge_intr_type.intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
		    "failed to register interrupts"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
	return (status);
}

static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)
{
	int intr_type;
	p_nxge_intr_t intrp;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intr_type = intrp->intr_type;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		return (nxge_add_intrs_adv_type(nxgep, intr_type));

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));

	default:
		return (NXGE_ERROR);
	}
}
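/*
 * Illustrative sketch (not compiled into the driver): the power-of-two
 * rounding that nxge_add_intrs_adv_type() below applies to the available
 * MSI vector count.  The driver's cascaded bit tests reduce navail to
 * the largest power of two not exceeding it, capped at 16.
 */
#if 0
static int
nxge_example_msi_round(int navail)
{
	int n = 1;

	while ((n << 1) <= navail && n < 16)
		n <<= 1;
	return (n);
}
#endif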
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t *dip = nxgep->dip;
	p_nxge_ldg_t ldgp;
	p_nxge_intr_t intrp;
	uint_t *inthandler;
	void *arg1, *arg2;
	int behavior;
	int nintrs, navail, nrequest;
	int nactual, nrequired;
	int inum = 0;
	int x, y;
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = nxge_create_msi_property(nxgep);
		if (nrequest < navail) {
			navail = nrequest;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI must be power of 2 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ?
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "nldvs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated interrupts */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual,
	    intrp->msi_intx_cnt,
	    intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
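
	/*
	 * Note: the capability flags cached in intr_cap above (in
	 * particular DDI_INTR_FLAG_BLOCK) decide later, in
	 * nxge_intrs_enable() and nxge_remove_intrs(), whether the
	 * vectors are enabled and disabled in one block operation or
	 * one handle at a time.
	 */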
	(void) nxge_intr_ldgv_init(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		if (nxgep->niu_type != N2_NIU) {
			ldgp->intdata = SID_DATA(ldgp->func, x);
		}

		arg1 = ldgp->ldvp;
		arg2 = nxgep;
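		/*
		 * A group with exactly one logical device is wired
		 * directly to that device's own handler; a group
		 * servicing several logical devices gets the shared
		 * group handler, which fans the interrupt out to each
		 * member device.
		 */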
"1-1 int handler(%d) ldg %d ldv %d " 6134 "arg1 $%p arg2 $%p\n", 6135 x, ldgp->ldg, ldgp->ldvp->ldv, 6136 arg1, arg2)); 6137 } else if (ldgp->nldvs > 1) { 6138 inthandler = (uint_t *)ldgp->sys_intr_handler; 6139 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6140 "nxge_add_intrs_adv_type_fix: " 6141 "shared ldv %d int handler(%d) ldv %d ldg %d" 6142 "arg1 0x%016llx arg2 0x%016llx\n", 6143 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6144 arg1, arg2)); 6145 } 6146 6147 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6148 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6149 != DDI_SUCCESS) { 6150 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6151 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6152 "status 0x%x", x, ddi_status)); 6153 for (y = 0; y < intrp->intr_added; y++) { 6154 (void) ddi_intr_remove_handler( 6155 intrp->htable[y]); 6156 } 6157 for (y = 0; y < nactual; y++) { 6158 (void) ddi_intr_free(intrp->htable[y]); 6159 } 6160 /* Free already allocated intr */ 6161 kmem_free(intrp->htable, intrp->intr_size); 6162 6163 (void) nxge_ldgv_uninit(nxgep); 6164 6165 return (NXGE_ERROR | NXGE_DDI_FAILED); 6166 } 6167 intrp->intr_added++; 6168 } 6169 6170 intrp->msi_intx_cnt = nactual; 6171 6172 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6173 6174 status = nxge_intr_ldgv_init(nxgep); 6175 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6176 6177 return (status); 6178 } 6179 6180 static void 6181 nxge_remove_intrs(p_nxge_t nxgep) 6182 { 6183 int i, inum; 6184 p_nxge_intr_t intrp; 6185 6186 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6187 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6188 if (!intrp->intr_registered) { 6189 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6190 "<== nxge_remove_intrs: interrupts not registered")); 6191 return; 6192 } 6193 6194 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 6195 6196 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6197 (void) ddi_intr_block_disable(intrp->htable, 6198 intrp->intr_added); 6199 } else { 6200 for (i = 0; i < intrp->intr_added; i++) { 6201 (void) ddi_intr_disable(intrp->htable[i]); 6202 } 6203 } 6204 6205 for (inum = 0; inum < intrp->intr_added; inum++) { 6206 if (intrp->htable[inum]) { 6207 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6208 } 6209 } 6210 6211 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6212 if (intrp->htable[inum]) { 6213 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6214 "nxge_remove_intrs: ddi_intr_free inum %d " 6215 "msi_intx_cnt %d intr_added %d", 6216 inum, 6217 intrp->msi_intx_cnt, 6218 intrp->intr_added)); 6219 6220 (void) ddi_intr_free(intrp->htable[inum]); 6221 } 6222 } 6223 6224 kmem_free(intrp->htable, intrp->intr_size); 6225 intrp->intr_registered = B_FALSE; 6226 intrp->intr_enabled = B_FALSE; 6227 intrp->msi_intx_cnt = 0; 6228 intrp->intr_added = 0; 6229 6230 (void) nxge_ldgv_uninit(nxgep); 6231 6232 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6233 "#msix-request"); 6234 6235 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6236 } 6237 6238 /*ARGSUSED*/ 6239 static void 6240 nxge_intrs_enable(p_nxge_t nxgep) 6241 { 6242 p_nxge_intr_t intrp; 6243 int i; 6244 int status; 6245 6246 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6247 6248 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6249 6250 if (!intrp->intr_registered) { 6251 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6252 "interrupts are not registered")); 6253 return; 6254 } 6255 6256 if (intrp->intr_enabled) { 6257 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6258 "<== 

/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;
	int		status;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
		/* Record the enabled state on success */
		if (status == DDI_SUCCESS) {
			intrp->intr_enabled = B_TRUE;
		}
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t	*macp;
	int		status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
	    NXGE_EHEADER_VLAN_CRC;
	macp->m_max_sdu = nxgep->mac.default_mtu;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = nxge_priv_props;
	macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS;
	macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1 | MAC_VIRT_SERIALIZE;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_mac_register: instance %d "
	    "max_sdu %d margin %d maxframe %d (header %d)",
	    nxgep->instance,
	    macp->m_max_sdu, macp->m_margin,
	    nxgep->mac.maxframesize,
	    NXGE_EHEADER_VLAN_CRC));

	status = mac_register(macp, &nxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}
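
/*
 * Note: the SDU advertised to the MAC layer above is derived from the
 * configured maximum frame size by subtracting NXGE_EHEADER_VLAN_CRC,
 * i.e. the Ethernet header, VLAN tag, and CRC overhead (assuming the
 * usual 22 bytes, a standard 1522-byte maxframesize yields the familiar
 * 1500-byte MTU).
 */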
0x%x\n", blk_id); 6382 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id); 6383 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 6384 switch (blk_id) { 6385 case MAC_BLK_ID: 6386 break; 6387 case TXMAC_BLK_ID: 6388 break; 6389 case RXMAC_BLK_ID: 6390 break; 6391 case MIF_BLK_ID: 6392 break; 6393 case IPP_BLK_ID: 6394 nxge_ipp_inject_err(nxgep, err_id); 6395 break; 6396 case TXC_BLK_ID: 6397 nxge_txc_inject_err(nxgep, err_id); 6398 break; 6399 case TXDMA_BLK_ID: 6400 nxge_txdma_inject_err(nxgep, err_id, chan); 6401 break; 6402 case RXDMA_BLK_ID: 6403 nxge_rxdma_inject_err(nxgep, err_id, chan); 6404 break; 6405 case ZCP_BLK_ID: 6406 nxge_zcp_inject_err(nxgep, err_id); 6407 break; 6408 case ESPC_BLK_ID: 6409 break; 6410 case FFLP_BLK_ID: 6411 break; 6412 case PHY_BLK_ID: 6413 break; 6414 case ETHER_SERDES_BLK_ID: 6415 break; 6416 case PCIE_SERDES_BLK_ID: 6417 break; 6418 case VIR_BLK_ID: 6419 break; 6420 } 6421 6422 nmp->b_wptr = nmp->b_rptr + size; 6423 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 6424 6425 miocack(wq, mp, (int)size, 0); 6426 } 6427 6428 static int 6429 nxge_init_common_dev(p_nxge_t nxgep) 6430 { 6431 p_nxge_hw_list_t hw_p; 6432 dev_info_t *p_dip; 6433 6434 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 6435 6436 p_dip = nxgep->p_dip; 6437 MUTEX_ENTER(&nxge_common_lock); 6438 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6439 "==> nxge_init_common_dev:func # %d", 6440 nxgep->function_num)); 6441 /* 6442 * Loop through existing per neptune hardware list. 6443 */ 6444 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6445 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6446 "==> nxge_init_common_device:func # %d " 6447 "hw_p $%p parent dip $%p", 6448 nxgep->function_num, 6449 hw_p, 6450 p_dip)); 6451 if (hw_p->parent_devp == p_dip) { 6452 nxgep->nxge_hw_p = hw_p; 6453 hw_p->ndevs++; 6454 hw_p->nxge_p[nxgep->function_num] = nxgep; 6455 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6456 "==> nxge_init_common_device:func # %d " 6457 "hw_p $%p parent dip $%p " 6458 "ndevs %d (found)", 6459 nxgep->function_num, 6460 hw_p, 6461 p_dip, 6462 hw_p->ndevs)); 6463 break; 6464 } 6465 } 6466 6467 if (hw_p == NULL) { 6468 6469 char **prop_val; 6470 uint_t prop_len; 6471 int i; 6472 6473 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6474 "==> nxge_init_common_device:func # %d " 6475 "parent dip $%p (new)", 6476 nxgep->function_num, 6477 p_dip)); 6478 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 6479 hw_p->parent_devp = p_dip; 6480 hw_p->magic = NXGE_NEPTUNE_MAGIC; 6481 nxgep->nxge_hw_p = hw_p; 6482 hw_p->ndevs++; 6483 hw_p->nxge_p[nxgep->function_num] = nxgep; 6484 hw_p->next = nxge_hw_list; 6485 if (nxgep->niu_type == N2_NIU) { 6486 hw_p->niu_type = N2_NIU; 6487 hw_p->platform_type = P_NEPTUNE_NIU; 6488 } else { 6489 hw_p->niu_type = NIU_TYPE_NONE; 6490 hw_p->platform_type = P_NEPTUNE_NONE; 6491 } 6492 6493 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 6494 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 6495 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 6496 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 6497 6498 nxge_hw_list = hw_p; 6499 6500 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0, 6501 "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) { 6502 for (i = 0; i < prop_len; i++) { 6503 if ((strcmp((caddr_t)prop_val[i], 6504 NXGE_ROCK_COMPATIBLE) == 0)) { 6505 hw_p->platform_type = P_NEPTUNE_ROCK; 6506 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6507 "ROCK hw_p->platform_type %d", 6508 hw_p->platform_type)); 6509 break; 6510 } 6511 
	if (hw_p == NULL) {

		char **prop_val;
		uint_t prop_len;
		int i;

		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device: func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num,
		    p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;
		if (nxgep->niu_type == N2_NIU) {
			hw_p->niu_type = N2_NIU;
			hw_p->platform_type = P_NEPTUNE_NIU;
		} else {
			hw_p->niu_type = NIU_TYPE_NONE;
			hw_p->platform_type = P_NEPTUNE_NONE;
		}

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;

		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
			for (i = 0; i < prop_len; i++) {
				if ((strcmp((caddr_t)prop_val[i],
				    NXGE_ROCK_COMPATIBLE) == 0)) {
					hw_p->platform_type = P_NEPTUNE_ROCK;
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "ROCK hw_p->platform_type %d",
					    hw_p->platform_type));
					break;
				}
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "nxge_init_common_dev: read compatible"
				    " property[%d] val[%s]",
				    i, (caddr_t)prop_val[i]));
			}

			/*
			 * Free the property only when the lookup
			 * succeeded; prop_val is uninitialized
			 * otherwise.
			 */
			ddi_prop_free(prop_val);
		}

		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
	}

	MUTEX_EXIT(&nxge_common_lock);

	nxgep->platform_type = hw_p->platform_type;
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d",
	    nxgep->platform_type));
	if (nxgep->niu_type != N2_NIU) {
		nxgep->niu_type = hw_p->niu_type;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}

static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p, h_hw_p;
	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device: func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));

			/*
			 * Release the RDC table, a shared resource
			 * of the nxge hardware. The RDC table was
			 * assigned to this instance of nxge in
			 * nxge_use_cfg_dma_config().
			 */
			if (!isLDOMguest(nxgep)) {
				p_dma_cfgp =
				    (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
				p_cfgp =
				    (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
				(void) nxge_fzc_rdc_tbl_unbind(nxgep,
				    p_cfgp->def_mac_rxdma_grpid);

				/* Clean up any outstanding groups. */
				nxge_grp_cleanup(nxgep);
			}
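
			/*
			 * ndevs counts the nxge instances still
			 * attached to this shared hardware; the shared
			 * state itself is torn down only when the last
			 * instance drops its reference below.
			 */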
			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num,
				    hw_p,
				    p_dip,
				    hw_p->ndevs));

				nxge_hio_uninit(nxgep);

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device: "
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device: "
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				nxgep->nxge_hw_p = NULL;
				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}

/*
 * Determines the number of ports from the niu_type or the platform type.
 * Returns the number of ports, or zero on failure.
 */

int
nxge_get_nports(p_nxge_t nxgep)
{
	int	nports = 0;

	switch (nxgep->niu_type) {
	case N2_NIU:
	case NEPTUNE_2_10GF:
		nports = 2;
		break;
	case NEPTUNE_4_1GC:
	case NEPTUNE_2_10GF_2_1GC:
	case NEPTUNE_1_10GF_3_1GC:
	case NEPTUNE_1_1GC_1_10GF_2_1GC:
	case NEPTUNE_2_10GF_2_1GRF:
		nports = 4;
		break;
	default:
		switch (nxgep->platform_type) {
		case P_NEPTUNE_NIU:
		case P_NEPTUNE_ATLAS_2PORT:
			nports = 2;
			break;
		case P_NEPTUNE_ATLAS_4PORT:
		case P_NEPTUNE_MARAMBA_P0:
		case P_NEPTUNE_MARAMBA_P1:
		case P_NEPTUNE_ROCK:
		case P_NEPTUNE_ALONSO:
			nports = 4;
			break;
		default:
			break;
		}
		break;
	}

	return (nports);
}
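
/*
 * Tuning note: nxge_msix_10g_intrs and nxge_msix_1g_intrs referenced
 * below are plain globals, so (assuming the usual /etc/system tunable
 * mechanism) the per-port MSI-X request can be overridden at boot, e.g.:
 *
 *	set nxge:nxge_msix_10g_intrs = 4
 *
 * A value of 0, or anything above NXGE_MSIX_MAX_ALLOWED, falls back to
 * the defaults (NXGE_MSIX_REQUEST_10G/NXGE_MSIX_REQUEST_1G).
 */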

/*
 * The following two functions are to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
	int	nmsi;
	extern	int ncpus;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));

	switch (nxgep->mac.portmode) {
	case PORT_10G_COPPER:
	case PORT_10G_FIBER:
	case PORT_10G_TN1010:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		/*
		 * The maximum number of MSI-X vectors requested is 8.
		 * If the number of CPUs is less than 8, the request
		 * is reduced to the number of CPUs (default case).
		 */
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (10G): "
		    "nxge_msix_10g_intrs %d",
		    nxge_msix_10g_intrs));
		if ((nxge_msix_10g_intrs == 0) ||
		    (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_10G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): reset to 8"));
		} else {
			nmsi = nxge_msix_10g_intrs;
		}

		/*
		 * Cap the request at the number of CPUs only when the
		 * requested count is the default (8).
		 */
		if ((nmsi == NXGE_MSIX_REQUEST_10G) &&
		    (ncpus < nmsi)) {
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): "
			    "reset to the number of CPUs"));
			nmsi = ncpus;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;

	default:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d",
		    nxge_msix_1g_intrs));
		if ((nxge_msix_1g_intrs == 0) ||
		    (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_1G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (1G): reset to 2"));
		} else {
			nmsi = nxge_msix_1g_intrs;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
	return (nmsi);
}

/* ARGSUSED */
static int
nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
    void *pr_val)
{
	int		err = 0;
	link_flowctrl_t	fl;

	switch (pr_num) {
	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (link_flowctrl_t))
			return (EINVAL);
		fl = LINK_FLOWCTRL_RX;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	default:
		err = ENOTSUP;
		break;
	}
	return (err);
}

/*
 * The following is a software workaround for the Neptune hardware's
 * interrupt bugs: the hardware may generate spurious interrupts after
 * an interrupt handler has been removed.
 */
#define	NXGE_PCI_PORT_LOGIC_OFFSET	0x98
#define	NXGE_PIM_RESET			(1ULL << 29)
#define	NXGE_GLU_RESET			(1ULL << 30)
#define	NXGE_NIU_RESET			(1ULL << 31)
#define	NXGE_PCI_RESET_ALL		(NXGE_PIM_RESET |	\
					NXGE_GLU_RESET |	\
					NXGE_NIU_RESET)

#define	NXGE_WAIT_QUITE_TIME		200000	/* in microseconds */
#define	NXGE_WAIT_QUITE_RETRY		40
#define	NXGE_PCI_RESET_WAIT		1000000 /* one second */

static void
nxge_niu_peu_reset(p_nxge_t nxgep)
{
	uint32_t	rvalue;
	p_nxge_hw_list_t hw_p;
	p_nxge_t	fnxgep;
	int		i, j;

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: NULL hardware pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
	    hw_p->flags, nxgep->nxge_link_poll_timerid,
	    nxgep->nxge_timerid));

	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
	/*
	 * Make sure the other instances on the same hardware have
	 * stopped sending PIOs and are in a quiescent state.
	 */
	for (i = 0; i < NXGE_MAX_PORTS; i++) {
		fnxgep = hw_p->nxge_p[i];
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: checking entry %d "
		    "nxgep $%p", i, fnxgep));
#ifdef	NXGE_DEBUG
		if (fnxgep) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: entry %d (function %d) "
			    "link timer id %d hw timer id %d",
			    i, fnxgep->function_num,
			    fnxgep->nxge_link_poll_timerid,
			    fnxgep->nxge_timerid));
		}
#endif
		if (fnxgep && fnxgep != nxgep &&
		    (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: checking $%p "
			    "(function %d) timer ids",
			    fnxgep, fnxgep->function_num));
			for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
				NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
				    "==> nxge_niu_peu_reset: waiting"));
				NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
				if (!fnxgep->nxge_timerid &&
				    !fnxgep->nxge_link_poll_timerid) {
					break;
				}
			}
			NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
			if (fnxgep->nxge_timerid ||
			    fnxgep->nxge_link_poll_timerid) {
				MUTEX_EXIT(&hw_p->nxge_cfg_lock);
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "<== nxge_niu_peu_reset: cannot reset "
				    "hardware (devices are still in use)"));
				return;
			}
		}
	}

	if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
		hw_p->flags |= COMMON_RESET_NIU_PCI;
		rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: read offset 0x%x (%d) "
		    "(data 0x%x)",
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    rvalue));

		rvalue |= NXGE_PCI_RESET_ALL;
		pci_config_put32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
		    rvalue));

		NXGE_DELAY(NXGE_PCI_RESET_WAIT);
	}

	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
}
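
/*
 * Note: the replay-timeout update below is a read-OR-write: it can set
 * bits in the field at PCI_REPLAY_TIMEOUT_SHIFT but never clears any
 * that are already set, so it relies on the field's power-on value
 * being a subset of nxge_replay_timeout (0xc). A full read-modify-write
 * sketch, assuming the field spans bits 18:14 (a 5-bit mask), would be:
 *
 *	value = pci_config_get32(regh, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	value &= ~(0x1f << PCI_REPLAY_TIMEOUT_SHIFT);
 *	value |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(regh, PCI_REPLAY_TIMEOUT_CFG_OFFSET, value);
 */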
static void
nxge_set_pci_replay_timeout(p_nxge_t nxgep)
{
	p_dev_regs_t	dev_regs;
	uint32_t	value;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));

	if (!nxge_set_replay_timer) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: will not change "
		    "the timeout"));
		return;
	}

	dev_regs = nxgep->dev_regs;

	/* Check the handles before dereferencing them in debug output */
	if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
		    "no PCI handle",
		    dev_regs));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
	    dev_regs, dev_regs->nxge_pciregh));

	value = (pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
	    (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value before "
	    "set 0x%x (timeout value to set 0x%x at offset 0x%x) value 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));

	pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
	    value);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET)));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
nxge_quiesce(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);

	if (nxgep == NULL)
		return (DDI_FAILURE);

	/* Turn off debugging */
	nxge_debug_level = NO_DEBUG;
	nxgep->nxge_debug_level = NO_DEBUG;
	npi_debug_level = NO_DEBUG;

	/*
	 * Stop the link monitor only when link checking is
	 * interrupt based.
	 */
	if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	}

	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	return (DDI_SUCCESS);
}