/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include	<sys/nxge/nxge_impl.h>
#include	<sys/nxge/nxge_hio.h>
#include	<sys/nxge/nxge_rxdma.h>
#include	<sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use OBP-published properties */
uint32_t nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */

/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 * and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t nxge_msi_enable = 2;
#else
uint32_t nxge_msi_enable = 1;
#endif

/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed.  If this flag is enabled, the
 * driver will reset the hardware when devices are being detached.
 */
uint32_t nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware
 * checksum bugs that affect packet transmission
 * and receive:
 *
 * Usage of nxge_cksum_offload:
 *
 *  (1) nxge_cksum_offload = 0 (default):
 *	- transmit packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: driver will compute the software checksum
 *	       based on the partial checksum computed
 *	       by the IP layer.
 *	- receive packets:
 *	  TCP: marks packet checksum flags based on hardware result.
 *	  UDP: will not mark checksum flags.
 *
 *  (2) nxge_cksum_offload = 1:
 *	- transmit packets:
 *	  TCP/UDP: uses the hardware checksum feature.
 *	- receive packets:
 *	  TCP/UDP: marks packet checksum flags based on hardware result.
 *
 *  (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksums for both TCP and UDP will be computed
 *	  by the stack.
 *	- Software LSO is not allowed in this case.
 *
 *  (4) nxge_cksum_offload > 2:
 *	- Treated as if it were set to 2
 *	  (the stack will compute the checksum).
 *
 *  (5) If the hardware bug is fixed, this workaround
 *	needs to be updated accordingly to reflect
 *	the new hardware revision.
 */
uint32_t nxge_cksum_offload = 0;
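/*
 * Illustrative sketch only (compiled out, not the driver's actual
 * transmit code): how a transmit path could dispatch on
 * nxge_cksum_offload per the table above.  The helper name is
 * hypothetical.
 */
#if 0
static boolean_t
nxge_cksum_use_hw(uint8_t l4_proto)
{
	switch (nxge_cksum_offload) {
	case 0:
		/* TCP in hardware; UDP checksummed in software. */
		return (l4_proto == IPPROTO_TCP);
	case 1:
		/* Both TCP and UDP use the hardware checksum. */
		return (B_TRUE);
	default:
		/* 2 and above: the stack computes all checksums. */
		return (B_FALSE);
	}
}
#endif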
/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t nxge_jumbo_enable = B_FALSE;
uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/* Maximum LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;


/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if defined(_BIG_ENDIAN)
uint32_t nxge_use_kmem_alloc = 1;
#else
uint32_t nxge_use_kmem_alloc = 0;
#endif

rtrace_t npi_rtracebuf;

/*
 * The hardware sometimes fails to allow enough time for the link
 * partner to send an acknowledgement for packets that the hardware
 * sent to it.  In those instances the hardware resends the packets
 * earlier than it should.  This behavior caused some switches to
 * acknowledge the wrong packets, which triggered a fatal error.
 * This software workaround sets the replay timer to a value
 * suggested by the hardware team.
 *
 * PCI config space replay timer register:
 * the replay timeout value below is 0xc for bits 14:18.
 */
#define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
#define	PCI_REPLAY_TIMEOUT_SHIFT	14

uint32_t nxge_set_replay_timer = 1;
uint32_t nxge_replay_timeout = 0xc;

/*
 * Transmit serialization can sometimes cause the driver to sleep
 * longer than it should before calling the driver transmit function.
 * The performance group suggested a tunable to cap the maximum wait
 * time when needed; the default is 1 tick.
 */
uint32_t nxge_tx_serial_maxsleep = 1;
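/*
 * Example (illustrative values, not recommendations): module tunables
 * such as those above can be set from /etc/system with lines like
 *
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_lso_max = 65535
 *
 * and take effect the next time the module is loaded.
 */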
#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);
static int nxge_quiesce(dev_info_t *);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_remove(void *arg, int slot);
extern void nxge_grp_cleanup(p_nxge_t nxge);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
    char *, caddr_t);
/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	int slot, int rdctbl, boolean_t usetbl);
void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
	boolean_t factory);
#if defined(sun4v)
extern mblk_t *nxge_m_tx(void *arg, mblk_t *mp);
#endif

static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *, uint_t *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
    const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
    void *, uint_t *);
static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);
static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
    mac_ring_info_t *, mac_ring_handle_t);
static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);
static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);

static void nxge_niu_peu_reset(p_nxge_t nxgep);
static void nxge_set_pci_replay_timeout(nxge_t *);

mac_priv_prop_t nxge_priv_props[] = {
	{"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW},
	{"_function_number", MAC_PROP_PERM_READ},
	{"_fw_version", MAC_PROP_PERM_READ},
	{"_port_mode", MAC_PROP_PERM_READ},
	{"_hot_swap_phy", MAC_PROP_PERM_READ},
	{"_accept_jumbo", MAC_PROP_PERM_RW},
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
	{"_soft_lso_enable", MAC_PROP_PERM_RW}
};

#define	NXGE_MAX_PRIV_PROPS	\
	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	NULL,
	NULL,
	nxge_m_ioctl,
	nxge_m_getcapab,
	NULL,
	NULL,
	nxge_m_setprop,
	nxge_m_getprop
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);

/*
 * For applications that care about latency, PAE and customers
 * requested tunables that allow the user to raise the number of
 * interrupts in order to spread the interrupt load among multiple
 * channels.  The DDI framework limits the maximum number of MSI-X
 * resources to allocate to 8 (ddi_msix_alloc_limit); if more than 8
 * is requested, ddi_msix_alloc_limit must be raised accordingly.
 * The default number of MSI interrupts is 8 for a 10G link and
 * 2 for a 1G link.
 */
#define	NXGE_MSIX_MAX_ALLOWED	32
uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
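/*
 * Example (illustrative, not a recommendation): to request 16 MSI-X
 * vectors on a 10G port, both the driver tunable and the DDI limit
 * would need to be raised in /etc/system, e.g.
 *
 *	set ddi_msix_alloc_limit = 16
 *	set nxge:nxge_msix_10g_intrs = 16
 */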
/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system.  The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t, ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *, ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};
ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks have to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes [] = {0x2000};
#else
size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
		0x10000, 0x20000, 0x40000, 0x80000,
		0x100000, 0x200000, 0x400000, 0x800000,
		0x1000000, 0x2000000, 0x4000000};
#endif

/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);
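/*
 * Illustrative sketch only (compiled out): the canonical DDI sequence
 * the attribute structures above feed into.  Error handling is elided
 * and the function name is hypothetical.
 */
#if 0
static int
nxge_example_desc_alloc(dev_info_t *dip, size_t len)
{
	ddi_dma_handle_t dmah;
	ddi_acc_handle_t acch;
	ddi_dma_cookie_t cookie;
	caddr_t kaddr;
	size_t real_len;
	uint_t ncookies;

	/* 1. Allocate a DMA handle governed by nxge_desc_dma_attr. */
	(void) ddi_dma_alloc_handle(dip, &nxge_desc_dma_attr,
	    DDI_DMA_SLEEP, NULL, &dmah);
	/* 2. Allocate DMA-able memory with the descriptor access attrs. */
	(void) ddi_dma_mem_alloc(dmah, len, &nxge_dev_desc_dma_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &kaddr, &real_len, &acch);
	/* 3. Bind the memory to obtain the device-visible cookie. */
	(void) ddi_dma_addr_bind_handle(dmah, NULL, kaddr, real_len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &cookie, &ncookies);
	return (0);
}
#endif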
static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;
	p_nxge_param_t param_arr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}


	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
	    &nxge_dev_desc_dma_acc_attr,
	    &nxge_rx_dma_attr);

	/*
	 * Create & initialize the per-Neptune data structure
	 * (even if we're a guest).
	 */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	/*
	 * Software workaround: set the replay timer.
	 */
	if (nxgep->niu_type != N2_NIU) {
		nxge_set_pci_replay_timeout(nxgep);
	}
#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		nxge_m_callbacks.mc_tx = nxge_m_tx;
	}
#endif

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail4;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports.  The first 2 ports use XMAC
		 * (10G MAC) internally; the other 2 ports use BMAC
		 * (1G "Big" MAC).  The two types of MACs have
		 * different characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);

		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * address_hi, the first int in the reg property,
		 * contains the config handle; bits 28-31, which are
		 * OBP-specific info, need to be masked off.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
		ddi_prop_free(regp);
	}
	if (isLDOMguest(nxgep)) {
		uchar_t *prop_val;
		uint_t prop_len;
		uint32_t max_frame_size;

		extern void nxge_get_logical_props(p_nxge_t);

		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
		nxgep->mac.portmode = PORT_LOGICAL;
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
		    "phy-type", "virtual transceiver");

		nxgep->nports = 1;
		nxgep->board_ver = 0;	/* XXX What? */

		/*
		 * local-mac-address property gives us info on which
		 * specific MAC address the Hybrid resource is associated
		 * with.
		 */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "local-mac-address", &prop_val,
		    &prop_len) != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		if (prop_len != ETHERADDRL) {
			ddi_prop_free(prop_val);
			goto nxge_attach_fail5;
		}
		ether_copy(prop_val, nxgep->hio_mac_addr);
		ddi_prop_free(prop_val);
		nxge_get_logical_props(nxgep);

		/*
		 * Enable Jumbo property based on the "max-frame-size"
		 * property value.
		 */
		max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
		    nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "max-frame-size", NXGE_MTU_DEFAULT_MAX);
		if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
		    (max_frame_size <= TX_JUMBO_MTU)) {
			param_arr = nxgep->param_arr;

			param_arr[param_accept_jumbo].value = 1;
			nxgep->mac.is_jumbo = B_TRUE;
			nxgep->mac.maxframesize = (uint16_t)max_frame_size;
			nxgep->mac.default_mtu = nxgep->mac.maxframesize -
			    NXGE_EHEADER_VLAN_CRC;
		}
	} else {
		status = nxge_xcvr_find(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
			    " Couldn't determine card type"
			    " .... exit "));
			goto nxge_attach_fail5;
		}

		status = nxge_get_config_properties(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "get_hw create failed"));
			goto nxge_attach_fail;
		}
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

	nxge_hw_id_init(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		/* Find our VR & channel sets. */
		status = nxge_hio_vr_add(nxgep);
		if (status != NXGE_OK) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_hio_vr_add failed"));
			(void) hsvc_unregister(&nxgep->niu_hsvc);
			nxgep->niu_hsvc_available = B_FALSE;
		}
		goto nxge_attach_exit;
	}
#endif

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}
*/ 861 if ((status = nxge_mac_register(nxgep)) != NXGE_OK) { 862 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 863 "unable to register to mac layer (%d)", status)); 864 goto nxge_attach_fail; 865 } 866 867 mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN); 868 869 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 870 "registered to mac (instance %d)", instance)); 871 872 /* nxge_link_monitor calls xcvr.check_link recursively */ 873 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 874 875 goto nxge_attach_exit; 876 877 nxge_attach_fail: 878 nxge_unattach(nxgep); 879 goto nxge_attach_fail1; 880 881 nxge_attach_fail5: 882 /* 883 * Tear down the ndd parameters setup. 884 */ 885 nxge_destroy_param(nxgep); 886 887 /* 888 * Tear down the kstat setup. 889 */ 890 nxge_destroy_kstats(nxgep); 891 892 nxge_attach_fail4: 893 if (nxgep->nxge_hw_p) { 894 nxge_uninit_common_dev(nxgep); 895 nxgep->nxge_hw_p = NULL; 896 } 897 898 nxge_attach_fail3: 899 /* 900 * Unmap the register setup. 901 */ 902 nxge_unmap_regs(nxgep); 903 904 nxge_fm_fini(nxgep); 905 906 nxge_attach_fail2: 907 ddi_soft_state_free(nxge_list, nxgep->instance); 908 909 nxge_attach_fail1: 910 if (status != NXGE_OK) 911 status = (NXGE_ERROR | NXGE_DDI_FAILED); 912 nxgep = NULL; 913 914 nxge_attach_exit: 915 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x", 916 status)); 917 918 return (status); 919 } 920 921 static int 922 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 923 { 924 int status = DDI_SUCCESS; 925 int instance; 926 p_nxge_t nxgep = NULL; 927 928 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach")); 929 instance = ddi_get_instance(dip); 930 nxgep = ddi_get_soft_state(nxge_list, instance); 931 if (nxgep == NULL) { 932 status = DDI_FAILURE; 933 goto nxge_detach_exit; 934 } 935 936 switch (cmd) { 937 case DDI_DETACH: 938 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH")); 939 break; 940 941 case DDI_PM_SUSPEND: 942 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND")); 943 nxgep->suspended = DDI_PM_SUSPEND; 944 nxge_suspend(nxgep); 945 break; 946 947 case DDI_SUSPEND: 948 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND")); 949 if (nxgep->suspended != DDI_PM_SUSPEND) { 950 nxgep->suspended = DDI_SUSPEND; 951 nxge_suspend(nxgep); 952 } 953 break; 954 955 default: 956 status = DDI_FAILURE; 957 } 958 959 if (cmd != DDI_DETACH) 960 goto nxge_detach_exit; 961 962 /* 963 * Stop the xcvr polling. 964 */ 965 nxgep->suspended = cmd; 966 967 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 968 969 if (isLDOMguest(nxgep)) { 970 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) 971 nxge_m_stop((void *)nxgep); 972 nxge_hio_unregister(nxgep); 973 } else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) { 974 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 975 "<== nxge_detach status = 0x%08X", status)); 976 return (DDI_FAILURE); 977 } 978 979 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 980 "<== nxge_detach (mac_unregister) status = 0x%08X", status)); 981 982 nxge_unattach(nxgep); 983 nxgep = NULL; 984 985 nxge_detach_exit: 986 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X", 987 status)); 988 989 return (status); 990 } 991 992 static void 993 nxge_unattach(p_nxge_t nxgep) 994 { 995 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach")); 996 997 if (nxgep == NULL || nxgep->dev_regs == NULL) { 998 return; 999 } 1000 1001 nxgep->nxge_magic = 0; 1002 1003 if (nxgep->nxge_timerid) { 1004 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 1005 nxgep->nxge_timerid = 0; 1006 } 1007 1008 /* 1009 * If this flag is set, it will affect the Neptune 1010 * only. 
	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
		nxge_niu_peu_reset(nxgep);
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
int
nxge_hsvc_register(nxge_t *nxgep)
{
	nxge_status_t status;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			return (DDI_FAILURE);
		}
		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}

	return (DDI_SUCCESS);
}
#endif

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	p_dev_regs_t dev_regs;
	char buf[MAXPATHLEN + 1];
	char *devname;
#ifdef NXGE_DEBUG
	char *sysname;
#endif
	off_t regsize;
	nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));
	/*
	 * The driver is running on an N2-NIU system if devname is
	 * something like "/niu@80/network@0".
	 */
	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) -1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int *prop_val;
		uint_t prop_len;
		uint8_t func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				return (NXGE_OK);
			}
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMPORTANT: workaround for a hardware bit-swapping
		 * bug which ends up with no-snoop = yes, resulting
		 * in DMA not being synched properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}
0x%x", regsize)); 1260 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1261 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1262 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1263 1264 if (ddi_status != DDI_SUCCESS) { 1265 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1266 "ddi_map_regs for nxge vio reg failed")); 1267 goto nxge_map_regs_fail3; 1268 } 1269 nxgep->dev_regs = dev_regs; 1270 1271 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh); 1272 NPI_PCI_ADD_HANDLE_SET(nxgep, 1273 (npi_reg_ptr_t)dev_regs->nxge_pciregp); 1274 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh); 1275 NPI_MSI_ADD_HANDLE_SET(nxgep, 1276 (npi_reg_ptr_t)dev_regs->nxge_msix_regp); 1277 1278 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1279 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1280 1281 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1282 NPI_REG_ADD_HANDLE_SET(nxgep, 1283 (npi_reg_ptr_t)dev_regs->nxge_regp); 1284 1285 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1286 NPI_VREG_ADD_HANDLE_SET(nxgep, 1287 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1288 1289 break; 1290 1291 case N2_NIU: 1292 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU")); 1293 /* 1294 * Set up the device mapped register (FWARC 2006/556) 1295 * (changed back to 1: reg starts at 1!) 1296 */ 1297 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 1298 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1299 "nxge_map_regs: dev size 0x%x", regsize)); 1300 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1301 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1302 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1303 1304 if (ddi_status != DDI_SUCCESS) { 1305 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1306 "ddi_map_regs for N2/NIU, global reg failed ")); 1307 goto nxge_map_regs_fail1; 1308 } 1309 1310 /* set up the first vio region mapped register */ 1311 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1312 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1313 "nxge_map_regs: vio (1) size 0x%x", regsize)); 1314 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1315 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1316 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1317 1318 if (ddi_status != DDI_SUCCESS) { 1319 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1320 "ddi_map_regs for nxge vio reg failed")); 1321 goto nxge_map_regs_fail2; 1322 } 1323 /* set up the second vio region mapped register */ 1324 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1325 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1326 "nxge_map_regs: vio (3) size 0x%x", regsize)); 1327 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1328 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0, 1329 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh); 1330 1331 if (ddi_status != DDI_SUCCESS) { 1332 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1333 "ddi_map_regs for nxge vio2 reg failed")); 1334 goto nxge_map_regs_fail3; 1335 } 1336 nxgep->dev_regs = dev_regs; 1337 1338 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1339 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1340 1341 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1342 NPI_REG_ADD_HANDLE_SET(nxgep, 1343 (npi_reg_ptr_t)dev_regs->nxge_regp); 1344 1345 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1346 NPI_VREG_ADD_HANDLE_SET(nxgep, 1347 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1348 1349 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh); 1350 NPI_V2REG_ADD_HANDLE_SET(nxgep, 1351 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp); 1352 1353 break; 1354 } 1355 1356 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx " 1357 " handle 0x%0llx", 
	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		/* Free the vio handle (not the global register handle). */
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));

	if (isLDOMguest(nxgep)) {
		nxge_guest_regs_map_free(nxgep);
		return;
	}

	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->interrupt_cookie = 0;
	} else {
		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
		    &nxgep->interrupt_cookie);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_setup_mutexes: failed 0x%x",
			    ddi_status));
			goto nxge_setup_mutexes_exit;
		}
	}

	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);

	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context,
	 * as FFLP operations can take a very long time to
	 * complete and hence are not suitable to invoke from
	 * interrupt handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 * enable the port, configure the dma channel bitmap,
		 * configure the max burst size).
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	/* XXX What changes do I need to make here? */
	nxge_intrs_enable(nxgep);

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}


timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}
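/*
 * Illustrative sketch only (compiled out): typical pairing of the
 * timer helpers above.  The callback name nxge_example_tick is
 * hypothetical; real callers pass routines such as the hardware
 * state-check function.
 */
#if 0
static void
nxge_example_tick(void *arg)
{
	p_nxge_t nxgep = (p_nxge_t)arg;

	/* ... do periodic work, then re-arm for one second ... */
	nxgep->nxge_timerid = nxge_start_timer(nxgep,
	    (fptrv_t)nxge_example_tick, 1000);
}
#endif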
void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	/*
	 * Start the timer if the reset flag is not set.
	 * If the reset flag is set, the link monitor
	 * will not be started, in order to stop further bus
	 * activity coming from this interface.
	 * The driver will start the monitor function
	 * if the interface is initialized again later.
	 */
	if (!nxge_peu_reset_enable) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t regdata;
	int i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
	reg = buf[0];

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}
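/*
 * Illustrative sketch only (compiled out): the mblk that nxge_put64()
 * expects carries two consecutive 64-bit words, the register offset
 * followed by the value to write.  The setup below is hypothetical
 * test code, not part of the driver.
 */
#if 0
	uint64_t buf[2] = { 0x1000ULL, 0x5aULL };	/* offset, value */

	bcopy((char *)buf, (char *)mp->b_rptr, sizeof (buf));
	nxge_put64(nxgep, mp);
#endif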
nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	/* In case a developer has changed nxge_debug_level. */
	if (nxgep != NULL && nxgep->nxge_debug_level != nxge_debug_level)
		nxgep->nxge_debug_level = nxge_debug_level;

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);

	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
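/*
 * Illustrative usage sketch only (compiled out): dumping the first
 * bytes of a (hypothetical) mblk through the debug machinery above.
 */
#if 0
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "packet: %s",
	    nxge_dump_packet((char *)mp->b_rptr, 32)));
#endif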
NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0))); 1935 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1936 "\nNeptune PCI BAR: base30 0x%x\n", 1937 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); 1938 1939 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1940 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1941 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1942 "first 0x%llx second 0x%llx third 0x%llx " 1943 "last 0x%llx ", 1944 NXGE_PIO_READ64(dev_handle, 1945 (uint64_t *)(dev_ptr + 0), 0), 1946 NXGE_PIO_READ64(dev_handle, 1947 (uint64_t *)(dev_ptr + 8), 0), 1948 NXGE_PIO_READ64(dev_handle, 1949 (uint64_t *)(dev_ptr + 16), 0), 1950 NXGE_PIO_READ64(cfg_handle, 1951 (uint64_t *)(dev_ptr + 24), 0))); 1952 } 1953 } 1954 1955 #endif 1956 1957 static void 1958 nxge_suspend(p_nxge_t nxgep) 1959 { 1960 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); 1961 1962 nxge_intrs_disable(nxgep); 1963 nxge_destroy_dev(nxgep); 1964 1965 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); 1966 } 1967 1968 static nxge_status_t 1969 nxge_resume(p_nxge_t nxgep) 1970 { 1971 nxge_status_t status = NXGE_OK; 1972 1973 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); 1974 1975 nxgep->suspended = DDI_RESUME; 1976 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1977 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1978 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 1979 (void) nxge_rx_mac_enable(nxgep); 1980 (void) nxge_tx_mac_enable(nxgep); 1981 nxge_intrs_enable(nxgep); 1982 nxgep->suspended = 0; 1983 1984 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1985 "<== nxge_resume status = 0x%x", status)); 1986 return (status); 1987 } 1988 1989 static nxge_status_t 1990 nxge_setup_dev(p_nxge_t nxgep) 1991 { 1992 nxge_status_t status = NXGE_OK; 1993 1994 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 1995 nxgep->mac.portnum)); 1996 1997 status = nxge_link_init(nxgep); 1998 1999 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 2000 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2001 "port%d Bad register acc handle", nxgep->mac.portnum)); 2002 status = NXGE_ERROR; 2003 } 2004 2005 if (status != NXGE_OK) { 2006 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2007 " nxge_setup_dev status " 2008 "(xcvr init 0x%08x)", status)); 2009 goto nxge_setup_dev_exit; 2010 } 2011 2012 nxge_setup_dev_exit: 2013 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2014 "<== nxge_setup_dev port %d status = 0x%08x", 2015 nxgep->mac.portnum, status)); 2016 2017 return (status); 2018 } 2019 2020 static void 2021 nxge_destroy_dev(p_nxge_t nxgep) 2022 { 2023 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 2024 2025 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 2026 2027 (void) nxge_hw_stop(nxgep); 2028 2029 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 2030 } 2031 2032 static nxge_status_t 2033 nxge_setup_system_dma_pages(p_nxge_t nxgep) 2034 { 2035 int ddi_status = DDI_SUCCESS; 2036 uint_t count; 2037 ddi_dma_cookie_t cookie; 2038 uint_t iommu_pagesize; 2039 nxge_status_t status = NXGE_OK; 2040 2041 NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 2042 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 2043 if (nxgep->niu_type != N2_NIU) { 2044 iommu_pagesize = dvma_pagesize(nxgep->dip); 2045 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2046 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2047 " default_block_size %d iommu_pagesize %d", 2048 nxgep->sys_page_sz, 2049 ddi_ptob(nxgep->dip, (ulong_t)1), 2050 nxgep->rx_default_block_size, 2051 iommu_pagesize)); 2052 2053 if (iommu_pagesize != 0) { 2054 if (nxgep->sys_page_sz == 
iommu_pagesize) { 2055 if (iommu_pagesize > 0x4000) 2056 nxgep->sys_page_sz = 0x4000; 2057 } else { 2058 if (nxgep->sys_page_sz > iommu_pagesize) 2059 nxgep->sys_page_sz = iommu_pagesize; 2060 } 2061 } 2062 } 2063 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2064 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2065 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2066 "default_block_size %d page mask %d", 2067 nxgep->sys_page_sz, 2068 ddi_ptob(nxgep->dip, (ulong_t)1), 2069 nxgep->rx_default_block_size, 2070 nxgep->sys_page_mask)); 2071 2072 2073 switch (nxgep->sys_page_sz) { 2074 default: 2075 nxgep->sys_page_sz = 0x1000; 2076 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2077 nxgep->rx_default_block_size = 0x1000; 2078 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2079 break; 2080 case 0x1000: 2081 nxgep->rx_default_block_size = 0x1000; 2082 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2083 break; 2084 case 0x2000: 2085 nxgep->rx_default_block_size = 0x2000; 2086 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2087 break; 2088 case 0x4000: 2089 nxgep->rx_default_block_size = 0x4000; 2090 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 2091 break; 2092 case 0x8000: 2093 nxgep->rx_default_block_size = 0x8000; 2094 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 2095 break; 2096 } 2097 2098 #ifndef USE_RX_BIG_BUF 2099 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 2100 #else 2101 nxgep->rx_default_block_size = 0x2000; 2102 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2103 #endif 2104 /* 2105 * Get the system DMA burst size. 2106 */ 2107 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2108 DDI_DMA_DONTWAIT, 0, 2109 &nxgep->dmasparehandle); 2110 if (ddi_status != DDI_SUCCESS) { 2111 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2112 "ddi_dma_alloc_handle: failed " 2113 " status 0x%x", ddi_status)); 2114 goto nxge_get_soft_properties_exit; 2115 } 2116 2117 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2118 (caddr_t)nxgep->dmasparehandle, 2119 sizeof (nxgep->dmasparehandle), 2120 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2121 DDI_DMA_DONTWAIT, 0, 2122 &cookie, &count); 2123 if (ddi_status != DDI_DMA_MAPPED) { 2124 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2125 "Binding spare handle to find system" 2126 " burstsize failed.")); 2127 ddi_status = DDI_FAILURE; 2128 goto nxge_get_soft_properties_fail1; 2129 } 2130 2131 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2132 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2133 2134 nxge_get_soft_properties_fail1: 2135 ddi_dma_free_handle(&nxgep->dmasparehandle); 2136 2137 nxge_get_soft_properties_exit: 2138 2139 if (ddi_status != DDI_SUCCESS) 2140 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2141 2142 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2143 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2144 return (status); 2145 } 2146 2147 static nxge_status_t 2148 nxge_alloc_mem_pool(p_nxge_t nxgep) 2149 { 2150 nxge_status_t status = NXGE_OK; 2151 2152 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2153 2154 status = nxge_alloc_rx_mem_pool(nxgep); 2155 if (status != NXGE_OK) { 2156 return (NXGE_ERROR); 2157 } 2158 2159 status = nxge_alloc_tx_mem_pool(nxgep); 2160 if (status != NXGE_OK) { 2161 nxge_free_rx_mem_pool(nxgep); 2162 return (NXGE_ERROR); 2163 } 2164 2165 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2166 return (NXGE_OK); 2167 } 2168 2169 static void 2170 nxge_free_mem_pool(p_nxge_t nxgep) 2171 { 2172 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2173 2174 nxge_free_rx_mem_pool(nxgep); 2175 
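	/* Release the Tx pool as well; both pools were set up by nxge_alloc_mem_pool(). */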
nxge_free_tx_mem_pool(nxgep);
2176
2177 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
2178 }
2179
2180 nxge_status_t
2181 nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
2182 {
2183 uint32_t rdc_max;
2184 p_nxge_dma_pt_cfg_t p_all_cfgp;
2185 p_nxge_hw_pt_cfg_t p_cfgp;
2186 p_nxge_dma_pool_t dma_poolp;
2187 p_nxge_dma_common_t *dma_buf_p;
2188 p_nxge_dma_pool_t dma_cntl_poolp;
2189 p_nxge_dma_common_t *dma_cntl_p;
2190 uint32_t *num_chunks; /* per dma */
2191 nxge_status_t status = NXGE_OK;
2192
2193 uint32_t nxge_port_rbr_size;
2194 uint32_t nxge_port_rbr_spare_size;
2195 uint32_t nxge_port_rcr_size;
2196 uint32_t rx_cntl_alloc_size;
2197
2198 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
2199
2200 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2201 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2202 rdc_max = NXGE_MAX_RDCS;
2203
2204 /*
2205 * Allocate memory for the common DMA data structures.
2206 */
2207 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2208 KM_SLEEP);
2209 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2210 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2211
2212 dma_cntl_poolp = (p_nxge_dma_pool_t)
2213 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2214 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2215 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2216
2217 num_chunks = (uint32_t *)KMEM_ZALLOC(
2218 sizeof (uint32_t) * rdc_max, KM_SLEEP);
2219
2220 /*
2221 * Assume that each DMA channel will be configured with
2222 * the default block size.
2223 * RBR block counts are rounded up to a multiple of the batch count (16).
2224 */
2225 nxge_port_rbr_size = p_all_cfgp->rbr_size;
2226 nxge_port_rcr_size = p_all_cfgp->rcr_size;
2227
2228 if (!nxge_port_rbr_size) {
2229 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
2230 }
2231 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
2232 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
2233 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
2234 }
2235
2236 p_all_cfgp->rbr_size = nxge_port_rbr_size;
2237 nxge_port_rbr_spare_size = nxge_rbr_spare_size;
2238
2239 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
2240 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
2241 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
2242 }
2243 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
2244 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2245 "nxge_alloc_rx_mem_pool: RBR size too high %d, "
2246 "set to default %d",
2247 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
2248 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
2249 }
2250 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
2251 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2252 "nxge_alloc_rx_mem_pool: RCR too high %d, "
2253 "set to default %d",
2254 nxge_port_rcr_size, RCR_DEFAULT_MAX));
2255 nxge_port_rcr_size = RCR_DEFAULT_MAX;
2256 }
2257
2258 /*
2259 * N2/NIU limits these allocations: data buffers must come from
2260 * contiguous memory (contig_mem_alloc), which is capped at 4M, and
2261 * control buffers must be allocated with the ddi/dki memory
2262 * allocation functions.
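 *
 * A rough worked example (illustrative numbers, not a fixed
 * configuration): with the default 4 KB receive block size set in
 * nxge_setup_system_dma_pages(), a 4096-entry RBR would ask for
 * 4096 * 4 KB = 16 MB of buffer memory, well past the 4 MB
 * contig_mem_alloc() ceiling checked below; hence the N2/NIU branch
 * clamps the RBR/RCR counts to the NXGE_NIU_CONTIG_* maximums and
 * insists on power-of-2 sizes.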
2263 */ 2264 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2265 if (nxgep->niu_type == N2_NIU) { 2266 nxge_port_rbr_spare_size = 0; 2267 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2268 (!ISP2(nxge_port_rbr_size))) { 2269 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2270 } 2271 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2272 (!ISP2(nxge_port_rcr_size))) { 2273 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2274 } 2275 } 2276 #endif 2277 2278 /* 2279 * Addresses of receive block ring, receive completion ring and the 2280 * mailbox must be all cache-aligned (64 bytes). 2281 */ 2282 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2283 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2284 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2285 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2286 2287 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2288 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2289 "nxge_port_rcr_size = %d " 2290 "rx_cntl_alloc_size = %d", 2291 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2292 nxge_port_rcr_size, 2293 rx_cntl_alloc_size)); 2294 2295 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2296 if (nxgep->niu_type == N2_NIU) { 2297 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2298 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2299 2300 if (!ISP2(rx_buf_alloc_size)) { 2301 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2302 "==> nxge_alloc_rx_mem_pool: " 2303 " must be power of 2")); 2304 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2305 goto nxge_alloc_rx_mem_pool_exit; 2306 } 2307 2308 if (rx_buf_alloc_size > (1 << 22)) { 2309 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2310 "==> nxge_alloc_rx_mem_pool: " 2311 " limit size to 4M")); 2312 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2313 goto nxge_alloc_rx_mem_pool_exit; 2314 } 2315 2316 if (rx_cntl_alloc_size < 0x2000) { 2317 rx_cntl_alloc_size = 0x2000; 2318 } 2319 } 2320 #endif 2321 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2322 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2323 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2324 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2325 2326 dma_poolp->ndmas = p_cfgp->max_rdcs; 2327 dma_poolp->num_chunks = num_chunks; 2328 dma_poolp->buf_allocated = B_TRUE; 2329 nxgep->rx_buf_pool_p = dma_poolp; 2330 dma_poolp->dma_buf_pool_p = dma_buf_p; 2331 2332 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2333 dma_cntl_poolp->buf_allocated = B_TRUE; 2334 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2335 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2336 2337 /* Allocate the receive rings, too. */ 2338 nxgep->rx_rbr_rings = 2339 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2340 nxgep->rx_rbr_rings->rbr_rings = 2341 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2342 nxgep->rx_rcr_rings = 2343 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2344 nxgep->rx_rcr_rings->rcr_rings = 2345 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2346 nxgep->rx_mbox_areas_p = 2347 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2348 nxgep->rx_mbox_areas_p->rxmbox_areas = 2349 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2350 2351 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2352 p_cfgp->max_rdcs; 2353 2354 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2355 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2356 2357 nxge_alloc_rx_mem_pool_exit: 2358 return (status); 2359 } 2360 2361 /* 2362 * nxge_alloc_rxb 2363 * 2364 * Allocate buffers for an RDC. 
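 *
 * Sizing sketch (values are illustrative): the data-buffer pool below
 * is rx_default_block_size * (nxge_port_rbr_size +
 * nxge_port_rbr_spare_size) bytes, so 4 KB blocks and a 4096-entry
 * RBR with no spares come to 16 MB, carved into chunks by
 * nxge_alloc_rx_buf_dma().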
2365 *
2366 * Arguments:
2367 * nxgep
2368 * channel The channel to map into our kernel space.
2369 *
2370 * Notes:
2371 *
2372 * NPI function calls:
2373 *
2374 * NXGE function calls:
2375 *
2376 * Registers accessed:
2377 *
2378 * Context:
2379 *
2380 * Taking apart:
2381 *
2382 * Open questions:
2383 *
2384 */
2385 nxge_status_t
2386 nxge_alloc_rxb(
2387 p_nxge_t nxgep,
2388 int channel)
2389 {
2390 size_t rx_buf_alloc_size;
2391 nxge_status_t status = NXGE_OK;
2392
2393 nxge_dma_common_t **data;
2394 nxge_dma_common_t **control;
2395 uint32_t *num_chunks;
2396
2397 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb"));
2398
2399 /*
2400 * Allocate memory for the receive buffers and descriptor rings.
2401 * Replace these allocation functions with the interface functions
2402 * provided by the partition manager if/when they are available.
2403 */
2404
2405 /*
2406 * Allocate memory for the receive buffer blocks.
2407 */
2408 rx_buf_alloc_size = (nxgep->rx_default_block_size *
2409 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));
2410
2411 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2412 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];
2413
2414 if ((status = nxge_alloc_rx_buf_dma(
2415 nxgep, channel, data, rx_buf_alloc_size,
2416 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
2417 return (status);
2418 }
2419
2420 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): "
2421 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));
2422
2423 /*
2424 * Allocate memory for descriptor rings and mailbox.
2425 */
2426 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2427
2428 if ((status = nxge_alloc_rx_cntl_dma(
2429 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size))
2430 != NXGE_OK) {
2431 nxge_free_rx_cntl_dma(nxgep, *control);
2432 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE;
2433 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks);
2434 return (status);
2435 }
2436
2437 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2438 "<== nxge_alloc_rxb: status 0x%08x", status));
2439
2440 return (status);
2441 }
2442
2443 void
2444 nxge_free_rxb(
2445 p_nxge_t nxgep,
2446 int channel)
2447 {
2448 nxge_dma_common_t *data;
2449 nxge_dma_common_t *control;
2450 uint32_t num_chunks;
2451
2452 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb"));
2453
2454 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2455 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
2456 nxge_free_rx_buf_dma(nxgep, data, num_chunks);
2457
2458 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2459 nxgep->rx_buf_pool_p->num_chunks[channel] = 0;
2460
2461 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2462 nxge_free_rx_cntl_dma(nxgep, control);
2463
2464 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2465
2466 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2467 KMEM_FREE(control, sizeof (nxge_dma_common_t));
2468
2469 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb"));
2470 }
2471
2472 static void
2473 nxge_free_rx_mem_pool(p_nxge_t nxgep)
2474 {
2475 int rdc_max = NXGE_MAX_RDCS;
2476
2477 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
2478
2479 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
2480 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2481 "<== nxge_free_rx_mem_pool "
2482 "(null rx buf pool or buf not allocated"));
2483 return;
2484 }
2485 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
2486 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2487 "<== nxge_free_rx_mem_pool 
" 2488 "(null rx cntl buf pool or cntl buf not allocated")); 2489 return; 2490 } 2491 2492 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2493 sizeof (p_nxge_dma_common_t) * rdc_max); 2494 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2495 2496 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2497 sizeof (uint32_t) * rdc_max); 2498 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2499 sizeof (p_nxge_dma_common_t) * rdc_max); 2500 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2501 2502 nxgep->rx_buf_pool_p = 0; 2503 nxgep->rx_cntl_pool_p = 0; 2504 2505 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2506 sizeof (p_rx_rbr_ring_t) * rdc_max); 2507 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2508 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2509 sizeof (p_rx_rcr_ring_t) * rdc_max); 2510 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2511 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2512 sizeof (p_rx_mbox_t) * rdc_max); 2513 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2514 2515 nxgep->rx_rbr_rings = 0; 2516 nxgep->rx_rcr_rings = 0; 2517 nxgep->rx_mbox_areas_p = 0; 2518 2519 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2520 } 2521 2522 2523 static nxge_status_t 2524 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2525 p_nxge_dma_common_t *dmap, 2526 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2527 { 2528 p_nxge_dma_common_t rx_dmap; 2529 nxge_status_t status = NXGE_OK; 2530 size_t total_alloc_size; 2531 size_t allocated = 0; 2532 int i, size_index, array_size; 2533 boolean_t use_kmem_alloc = B_FALSE; 2534 2535 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2536 2537 rx_dmap = (p_nxge_dma_common_t) 2538 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2539 KM_SLEEP); 2540 2541 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2542 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2543 dma_channel, alloc_size, block_size, dmap)); 2544 2545 total_alloc_size = alloc_size; 2546 2547 #if defined(RX_USE_RECLAIM_POST) 2548 total_alloc_size = alloc_size + alloc_size/4; 2549 #endif 2550 2551 i = 0; 2552 size_index = 0; 2553 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2554 while ((size_index < array_size) && 2555 (alloc_sizes[size_index] < alloc_size)) 2556 size_index++; 2557 if (size_index >= array_size) { 2558 size_index = array_size - 1; 2559 } 2560 2561 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2562 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2563 use_kmem_alloc = B_TRUE; 2564 #if defined(__i386) || defined(__amd64) 2565 size_index = 0; 2566 #endif 2567 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2568 "==> nxge_alloc_rx_buf_dma: " 2569 "Neptune use kmem_alloc() - size_index %d", 2570 size_index)); 2571 } 2572 2573 while ((allocated < total_alloc_size) && 2574 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2575 rx_dmap[i].dma_chunk_index = i; 2576 rx_dmap[i].block_size = block_size; 2577 rx_dmap[i].alength = alloc_sizes[size_index]; 2578 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2579 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2580 rx_dmap[i].dma_channel = dma_channel; 2581 rx_dmap[i].contig_alloc_type = B_FALSE; 2582 rx_dmap[i].kmem_alloc_type = B_FALSE; 2583 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2584 2585 /* 2586 * N2/NIU: data buffers must be contiguous as the driver 2587 * needs to call Hypervisor api to set up 2588 * logical pages. 
2589 */ 2590 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2591 rx_dmap[i].contig_alloc_type = B_TRUE; 2592 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2593 } else if (use_kmem_alloc) { 2594 /* For Neptune, use kmem_alloc */ 2595 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2596 "==> nxge_alloc_rx_buf_dma: " 2597 "Neptune use kmem_alloc()")); 2598 rx_dmap[i].kmem_alloc_type = B_TRUE; 2599 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2600 } 2601 2602 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2603 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2604 "i %d nblocks %d alength %d", 2605 dma_channel, i, &rx_dmap[i], block_size, 2606 i, rx_dmap[i].nblocks, 2607 rx_dmap[i].alength)); 2608 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2609 &nxge_rx_dma_attr, 2610 rx_dmap[i].alength, 2611 &nxge_dev_buf_dma_acc_attr, 2612 DDI_DMA_READ | DDI_DMA_STREAMING, 2613 (p_nxge_dma_common_t)(&rx_dmap[i])); 2614 if (status != NXGE_OK) { 2615 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2616 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2617 "dma %d size_index %d size requested %d", 2618 dma_channel, 2619 size_index, 2620 rx_dmap[i].alength)); 2621 size_index--; 2622 } else { 2623 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2624 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2625 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2626 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2627 "buf_alloc_state %d alloc_type %d", 2628 dma_channel, 2629 &rx_dmap[i], 2630 rx_dmap[i].kaddrp, 2631 rx_dmap[i].alength, 2632 rx_dmap[i].buf_alloc_state, 2633 rx_dmap[i].buf_alloc_type)); 2634 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2635 " alloc_rx_buf_dma allocated rdc %d " 2636 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2637 dma_channel, i, rx_dmap[i].alength, 2638 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2639 rx_dmap[i].kaddrp)); 2640 i++; 2641 allocated += alloc_sizes[size_index]; 2642 } 2643 } 2644 2645 if (allocated < total_alloc_size) { 2646 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2647 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2648 "allocated 0x%x requested 0x%x", 2649 dma_channel, 2650 allocated, total_alloc_size)); 2651 status = NXGE_ERROR; 2652 goto nxge_alloc_rx_mem_fail1; 2653 } 2654 2655 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2656 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2657 "allocated 0x%x requested 0x%x", 2658 dma_channel, 2659 allocated, total_alloc_size)); 2660 2661 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2662 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2663 dma_channel, i)); 2664 *num_chunks = i; 2665 *dmap = rx_dmap; 2666 2667 goto nxge_alloc_rx_mem_exit; 2668 2669 nxge_alloc_rx_mem_fail1: 2670 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2671 2672 nxge_alloc_rx_mem_exit: 2673 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2674 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2675 2676 return (status); 2677 } 2678 2679 /*ARGSUSED*/ 2680 static void 2681 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2682 uint32_t num_chunks) 2683 { 2684 int i; 2685 2686 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2687 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2688 2689 if (dmap == 0) 2690 return; 2691 2692 for (i = 0; i < num_chunks; i++) { 2693 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2694 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2695 i, dmap)); 2696 nxge_dma_free_rx_data_buf(dmap++); 2697 } 2698 2699 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2700 } 2701 2702 /*ARGSUSED*/ 2703 static nxge_status_t 2704 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2705 p_nxge_dma_common_t *dmap, size_t 
size)
2706 {
2707 p_nxge_dma_common_t rx_dmap;
2708 nxge_status_t status = NXGE_OK;
2709
2710 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
2711
2712 rx_dmap = (p_nxge_dma_common_t)
2713 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2714
2715 rx_dmap->contig_alloc_type = B_FALSE;
2716 rx_dmap->kmem_alloc_type = B_FALSE;
2717
2718 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2719 &nxge_desc_dma_attr,
2720 size,
2721 &nxge_dev_desc_dma_acc_attr,
2722 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2723 rx_dmap);
2724 if (status != NXGE_OK) {
2725 goto nxge_alloc_rx_cntl_dma_fail1;
2726 }
2727
2728 *dmap = rx_dmap;
2729 goto nxge_alloc_rx_cntl_dma_exit;
2730
2731 nxge_alloc_rx_cntl_dma_fail1:
2732 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
2733
2734 nxge_alloc_rx_cntl_dma_exit:
2735 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2736 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
2737
2738 return (status);
2739 }
2740
2741 /*ARGSUSED*/
2742 static void
2743 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2744 {
2745 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
2746
2747 if (dmap == 0)
2748 return;
2749
2750 nxge_dma_mem_free(dmap);
2751
2752 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
2753 }
2754
2755 typedef struct {
2756 size_t tx_size;
2757 size_t cr_size;
2758 size_t threshhold;
2759 } nxge_tdc_sizes_t;
2760
2761 static
2762 nxge_status_t
2763 nxge_tdc_sizes(
2764 nxge_t *nxgep,
2765 nxge_tdc_sizes_t *sizes)
2766 {
2767 uint32_t threshhold; /* The bcopy() threshold */
2768 size_t tx_size; /* Transmit buffer size */
2769 size_t cr_size; /* Completion ring size */
2770
2771 /*
2772 * Assume that each DMA channel will be configured with the
2773 * default transmit buffer size for copying transmit data.
2774 * (If a packet is bigger than this, it will not be copied.)
2775 */
2776 if (nxgep->niu_type == N2_NIU) {
2777 threshhold = TX_BCOPY_SIZE;
2778 } else {
2779 threshhold = nxge_bcopy_thresh;
2780 }
2781 tx_size = nxge_tx_ring_size * threshhold;
2782
2783 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t);
2784 cr_size += sizeof (txdma_mailbox_t);
2785
2786 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2787 if (nxgep->niu_type == N2_NIU) {
2788 if (!ISP2(tx_size)) {
2789 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2790 "==> nxge_tdc_sizes: Tx size"
2791 " must be a power of 2"));
2792 return (NXGE_ERROR);
2793 }
2794
2795 if (tx_size > (1 << 22)) {
2796 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2797 "==> nxge_tdc_sizes: Tx size"
2798 " limited to 4M"));
2799 return (NXGE_ERROR);
2800 }
2801
2802 if (cr_size < 0x2000)
2803 cr_size = 0x2000;
2804 }
2805 #endif
2806
2807 sizes->threshhold = threshhold;
2808 sizes->tx_size = tx_size;
2809 sizes->cr_size = cr_size;
2810
2811 return (NXGE_OK);
2812 }
2813 /*
2814 * nxge_alloc_txb
2815 *
2816 * Allocate buffers for a TDC.
2817 *
2818 * Arguments:
2819 * nxgep
2820 * channel The channel to map into our kernel space.
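 *
 * Sizing sketch (based on nxge_tdc_sizes() above): the buffer pool is
 * nxge_tx_ring_size * bcopy-threshold bytes, and the completion ring
 * is nxge_tx_ring_size * sizeof (tx_desc_t) plus one txdma_mailbox_t,
 * padded up to at least 8 KB on N2/NIU.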
2821 *
2822 * Notes:
2823 *
2824 * NPI function calls:
2825 *
2826 * NXGE function calls:
2827 *
2828 * Registers accessed:
2829 *
2830 * Context:
2831 *
2832 * Taking apart:
2833 *
2834 * Open questions:
2835 *
2836 */
2837 nxge_status_t
2838 nxge_alloc_txb(
2839 p_nxge_t nxgep,
2840 int channel)
2841 {
2842 nxge_dma_common_t **dma_buf_p;
2843 nxge_dma_common_t **dma_cntl_p;
2844 uint32_t *num_chunks;
2845 nxge_status_t status = NXGE_OK;
2846
2847 nxge_tdc_sizes_t sizes;
2848
2849 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb"));
2850
2851 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK)
2852 return (NXGE_ERROR);
2853
2854 /*
2855 * Allocate memory for transmit buffers and descriptor rings.
2856 * Replace these allocation functions with the interface functions
2857 * provided by the partition manager Real Soon Now.
2858 */
2859 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2860 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel];
2861
2862 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2863
2864 /*
2865 * Allocate memory for transmit buffers and descriptor rings.
2866 * Replace allocation functions with interface functions provided
2867 * by the partition manager when it is available.
2868 *
2869 * Allocate memory for the transmit buffer pool.
2870 */
2871 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2872 "sizes: tx: %ld, cr:%ld, th:%ld",
2873 sizes.tx_size, sizes.cr_size, sizes.threshhold));
2874
2875 *num_chunks = 0;
2876 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p,
2877 sizes.tx_size, sizes.threshhold, num_chunks);
2878 if (status != NXGE_OK) {
2879 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!");
2880 return (status);
2881 }
2882
2883 /*
2884 * Allocate memory for descriptor rings and mailbox.
2885 */
2886 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p,
2887 sizes.cr_size);
2888 if (status != NXGE_OK) {
2889 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks);
2890 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!");
2891 return (status);
2892 }
2893
2894 return (NXGE_OK);
2895 }
2896
2897 void
2898 nxge_free_txb(
2899 p_nxge_t nxgep,
2900 int channel)
2901 {
2902 nxge_dma_common_t *data;
2903 nxge_dma_common_t *control;
2904 uint32_t num_chunks;
2905
2906 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb"));
2907
2908 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2909 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2910 nxge_free_tx_buf_dma(nxgep, data, num_chunks);
2911
2912 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2913 nxgep->tx_buf_pool_p->num_chunks[channel] = 0;
2914
2915 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2916 nxge_free_tx_cntl_dma(nxgep, control);
2917
2918 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2919
2920 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2921 KMEM_FREE(control, sizeof (nxge_dma_common_t));
2922
2923 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb"));
2924 }
2925
2926 /*
2927 * nxge_alloc_tx_mem_pool
2928 *
2929 * This function allocates all of the per-port TDC control data structures.
2930 * The per-channel (TDC) data structures are allocated when needed.
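 * The pool objects built here carry only the per-channel pointer
 * arrays and counts; the buffers and descriptor rings for a given TDC
 * come later from nxge_alloc_txb() when that channel is brought up.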
2931 * 2932 * Arguments: 2933 * nxgep 2934 * 2935 * Notes: 2936 * 2937 * Context: 2938 * Any domain 2939 */ 2940 nxge_status_t 2941 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2942 { 2943 nxge_hw_pt_cfg_t *p_cfgp; 2944 nxge_dma_pool_t *dma_poolp; 2945 nxge_dma_common_t **dma_buf_p; 2946 nxge_dma_pool_t *dma_cntl_poolp; 2947 nxge_dma_common_t **dma_cntl_p; 2948 uint32_t *num_chunks; /* per dma */ 2949 int tdc_max; 2950 2951 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2952 2953 p_cfgp = &nxgep->pt_config.hw_config; 2954 tdc_max = NXGE_MAX_TDCS; 2955 2956 /* 2957 * Allocate memory for each transmit DMA channel. 2958 */ 2959 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2960 KM_SLEEP); 2961 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2962 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2963 2964 dma_cntl_poolp = (p_nxge_dma_pool_t) 2965 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2966 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2967 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2968 2969 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2970 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2971 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2972 "set to default %d", 2973 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2974 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2975 } 2976 2977 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2978 /* 2979 * N2/NIU has limitation on the descriptor sizes (contiguous 2980 * memory allocation on data buffers to 4M (contig_mem_alloc) 2981 * and little endian for control buffers (must use the ddi/dki mem alloc 2982 * function). The transmit ring is limited to 8K (includes the 2983 * mailbox). 2984 */ 2985 if (nxgep->niu_type == N2_NIU) { 2986 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2987 (!ISP2(nxge_tx_ring_size))) { 2988 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2989 } 2990 } 2991 #endif 2992 2993 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2994 2995 num_chunks = (uint32_t *)KMEM_ZALLOC( 2996 sizeof (uint32_t) * tdc_max, KM_SLEEP); 2997 2998 dma_poolp->ndmas = p_cfgp->tdc.owned; 2999 dma_poolp->num_chunks = num_chunks; 3000 dma_poolp->dma_buf_pool_p = dma_buf_p; 3001 nxgep->tx_buf_pool_p = dma_poolp; 3002 3003 dma_poolp->buf_allocated = B_TRUE; 3004 3005 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 3006 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 3007 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 3008 3009 dma_cntl_poolp->buf_allocated = B_TRUE; 3010 3011 nxgep->tx_rings = 3012 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 3013 nxgep->tx_rings->rings = 3014 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 3015 nxgep->tx_mbox_areas_p = 3016 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 3017 nxgep->tx_mbox_areas_p->txmbox_areas_p = 3018 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 3019 3020 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 3021 3022 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3023 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 3024 tdc_max, dma_poolp->ndmas)); 3025 3026 return (NXGE_OK); 3027 } 3028 3029 nxge_status_t 3030 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 3031 p_nxge_dma_common_t *dmap, size_t alloc_size, 3032 size_t block_size, uint32_t *num_chunks) 3033 { 3034 p_nxge_dma_common_t tx_dmap; 3035 nxge_status_t status = NXGE_OK; 3036 size_t total_alloc_size; 3037 size_t allocated = 0; 3038 int i, size_index, array_size; 3039 3040 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 3041 3042 tx_dmap = (p_nxge_dma_common_t) 3043 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * 
NXGE_DMA_BLOCK, 3044 KM_SLEEP); 3045 3046 total_alloc_size = alloc_size; 3047 i = 0; 3048 size_index = 0; 3049 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3050 while ((size_index < array_size) && 3051 (alloc_sizes[size_index] < alloc_size)) 3052 size_index++; 3053 if (size_index >= array_size) { 3054 size_index = array_size - 1; 3055 } 3056 3057 while ((allocated < total_alloc_size) && 3058 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3059 3060 tx_dmap[i].dma_chunk_index = i; 3061 tx_dmap[i].block_size = block_size; 3062 tx_dmap[i].alength = alloc_sizes[size_index]; 3063 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3064 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3065 tx_dmap[i].dma_channel = dma_channel; 3066 tx_dmap[i].contig_alloc_type = B_FALSE; 3067 tx_dmap[i].kmem_alloc_type = B_FALSE; 3068 3069 /* 3070 * N2/NIU: data buffers must be contiguous as the driver 3071 * needs to call Hypervisor api to set up 3072 * logical pages. 3073 */ 3074 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3075 tx_dmap[i].contig_alloc_type = B_TRUE; 3076 } 3077 3078 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3079 &nxge_tx_dma_attr, 3080 tx_dmap[i].alength, 3081 &nxge_dev_buf_dma_acc_attr, 3082 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3083 (p_nxge_dma_common_t)(&tx_dmap[i])); 3084 if (status != NXGE_OK) { 3085 size_index--; 3086 } else { 3087 i++; 3088 allocated += alloc_sizes[size_index]; 3089 } 3090 } 3091 3092 if (allocated < total_alloc_size) { 3093 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3094 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 3095 "allocated 0x%x requested 0x%x", 3096 dma_channel, 3097 allocated, total_alloc_size)); 3098 status = NXGE_ERROR; 3099 goto nxge_alloc_tx_mem_fail1; 3100 } 3101 3102 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3103 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3104 "allocated 0x%x requested 0x%x", 3105 dma_channel, 3106 allocated, total_alloc_size)); 3107 3108 *num_chunks = i; 3109 *dmap = tx_dmap; 3110 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3111 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3112 *dmap, i)); 3113 goto nxge_alloc_tx_mem_exit; 3114 3115 nxge_alloc_tx_mem_fail1: 3116 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3117 3118 nxge_alloc_tx_mem_exit: 3119 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3120 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3121 3122 return (status); 3123 } 3124 3125 /*ARGSUSED*/ 3126 static void 3127 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3128 uint32_t num_chunks) 3129 { 3130 int i; 3131 3132 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3133 3134 if (dmap == 0) 3135 return; 3136 3137 for (i = 0; i < num_chunks; i++) { 3138 nxge_dma_mem_free(dmap++); 3139 } 3140 3141 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3142 } 3143 3144 /*ARGSUSED*/ 3145 nxge_status_t 3146 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3147 p_nxge_dma_common_t *dmap, size_t size) 3148 { 3149 p_nxge_dma_common_t tx_dmap; 3150 nxge_status_t status = NXGE_OK; 3151 3152 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3153 tx_dmap = (p_nxge_dma_common_t) 3154 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3155 3156 tx_dmap->contig_alloc_type = B_FALSE; 3157 tx_dmap->kmem_alloc_type = B_FALSE; 3158 3159 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3160 &nxge_desc_dma_attr, 3161 size, 3162 &nxge_dev_desc_dma_acc_attr, 3163 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3164 tx_dmap); 3165 if (status != NXGE_OK) { 3166 
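		/*
		 * The _fail1 label below releases the tx_dmap shell
		 * allocated above before the error status is returned.
		 */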
goto nxge_alloc_tx_cntl_dma_fail1; 3167 } 3168 3169 *dmap = tx_dmap; 3170 goto nxge_alloc_tx_cntl_dma_exit; 3171 3172 nxge_alloc_tx_cntl_dma_fail1: 3173 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3174 3175 nxge_alloc_tx_cntl_dma_exit: 3176 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3177 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3178 3179 return (status); 3180 } 3181 3182 /*ARGSUSED*/ 3183 static void 3184 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3185 { 3186 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3187 3188 if (dmap == 0) 3189 return; 3190 3191 nxge_dma_mem_free(dmap); 3192 3193 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3194 } 3195 3196 /* 3197 * nxge_free_tx_mem_pool 3198 * 3199 * This function frees all of the per-port TDC control data structures. 3200 * The per-channel (TDC) data structures are freed when the channel 3201 * is stopped. 3202 * 3203 * Arguments: 3204 * nxgep 3205 * 3206 * Notes: 3207 * 3208 * Context: 3209 * Any domain 3210 */ 3211 static void 3212 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3213 { 3214 int tdc_max = NXGE_MAX_TDCS; 3215 3216 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3217 3218 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3219 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3220 "<== nxge_free_tx_mem_pool " 3221 "(null tx buf pool or buf not allocated")); 3222 return; 3223 } 3224 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3225 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3226 "<== nxge_free_tx_mem_pool " 3227 "(null tx cntl buf pool or cntl buf not allocated")); 3228 return; 3229 } 3230 3231 /* 1. Free the mailboxes. */ 3232 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3233 sizeof (p_tx_mbox_t) * tdc_max); 3234 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3235 3236 nxgep->tx_mbox_areas_p = 0; 3237 3238 /* 2. Free the transmit ring arrays. */ 3239 KMEM_FREE(nxgep->tx_rings->rings, 3240 sizeof (p_tx_ring_t) * tdc_max); 3241 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3242 3243 nxgep->tx_rings = 0; 3244 3245 /* 3. Free the completion ring data structures. */ 3246 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3247 sizeof (p_nxge_dma_common_t) * tdc_max); 3248 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3249 3250 nxgep->tx_cntl_pool_p = 0; 3251 3252 /* 4. Free the data ring data structures. */ 3253 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3254 sizeof (uint32_t) * tdc_max); 3255 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3256 sizeof (p_nxge_dma_common_t) * tdc_max); 3257 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3258 3259 nxgep->tx_buf_pool_p = 0; 3260 3261 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3262 } 3263 3264 /*ARGSUSED*/ 3265 static nxge_status_t 3266 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3267 struct ddi_dma_attr *dma_attrp, 3268 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3269 p_nxge_dma_common_t dma_p) 3270 { 3271 caddr_t kaddrp; 3272 int ddi_status = DDI_SUCCESS; 3273 boolean_t contig_alloc_type; 3274 boolean_t kmem_alloc_type; 3275 3276 contig_alloc_type = dma_p->contig_alloc_type; 3277 3278 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3279 /* 3280 * contig_alloc_type for contiguous memory only allowed 3281 * for N2/NIU. 
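 * (N2/NIU maps data buffers through hypervisor logical pages, which is
 * why those buffers must be physically contiguous; see the N2_NIU
 * checks in nxge_alloc_rx_buf_dma() and nxge_alloc_tx_buf_dma() above.)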
3282 */
3283 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3284 "nxge_dma_mem_alloc: alloc type not allowed (%d)",
3285 dma_p->contig_alloc_type));
3286 return (NXGE_ERROR | NXGE_DDI_FAILED);
3287 }
3288
3289 dma_p->dma_handle = NULL;
3290 dma_p->acc_handle = NULL;
3291 dma_p->kaddrp = dma_p->last_kaddrp = NULL;
3292 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
3293 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
3294 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
3295 if (ddi_status != DDI_SUCCESS) {
3296 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3297 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
3298 return (NXGE_ERROR | NXGE_DDI_FAILED);
3299 }
3300
3301 kmem_alloc_type = dma_p->kmem_alloc_type;
3302
3303 switch (contig_alloc_type) {
3304 case B_FALSE:
3305 switch (kmem_alloc_type) {
3306 case B_FALSE:
3307 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle,
3308 length,
3309 acc_attr_p,
3310 xfer_flags,
3311 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
3312 &dma_p->acc_handle);
3313 if (ddi_status != DDI_SUCCESS) {
3314 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3315 "nxge_dma_mem_alloc: "
3316 "ddi_dma_mem_alloc failed"));
3317 ddi_dma_free_handle(&dma_p->dma_handle);
3318 dma_p->dma_handle = NULL;
3319 return (NXGE_ERROR | NXGE_DDI_FAILED);
3320 }
3321 if (dma_p->alength < length) {
3322 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3323 "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
3324 "< length."));
3325 ddi_dma_mem_free(&dma_p->acc_handle);
3326 ddi_dma_free_handle(&dma_p->dma_handle);
3327 dma_p->acc_handle = NULL;
3328 dma_p->dma_handle = NULL;
3329 return (NXGE_ERROR);
3330 }
3331
3332 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3333 NULL,
3334 kaddrp, dma_p->alength, xfer_flags,
3335 DDI_DMA_DONTWAIT,
3336 0, &dma_p->dma_cookie, &dma_p->ncookies);
3337 if (ddi_status != DDI_DMA_MAPPED) {
3338 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3339 "nxge_dma_mem_alloc: ddi_dma_addr_bind "
3340 "failed "
3341 "(status 0x%x ncookies %d.)", ddi_status,
3342 dma_p->ncookies));
3343 if (dma_p->acc_handle) {
3344 ddi_dma_mem_free(&dma_p->acc_handle);
3345 dma_p->acc_handle = NULL;
3346 }
3347 ddi_dma_free_handle(&dma_p->dma_handle);
3348 dma_p->dma_handle = NULL;
3349 return (NXGE_ERROR | NXGE_DDI_FAILED);
3350 }
3351
3352 if (dma_p->ncookies != 1) {
3353 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3354 "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3355 "> 1 cookie"
3356 "(status 0x%x ncookies %d.)", ddi_status,
3357 dma_p->ncookies));
3358 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3359 if (dma_p->acc_handle) {
3360 ddi_dma_mem_free(&dma_p->acc_handle);
3361 dma_p->acc_handle = NULL;
3362 }
3363 ddi_dma_free_handle(&dma_p->dma_handle);
3364 dma_p->dma_handle = NULL;
3365 dma_p->acc_handle = NULL;
3366 return (NXGE_ERROR);
3367 }
3368 break;
3369
3370 case B_TRUE:
3371 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP);
3372 if (kaddrp == NULL) {
3373 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3374 "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
3375 "kmem alloc failed"));
3376 return (NXGE_ERROR);
3377 }
3378
3379 dma_p->alength = length;
3380 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3381 NULL, kaddrp, dma_p->alength, xfer_flags,
3382 DDI_DMA_DONTWAIT, 0,
3383 &dma_p->dma_cookie, &dma_p->ncookies);
3384 if (ddi_status != DDI_DMA_MAPPED) {
3385 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3386 "nxge_dma_mem_alloc:ddi_dma_addr_bind: "
3387 "(kmem_alloc) failed kaddrp $%p length %d "
3388 "(status 0x%x (%d) ncookies %d.)",
3389 kaddrp, length,
3390 ddi_status, ddi_status, dma_p->ncookies));
3391 KMEM_FREE(kaddrp, length);
3392
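/*
 * Clear the handle and pointer fields so the caller sees a fully
 * reset dma_p after this bind failure.
 */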
dma_p->acc_handle = NULL;
3393 ddi_dma_free_handle(&dma_p->dma_handle);
3394 dma_p->dma_handle = NULL;
3395 dma_p->kaddrp = NULL;
3396 return (NXGE_ERROR | NXGE_DDI_FAILED);
3397 }
3398
3399 if (dma_p->ncookies != 1) {
3400 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3401 "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3402 "(kmem_alloc) > 1 cookie"
3403 "(status 0x%x ncookies %d.)", ddi_status,
3404 dma_p->ncookies));
3405 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3406 KMEM_FREE(kaddrp, length);
3407 ddi_dma_free_handle(&dma_p->dma_handle);
3408 dma_p->dma_handle = NULL;
3409 dma_p->acc_handle = NULL;
3410 dma_p->kaddrp = NULL;
3411 return (NXGE_ERROR);
3412 }
3413
3414 dma_p->kaddrp = kaddrp;
3415
3416 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
3417 "nxge_dma_mem_alloc: kmem_alloc dmap $%p "
3418 "kaddr $%p alength %d",
3419 dma_p,
3420 kaddrp,
3421 dma_p->alength));
3422 break;
3423 }
3424 break;
3425
3426 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3427 case B_TRUE:
3428 kaddrp = (caddr_t)contig_mem_alloc(length);
3429 if (kaddrp == NULL) {
3430 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3431 "nxge_dma_mem_alloc:contig_mem_alloc failed."));
3432 ddi_dma_free_handle(&dma_p->dma_handle);
3433 return (NXGE_ERROR | NXGE_DDI_FAILED);
3434 }
3435
3436 dma_p->alength = length;
3437 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
3438 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
3439 &dma_p->dma_cookie, &dma_p->ncookies);
3440 if (ddi_status != DDI_DMA_MAPPED) {
3441 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3442 "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
3443 "(status 0x%x ncookies %d.)", ddi_status,
3444 dma_p->ncookies));
3445
3446 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3447 "==> nxge_dma_mem_alloc: (not mapped)"
3448 "length %lu (0x%x) "
3449 "free contig kaddrp $%p "
3450 "va_to_pa $%p",
3451 length, length,
3452 kaddrp,
3453 va_to_pa(kaddrp)));
3454
3455
3456 contig_mem_free((void *)kaddrp, length);
3457 ddi_dma_free_handle(&dma_p->dma_handle);
3458
3459 dma_p->dma_handle = NULL;
3460 dma_p->acc_handle = NULL;
3461 dma_p->alength = NULL;
3462 dma_p->kaddrp = NULL;
3463
3464 return (NXGE_ERROR | NXGE_DDI_FAILED);
3465 }
3466
3467 if (dma_p->ncookies != 1 ||
3468 (dma_p->dma_cookie.dmac_laddress == NULL)) {
3469 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3470 "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
3471 "cookie or "
3472 "dmac_laddress is NULL $%p size %d "
3473 " (status 0x%x ncookies %d.)",
3474 ddi_status,
3475 dma_p->dma_cookie.dmac_laddress,
3476 dma_p->dma_cookie.dmac_size,
3477 dma_p->ncookies));
3478
3479 contig_mem_free((void *)kaddrp, length);
3480 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3481 ddi_dma_free_handle(&dma_p->dma_handle);
3482
3483 dma_p->alength = 0;
3484 dma_p->dma_handle = NULL;
3485 dma_p->acc_handle = NULL;
3486 dma_p->kaddrp = NULL;
3487
3488 return (NXGE_ERROR | NXGE_DDI_FAILED);
3489 }
3490 break;
3491
3492 #else
3493 case B_TRUE:
3494 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3495 "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
3496 return (NXGE_ERROR | NXGE_DDI_FAILED);
3497 #endif
3498 }
3499
3500 dma_p->kaddrp = kaddrp;
3501 dma_p->last_kaddrp = (unsigned char *)kaddrp +
3502 dma_p->alength - RXBUF_64B_ALIGNED;
3503 #if defined(__i386)
3504 dma_p->ioaddr_pp =
3505 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
3506 #else
3507 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3508 #endif
3509 dma_p->last_ioaddr_pp =
3510 #if defined(__i386)
3511 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
3512 #else
3513
(unsigned char *)dma_p->dma_cookie.dmac_laddress +
3514 #endif
3515 dma_p->alength - RXBUF_64B_ALIGNED;
3516
3517 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
3518
3519 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3520 dma_p->orig_ioaddr_pp =
3521 (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3522 dma_p->orig_alength = length;
3523 dma_p->orig_kaddrp = kaddrp;
3524 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
3525 #endif
3526
3527 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
3528 "dma buffer allocated: dma_p $%p "
3529 "return dmac_ladress from cookie $%p cookie dmac_size %d "
3530 "dma_p->ioaddr_p $%p "
3531 "dma_p->orig_ioaddr_p $%p "
3532 "orig_vatopa $%p "
3533 "alength %d (0x%x) "
3534 "kaddrp $%p "
3535 "length %d (0x%x)",
3536 dma_p,
3537 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
3538 dma_p->ioaddr_pp,
3539 dma_p->orig_ioaddr_pp,
3540 dma_p->orig_vatopa,
3541 dma_p->alength, dma_p->alength,
3542 kaddrp,
3543 length, length));
3544
3545 return (NXGE_OK);
3546 }
3547
3548 static void
3549 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
3550 {
3551 if (dma_p->dma_handle != NULL) {
3552 if (dma_p->ncookies) {
3553 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3554 dma_p->ncookies = 0;
3555 }
3556 ddi_dma_free_handle(&dma_p->dma_handle);
3557 dma_p->dma_handle = NULL;
3558 }
3559
3560 if (dma_p->acc_handle != NULL) {
3561 ddi_dma_mem_free(&dma_p->acc_handle);
3562 dma_p->acc_handle = NULL;
3563 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3564 }
3565
3566 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3567 if (dma_p->contig_alloc_type &&
3568 dma_p->orig_kaddrp && dma_p->orig_alength) {
3569 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
3570 "kaddrp $%p (orig_kaddrp $%p)"
3571 "mem type %d "
3572 "orig_alength %d "
3573 "alength 0x%x (%d)",
3574 dma_p->kaddrp,
3575 dma_p->orig_kaddrp,
3576 dma_p->contig_alloc_type,
3577 dma_p->orig_alength,
3578 dma_p->alength, dma_p->alength));
3579
3580 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
3581 dma_p->orig_alength = NULL;
3582 dma_p->orig_kaddrp = NULL;
3583 dma_p->contig_alloc_type = B_FALSE;
3584 }
3585 #endif
3586 dma_p->kaddrp = NULL;
3587 dma_p->alength = NULL;
3588 }
3589
3590 static void
3591 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
3592 {
3593 uint64_t kaddr;
3594 uint32_t buf_size;
3595
3596 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));
3597
3598 if (dma_p->dma_handle != NULL) {
3599 if (dma_p->ncookies) {
3600 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3601 dma_p->ncookies = 0;
3602 }
3603 ddi_dma_free_handle(&dma_p->dma_handle);
3604 dma_p->dma_handle = NULL;
3605 }
3606
3607 if (dma_p->acc_handle != NULL) {
3608 ddi_dma_mem_free(&dma_p->acc_handle);
3609 dma_p->acc_handle = NULL;
3610 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3611 }
3612
3613 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3614 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
3615 dma_p,
3616 dma_p->buf_alloc_state));
3617
3618 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
3619 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3620 "<== nxge_dma_free_rx_data_buf: "
3621 "outstanding data buffers"));
3622 return;
3623 }
3624
3625 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3626 if (dma_p->contig_alloc_type &&
3627 dma_p->orig_kaddrp && dma_p->orig_alength) {
3628 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
3629 "kaddrp $%p (orig_kaddrp $%p)"
3630 "mem type %d "
3631 "orig_alength %d "
3632 "alength 0x%x (%d)",
3633 dma_p->kaddrp,
3634 dma_p->orig_kaddrp,
3635 dma_p->contig_alloc_type,
3636 dma_p->orig_alength,
3637 dma_p->alength, dma_p->alength));
3638
3639 kaddr = (uint64_t)dma_p->orig_kaddrp;
3640 buf_size = dma_p->orig_alength;
3641 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
3642 dma_p->orig_alength = NULL;
3643 dma_p->orig_kaddrp = NULL;
3644 dma_p->contig_alloc_type = B_FALSE;
3645 dma_p->kaddrp = NULL;
3646 dma_p->alength = NULL;
3647 return;
3648 }
3649 #endif
3650
3651 if (dma_p->kmem_alloc_type) {
3652 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3653 "nxge_dma_free_rx_data_buf: free kmem "
3654 "kaddrp $%p (orig_kaddrp $%p)"
3655 "alloc type %d "
3656 "orig_alength %d "
3657 "alength 0x%x (%d)",
3658 dma_p->kaddrp,
3659 dma_p->orig_kaddrp,
3660 dma_p->kmem_alloc_type,
3661 dma_p->orig_alength,
3662 dma_p->alength, dma_p->alength));
3663 #if defined(__i386)
3664 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
3665 #else
3666 kaddr = (uint64_t)dma_p->kaddrp;
3667 #endif
3668 buf_size = dma_p->orig_alength;
3669 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3670 "nxge_dma_free_rx_data_buf: free dmap $%p "
3671 "kaddr $%p buf_size %d",
3672 dma_p,
3673 kaddr, buf_size));
3674 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
3675 dma_p->alength = 0;
3676 dma_p->orig_alength = 0;
3677 dma_p->kaddrp = NULL;
3678 dma_p->kmem_alloc_type = B_FALSE;
3679 }
3680
3681 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
3682 }
3683
3684 /*
3685 * nxge_m_start() -- start transmitting and receiving.
3686 *
3687 * This function is called by the MAC layer when the first
3688 * stream is opened, to prepare the hardware for transmitting
3689 * and receiving packets.
3690 */
3691 static int
3692 nxge_m_start(void *arg)
3693 {
3694 p_nxge_t nxgep = (p_nxge_t)arg;
3695
3696 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3697
3698 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) {
3699 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
3700 }
3701
3702 MUTEX_ENTER(nxgep->genlock);
3703 if (nxge_init(nxgep) != NXGE_OK) {
3704 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3705 "<== nxge_m_start: initialization failed"));
3706 MUTEX_EXIT(nxgep->genlock);
3707 return (EIO);
3708 }
3709
3710 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
3711 goto nxge_m_start_exit;
3712 /*
3713 * Start the timer to check for system errors and tx hangs.
3714 */
3715 if (!isLDOMguest(nxgep))
3716 nxgep->nxge_timerid = nxge_start_timer(nxgep,
3717 nxge_check_hw_state, NXGE_CHECK_TIMER);
3718 #if defined(sun4v)
3719 else
3720 nxge_hio_start_timer(nxgep);
3721 #endif
3722
3723 nxgep->link_notify = B_TRUE;
3724
3725 nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3726
3727 nxge_m_start_exit:
3728 MUTEX_EXIT(nxgep->genlock);
3729 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3730 return (0);
3731 }
3732
3733
3734 static boolean_t
3735 nxge_check_groups_stopped(p_nxge_t nxgep)
3736 {
3737 int i;
3738
3739 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
3740 if (nxgep->rx_hio_groups[i].started)
3741 return (B_FALSE);
3742 }
3743
3744 return (B_TRUE);
3745 }
3746
3747 /*
3748 * nxge_m_stop(): stop transmitting and receiving.
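 *
 * All receive groups must already be quiesced (see
 * nxge_check_groups_stopped() above); the driver then cancels the
 * wellness timer and calls nxge_uninit() under genlock before marking
 * the MAC stopped.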
3749 */ 3750 static void 3751 nxge_m_stop(void *arg) 3752 { 3753 p_nxge_t nxgep = (p_nxge_t)arg; 3754 boolean_t groups_stopped; 3755 3756 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3757 3758 groups_stopped = nxge_check_groups_stopped(nxgep); 3759 #ifdef later 3760 ASSERT(groups_stopped == B_FALSE); 3761 #endif 3762 3763 if (!groups_stopped) { 3764 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n", 3765 nxgep->instance); 3766 return; 3767 } 3768 3769 MUTEX_ENTER(nxgep->genlock); 3770 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3771 3772 if (nxgep->nxge_timerid) { 3773 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3774 nxgep->nxge_timerid = 0; 3775 } 3776 3777 nxge_uninit(nxgep); 3778 3779 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3780 3781 MUTEX_EXIT(nxgep->genlock); 3782 3783 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3784 } 3785 3786 static int 3787 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3788 { 3789 p_nxge_t nxgep = (p_nxge_t)arg; 3790 struct ether_addr addrp; 3791 3792 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3793 "==> nxge_m_multicst: add %d", add)); 3794 3795 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3796 if (add) { 3797 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3798 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3799 "<== nxge_m_multicst: add multicast failed")); 3800 return (EINVAL); 3801 } 3802 } else { 3803 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3804 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3805 "<== nxge_m_multicst: del multicast failed")); 3806 return (EINVAL); 3807 } 3808 } 3809 3810 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3811 3812 return (0); 3813 } 3814 3815 static int 3816 nxge_m_promisc(void *arg, boolean_t on) 3817 { 3818 p_nxge_t nxgep = (p_nxge_t)arg; 3819 3820 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3821 "==> nxge_m_promisc: on %d", on)); 3822 3823 if (nxge_set_promisc(nxgep, on)) { 3824 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3825 "<== nxge_m_promisc: set promisc failed")); 3826 return (EINVAL); 3827 } 3828 3829 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3830 "<== nxge_m_promisc: on %d", on)); 3831 3832 return (0); 3833 } 3834 3835 static void 3836 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3837 { 3838 p_nxge_t nxgep = (p_nxge_t)arg; 3839 struct iocblk *iocp; 3840 boolean_t need_privilege; 3841 int err; 3842 int cmd; 3843 3844 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3845 3846 iocp = (struct iocblk *)mp->b_rptr; 3847 iocp->ioc_error = 0; 3848 need_privilege = B_TRUE; 3849 cmd = iocp->ioc_cmd; 3850 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3851 switch (cmd) { 3852 default: 3853 miocnak(wq, mp, 0, EINVAL); 3854 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3855 return; 3856 3857 case LB_GET_INFO_SIZE: 3858 case LB_GET_INFO: 3859 case LB_GET_MODE: 3860 need_privilege = B_FALSE; 3861 break; 3862 case LB_SET_MODE: 3863 break; 3864 3865 3866 case NXGE_GET_MII: 3867 case NXGE_PUT_MII: 3868 case NXGE_GET64: 3869 case NXGE_PUT64: 3870 case NXGE_GET_TX_RING_SZ: 3871 case NXGE_GET_TX_DESC: 3872 case NXGE_TX_SIDE_RESET: 3873 case NXGE_RX_SIDE_RESET: 3874 case NXGE_GLOBAL_RESET: 3875 case NXGE_RESET_MAC: 3876 case NXGE_TX_REGS_DUMP: 3877 case NXGE_RX_REGS_DUMP: 3878 case NXGE_INT_REGS_DUMP: 3879 case NXGE_VIR_INT_REGS_DUMP: 3880 case NXGE_PUT_TCAM: 3881 case NXGE_GET_TCAM: 3882 case NXGE_RTRACE: 3883 case NXGE_RDUMP: 3884 3885 need_privilege = B_FALSE; 3886 break; 3887 case NXGE_INJECT_ERR: 3888 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3889 nxge_err_inject(nxgep, wq, mp); 3890 
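		/*
		 * Note: need_privilege is still B_TRUE here, so the
		 * secpolicy_net_config() check below also applies to
		 * NXGE_INJECT_ERR.
		 */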
static void
nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	p_nxge_t	nxgep = (p_nxge_t)arg;
	struct iocblk	*iocp;
	boolean_t	need_privilege;
	int		err;
	int		cmd;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));

	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
		return;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;
	case LB_SET_MODE:
		break;

	case NXGE_GET_MII:
	case NXGE_PUT_MII:
	case NXGE_GET64:
	case NXGE_PUT64:
	case NXGE_GET_TX_RING_SZ:
	case NXGE_GET_TX_DESC:
	case NXGE_TX_SIDE_RESET:
	case NXGE_RX_SIDE_RESET:
	case NXGE_GLOBAL_RESET:
	case NXGE_RESET_MAC:
	case NXGE_TX_REGS_DUMP:
	case NXGE_RX_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
	case NXGE_VIR_INT_REGS_DUMP:
	case NXGE_PUT_TCAM:
	case NXGE_GET_TCAM:
	case NXGE_RTRACE:
	case NXGE_RDUMP:
		need_privilege = B_FALSE;
		break;
	case NXGE_INJECT_ERR:
		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
		nxge_err_inject(nxgep, wq, mp);
		break;
	}

	if (need_privilege) {
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_ioctl: no priv"));
			return;
		}
	}

	switch (cmd) {
	case LB_GET_MODE:
	case LB_SET_MODE:
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
		break;

	case NXGE_GET_MII:
	case NXGE_PUT_MII:
	case NXGE_PUT_TCAM:
	case NXGE_GET_TCAM:
	case NXGE_GET64:
	case NXGE_PUT64:
	case NXGE_GET_TX_RING_SZ:
	case NXGE_GET_TX_DESC:
	case NXGE_TX_SIDE_RESET:
	case NXGE_RX_SIDE_RESET:
	case NXGE_GLOBAL_RESET:
	case NXGE_RESET_MAC:
	case NXGE_TX_REGS_DUMP:
	case NXGE_RX_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
	case NXGE_VIR_INT_REGS_DUMP:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_ioctl: cmd 0x%x", cmd));
		nxge_hw_ioctl(nxgep, wq, mp, iocp);
		break;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
}

extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);

void
nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory)
{
	p_nxge_mmac_stats_t	mmac_stats;
	int			i;
	nxge_mmac_t		*mmac_info;

	mmac_info = &nxgep->nxge_mmac_info;

	mmac_stats = &nxgep->statsp->mmac_stats;
	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;

	for (i = 0; i < ETHERADDRL; i++) {
		if (factory) {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->factory_mac_pool[slot][
			    (ETHERADDRL-1) - i];
		} else {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->mac_pool[slot].addr[
			    (ETHERADDRL - 1) - i];
		}
	}
}
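/*
 * nxge_altmac_set() below packs the six bytes of the MAC address into
 * three 16-bit register words, most-significant word first: maddr[0..1]
 * into w2, maddr[2..3] into w1 and maddr[4..5] into w0. For example,
 * for the (hypothetical) address 00:14:4f:1c:2a:3b this yields
 * w2 = 0x0014, w1 = 0x4f1c and w0 = 0x2a3b. A minimal sketch of just
 * the packing step (illustrative only):
 */
#if 0
static void
xx_pack_mac(const uint8_t *maddr, npi_mac_addr_t *altmac)
{
	/* The high-order byte lands in the upper half of each word. */
	altmac->w2 = ((uint16_t)maddr[0] << 8) | maddr[1];
	altmac->w1 = ((uint16_t)maddr[2] << 8) | maddr[3];
	altmac->w0 = ((uint16_t)maddr[4] << 8) | maddr[5];
}
#endif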
/*
 * nxge_altmac_set() -- Set an alternate MAC address
 */
static int
nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot,
    int rdctbl, boolean_t usetbl)
{
	uint8_t			addrn;
	uint8_t			portn;
	npi_mac_addr_t		altmac;
	hostinfo_t		mac_rdc;
	p_nxge_class_pt_cfg_t	clscfgp;

	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);

	portn = nxgep->mac.portnum;
	addrn = (uint8_t)slot - 1;

	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET,
	    nxgep->function_num, addrn, &altmac) != NPI_SUCCESS)
		return (EIO);

	/*
	 * Set the rdc table number for the host info entry
	 * for this mac address slot.
	 */
	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
	mac_rdc.value = 0;
	if (usetbl)
		mac_rdc.bits.w0.rdc_tbl_num = rdctbl;
	else
		mac_rdc.bits.w0.rdc_tbl_num =
		    clscfgp->mac_host_info[addrn].rdctbl;
	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;

	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
		return (EIO);
	}

	/*
	 * Enable comparison with the alternate MAC address.
	 * While the first alternate addr is enabled by bit 1 of register
	 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
	 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
	 * accordingly before calling npi_mac_altaddr_entry.
	 */
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (npi_mac_altaddr_enable(nxgep->npi_handle,
	    nxgep->function_num, addrn) != NPI_SUCCESS) {
		return (EIO);
	}

	return (0);
}
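/*
 * The slot-to-enable-bit conversion at the end of nxge_altmac_set()
 * also appears in nxge_m_mmac_remove() further below: on the XMAC ports
 * (0 and 1) alternate address slot n is enabled by compare-enable bit
 * n - 1, while the BMAC ports use bit n. The same conversion as a
 * stand-alone helper (illustrative sketch only):
 */
#if 0
static uint8_t
xx_slot_to_addrn(uint8_t portn, int slot)
{
	/* XMAC: bit (slot - 1); BMAC: bit slot. */
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		return ((uint8_t)(slot - 1));
	return ((uint8_t)slot);
}
#endif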
/*
 * nxge_m_mmac_add_g() - find an unused address slot, set the address
 * value to the one specified, enable the port to start filtering on
 * the new MAC address. Returns 0 on success.
 */
int
nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
    boolean_t usetbl)
{
	p_nxge_t	nxgep = arg;
	int		slot;
	nxge_mmac_t	*mmac_info;
	int		err;
	nxge_status_t	status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (mmac_info->naddrfree == 0) {
		mutex_exit(nxgep->genlock);
		return (ENOSPC);
	}

	/*
	 * Search for the first available slot. Because naddrfree
	 * is not zero, we are guaranteed to find one.
	 * Each of the first two ports of Neptune has 16 alternate
	 * MAC slots but only the first 7 (of 15) slots have assigned factory
	 * MAC addresses. We first search among the slots without bundled
	 * factory MACs. If we fail to find one in that range, then we
	 * search the slots with bundled factory MACs. A factory MAC
	 * will be wasted while the slot is used with a user MAC address.
	 * But the slot could be used by a factory MAC again after calling
	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
	 */
	for (slot = 0; slot <= mmac_info->num_mmac; slot++) {
		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
			break;
	}

	ASSERT(slot <= mmac_info->num_mmac);

	if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl,
	    usetbl)) != 0) {
		mutex_exit(nxgep->genlock);
		return (err);
	}

	bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
	mmac_info->naddrfree--;
	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);

	mutex_exit(nxgep->genlock);
	return (0);
}
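/*
 * nxge_m_mmac_add_g() above and nxge_m_mmac_remove() below drive a small
 * state machine on each slot's flags; MMAC_VENDOR_ADDR set while
 * MMAC_SLOT_USED is clear means "this slot still has a factory address
 * available". The transitions, extracted as an illustrative sketch:
 */
#if 0
/* nxge_m_mmac_add_g(): slot now filters a user-supplied address. */
pool[slot].flags |= MMAC_SLOT_USED;
pool[slot].flags &= ~MMAC_VENDOR_ADDR;

/* nxge_m_mmac_remove(): slot is free again ... */
pool[slot].flags &= ~MMAC_SLOT_USED;
if (slot <= mmac_info->num_factory_mmac)
	pool[slot].flags |= MMAC_VENDOR_ADDR;	/* ... factory MAC usable */
#endif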
/*
 * Remove the specified mac address and update the HW not to filter
 * the mac address anymore.
 */
int
nxge_m_mmac_remove(void *arg, int slot)
{
	p_nxge_t	nxgep = arg;
	nxge_mmac_t	*mmac_info;
	uint8_t		addrn;
	uint8_t		portn;
	int		err = 0;
	nxge_status_t	status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}

	portn = nxgep->mac.portnum;
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
		    == NPI_SUCCESS) {
			mmac_info->naddrfree++;
			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
			/*
			 * Regardless of whether the MAC we just stopped
			 * filtering is a user addr or a factory addr, we
			 * must set the MMAC_VENDOR_ADDR flag if this slot
			 * has an associated factory MAC to indicate that
			 * a factory MAC is available.
			 */
			if (slot <= mmac_info->num_factory_mmac) {
				mmac_info->mac_pool[slot].flags
				    |= MMAC_VENDOR_ADDR;
			}
			/*
			 * Clear mac_pool[slot].addr so that kstat shows 0
			 * alternate MAC address if the slot is not used.
			 * (But nxge_m_mmac_get returns the factory MAC even
			 * when the slot is not used!)
			 */
			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
		} else {
			err = EIO;
		}
	} else {
		err = EINVAL;
	}

	mutex_exit(nxgep->genlock);
	return (err);
}

/*
 * The callback to query all the factory addresses. naddr must be the same as
 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and
 * mcm_addr is the space allocated to keep all the addresses, whose size is
 * naddr * MAXMACADDRLEN.
 */
static void
nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr)
{
	nxge_t		*nxgep = arg;
	nxge_mmac_t	*mmac_info;
	int		i;

	mutex_enter(nxgep->genlock);

	mmac_info = &nxgep->nxge_mmac_info;
	ASSERT(naddr == mmac_info->num_factory_mmac);

	for (i = 0; i < naddr; i++) {
		bcopy(mmac_info->factory_mac_pool[i + 1],
		    addr + i * MAXMACADDRLEN, ETHERADDRL);
	}

	mutex_exit(nxgep->genlock);
}

static boolean_t
nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	nxge_t		*nxgep = arg;
	uint32_t	*txflags = cap_data;

	switch (cap) {
	case MAC_CAPAB_HCKSUM:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload));
		if (nxge_cksum_offload <= 1) {
			*txflags = HCKSUM_INET_PARTIAL;
		}
		break;

	case MAC_CAPAB_MULTIFACTADDR: {
		mac_capab_multifactaddr_t	*mfacp = cap_data;

		mutex_enter(nxgep->genlock);
		mfacp->mcm_naddr = nxgep->nxge_mmac_info.num_factory_mmac;
		mfacp->mcm_getaddr = nxge_m_getfactaddr;
		mutex_exit(nxgep->genlock);
		break;
	}

	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *cap_lso = cap_data;

		if (nxgep->soft_lso_enable) {
			if (nxge_cksum_offload <= 1) {
				cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
				if (nxge_lso_max > NXGE_LSO_MAXLEN) {
					nxge_lso_max = NXGE_LSO_MAXLEN;
				}
				cap_lso->lso_basic_tcp_ipv4.lso_max =
				    nxge_lso_max;
			}
			break;
		} else {
			return (B_FALSE);
		}
	}

	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t	*cap_rings = cap_data;
		p_nxge_hw_pt_cfg_t	p_cfgp = &nxgep->pt_config.hw_config;

		mutex_enter(nxgep->genlock);
		if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
			cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC;
			cap_rings->mr_rnum = p_cfgp->max_rdcs;
			cap_rings->mr_rget = nxge_fill_ring;
			cap_rings->mr_gnum = p_cfgp->max_rdc_grpids;
			cap_rings->mr_gget = nxge_hio_group_get;
			cap_rings->mr_gaddring = nxge_group_add_ring;
			cap_rings->mr_gremring = nxge_group_rem_ring;

			NXGE_DEBUG_MSG((nxgep, RX_CTL,
			    "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]",
			    p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids));
		} else {
			cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC;
			cap_rings->mr_rnum = p_cfgp->tdc.count;
			cap_rings->mr_rget = nxge_fill_ring;
			if (isLDOMservice(nxgep)) {
				/* share capable */
				/* Do not report the default ring: hence -1 */
				cap_rings->mr_gnum =
				    NXGE_MAX_TDC_GROUPS / nxgep->nports - 1;
			} else {
				cap_rings->mr_gnum = 0;
			}

			cap_rings->mr_gget = nxge_hio_group_get;
			cap_rings->mr_gaddring = nxge_group_add_ring;
			cap_rings->mr_gremring = nxge_group_rem_ring;

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_m_getcapab: tx rings # of rings %d",
			    p_cfgp->tdc.count));
		}
		mutex_exit(nxgep->genlock);
		break;
	}

#if defined(sun4v)
	case MAC_CAPAB_SHARES: {
		mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data;

		/*
		 * Only the service domain driver responds to
		 * this capability request.
		 */
		mutex_enter(nxgep->genlock);
		if (isLDOMservice(nxgep)) {
			mshares->ms_snum = 3;
			mshares->ms_handle = (void *)nxgep;
			mshares->ms_salloc = nxge_hio_share_alloc;
			mshares->ms_sfree = nxge_hio_share_free;
			mshares->ms_sadd = nxge_hio_share_add_group;
			mshares->ms_sremove = nxge_hio_share_rem_group;
			mshares->ms_squery = nxge_hio_share_query;
			mshares->ms_sbind = nxge_hio_share_bind;
			mshares->ms_sunbind = nxge_hio_share_unbind;
			mutex_exit(nxgep->genlock);
		} else {
			mutex_exit(nxgep->genlock);
			return (B_FALSE);
		}
		break;
	}
#endif
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

static boolean_t
nxge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * callback functions for set/get of properties
 */
static int
nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	nxge_t		*nxgep = barg;
	p_nxge_param_t	param_arr;
	p_nxge_stats_t	statsp;
	int		err = 0;
	uint8_t		val;
	uint32_t	cur_mtu, new_mtu, old_framesize;
	link_flowctrl_t	fl;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));
	param_arr = nxgep->param_arr;
	statsp = nxgep->statsp;
	mutex_enter(nxgep->genlock);
	if (statsp->port_stats.lb_mode != nxge_lb_normal &&
	    nxge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: loopback mode: read only"));
		mutex_exit(nxgep->genlock);
		return (EBUSY);
	}

	val = *(uint8_t *)pr_val;
	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		nxgep->param_en_1000fdx = val;
		param_arr[param_anar_1000fdx].value = val;

		goto reprogram;

	case MAC_PROP_EN_100FDX_CAP:
		nxgep->param_en_100fdx = val;
		param_arr[param_anar_100fdx].value = val;

		goto reprogram;

	case MAC_PROP_EN_10FDX_CAP:
		nxgep->param_en_10fdx = val;
		param_arr[param_anar_10fdx].value = val;

		goto reprogram;

	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = EINVAL; /* cannot set read-only properties */
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: read only property %d",
		    pr_num));
		break;

	case MAC_PROP_AUTONEG:
		param_arr[param_autoneg].value = val;

		goto reprogram;

	case MAC_PROP_MTU:
		cur_mtu = nxgep->mac.default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
		    new_mtu, nxgep->mac.is_jumbo));

		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}
		if (new_mtu < NXGE_DEFAULT_MTU ||
		    new_mtu > NXGE_MAXIMUM_MTU) {
			err = EINVAL;
			break;
		}

		if ((new_mtu > NXGE_DEFAULT_MTU) &&
		    !nxgep->mac.is_jumbo) {
			err = EINVAL;
			break;
		}

		old_framesize = (uint32_t)nxgep->mac.maxframesize;
		nxgep->mac.maxframesize = (uint16_t)
		    (new_mtu + NXGE_EHEADER_VLAN_CRC);
		if (nxge_mac_set_framesize(nxgep)) {
			nxgep->mac.maxframesize =
			    (uint16_t)old_framesize;
			err = EINVAL;
			break;
		}

		err = mac_maxsdu_update(nxgep->mach, new_mtu);
		if (err) {
			nxgep->mac.maxframesize =
			    (uint16_t)old_framesize;
			err = EINVAL;
			break;
		}

		nxgep->mac.default_mtu = new_mtu;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: set MTU: %d maxframe %d",
		    new_mtu, nxgep->mac.maxframesize));
		break;

	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = EINVAL;
			break;

		case LINK_FLOWCTRL_NONE:
			param_arr[param_anar_pause].value = 0;
			break;

		case LINK_FLOWCTRL_RX:
			param_arr[param_anar_pause].value = 1;
			break;

		case LINK_FLOWCTRL_TX:
		case LINK_FLOWCTRL_BI:
			err = EINVAL;
			break;
		}

reprogram:
		if (err == 0) {
			if (!nxge_param_link_update(nxgep)) {
				err = EINVAL;
			}
		}
		break;
	case MAC_PROP_PRIVATE:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: private property"));
		err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize,
		    pr_val);
		break;

	default:
		err = ENOTSUP;
		break;
	}

	mutex_exit(nxgep->genlock);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "<== nxge_m_setprop (return %d)", err));
	return (err);
}
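/*
 * The MAC_PROP_MTU case above follows a change-then-roll-back pattern:
 * the new frame size is written into the soft state first, and if either
 * the hardware update or mac_maxsdu_update() fails, the old value is
 * restored before returning. The pattern reduced to a sketch, with
 * hypothetical xx_ names:
 */
#if 0
static int
xx_set_mtu(xx_t *xxp, uint32_t new_mtu)
{
	uint16_t old = xxp->maxframesize;

	xxp->maxframesize = (uint16_t)(new_mtu + XX_HEADER_OVERHEAD);
	if (xx_apply_framesize(xxp) != 0 ||
	    mac_maxsdu_update(xxp->mach, new_mtu) != 0) {
		xxp->maxframesize = old;	/* undo on any failure */
		return (EINVAL);
	}
	xxp->default_mtu = new_mtu;
	return (0);
}
#endif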
static int
nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
{
	nxge_t		*nxgep = barg;
	p_nxge_param_t	param_arr = nxgep->param_arr;
	p_nxge_stats_t	statsp = nxgep->statsp;
	int		err = 0;
	link_flowctrl_t	fl;
	uint64_t	tmp = 0;
	link_state_t	ls;
	boolean_t	is_default = (pr_flags & MAC_PROP_DEFAULT);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "==> nxge_m_getprop: pr_num %d", pr_num));

	if (pr_valsize == 0)
		return (EINVAL);

	*perm = MAC_PROP_PERM_RW;

	if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) {
		err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val);
		return (err);
	}

	bzero(pr_val, pr_valsize);
	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_getprop: duplex mode %d",
		    *(uint8_t *)pr_val));
		break;

	case MAC_PROP_SPEED:
		if (pr_valsize < sizeof (uint64_t))
			return (EINVAL);
		*perm = MAC_PROP_PERM_READ;
		tmp = statsp->mac_stats.link_speed * 1000000ull;
		bcopy(&tmp, pr_val, sizeof (tmp));
		break;

	case MAC_PROP_STATUS:
		if (pr_valsize < sizeof (link_state_t))
			return (EINVAL);
		*perm = MAC_PROP_PERM_READ;
		if (!statsp->mac_stats.link_up)
			ls = LINK_STATE_DOWN;
		else
			ls = LINK_STATE_UP;
		bcopy(&ls, pr_val, sizeof (ls));
		break;

	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val =
		    param_arr[param_autoneg].value;
		break;

	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (link_flowctrl_t))
			return (EINVAL);

		fl = LINK_FLOWCTRL_NONE;
		if (param_arr[param_anar_pause].value) {
			fl = LINK_FLOWCTRL_RX;
		}
		bcopy(&fl, pr_val, sizeof (fl));
		break;

	case MAC_PROP_ADV_1000FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)pr_val =
		    param_arr[param_anar_1000fdx].value;
		break;

	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = nxgep->param_en_1000fdx;
		break;

	case MAC_PROP_ADV_100FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)pr_val =
		    param_arr[param_anar_100fdx].value;
		break;

	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = nxgep->param_en_100fdx;
		break;

	case MAC_PROP_ADV_10FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)pr_val =
		    param_arr[param_anar_10fdx].value;
		break;

	case MAC_PROP_EN_10FDX_CAP:
		*(uint8_t *)pr_val = nxgep->param_en_10fdx;
		break;

	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
		err = ENOTSUP;
		break;

	case MAC_PROP_PRIVATE:
		err = nxge_get_priv_prop(nxgep, pr_name, pr_flags,
		    pr_valsize, pr_val, perm);
		break;
	default:
		err = EINVAL;
		break;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop"));

	return (err);
}

/* ARGSUSED */
static int
nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	p_nxge_param_t	param_arr = nxgep->param_arr;
	int		err = 0;
	long		result;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "==> nxge_set_priv_prop: name %s", pr_name));

	if (strcmp(pr_name, "_accept_jumbo") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s "
		    "pr_val %s result %d "
		    "param %d is_jumbo %d",
		    pr_name, pr_val, result,
		    param_arr[param_accept_jumbo].value,
		    nxgep->mac.is_jumbo));

		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			if (nxgep->mac.is_jumbo ==
			    (uint32_t)result) {
				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
				    "no change (%d %d)",
				    nxgep->mac.is_jumbo,
				    result));
				return (0);
			}
		}

		param_arr[param_accept_jumbo].value = result;
		nxgep->mac.is_jumbo = B_FALSE;
		if (result) {
			nxgep->mac.is_jumbo = B_TRUE;
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d",
		    pr_name, result, nxgep->mac.is_jumbo));

		return (err);
	}

	/* Blanking */
	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
		err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
		    (char *)pr_val,
		    (caddr_t)&param_arr[param_rxdma_intr_time]);
		if (err) {
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "unable to set (%s)", pr_name));
			err = EINVAL;
		} else {
			err = 0;
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "set (%s)", pr_name));
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
		err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL,
		    (char *)pr_val,
		    (caddr_t)&param_arr[param_rxdma_intr_pkts]);
		if (err) {
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "unable to set (%s)", pr_name));
			err = EINVAL;
		} else {
			err = 0;
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "set (%s)", pr_name));
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d)",
		    pr_name, result));

		return (err);
	}
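	/*
	 * The eight "_class_opt_*" handlers that follow are identical
	 * except for the parameter they target. An equivalent table-driven
	 * form would look like the sketch below (illustrative only,
	 * hypothetical xx_ names; the driver keeps the explicit handlers):
	 */
#if 0
static const struct {
	const char	*name;
	int		param;
} xx_class_props[] = {
	{ "_class_opt_ipv4_tcp",	param_class_opt_ipv4_tcp },
	{ "_class_opt_ipv4_udp",	param_class_opt_ipv4_udp },
	{ "_class_opt_ipv4_ah",		param_class_opt_ipv4_ah },
	{ "_class_opt_ipv4_sctp",	param_class_opt_ipv4_sctp },
	{ "_class_opt_ipv6_tcp",	param_class_opt_ipv6_tcp },
	{ "_class_opt_ipv6_udp",	param_class_opt_ipv6_udp },
	{ "_class_opt_ipv6_ah",		param_class_opt_ipv6_ah },
	{ "_class_opt_ipv6_sctp",	param_class_opt_ipv6_sctp },
};

static int
xx_set_class_prop(p_nxge_t nxgep, const char *pr_name, const void *pr_val)
{
	int i;

	if (pr_val == NULL)
		return (EINVAL);
	for (i = 0; i < sizeof (xx_class_props) /
	    sizeof (xx_class_props[0]); i++) {
		if (strcmp(pr_name, xx_class_props[i].name) == 0)
			return (nxge_param_set_ip_opt(nxgep, NULL, NULL,
			    (char *)pr_val, (caddr_t)&nxgep->param_arr[
			    xx_class_props[i].param]));
	}
	return (EINVAL);
}
#endif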
	/* Classification */
	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
		if (pr_val == NULL) {
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "==> nxge_set_priv_prop: name %s (null)", pr_name));
			err = EINVAL;
			return (err);
		}

		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s "
		    "(lso %d pr_val %s value %d)",
		    pr_name, nxgep->soft_lso_enable, pr_val, result));

		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			if (nxgep->soft_lso_enable == (uint32_t)result) {
				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
				    "no change (%d %d)",
				    nxgep->soft_lso_enable, result));
				return (0);
			}
		}

		nxgep->soft_lso_enable = (int)result;

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d)",
		    pr_name, result));

		return (err);
	}
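	/*
	 * _soft_lso_enable only updates the soft state here; the MAC layer
	 * sees the effect through MAC_CAPAB_LSO in nxge_m_getcapab()
	 * (earlier in this file), which advertises software LSO only while
	 * soft_lso_enable is set and nxge_cksum_offload <= 1. Illustrative
	 * userland usage, assuming the standard dladm private
	 * link-property syntax:
	 *
	 *	# dladm set-linkprop -p _soft_lso_enable=1 nxge0
	 */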
	/*
	 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the
	 * following code to be executed.
	 */
	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_anar_10gfdx]);
		return (err);
	}
	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_anar_pause]);
		return (err);
	}

	return (EINVAL);
}
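/*
 * nxge_get_priv_prop() below always formats the value into a local
 * valstr buffer first and only copies it out at the shared "done" label,
 * where the caller's buffer size is checked. That tail, reduced to a
 * sketch with hypothetical names:
 */
#if 0
static int
xx_copyout_str(void *pr_val, uint_t pr_valsize, const char *valstr)
{
	/* Refuse buffers that cannot hold the formatted string. */
	if (pr_valsize < (uint_t)strlen(valstr))
		return (ENOBUFS);
	(void) strlcpy(pr_val, valstr, pr_valsize);
	return (0);
}
#endif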
static int
nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val, uint_t *perm)
{
	p_nxge_param_t	param_arr = nxgep->param_arr;
	char		valstr[MAXNAMELEN];
	int		err = EINVAL;
	uint_t		strsize;
	boolean_t	is_default = (pr_flags & MAC_PROP_DEFAULT);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "==> nxge_get_priv_prop: property %s", pr_name));

	/* function number */
	if (strcmp(pr_name, "_function_number") == 0) {
		if (is_default)
			return (ENOTSUP);
		*perm = MAC_PROP_PERM_READ;
		(void) snprintf(valstr, sizeof (valstr), "%d",
		    nxgep->function_num);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %d valstr %s)",
		    pr_name, nxgep->function_num, valstr));

		err = 0;
		goto done;
	}

	/* Neptune firmware version */
	if (strcmp(pr_name, "_fw_version") == 0) {
		if (is_default)
			return (ENOTSUP);
		*perm = MAC_PROP_PERM_READ;
		(void) snprintf(valstr, sizeof (valstr), "%s",
		    nxgep->vpd_info.ver);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %d valstr %s)",
		    pr_name, nxgep->vpd_info.ver, valstr));

		err = 0;
		goto done;
	}

	/* port PHY mode */
	if (strcmp(pr_name, "_port_mode") == 0) {
		if (is_default)
			return (ENOTSUP);
		*perm = MAC_PROP_PERM_READ;
		switch (nxgep->mac.portmode) {
		case PORT_1G_COPPER:
			(void) snprintf(valstr, sizeof (valstr), "1G copper %s",
			    nxgep->hot_swappable_phy ?
			    "[Hot Swappable]" : "");
			break;
		case PORT_1G_FIBER:
			(void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_COPPER:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G copper %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_FIBER:
			(void) snprintf(valstr, sizeof (valstr), "10G fiber %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_SERDES:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G serdes %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_SERDES:
			(void) snprintf(valstr, sizeof (valstr), "1G serdes %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_TN1010:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G TN1010 copper %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_TN1010:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G TN1010 copper %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_RGMII_FIBER:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_HSP_MODE:
			(void) snprintf(valstr, sizeof (valstr),
			    "phy not present[hot swappable]");
			break;
		default:
			(void) snprintf(valstr, sizeof (valstr), "unknown %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %s)",
		    pr_name, valstr));

		err = 0;
		goto done;
	}

	/* Hot swappable PHY */
	if (strcmp(pr_name, "_hot_swap_phy") == 0) {
		if (is_default)
			return (ENOTSUP);
		*perm = MAC_PROP_PERM_READ;
		(void) snprintf(valstr, sizeof (valstr), "%s",
		    nxgep->hot_swappable_phy ?
		    "yes" : "no");

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %d valstr %s)",
		    pr_name, nxgep->hot_swappable_phy, valstr));

		err = 0;
		goto done;
	}

	/* accept jumbo */
	if (strcmp(pr_name, "_accept_jumbo") == 0) {
		if (is_default)
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
		else
			(void) snprintf(valstr, sizeof (valstr),
			    "%d", nxgep->mac.is_jumbo);
		err = 0;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d (%d, %d))",
		    pr_name,
		    (uint32_t)param_arr[param_accept_jumbo].value,
		    nxgep->mac.is_jumbo,
		    nxge_jumbo_enable));

		goto done;
	}

	/* Receive Interrupt Blanking Parameters */
	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
		err = 0;
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr),
			    "%d", RXDMA_RCR_TO_DEFAULT);
			goto done;
		}

		(void) snprintf(valstr, sizeof (valstr), "%d",
		    nxgep->intr_timeout);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d)",
		    pr_name,
		    (uint32_t)nxgep->intr_timeout));
		goto done;
	}

	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
		err = 0;
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr),
			    "%d", RXDMA_RCR_PTHRES_DEFAULT);
			goto done;
		}
		(void) snprintf(valstr, sizeof (valstr), "%d",
		    nxgep->intr_threshold);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d)",
		    pr_name, (uint32_t)nxgep->intr_threshold));

		goto done;
	}
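	/*
	 * The two getters above pair with the "_rxdma_intr_*" setters
	 * earlier in this file: as the default names RXDMA_RCR_TO_DEFAULT
	 * and RXDMA_RCR_PTHRES_DEFAULT suggest, _rxdma_intr_time is the
	 * receive completion timeout and _rxdma_intr_pkts the packet
	 * threshold; an interrupt is forced when either limit is reached.
	 * Illustrative userland usage (assuming the standard dladm private
	 * link-property syntax):
	 *
	 *	# dladm set-linkprop -p _rxdma_intr_time=8 nxge0
	 *	# dladm show-linkprop -p _rxdma_intr_pkts nxge0
	 */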
	/* Classification and Load Distribution Configuration */
	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_tcp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_udp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_ah].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_sctp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_tcp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_udp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_ah].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_sctp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	/* Software LSO */
	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
			err = 0;
			goto done;
		}
		(void) snprintf(valstr, sizeof (valstr),
		    "%d", nxgep->soft_lso_enable);
		err = 0;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d)",
		    pr_name, nxgep->soft_lso_enable));

		goto done;
	}
	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
		err = 0;
		if (is_default ||
		    nxgep->param_arr[param_anar_10gfdx].value != 0) {
			(void) snprintf(valstr, sizeof (valstr), "%d", 1);
			goto done;
		} else {
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
			goto done;
		}
	}
	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		err = 0;
		if (is_default ||
		    nxgep->param_arr[param_anar_pause].value != 0) {
			(void) snprintf(valstr, sizeof (valstr), "%d", 1);
			goto done;
		} else {
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
			goto done;
		}
	}

done:
	if (err == 0) {
		strsize = (uint_t)strlen(valstr);
		if (pr_valsize < strsize) {
			err = ENOBUFS;
		} else {
			(void) strlcpy(pr_val, valstr, pr_valsize);
		}
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "<== nxge_get_priv_prop: return %d", err));
	return (err);
}

/*
 * Module loading and removing entry points.
 */
DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach,
    nodev, NULL, D_MP, NULL, nxge_quiesce);

#define	NXGE_DESC_VER	"Sun NIU 10Gb Ethernet"

/*
 * Module linkage information for the kernel.
 */
static struct modldrv nxge_modldrv = {
	&mod_driverops,
	NXGE_DESC_VER,
	&nxge_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *) &nxge_modldrv, NULL
};

int
_init(void)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
	mac_init_ops(&nxge_dev_ops, "nxge");
	status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
	if (status != 0) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "failed to init device soft state"));
		goto _init_exit;
	}
	status = mod_install(&modlinkage);
	if (status != 0) {
		ddi_soft_state_fini(&nxge_list);
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
		goto _init_exit;
	}

	MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);

_init_exit:
	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));

	return (status);
}

int
_fini(void)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));

	if (nxge_mblks_pending)
		return (EBUSY);

	status = mod_remove(&modlinkage);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((NULL, MOD_CTL,
		    "Module removal failed 0x%08x",
		    status));
		goto _fini_exit;
	}

	mac_fini_ops(&nxge_dev_ops);

	ddi_soft_state_fini(&nxge_list);

	MUTEX_DESTROY(&nxge_common_lock);

_fini_exit:
	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));

	return (status);
}

int
_info(struct modinfo *modinfop)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
	status = mod_info(&modlinkage, modinfop);
	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));

	return (status);
}

/*ARGSUSED*/
static int
nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
{
	p_nxge_ring_handle_t	rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t		nxgep = rhp->nxgep;
	uint32_t		channel;
	p_tx_ring_t		ring;

	channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
	ring = nxgep->tx_rings->rings[channel];

	MUTEX_ENTER(&ring->lock);
	ring->tx_ring_handle = rhp->ring_handle;
	MUTEX_EXIT(&ring->lock);

	return (0);
}

static void
nxge_tx_ring_stop(mac_ring_driver_t rdriver)
{
	p_nxge_ring_handle_t	rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t		nxgep = rhp->nxgep;
	uint32_t		channel;
	p_tx_ring_t		ring;

	channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
	ring = nxgep->tx_rings->rings[channel];

	MUTEX_ENTER(&ring->lock);
	ring->tx_ring_handle = (mac_ring_handle_t)NULL;
	MUTEX_EXIT(&ring->lock);
}

static int
nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
{
	p_nxge_ring_handle_t	rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t		nxgep = rhp->nxgep;
	uint32_t		channel;
	p_rx_rcr_ring_t		ring;
	int			i;

	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
	ring = nxgep->rx_rcr_rings->rcr_rings[channel];

	MUTEX_ENTER(&ring->lock);

	if (nxgep->rx_channel_started[channel] == B_TRUE) {
		MUTEX_EXIT(&ring->lock);
		return (0);
	}

	/* set rcr_ring */
	for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
		if ((nxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
		    (nxgep->ldgvp->ldvp[i].channel == channel)) {
			ring->ldvp = &nxgep->ldgvp->ldvp[i];
			ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp;
		}
	}

	nxgep->rx_channel_started[channel] = B_TRUE;
	ring->rcr_mac_handle = rhp->ring_handle;
	ring->rcr_gen_num = mr_gen_num;
	MUTEX_EXIT(&ring->lock);

	return (0);
}

static void
nxge_rx_ring_stop(mac_ring_driver_t rdriver)
{
	p_nxge_ring_handle_t	rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t		nxgep = rhp->nxgep;
	uint32_t		channel;
	p_rx_rcr_ring_t		ring;

	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
	ring = nxgep->rx_rcr_rings->rcr_rings[channel];

	MUTEX_ENTER(&ring->lock);
	nxgep->rx_channel_started[channel] = B_FALSE;
	ring->rcr_mac_handle = NULL;
	MUTEX_EXIT(&ring->lock);
}
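/*
 * nxge_fill_ring() below wires each receive ring's mac_intr_t so that,
 * as the in-line comment there notes, "enable interrupt" means "disable
 * poll" and vice versa: mi_enable is backed by nxge_disable_poll() and
 * mi_disable by nxge_enable_poll(). A sketch of the resulting calling
 * sequence, from the MAC layer's side (illustrative only):
 */
#if 0
/* Switch the ring to polled delivery ... */
intr->mi_disable(intr->mi_handle);		/* -> nxge_enable_poll() */
mp = infop->mri_poll(infop->mri_driver, bytes);	/* -> nxge_rx_poll() */
/* ... and back to interrupt-driven delivery when done. */
intr->mi_enable(intr->mi_handle);		/* -> nxge_disable_poll() */
#endif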
/*
 * Callback function for the MAC layer to register all rings.
 */
static void
nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	p_nxge_t		nxgep = (p_nxge_t)arg;
	p_nxge_hw_pt_cfg_t	p_cfgp = &nxgep->pt_config.hw_config;

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_fill_ring 0x%x index %d", rtype, index));

	switch (rtype) {
	case MAC_RING_TYPE_TX: {
		p_nxge_ring_handle_t	rhandlep;

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d",
		    rtype, index, p_cfgp->tdc.count));

		ASSERT((index >= 0) && (index < p_cfgp->tdc.count));
		rhandlep = &nxgep->tx_ring_handles[index];
		rhandlep->nxgep = nxgep;
		rhandlep->index = index;
		rhandlep->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rhandlep;
		infop->mri_start = nxge_tx_ring_start;
		infop->mri_stop = nxge_tx_ring_stop;
		infop->mri_tx = nxge_tx_ring_send;

		break;
	}
	case MAC_RING_TYPE_RX: {
		p_nxge_ring_handle_t	rhandlep;
		int			nxge_rindex;
		mac_intr_t		nxge_mac_intr;

		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d",
		    rtype, index, p_cfgp->max_rdcs));

		/*
		 * 'index' is the ring index within the group.
		 * Find the ring index in the nxge instance.
		 */
		nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index);

		ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs));
		rhandlep = &nxgep->rx_ring_handles[nxge_rindex];
		rhandlep->nxgep = nxgep;
		rhandlep->index = nxge_rindex;
		rhandlep->ring_handle = rh;

		/*
		 * Entrypoint to enable interrupt (disable poll) and
		 * disable interrupt (enable poll).
		 */
		nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep;
		nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll;
		nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll;
		infop->mri_driver = (mac_ring_driver_t)rhandlep;
		infop->mri_start = nxge_rx_ring_start;
		infop->mri_stop = nxge_rx_ring_stop;
		infop->mri_intr = nxge_mac_intr; /* ??? */
		infop->mri_poll = nxge_rx_poll;

		break;
	}
	default:
		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x",
	    rtype));
}

static void
nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
    mac_ring_type_t type)
{
	nxge_ring_group_t	*rgroup = (nxge_ring_group_t *)gh;
	nxge_ring_handle_t	*rhandle = (nxge_ring_handle_t *)rh;
	nxge_t			*nxge;
	nxge_grp_t		*grp;
	nxge_rdc_grp_t		*rdc_grp;
	uint16_t		channel;	/* device-wise ring id */
	int			dev_gindex;
	int			rv;

	nxge = rgroup->nxgep;

	switch (type) {
	case MAC_RING_TYPE_TX:
		/*
		 * nxge_grp_dc_add takes a channel number which is a
		 * "device" ring ID.
		 */
		channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;

		/*
		 * Remove the ring from the default group
		 */
		if (rgroup->gindex != 0) {
			(void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
		}

		/*
		 * nxge->tx_set.group[] is an array of groups indexed by
		 * a "port" group ID.
		 */
		grp = nxge->tx_set.group[rgroup->gindex];
		rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
		if (rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_group_add_ring: nxge_grp_dc_add failed"));
		}
		break;

	case MAC_RING_TYPE_RX:
		/*
		 * nxge->rx_set.group[] is an array of groups indexed by
		 * a "port" group ID.
		 */
		grp = nxge->rx_set.group[rgroup->gindex];

		dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
		    rgroup->gindex;
		rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];

		/*
		 * nxge_grp_dc_add takes a channel number which is a
		 * "device" ring ID.
		 */
		channel = nxge->pt_config.hw_config.start_rdc + rhandle->index;
		rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel);
		if (rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_group_add_ring: nxge_grp_dc_add failed"));
		}

		rdc_grp->map |= (1 << channel);
		rdc_grp->max_rdcs++;

		(void) nxge_init_fzc_rdc_tbl(nxge, rgroup->rdctbl);
		break;
	}
}

static void
nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
    mac_ring_type_t type)
{
	nxge_ring_group_t	*rgroup = (nxge_ring_group_t *)gh;
	nxge_ring_handle_t	*rhandle = (nxge_ring_handle_t *)rh;
	nxge_t			*nxge;
	uint16_t		channel;	/* device-wise ring id */
	nxge_rdc_grp_t		*rdc_grp;
	int			dev_gindex;

	nxge = rgroup->nxgep;

	switch (type) {
	case MAC_RING_TYPE_TX:
		dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid +
		    rgroup->gindex;
		channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
		nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

		/*
		 * Add the ring back to the default group
		 */
		if (rgroup->gindex != 0) {
			nxge_grp_t *grp;
			grp = nxge->tx_set.group[0];
			(void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
		}
		break;

	case MAC_RING_TYPE_RX:
		dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
		    rgroup->gindex;
		rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
		channel = rdc_grp->start_rdc + rhandle->index;
		nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);

		rdc_grp->map &= ~(1 << channel);
		rdc_grp->max_rdcs--;

		(void) nxge_init_fzc_rdc_tbl(nxge, rgroup->rdctbl);
		break;
	}
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs(p_nxge_t nxgep)
{
	int		intr_types;
	int		type = 0;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));

	nxgep->nxge_intr_type.intr_registered = B_FALSE;
	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
	nxgep->nxge_intr_type.msi_intx_cnt = 0;
	nxgep->nxge_intr_type.intr_added = 0;
	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
	nxgep->nxge_intr_type.intr_type = 0;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	} else if (nxge_msi_enable) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip,
	    &intr_types)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}
	nxgep->nxge_intr_type.intr_types = intr_types;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * Solaris MSI-X is not supported yet; use MSI for now.
	 * nxge_msi_enable:
	 *	1 - MSI, 2 - MSI-X, others - FIXED
	 */
	switch (nxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "use fixed (intx emulation) type %08x",
		    type));
		break;

	case 2:
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
		break;

	case 1:
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
	}

	nxgep->nxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    nxgep->nxge_intr_type.niu_msi_enable) {
		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_add_intrs: "
			    " nxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "interrupts registered : type %d", type));
			nxgep->nxge_intr_type.intr_registered = B_TRUE;

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "\nAdded advanced nxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!nxgep->nxge_intr_type.intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
		    "failed to register interrupts"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
	return (status);
}

static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)
{
	int		intr_type;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intr_type = intrp->intr_type;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		return (nxge_add_intrs_adv_type(nxgep, intr_type));

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));

	default:
		return (NXGE_ERROR);
	}
}
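/*
 * nxge_add_intrs_adv_type() below rounds the MSI vector count down to a
 * power of two with explicit bit tests against 16, 8, 4 and 2. A generic
 * equivalent of that rounding step (illustrative sketch only):
 */
#if 0
static int
xx_round_down_pow2(int n)
{
	int p = 1;

	/* Largest power of two that does not exceed n (n >= 1). */
	while ((p << 1) <= n)
		p <<= 1;
	return (p);
}
#endif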
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 5844 { 5845 dev_info_t *dip = nxgep->dip; 5846 p_nxge_ldg_t ldgp; 5847 p_nxge_intr_t intrp; 5848 uint_t *inthandler; 5849 void *arg1, *arg2; 5850 int behavior; 5851 int nintrs, navail, nrequest; 5852 int nactual, nrequired; 5853 int inum = 0; 5854 int x, y; 5855 int ddi_status = DDI_SUCCESS; 5856 nxge_status_t status = NXGE_OK; 5857 5858 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 5859 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5860 intrp->start_inum = 0; 5861 5862 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 5863 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 5864 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5865 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 5866 "nintrs: %d", ddi_status, nintrs)); 5867 return (NXGE_ERROR | NXGE_DDI_FAILED); 5868 } 5869 5870 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 5871 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 5872 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5873 "ddi_intr_get_navail() failed, status: 0x%x%, " 5874 "nintrs: %d", ddi_status, navail)); 5875 return (NXGE_ERROR | NXGE_DDI_FAILED); 5876 } 5877 5878 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5879 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 5880 nintrs, navail)); 5881 5882 /* PSARC/2007/453 MSI-X interrupt limit override */ 5883 if (int_type == DDI_INTR_TYPE_MSIX) { 5884 nrequest = nxge_create_msi_property(nxgep); 5885 if (nrequest < navail) { 5886 navail = nrequest; 5887 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5888 "nxge_add_intrs_adv_type: nintrs %d " 5889 "navail %d (nrequest %d)", 5890 nintrs, navail, nrequest)); 5891 } 5892 } 5893 5894 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 5895 /* MSI must be power of 2 */ 5896 if ((navail & 16) == 16) { 5897 navail = 16; 5898 } else if ((navail & 8) == 8) { 5899 navail = 8; 5900 } else if ((navail & 4) == 4) { 5901 navail = 4; 5902 } else if ((navail & 2) == 2) { 5903 navail = 2; 5904 } else { 5905 navail = 1; 5906 } 5907 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5908 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 5909 "navail %d", nintrs, navail)); 5910 } 5911 5912 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? 
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "nldvs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual,
	    intrp->msi_intx_cnt,
	    intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	(void) nxge_intr_ldgv_init(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		if (nxgep->niu_type != N2_NIU) {
			ldgp->intdata = SID_DATA(ldgp->func, x);
		}

		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
"1-1 int handler(%d) ldg %d ldv %d " 6140 "arg1 $%p arg2 $%p\n", 6141 x, ldgp->ldg, ldgp->ldvp->ldv, 6142 arg1, arg2)); 6143 } else if (ldgp->nldvs > 1) { 6144 inthandler = (uint_t *)ldgp->sys_intr_handler; 6145 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6146 "nxge_add_intrs_adv_type_fix: " 6147 "shared ldv %d int handler(%d) ldv %d ldg %d" 6148 "arg1 0x%016llx arg2 0x%016llx\n", 6149 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6150 arg1, arg2)); 6151 } 6152 6153 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6154 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6155 != DDI_SUCCESS) { 6156 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6157 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6158 "status 0x%x", x, ddi_status)); 6159 for (y = 0; y < intrp->intr_added; y++) { 6160 (void) ddi_intr_remove_handler( 6161 intrp->htable[y]); 6162 } 6163 for (y = 0; y < nactual; y++) { 6164 (void) ddi_intr_free(intrp->htable[y]); 6165 } 6166 /* Free already allocated intr */ 6167 kmem_free(intrp->htable, intrp->intr_size); 6168 6169 (void) nxge_ldgv_uninit(nxgep); 6170 6171 return (NXGE_ERROR | NXGE_DDI_FAILED); 6172 } 6173 intrp->intr_added++; 6174 } 6175 6176 intrp->msi_intx_cnt = nactual; 6177 6178 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6179 6180 status = nxge_intr_ldgv_init(nxgep); 6181 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6182 6183 return (status); 6184 } 6185 6186 static void 6187 nxge_remove_intrs(p_nxge_t nxgep) 6188 { 6189 int i, inum; 6190 p_nxge_intr_t intrp; 6191 6192 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6193 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6194 if (!intrp->intr_registered) { 6195 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6196 "<== nxge_remove_intrs: interrupts not registered")); 6197 return; 6198 } 6199 6200 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 6201 6202 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6203 (void) ddi_intr_block_disable(intrp->htable, 6204 intrp->intr_added); 6205 } else { 6206 for (i = 0; i < intrp->intr_added; i++) { 6207 (void) ddi_intr_disable(intrp->htable[i]); 6208 } 6209 } 6210 6211 for (inum = 0; inum < intrp->intr_added; inum++) { 6212 if (intrp->htable[inum]) { 6213 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6214 } 6215 } 6216 6217 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6218 if (intrp->htable[inum]) { 6219 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6220 "nxge_remove_intrs: ddi_intr_free inum %d " 6221 "msi_intx_cnt %d intr_added %d", 6222 inum, 6223 intrp->msi_intx_cnt, 6224 intrp->intr_added)); 6225 6226 (void) ddi_intr_free(intrp->htable[inum]); 6227 } 6228 } 6229 6230 kmem_free(intrp->htable, intrp->intr_size); 6231 intrp->intr_registered = B_FALSE; 6232 intrp->intr_enabled = B_FALSE; 6233 intrp->msi_intx_cnt = 0; 6234 intrp->intr_added = 0; 6235 6236 (void) nxge_ldgv_uninit(nxgep); 6237 6238 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6239 "#msix-request"); 6240 6241 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6242 } 6243 6244 /*ARGSUSED*/ 6245 static void 6246 nxge_intrs_enable(p_nxge_t nxgep) 6247 { 6248 p_nxge_intr_t intrp; 6249 int i; 6250 int status; 6251 6252 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6253 6254 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6255 6256 if (!intrp->intr_registered) { 6257 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6258 "interrupts are not registered")); 6259 return; 6260 } 6261 6262 if (intrp->intr_enabled) { 6263 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6264 "<== 
	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable: status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t	*macp;
	int		status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
	    NXGE_EHEADER_VLAN_CRC;
	macp->m_max_sdu = nxgep->mac.default_mtu;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = nxge_priv_props;
	macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS;
	macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1 | MAC_VIRT_SERIALIZE;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_mac_register: instance %d "
	    "max_sdu %d margin %d maxframe %d (header %d)",
	    nxgep->instance,
	    macp->m_max_sdu, macp->m_margin,
	    nxgep->mac.maxframesize,
	    NXGE_EHEADER_VLAN_CRC));

	status = mac_register(macp, &nxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}

void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t		size;
	mblk_t		*nmp;
	uint8_t		blk_id;
	uint8_t		chan;
	uint32_t	err_id;
	err_inject_t	*eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;
0x%x\n", blk_id); 6388 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id); 6389 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 6390 switch (blk_id) { 6391 case MAC_BLK_ID: 6392 break; 6393 case TXMAC_BLK_ID: 6394 break; 6395 case RXMAC_BLK_ID: 6396 break; 6397 case MIF_BLK_ID: 6398 break; 6399 case IPP_BLK_ID: 6400 nxge_ipp_inject_err(nxgep, err_id); 6401 break; 6402 case TXC_BLK_ID: 6403 nxge_txc_inject_err(nxgep, err_id); 6404 break; 6405 case TXDMA_BLK_ID: 6406 nxge_txdma_inject_err(nxgep, err_id, chan); 6407 break; 6408 case RXDMA_BLK_ID: 6409 nxge_rxdma_inject_err(nxgep, err_id, chan); 6410 break; 6411 case ZCP_BLK_ID: 6412 nxge_zcp_inject_err(nxgep, err_id); 6413 break; 6414 case ESPC_BLK_ID: 6415 break; 6416 case FFLP_BLK_ID: 6417 break; 6418 case PHY_BLK_ID: 6419 break; 6420 case ETHER_SERDES_BLK_ID: 6421 break; 6422 case PCIE_SERDES_BLK_ID: 6423 break; 6424 case VIR_BLK_ID: 6425 break; 6426 } 6427 6428 nmp->b_wptr = nmp->b_rptr + size; 6429 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 6430 6431 miocack(wq, mp, (int)size, 0); 6432 } 6433 6434 static int 6435 nxge_init_common_dev(p_nxge_t nxgep) 6436 { 6437 p_nxge_hw_list_t hw_p; 6438 dev_info_t *p_dip; 6439 6440 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 6441 6442 p_dip = nxgep->p_dip; 6443 MUTEX_ENTER(&nxge_common_lock); 6444 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6445 "==> nxge_init_common_dev:func # %d", 6446 nxgep->function_num)); 6447 /* 6448 * Loop through existing per neptune hardware list. 6449 */ 6450 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6451 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6452 "==> nxge_init_common_device:func # %d " 6453 "hw_p $%p parent dip $%p", 6454 nxgep->function_num, 6455 hw_p, 6456 p_dip)); 6457 if (hw_p->parent_devp == p_dip) { 6458 nxgep->nxge_hw_p = hw_p; 6459 hw_p->ndevs++; 6460 hw_p->nxge_p[nxgep->function_num] = nxgep; 6461 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6462 "==> nxge_init_common_device:func # %d " 6463 "hw_p $%p parent dip $%p " 6464 "ndevs %d (found)", 6465 nxgep->function_num, 6466 hw_p, 6467 p_dip, 6468 hw_p->ndevs)); 6469 break; 6470 } 6471 } 6472 6473 if (hw_p == NULL) { 6474 6475 char **prop_val; 6476 uint_t prop_len; 6477 int i; 6478 6479 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6480 "==> nxge_init_common_device:func # %d " 6481 "parent dip $%p (new)", 6482 nxgep->function_num, 6483 p_dip)); 6484 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 6485 hw_p->parent_devp = p_dip; 6486 hw_p->magic = NXGE_NEPTUNE_MAGIC; 6487 nxgep->nxge_hw_p = hw_p; 6488 hw_p->ndevs++; 6489 hw_p->nxge_p[nxgep->function_num] = nxgep; 6490 hw_p->next = nxge_hw_list; 6491 if (nxgep->niu_type == N2_NIU) { 6492 hw_p->niu_type = N2_NIU; 6493 hw_p->platform_type = P_NEPTUNE_NIU; 6494 } else { 6495 hw_p->niu_type = NIU_TYPE_NONE; 6496 hw_p->platform_type = P_NEPTUNE_NONE; 6497 } 6498 6499 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 6500 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 6501 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 6502 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 6503 6504 nxge_hw_list = hw_p; 6505 6506 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0, 6507 "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) { 6508 for (i = 0; i < prop_len; i++) { 6509 if ((strcmp((caddr_t)prop_val[i], 6510 NXGE_ROCK_COMPATIBLE) == 0)) { 6511 hw_p->platform_type = P_NEPTUNE_ROCK; 6512 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6513 "ROCK hw_p->platform_type %d", 6514 hw_p->platform_type)); 6515 break; 6516 } 6517 
		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
			for (i = 0; i < prop_len; i++) {
				if ((strcmp((caddr_t)prop_val[i],
				    NXGE_ROCK_COMPATIBLE) == 0)) {
					hw_p->platform_type = P_NEPTUNE_ROCK;
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "ROCK hw_p->platform_type %d",
					    hw_p->platform_type));
					break;
				}
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "nxge_init_common_dev: read compatible"
				    " property[%d] val[%s]",
				    i, (caddr_t)prop_val[i]));
			}

			/*
			 * prop_val is only initialized on a successful
			 * lookup, so it must be freed inside this block.
			 */
			ddi_prop_free(prop_val);
		}

		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
	}

	MUTEX_EXIT(&nxge_common_lock);

	nxgep->platform_type = hw_p->platform_type;
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d",
	    nxgep->platform_type));
	if (nxgep->niu_type != N2_NIU) {
		nxgep->niu_type = hw_p->niu_type;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}

static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p, h_hw_p;
	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));

			/*
			 * Release the RDC table, a shared resource
			 * of the nxge hardware. The RDC table was
			 * assigned to this instance of nxge in
			 * nxge_use_cfg_dma_config().
			 */
			if (!isLDOMguest(nxgep)) {
				p_dma_cfgp =
				    (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
				p_cfgp =
				    (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
				(void) nxge_fzc_rdc_tbl_unbind(nxgep,
				    p_cfgp->def_mac_rxdma_grpid);

				/* Cleanup any outstanding groups. */
				nxge_grp_cleanup(nxgep);
			}
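
			/*
			 * hw_p is shared by all functions on the same
			 * Neptune device; only the last instance to
			 * detach destroys the shared locks and unlinks
			 * the node from nxge_hw_list.
			 */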
			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num,
				    hw_p,
				    p_dip,
				    hw_p->ndevs));

				nxge_hio_uninit(nxgep);

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				nxgep->nxge_hw_p = NULL;
				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}

/*
 * Determines the number of ports from the niu_type or the platform type.
 * Returns the number of ports, or zero on failure.
 */
int
nxge_get_nports(p_nxge_t nxgep)
{
	int	nports = 0;

	switch (nxgep->niu_type) {
	case N2_NIU:
	case NEPTUNE_2_10GF:
		nports = 2;
		break;
	case NEPTUNE_4_1GC:
	case NEPTUNE_2_10GF_2_1GC:
	case NEPTUNE_1_10GF_3_1GC:
	case NEPTUNE_1_1GC_1_10GF_2_1GC:
	case NEPTUNE_2_10GF_2_1GRF:
		nports = 4;
		break;
	default:
		switch (nxgep->platform_type) {
		case P_NEPTUNE_NIU:
		case P_NEPTUNE_ATLAS_2PORT:
			nports = 2;
			break;
		case P_NEPTUNE_ATLAS_4PORT:
		case P_NEPTUNE_MARAMBA_P0:
		case P_NEPTUNE_MARAMBA_P1:
		case P_NEPTUNE_ROCK:
		case P_NEPTUNE_ALONSO:
			nports = 4;
			break;
		default:
			break;
		}
		break;
	}

	return (nports);
}

/*
 * The following two functions are to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
	int	nmsi;
	extern	int ncpus;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));

	switch (nxgep->mac.portmode) {
	case PORT_10G_COPPER:
	case PORT_10G_FIBER:
	case PORT_10G_TN1010:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		/*
		 * At most 8 MSI-X vectors are requested. If the system
		 * has fewer than 8 CPUs, the request is reduced to the
		 * number of CPUs (the default behavior).
		 */
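		/*
		 * Worked example: if nxge_msix_10g_intrs is 0 (or out
		 * of range) on a 4-CPU system, nmsi starts at 8 and is
		 * then clamped to ncpus below, so 4 vectors are
		 * requested.
		 */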
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (10G): nxge_msix_10g_intrs %d",
		    nxge_msix_10g_intrs));
		if ((nxge_msix_10g_intrs == 0) ||
		    (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_10G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): reset to 8"));
		} else {
			nmsi = nxge_msix_10g_intrs;
		}

		/*
		 * If the number of interrupts requested is 8 (the
		 * default), the number of CPUs is still checked.
		 */
		if ((nmsi == NXGE_MSIX_REQUEST_10G) &&
		    (ncpus < nmsi)) {
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): "
			    "reset to ncpus %d", ncpus));
			nmsi = ncpus;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;

	default:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d",
		    nxge_msix_1g_intrs));
		if ((nxge_msix_1g_intrs == 0) ||
		    (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_1G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (1G): reset to 2"));
		} else {
			nmsi = nxge_msix_1g_intrs;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
	return (nmsi);
}

/* ARGSUSED */
static int
nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
    void *pr_val)
{
	int		err = 0;
	link_flowctrl_t	fl;

	switch (pr_num) {
	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (link_flowctrl_t))
			return (EINVAL);
		fl = LINK_FLOWCTRL_RX;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	default:
		err = ENOTSUP;
		break;
	}
	return (err);
}


/*
 * The following is a software workaround for the Neptune hardware's
 * interrupt bug: the hardware may generate spurious interrupts after
 * an interrupt handler has been removed.
 */
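
/*
 * The three reset bits below OR together into NXGE_PCI_RESET_ALL =
 * 0xe0000000, so a single write to the port logic register at PCI
 * config offset 0x98 resets the PIM, GLU, and NIU blocks at once.
 */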
#define	NXGE_PCI_PORT_LOGIC_OFFSET	0x98
#define	NXGE_PIM_RESET			(1ULL << 29)
#define	NXGE_GLU_RESET			(1ULL << 30)
#define	NXGE_NIU_RESET			(1ULL << 31)
#define	NXGE_PCI_RESET_ALL		(NXGE_PIM_RESET |	\
					NXGE_GLU_RESET |	\
					NXGE_NIU_RESET)

#define	NXGE_WAIT_QUITE_TIME		200000
#define	NXGE_WAIT_QUITE_RETRY		40
#define	NXGE_PCI_RESET_WAIT		1000000 /* one second */

static void
nxge_niu_peu_reset(p_nxge_t nxgep)
{
	uint32_t	rvalue;
	p_nxge_hw_list_t hw_p;
	p_nxge_t	fnxgep;
	int		i, j;

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: NULL hardware pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
	    hw_p->flags, nxgep->nxge_link_poll_timerid,
	    nxgep->nxge_timerid));

	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
	/*
	 * Make sure the other instances on the same hardware have
	 * stopped sending PIOs and are in a quiescent state.
	 */
	for (i = 0; i < NXGE_MAX_PORTS; i++) {
		fnxgep = hw_p->nxge_p[i];
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: checking entry %d "
		    "nxgep $%p", i, fnxgep));
#ifdef	NXGE_DEBUG
		if (fnxgep) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: entry %d (function %d) "
			    "link timer id %d hw timer id %d",
			    i, fnxgep->function_num,
			    fnxgep->nxge_link_poll_timerid,
			    fnxgep->nxge_timerid));
		}
#endif
		if (fnxgep && fnxgep != nxgep &&
		    (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: checking $%p "
			    "(function %d) timer ids",
			    fnxgep, fnxgep->function_num));
			for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
				NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
				    "==> nxge_niu_peu_reset: waiting"));
				NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
				if (!fnxgep->nxge_timerid &&
				    !fnxgep->nxge_link_poll_timerid) {
					break;
				}
			}
			NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
			if (fnxgep->nxge_timerid ||
			    fnxgep->nxge_link_poll_timerid) {
				MUTEX_EXIT(&hw_p->nxge_cfg_lock);
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "<== nxge_niu_peu_reset: cannot reset "
				    "hardware (devices are still in use)"));
				return;
			}
		}
	}

	if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
		hw_p->flags |= COMMON_RESET_NIU_PCI;
		rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: read offset 0x%x (%d) "
		    "(data 0x%x)",
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    rvalue));

		rvalue |= NXGE_PCI_RESET_ALL;
		pci_config_put32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
		    rvalue));

		NXGE_DELAY(NXGE_PCI_RESET_WAIT);
	}

	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
}

static void
nxge_set_pci_replay_timeout(p_nxge_t nxgep)
{
	p_dev_regs_t	dev_regs;
	uint32_t	value;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));
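
	/*
	 * This routine ORs the new timeout into bits 18:14 of the PCI
	 * config register at offset 0xb8; with the default
	 * nxge_replay_timeout of 0xc that adds 0xc << 14 = 0x30000.
	 * Since OR-ing can only set bits, this assumes the field does
	 * not already contain conflicting bits.
	 */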

	if (!nxge_set_replay_timer) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: will not change "
		    "the timeout"));
		return;
	}

	/*
	 * Check for a valid PCI handle before touching dev_regs in the
	 * debug message below.
	 */
	dev_regs = nxgep->dev_regs;
	if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
		    "no PCI handle",
		    dev_regs));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
	    dev_regs, dev_regs->nxge_pciregh));

	value = (pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
	    (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
	    "(timeout value to set 0x%x at offset 0x%x) value 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));

	pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
	    value);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET)));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
nxge_quiesce(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);

	if (nxgep == NULL)
		return (DDI_FAILURE);

	/* Turn off debugging */
	nxge_debug_level = NO_DEBUG;
	nxgep->nxge_debug_level = NO_DEBUG;
	npi_debug_level = NO_DEBUG;

	/*
	 * Stop the link monitor only when the link check mode is
	 * interrupt based.
	 */
	if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	}

	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	return (DDI_SUCCESS);
}