/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include	<sys/nxge/nxge_impl.h>
#include	<sys/nxge/nxge_hio.h>
#include	<sys/nxge/nxge_rxdma.h>
#include	<sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */

/*
 * PSARC/2007/453 MSI-X interrupt limit override
 */
uint32_t nxge_msi_enable = 2;

/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed. If this flag is enabled, the
 * driver will reset the hardware when devices are being detached.
 */
uint32_t nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware checksum bugs that affect
 * packet transmission and receive:
 *
 * Usage of nxge_cksum_offload:
 *
 *  (1) nxge_cksum_offload = 0 (default):
 *	- transmitted packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: driver will compute the software checksum
 *	       based on the partial checksum computed
 *	       by the IP layer.
 *	- received packets:
 *	  TCP: marks packet checksum flags based on hardware result.
 *	  UDP: will not mark checksum flags.
 *
 *  (2) nxge_cksum_offload = 1:
 *	- transmitted packets:
 *	  TCP/UDP: uses the hardware checksum feature.
 *	- received packets:
 *	  TCP/UDP: marks packet checksum flags based on hardware result.
 *
 *  (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksums for both TCP and UDP will be computed
 *	  by the stack.
 *	- Software LSO is not allowed in this case.
 *
 *  (4) nxge_cksum_offload > 2:
 *	- Will be treated as if it were set to 2
 *	  (stack will compute the checksum).
 *
 *  (5) If the hardware bug is fixed, this workaround
 *	needs to be updated accordingly to reflect
 *	the new hardware revision.
 */
uint32_t nxge_cksum_offload = 0;
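/*
 * Like the other tunables below, this flag can be set from /etc/system.
 * For example (illustrative only), to have the stack compute all
 * checksums, one could add the following line to /etc/system and reboot:
 *
 *	set nxge:nxge_cksum_offload = 2
 */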
/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t nxge_jumbo_enable = B_FALSE;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/* MAX LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if defined(_BIG_ENDIAN)
uint32_t nxge_use_kmem_alloc = 1;
#else
uint32_t nxge_use_kmem_alloc = 0;
#endif

rtrace_t npi_rtracebuf;

/*
 * The hardware sometimes fails to allow enough time for the link
 * partner to send an acknowledgement for packets that the hardware
 * sent to it. In those instances the hardware resends the packets
 * earlier than it should. This behavior caused some switches to
 * acknowledge the wrong packets, which triggered a fatal error.
 * This software workaround sets the replay timer to a value
 * suggested by the hardware team.
 *
 * PCI config space replay timer register:
 * The following replay timeout value of 0xc is written
 * to bits 18:14.
 */
#define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
#define	PCI_REPLAY_TIMEOUT_SHIFT	14

uint32_t nxge_set_replay_timer = 1;
uint32_t nxge_replay_timeout = 0xc;
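/*
 * Illustrative sketch only; the driver's actual update is done by
 * nxge_set_pci_replay_timeout(), defined later in this file. Replacing
 * the 5-bit timeout field in bits 18:14 of the config word amounts to:
 *
 *	uint32_t val;
 *
 *	val = pci_config_get32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	val &= ~(0x1f << PCI_REPLAY_TIMEOUT_SHIFT);
 *	val |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, val);
 */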
/*
 * Transmit serialization can sometimes cause the caller to sleep
 * longer than it should before the driver's transmit function is
 * called. The performance group suggested a tunable for the maximum
 * wait time; the default is 1 tick.
 */
uint32_t nxge_tx_serial_maxsleep = 1;

#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);
static int nxge_quiesce(dev_info_t *);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_remove(void *arg, int slot);
extern void nxge_grp_cleanup(p_nxge_t nxge);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
    char *, caddr_t);
#if defined(sun4v)
extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep);
extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm);
#endif

/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	int slot, int rdctbl, boolean_t usetbl);
void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
	boolean_t factory);
#if defined(sun4v)
extern mblk_t *nxge_m_tx(void *arg, mblk_t *mp);
#endif

static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *, uint_t *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
    const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
    void *, uint_t *);
static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);
static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
    mac_ring_info_t *, mac_ring_handle_t);
static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);
static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);

static void nxge_niu_peu_reset(p_nxge_t nxgep);
static void nxge_set_pci_replay_timeout(nxge_t *);

mac_priv_prop_t nxge_priv_props[] = {
	{"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW},
	{"_function_number", MAC_PROP_PERM_READ},
	{"_fw_version", MAC_PROP_PERM_READ},
	{"_port_mode", MAC_PROP_PERM_READ},
	{"_hot_swap_phy", MAC_PROP_PERM_READ},
	{"_accept_jumbo", MAC_PROP_PERM_RW},
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
	{"_soft_lso_enable", MAC_PROP_PERM_RW}
};

#define	NXGE_MAX_PRIV_PROPS	\
	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	NULL,
	NULL,
	nxge_m_ioctl,
	nxge_m_getcapab,
	NULL,
	NULL,
	nxge_m_setprop,
	nxge_m_getprop
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);
/*
 * For latency-sensitive applications, PAE and customers requested
 * tunables that allow the user to request a higher number of
 * interrupts, spreading the interrupt load among multiple channels.
 * The DDI framework limits the maximum number of MSI-X resources
 * allocated to 8 (ddi_msix_alloc_limit); if more than 8 are requested,
 * ddi_msix_alloc_limit must be raised accordingly. The default number
 * of MSI interrupts is 8 for a 10G link and 2 for a 1G link.
 */
#define	NXGE_MSIX_MAX_ALLOWED	32
uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
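/*
 * For example (illustrative only), to request 16 MSI-X interrupts on a
 * 10G port, both the DDI limit and the driver tunable must be raised in
 * /etc/system (the value must not exceed NXGE_MSIX_MAX_ALLOWED):
 *
 *	set ddi_msix_alloc_limit = 16
 *	set nxge:nxge_msix_10g_intrs = 16
 */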
/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t, ddi_device_acc_attr_t *,
    ddi_device_acc_attr_t *, ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};
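/*
 * Illustrative sketch only (the driver's real allocations go through
 * nxge_dma_mem_alloc()): these attribute templates are handed to the
 * DDI DMA routines, e.g.
 *
 *	ddi_dma_handle_t handle;
 *
 *	(void) ddi_dma_alloc_handle(nxgep->dip, &nxge_rx_dma_attr,
 *	    DDI_DMA_DONTWAIT, NULL, &handle);
 *
 * Note that nxge_rx_dma_attr.dma_attr_align is overridden with the
 * system page size in nxge_setup_system_dma_pages() below.
 */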
ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks have to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes [] = {0x2000};
#else
size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
		0x10000, 0x20000, 0x40000, 0x80000,
		0x100000, 0x200000, 0x400000, 0x800000,
		0x1000000, 0x2000000, 0x4000000};
#endif

/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);

static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;
	p_nxge_param_t param_arr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
	    &nxge_dev_desc_dma_acc_attr,
	    &nxge_rx_dma_attr);

	/*
	 * Create & initialize the per-Neptune data structure
	 * (even if we're a guest).
	 */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	/*
	 * Software workaround: set the replay timer.
	 */
	if (nxgep->niu_type != N2_NIU) {
		nxge_set_pci_replay_timeout(nxgep);
	}
#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		nxge_m_callbacks.mc_tx = nxge_m_tx;
	}
#endif

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail4;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
		 * internally; the remaining 2 ports use BMAC (1G "Big" MAC).
		 * The two types of MACs have different characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);

		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * The first int of the "reg" property, address_hi,
		 * contains the config handle; bits 28-31 carry
		 * OBP-specific info and must be masked off.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
		ddi_prop_free(regp);
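		/*
		 * Worked example (illustrative): if address_hi reads
		 * 0x84700000, masking with 0xFFFFFFF strips the OBP
		 * bits (0x8) and leaves a config handle of 0x4700000.
		 */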
	}

	if (isLDOMguest(nxgep)) {
		uchar_t *prop_val;
		uint_t prop_len;
		uint32_t max_frame_size;

		extern void nxge_get_logical_props(p_nxge_t);

		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
		nxgep->mac.portmode = PORT_LOGICAL;
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
		    "phy-type", "virtual transceiver");

		nxgep->nports = 1;
		nxgep->board_ver = 0;	/* XXX What? */

		/*
		 * local-mac-address property gives us info on which
		 * specific MAC address the Hybrid resource is associated
		 * with.
		 */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "local-mac-address", &prop_val,
		    &prop_len) != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		if (prop_len !=  ETHERADDRL) {
			ddi_prop_free(prop_val);
			goto nxge_attach_fail5;
		}
		ether_copy(prop_val, nxgep->hio_mac_addr);
		ddi_prop_free(prop_val);
		nxge_get_logical_props(nxgep);

		/*
		 * Enable Jumbo property based on the "max-frame-size"
		 * property value.
		 */
		max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
		    nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "max-frame-size", NXGE_MTU_DEFAULT_MAX);
		if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
		    (max_frame_size <= TX_JUMBO_MTU)) {
			param_arr = nxgep->param_arr;

			param_arr[param_accept_jumbo].value = 1;
			nxgep->mac.is_jumbo = B_TRUE;
			nxgep->mac.maxframesize = (uint16_t)max_frame_size;
			nxgep->mac.default_mtu = nxgep->mac.maxframesize -
			    NXGE_EHEADER_VLAN_CRC;
		}
	} else {
		status = nxge_xcvr_find(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
			    " Couldn't determine card type"
			    " .... exit "));
			goto nxge_attach_fail5;
		}

		status = nxge_get_config_properties(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "get_hw create failed"));
			goto nxge_attach_fail;
		}
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

	nxge_hw_id_init(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		/* Find our VR & channel sets. */
		status = nxge_hio_vr_add(nxgep);
		if (status != NXGE_OK) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_hio_vr_add failed"));
			(void) hsvc_unregister(&nxgep->niu_hsvc);
			nxgep->niu_hsvc_available = B_FALSE;
		}
		goto nxge_attach_exit;
	}
#endif

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}

	/* If a guest, register with vio_net instead. */
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	/* nxge_link_monitor calls xcvr.check_link recursively */
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int status = DDI_SUCCESS;
	int instance;
	p_nxge_t nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (isLDOMguest(nxgep)) {
		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
			nxge_m_stop((void *)nxgep);
		nxge_hio_unregister(nxgep);
	} else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	/*
	 * This flag affects Neptune (PCI-E) devices only.
	 */
	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
		nxge_niu_peu_reset(nxgep);
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
int
nxge_hsvc_register(nxge_t *nxgep)
{
	nxge_status_t status;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			return (DDI_FAILURE);
		}
		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}

	return (DDI_SUCCESS);
}
#endif

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	p_dev_regs_t dev_regs;
	char buf[MAXPATHLEN + 1];
	char *devname;
#ifdef NXGE_DEBUG
	char *sysname;
#endif
	off_t regsize;
	nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));
	/*
	 * The driver is running on an N2-NIU system if devname is
	 * something like "/niu@80/network@0".
	 */
	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int *prop_val;
		uint_t prop_len;
		uint8_t func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;
		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				return (NXGE_OK);
			}
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMP IMP
		 * Workaround for a hardware bit-swapping bug that
		 * ends up with no-snoop = yes, resulting in DMA
		 * not being synched properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
0x%x", regsize)); 1256 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1257 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1258 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1259 1260 if (ddi_status != DDI_SUCCESS) { 1261 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1262 "ddi_map_regs for nxge vio reg failed")); 1263 goto nxge_map_regs_fail3; 1264 } 1265 nxgep->dev_regs = dev_regs; 1266 1267 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh); 1268 NPI_PCI_ADD_HANDLE_SET(nxgep, 1269 (npi_reg_ptr_t)dev_regs->nxge_pciregp); 1270 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh); 1271 NPI_MSI_ADD_HANDLE_SET(nxgep, 1272 (npi_reg_ptr_t)dev_regs->nxge_msix_regp); 1273 1274 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1275 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1276 1277 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1278 NPI_REG_ADD_HANDLE_SET(nxgep, 1279 (npi_reg_ptr_t)dev_regs->nxge_regp); 1280 1281 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1282 NPI_VREG_ADD_HANDLE_SET(nxgep, 1283 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1284 1285 break; 1286 1287 case N2_NIU: 1288 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU")); 1289 /* 1290 * Set up the device mapped register (FWARC 2006/556) 1291 * (changed back to 1: reg starts at 1!) 1292 */ 1293 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 1294 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1295 "nxge_map_regs: dev size 0x%x", regsize)); 1296 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1297 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1298 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1299 1300 if (ddi_status != DDI_SUCCESS) { 1301 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1302 "ddi_map_regs for N2/NIU, global reg failed ")); 1303 goto nxge_map_regs_fail1; 1304 } 1305 1306 /* set up the first vio region mapped register */ 1307 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1308 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1309 "nxge_map_regs: vio (1) size 0x%x", regsize)); 1310 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1311 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1312 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1313 1314 if (ddi_status != DDI_SUCCESS) { 1315 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1316 "ddi_map_regs for nxge vio reg failed")); 1317 goto nxge_map_regs_fail2; 1318 } 1319 /* set up the second vio region mapped register */ 1320 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1321 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1322 "nxge_map_regs: vio (3) size 0x%x", regsize)); 1323 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1324 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0, 1325 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh); 1326 1327 if (ddi_status != DDI_SUCCESS) { 1328 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1329 "ddi_map_regs for nxge vio2 reg failed")); 1330 goto nxge_map_regs_fail3; 1331 } 1332 nxgep->dev_regs = dev_regs; 1333 1334 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1335 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1336 1337 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1338 NPI_REG_ADD_HANDLE_SET(nxgep, 1339 (npi_reg_ptr_t)dev_regs->nxge_regp); 1340 1341 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1342 NPI_VREG_ADD_HANDLE_SET(nxgep, 1343 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1344 1345 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh); 1346 NPI_V2REG_ADD_HANDLE_SET(nxgep, 1347 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp); 1348 1349 break; 1350 } 1351 1352 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx " 1353 " handle 0x%0llx", 
	goto nxge_map_regs_exit;

nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));

	if (isLDOMguest(nxgep)) {
		nxge_guest_regs_map_free(nxgep);
		return;
	}

	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->interrupt_cookie = 0;
	} else {
		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
		    &nxgep->interrupt_cookie);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_setup_mutexes: failed 0x%x",
			    ddi_status));
			goto nxge_setup_mutexes_exit;
		}
	}

	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);

	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context
	 * because FFLP operations can take a very long time to
	 * complete and hence are not suitable to invoke from
	 * interrupt handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 * enable the port, configure the dma channel bitmap,
		 * configure the max burst size).
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Enable the interrupts for DDI.
	 */
	nxge_intrs_enable(nxgep);

	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}

timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}

void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Reset the receive MAC side.
		 */
		(void) nxge_rx_mac_disable(nxgep);

		/*
		 * Drain the IPP.
		 */
		(void) nxge_ipp_drain(nxgep);
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	/*
	 * Start the timer if the reset flag is not set.
	 * If the reset flag is set, the link monitor
	 * is not started in order to stop further bus
	 * activity coming from this interface.
	 * The driver will start the monitor function
	 * if the interface is initialized again later.
	 */
	if (!nxge_peu_reset_enable) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t regdata;
	int i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
	reg = buf[0];

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}

nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	/* In case a developer has changed nxge_debug_level. */
	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) {
		nxgep->nxge_debug_level = nxge_debug_level;
	}

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;
	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);
	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}

#ifdef NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char *dev_ptr;
	ddi_acc_handle_t pci_config_handle;
	uint32_t regval;
	int i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0))); 1938 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1939 "\nNeptune PCI BAR: base30 0x%x\n", 1940 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); 1941 1942 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1943 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1944 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1945 "first 0x%llx second 0x%llx third 0x%llx " 1946 "last 0x%llx ", 1947 NXGE_PIO_READ64(dev_handle, 1948 (uint64_t *)(dev_ptr + 0), 0), 1949 NXGE_PIO_READ64(dev_handle, 1950 (uint64_t *)(dev_ptr + 8), 0), 1951 NXGE_PIO_READ64(dev_handle, 1952 (uint64_t *)(dev_ptr + 16), 0), 1953 NXGE_PIO_READ64(cfg_handle, 1954 (uint64_t *)(dev_ptr + 24), 0))); 1955 } 1956 } 1957 1958 #endif 1959 1960 static void 1961 nxge_suspend(p_nxge_t nxgep) 1962 { 1963 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); 1964 1965 nxge_intrs_disable(nxgep); 1966 nxge_destroy_dev(nxgep); 1967 1968 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); 1969 } 1970 1971 static nxge_status_t 1972 nxge_resume(p_nxge_t nxgep) 1973 { 1974 nxge_status_t status = NXGE_OK; 1975 1976 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); 1977 1978 nxgep->suspended = DDI_RESUME; 1979 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1980 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1981 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 1982 (void) nxge_rx_mac_enable(nxgep); 1983 (void) nxge_tx_mac_enable(nxgep); 1984 nxge_intrs_enable(nxgep); 1985 nxgep->suspended = 0; 1986 1987 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1988 "<== nxge_resume status = 0x%x", status)); 1989 return (status); 1990 } 1991 1992 static nxge_status_t 1993 nxge_setup_dev(p_nxge_t nxgep) 1994 { 1995 nxge_status_t status = NXGE_OK; 1996 1997 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 1998 nxgep->mac.portnum)); 1999 2000 status = nxge_link_init(nxgep); 2001 2002 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 2003 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2004 "port%d Bad register acc handle", nxgep->mac.portnum)); 2005 status = NXGE_ERROR; 2006 } 2007 2008 if (status != NXGE_OK) { 2009 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2010 " nxge_setup_dev status " 2011 "(xcvr init 0x%08x)", status)); 2012 goto nxge_setup_dev_exit; 2013 } 2014 2015 nxge_setup_dev_exit: 2016 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2017 "<== nxge_setup_dev port %d status = 0x%08x", 2018 nxgep->mac.portnum, status)); 2019 2020 return (status); 2021 } 2022 2023 static void 2024 nxge_destroy_dev(p_nxge_t nxgep) 2025 { 2026 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 2027 2028 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 2029 2030 (void) nxge_hw_stop(nxgep); 2031 2032 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 2033 } 2034 2035 static nxge_status_t 2036 nxge_setup_system_dma_pages(p_nxge_t nxgep) 2037 { 2038 int ddi_status = DDI_SUCCESS; 2039 uint_t count; 2040 ddi_dma_cookie_t cookie; 2041 uint_t iommu_pagesize; 2042 nxge_status_t status = NXGE_OK; 2043 2044 NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 2045 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 2046 if (nxgep->niu_type != N2_NIU) { 2047 iommu_pagesize = dvma_pagesize(nxgep->dip); 2048 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2049 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2050 " default_block_size %d iommu_pagesize %d", 2051 nxgep->sys_page_sz, 2052 ddi_ptob(nxgep->dip, (ulong_t)1), 2053 nxgep->rx_default_block_size, 2054 iommu_pagesize)); 2055 2056 if (iommu_pagesize != 0) { 2057 if (nxgep->sys_page_sz == 
iommu_pagesize) { 2058 if (iommu_pagesize > 0x4000) 2059 nxgep->sys_page_sz = 0x4000; 2060 } else { 2061 if (nxgep->sys_page_sz > iommu_pagesize) 2062 nxgep->sys_page_sz = iommu_pagesize; 2063 } 2064 } 2065 } 2066 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2067 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2068 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2069 "default_block_size %d page mask %d", 2070 nxgep->sys_page_sz, 2071 ddi_ptob(nxgep->dip, (ulong_t)1), 2072 nxgep->rx_default_block_size, 2073 nxgep->sys_page_mask)); 2074 2075 2076 switch (nxgep->sys_page_sz) { 2077 default: 2078 nxgep->sys_page_sz = 0x1000; 2079 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2080 nxgep->rx_default_block_size = 0x1000; 2081 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2082 break; 2083 case 0x1000: 2084 nxgep->rx_default_block_size = 0x1000; 2085 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2086 break; 2087 case 0x2000: 2088 nxgep->rx_default_block_size = 0x2000; 2089 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2090 break; 2091 case 0x4000: 2092 nxgep->rx_default_block_size = 0x4000; 2093 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 2094 break; 2095 case 0x8000: 2096 nxgep->rx_default_block_size = 0x8000; 2097 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 2098 break; 2099 } 2100 2101 #ifndef USE_RX_BIG_BUF 2102 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 2103 #else 2104 nxgep->rx_default_block_size = 0x2000; 2105 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2106 #endif 2107 /* 2108 * Get the system DMA burst size. 2109 */ 2110 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2111 DDI_DMA_DONTWAIT, 0, 2112 &nxgep->dmasparehandle); 2113 if (ddi_status != DDI_SUCCESS) { 2114 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2115 "ddi_dma_alloc_handle: failed " 2116 " status 0x%x", ddi_status)); 2117 goto nxge_get_soft_properties_exit; 2118 } 2119 2120 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2121 (caddr_t)nxgep->dmasparehandle, 2122 sizeof (nxgep->dmasparehandle), 2123 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2124 DDI_DMA_DONTWAIT, 0, 2125 &cookie, &count); 2126 if (ddi_status != DDI_DMA_MAPPED) { 2127 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2128 "Binding spare handle to find system" 2129 " burstsize failed.")); 2130 ddi_status = DDI_FAILURE; 2131 goto nxge_get_soft_properties_fail1; 2132 } 2133 2134 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2135 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2136 2137 nxge_get_soft_properties_fail1: 2138 ddi_dma_free_handle(&nxgep->dmasparehandle); 2139 2140 nxge_get_soft_properties_exit: 2141 2142 if (ddi_status != DDI_SUCCESS) 2143 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2144 2145 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2146 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2147 return (status); 2148 } 2149 2150 static nxge_status_t 2151 nxge_alloc_mem_pool(p_nxge_t nxgep) 2152 { 2153 nxge_status_t status = NXGE_OK; 2154 2155 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2156 2157 status = nxge_alloc_rx_mem_pool(nxgep); 2158 if (status != NXGE_OK) { 2159 return (NXGE_ERROR); 2160 } 2161 2162 status = nxge_alloc_tx_mem_pool(nxgep); 2163 if (status != NXGE_OK) { 2164 nxge_free_rx_mem_pool(nxgep); 2165 return (NXGE_ERROR); 2166 } 2167 2168 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2169 return (NXGE_OK); 2170 } 2171 2172 static void 2173 nxge_free_mem_pool(p_nxge_t nxgep) 2174 { 2175 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2176 2177 nxge_free_rx_mem_pool(nxgep); 2178 
nxge_free_tx_mem_pool(nxgep); 2179 2180 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 2181 } 2182 2183 nxge_status_t 2184 nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 2185 { 2186 uint32_t rdc_max; 2187 p_nxge_dma_pt_cfg_t p_all_cfgp; 2188 p_nxge_hw_pt_cfg_t p_cfgp; 2189 p_nxge_dma_pool_t dma_poolp; 2190 p_nxge_dma_common_t *dma_buf_p; 2191 p_nxge_dma_pool_t dma_cntl_poolp; 2192 p_nxge_dma_common_t *dma_cntl_p; 2193 uint32_t *num_chunks; /* per dma */ 2194 nxge_status_t status = NXGE_OK; 2195 2196 uint32_t nxge_port_rbr_size; 2197 uint32_t nxge_port_rbr_spare_size; 2198 uint32_t nxge_port_rcr_size; 2199 uint32_t rx_cntl_alloc_size; 2200 2201 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 2202 2203 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2204 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2205 rdc_max = NXGE_MAX_RDCS; 2206 2207 /* 2208 * Allocate memory for the common DMA data structures. 2209 */ 2210 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2211 KM_SLEEP); 2212 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2213 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2214 2215 dma_cntl_poolp = (p_nxge_dma_pool_t) 2216 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2217 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2218 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2219 2220 num_chunks = (uint32_t *)KMEM_ZALLOC( 2221 sizeof (uint32_t) * rdc_max, KM_SLEEP); 2222 2223 /* 2224 * Assume that each DMA channel will be configured with 2225 * the default block size. 2226 * RBR block counts are rounded up to a multiple of the batch count (16). 2227 */ 2228 nxge_port_rbr_size = p_all_cfgp->rbr_size; 2229 nxge_port_rcr_size = p_all_cfgp->rcr_size; 2230 2231 if (!nxge_port_rbr_size) { 2232 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 2233 } 2234 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 2235 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2236 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 2237 } 2238 2239 p_all_cfgp->rbr_size = nxge_port_rbr_size; 2240 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 2241 2242 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 2243 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2244 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 2245 } 2246 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 2247 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2248 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 2249 "set to default %d", 2250 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 2251 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 2252 } 2253 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 2254 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2255 "nxge_alloc_rx_mem_pool: RCR size too high %d, " 2256 "set to default %d", 2257 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 2258 nxge_port_rcr_size = RCR_DEFAULT_MAX; 2259 } 2260 2261 /* 2262 * N2/NIU limits the descriptor ring sizes: data buffers must 2263 * come from contiguous memory (contig_mem_alloc) and are capped 2264 * at 4M, and control buffers must be little endian (and must 2265 * use the ddi/dki mem alloc functions).
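 *
 * On N2/NIU the code below therefore clamps the RBR and RCR
 * sizes to NXGE_NIU_CONTIG_RBR_MAX and NXGE_NIU_CONTIG_RCR_MAX
 * whenever they exceed those limits or are not powers of two,
 * and allocates no spare RBR entries. The per-channel control
 * area is then sized, roughly, as
 *
 *	(rbr_size + rbr_spare) * sizeof (rx_desc_t) +
 *	rcr_size * sizeof (rcr_entry_t) + sizeof (rxdma_mailbox_t)
 *
 * (see rx_cntl_alloc_size below) and is rounded up to at least
 * 8K on N2/NIU.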
2266 */ 2267 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2268 if (nxgep->niu_type == N2_NIU) { 2269 nxge_port_rbr_spare_size = 0; 2270 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2271 (!ISP2(nxge_port_rbr_size))) { 2272 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2273 } 2274 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2275 (!ISP2(nxge_port_rcr_size))) { 2276 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2277 } 2278 } 2279 #endif 2280 2281 /* 2282 * Addresses of receive block ring, receive completion ring and the 2283 * mailbox must be all cache-aligned (64 bytes). 2284 */ 2285 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2286 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2287 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2288 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2289 2290 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2291 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2292 "nxge_port_rcr_size = %d " 2293 "rx_cntl_alloc_size = %d", 2294 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2295 nxge_port_rcr_size, 2296 rx_cntl_alloc_size)); 2297 2298 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2299 if (nxgep->niu_type == N2_NIU) { 2300 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2301 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2302 2303 if (!ISP2(rx_buf_alloc_size)) { 2304 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2305 "==> nxge_alloc_rx_mem_pool: " 2306 " must be power of 2")); 2307 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2308 goto nxge_alloc_rx_mem_pool_exit; 2309 } 2310 2311 if (rx_buf_alloc_size > (1 << 22)) { 2312 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2313 "==> nxge_alloc_rx_mem_pool: " 2314 " limit size to 4M")); 2315 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2316 goto nxge_alloc_rx_mem_pool_exit; 2317 } 2318 2319 if (rx_cntl_alloc_size < 0x2000) { 2320 rx_cntl_alloc_size = 0x2000; 2321 } 2322 } 2323 #endif 2324 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2325 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2326 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2327 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2328 2329 dma_poolp->ndmas = p_cfgp->max_rdcs; 2330 dma_poolp->num_chunks = num_chunks; 2331 dma_poolp->buf_allocated = B_TRUE; 2332 nxgep->rx_buf_pool_p = dma_poolp; 2333 dma_poolp->dma_buf_pool_p = dma_buf_p; 2334 2335 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2336 dma_cntl_poolp->buf_allocated = B_TRUE; 2337 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2338 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2339 2340 /* Allocate the receive rings, too. */ 2341 nxgep->rx_rbr_rings = 2342 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2343 nxgep->rx_rbr_rings->rbr_rings = 2344 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2345 nxgep->rx_rcr_rings = 2346 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2347 nxgep->rx_rcr_rings->rcr_rings = 2348 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2349 nxgep->rx_mbox_areas_p = 2350 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2351 nxgep->rx_mbox_areas_p->rxmbox_areas = 2352 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2353 2354 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2355 p_cfgp->max_rdcs; 2356 2357 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2358 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2359 2360 nxge_alloc_rx_mem_pool_exit: 2361 return (status); 2362 } 2363 2364 /* 2365 * nxge_alloc_rxb 2366 * 2367 * Allocate buffers for an RDC. 
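 *
 * The sequence, roughly: nxge_alloc_rx_buf_dma() first carves the
 * channel's data buffers into DMA chunks, and nxge_alloc_rx_cntl_dma()
 * then obtains the descriptor-ring and mailbox area; if the control
 * allocation fails, the data buffers are flagged
 * BUF_ALLOCATED_WAIT_FREE and released before returning.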
2368 * 2369 * Arguments: 2370 * nxgep 2371 * channel The channel to map into our kernel space. 2372 * 2373 * Notes: 2374 * 2375 * NPI function calls: 2376 * 2377 * NXGE function calls: 2378 * 2379 * Registers accessed: 2380 * 2381 * Context: 2382 * 2383 * Taking apart: 2384 * 2385 * Open questions: 2386 * 2387 */ 2388 nxge_status_t 2389 nxge_alloc_rxb( 2390 p_nxge_t nxgep, 2391 int channel) 2392 { 2393 size_t rx_buf_alloc_size; 2394 nxge_status_t status = NXGE_OK; 2395 2396 nxge_dma_common_t **data; 2397 nxge_dma_common_t **control; 2398 uint32_t *num_chunks; 2399 2400 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb")); 2401 2402 /* 2403 * Allocate memory for the receive buffers and descriptor rings. 2404 * Replace these allocation functions with the interface functions 2405 * provided by the partition manager if/when they are available. 2406 */ 2407 2408 /* 2409 * Allocate memory for the receive buffer blocks. 2410 */ 2411 rx_buf_alloc_size = (nxgep->rx_default_block_size * 2412 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); 2413 2414 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2415 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; 2416 2417 if ((status = nxge_alloc_rx_buf_dma( 2418 nxgep, channel, data, rx_buf_alloc_size, 2419 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { 2420 return (status); 2421 } 2422 2423 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " 2424 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); 2425 2426 /* 2427 * Allocate memory for descriptor rings and mailbox. 2428 */ 2429 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2430 2431 if ((status = nxge_alloc_rx_cntl_dma( 2432 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 2433 != NXGE_OK) { 2434 nxge_free_rx_cntl_dma(nxgep, *control); 2435 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 2436 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 2437 return (status); 2438 } 2439 2440 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2441 "<== nxge_alloc_rxb: status 0x%08x", status)); 2442 2443 return (status); 2444 } 2445 2446 void 2447 nxge_free_rxb( 2448 p_nxge_t nxgep, 2449 int channel) 2450 { 2451 nxge_dma_common_t *data; 2452 nxge_dma_common_t *control; 2453 uint32_t num_chunks; 2454 2455 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb")); 2456 2457 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2458 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 2459 nxge_free_rx_buf_dma(nxgep, data, num_chunks); 2460 2461 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2462 nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 2463 2464 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2465 nxge_free_rx_cntl_dma(nxgep, control); 2466 2467 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2468 2469 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2470 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2471 2472 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb")); 2473 } 2474 2475 static void 2476 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2477 { 2478 int rdc_max = NXGE_MAX_RDCS; 2479 2480 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2481 2482 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 2483 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2484 "<== nxge_free_rx_mem_pool " 2485 "(null rx buf pool or buf not allocated")); 2486 return; 2487 } 2488 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 2489 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2490 "<== nxge_free_rx_mem_pool
" 2491 "(null rx cntl buf pool or cntl buf not allocated")); 2492 return; 2493 } 2494 2495 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2496 sizeof (p_nxge_dma_common_t) * rdc_max); 2497 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2498 2499 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2500 sizeof (uint32_t) * rdc_max); 2501 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2502 sizeof (p_nxge_dma_common_t) * rdc_max); 2503 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2504 2505 nxgep->rx_buf_pool_p = 0; 2506 nxgep->rx_cntl_pool_p = 0; 2507 2508 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2509 sizeof (p_rx_rbr_ring_t) * rdc_max); 2510 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2511 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2512 sizeof (p_rx_rcr_ring_t) * rdc_max); 2513 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2514 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2515 sizeof (p_rx_mbox_t) * rdc_max); 2516 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2517 2518 nxgep->rx_rbr_rings = 0; 2519 nxgep->rx_rcr_rings = 0; 2520 nxgep->rx_mbox_areas_p = 0; 2521 2522 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2523 } 2524 2525 2526 static nxge_status_t 2527 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2528 p_nxge_dma_common_t *dmap, 2529 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2530 { 2531 p_nxge_dma_common_t rx_dmap; 2532 nxge_status_t status = NXGE_OK; 2533 size_t total_alloc_size; 2534 size_t allocated = 0; 2535 int i, size_index, array_size; 2536 boolean_t use_kmem_alloc = B_FALSE; 2537 2538 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2539 2540 rx_dmap = (p_nxge_dma_common_t) 2541 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2542 KM_SLEEP); 2543 2544 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2545 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2546 dma_channel, alloc_size, block_size, dmap)); 2547 2548 total_alloc_size = alloc_size; 2549 2550 #if defined(RX_USE_RECLAIM_POST) 2551 total_alloc_size = alloc_size + alloc_size/4; 2552 #endif 2553 2554 i = 0; 2555 size_index = 0; 2556 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2557 while ((size_index < array_size) && 2558 (alloc_sizes[size_index] < alloc_size)) 2559 size_index++; 2560 if (size_index >= array_size) { 2561 size_index = array_size - 1; 2562 } 2563 2564 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2565 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2566 use_kmem_alloc = B_TRUE; 2567 #if defined(__i386) || defined(__amd64) 2568 size_index = 0; 2569 #endif 2570 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2571 "==> nxge_alloc_rx_buf_dma: " 2572 "Neptune use kmem_alloc() - size_index %d", 2573 size_index)); 2574 } 2575 2576 while ((allocated < total_alloc_size) && 2577 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2578 rx_dmap[i].dma_chunk_index = i; 2579 rx_dmap[i].block_size = block_size; 2580 rx_dmap[i].alength = alloc_sizes[size_index]; 2581 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2582 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2583 rx_dmap[i].dma_channel = dma_channel; 2584 rx_dmap[i].contig_alloc_type = B_FALSE; 2585 rx_dmap[i].kmem_alloc_type = B_FALSE; 2586 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2587 2588 /* 2589 * N2/NIU: data buffers must be contiguous as the driver 2590 * needs to call Hypervisor api to set up 2591 * logical pages. 
2592 */ 2593 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2594 rx_dmap[i].contig_alloc_type = B_TRUE; 2595 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2596 } else if (use_kmem_alloc) { 2597 /* For Neptune, use kmem_alloc */ 2598 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2599 "==> nxge_alloc_rx_buf_dma: " 2600 "Neptune use kmem_alloc()")); 2601 rx_dmap[i].kmem_alloc_type = B_TRUE; 2602 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2603 } 2604 2605 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2606 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2607 "i %d nblocks %d alength %d", 2608 dma_channel, i, &rx_dmap[i], block_size, 2609 i, rx_dmap[i].nblocks, 2610 rx_dmap[i].alength)); 2611 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2612 &nxge_rx_dma_attr, 2613 rx_dmap[i].alength, 2614 &nxge_dev_buf_dma_acc_attr, 2615 DDI_DMA_READ | DDI_DMA_STREAMING, 2616 (p_nxge_dma_common_t)(&rx_dmap[i])); 2617 if (status != NXGE_OK) { 2618 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2619 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2620 "dma %d size_index %d size requested %d", 2621 dma_channel, 2622 size_index, 2623 rx_dmap[i].alength)); 2624 size_index--; 2625 } else { 2626 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2627 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2628 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2629 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2630 "buf_alloc_state %d alloc_type %d", 2631 dma_channel, 2632 &rx_dmap[i], 2633 rx_dmap[i].kaddrp, 2634 rx_dmap[i].alength, 2635 rx_dmap[i].buf_alloc_state, 2636 rx_dmap[i].buf_alloc_type)); 2637 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2638 " alloc_rx_buf_dma allocated rdc %d " 2639 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2640 dma_channel, i, rx_dmap[i].alength, 2641 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2642 rx_dmap[i].kaddrp)); 2643 i++; 2644 allocated += alloc_sizes[size_index]; 2645 } 2646 } 2647 2648 if (allocated < total_alloc_size) { 2649 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2650 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2651 "allocated 0x%x requested 0x%x", 2652 dma_channel, 2653 allocated, total_alloc_size)); 2654 status = NXGE_ERROR; 2655 goto nxge_alloc_rx_mem_fail1; 2656 } 2657 2658 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2659 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2660 "allocated 0x%x requested 0x%x", 2661 dma_channel, 2662 allocated, total_alloc_size)); 2663 2664 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2665 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2666 dma_channel, i)); 2667 *num_chunks = i; 2668 *dmap = rx_dmap; 2669 2670 goto nxge_alloc_rx_mem_exit; 2671 2672 nxge_alloc_rx_mem_fail1: 2673 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2674 2675 nxge_alloc_rx_mem_exit: 2676 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2677 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2678 2679 return (status); 2680 } 2681 2682 /*ARGSUSED*/ 2683 static void 2684 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2685 uint32_t num_chunks) 2686 { 2687 int i; 2688 2689 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2690 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2691 2692 if (dmap == 0) 2693 return; 2694 2695 for (i = 0; i < num_chunks; i++) { 2696 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2697 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2698 i, dmap)); 2699 nxge_dma_free_rx_data_buf(dmap++); 2700 } 2701 2702 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2703 } 2704 2705 /*ARGSUSED*/ 2706 static nxge_status_t 2707 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2708 p_nxge_dma_common_t *dmap, size_t 
size) 2709 { 2710 p_nxge_dma_common_t rx_dmap; 2711 nxge_status_t status = NXGE_OK; 2712 2713 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2714 2715 rx_dmap = (p_nxge_dma_common_t) 2716 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2717 2718 rx_dmap->contig_alloc_type = B_FALSE; 2719 rx_dmap->kmem_alloc_type = B_FALSE; 2720 2721 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2722 &nxge_desc_dma_attr, 2723 size, 2724 &nxge_dev_desc_dma_acc_attr, 2725 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2726 rx_dmap); 2727 if (status != NXGE_OK) { 2728 goto nxge_alloc_rx_cntl_dma_fail1; 2729 } 2730 2731 *dmap = rx_dmap; 2732 goto nxge_alloc_rx_cntl_dma_exit; 2733 2734 nxge_alloc_rx_cntl_dma_fail1: 2735 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2736 2737 nxge_alloc_rx_cntl_dma_exit: 2738 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2739 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2740 2741 return (status); 2742 } 2743 2744 /*ARGSUSED*/ 2745 static void 2746 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2747 { 2748 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2749 2750 if (dmap == 0) 2751 return; 2752 2753 nxge_dma_mem_free(dmap); 2754 2755 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2756 } 2757 2758 typedef struct { 2759 size_t tx_size; 2760 size_t cr_size; 2761 size_t threshhold; 2762 } nxge_tdc_sizes_t; 2763 2764 static 2765 nxge_status_t 2766 nxge_tdc_sizes( 2767 nxge_t *nxgep, 2768 nxge_tdc_sizes_t *sizes) 2769 { 2770 uint32_t threshhold; /* The bcopy() threshhold */ 2771 size_t tx_size; /* Transmit buffer size */ 2772 size_t cr_size; /* Completion ring size */ 2773 2774 /* 2775 * Assume that each DMA channel will be configured with the 2776 * default transmit buffer size for copying transmit data. 2777 * (If a packet is bigger than this, it will not be copied.) 2778 */ 2779 if (nxgep->niu_type == N2_NIU) { 2780 threshhold = TX_BCOPY_SIZE; 2781 } else { 2782 threshhold = nxge_bcopy_thresh; 2783 } 2784 tx_size = nxge_tx_ring_size * threshhold; 2785 2786 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 2787 cr_size += sizeof (txdma_mailbox_t); 2788 2789 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2790 if (nxgep->niu_type == N2_NIU) { 2791 if (!ISP2(tx_size)) { 2792 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2793 "==> nxge_tdc_sizes: Tx size" 2794 " must be power of 2")); 2795 return (NXGE_ERROR); 2796 } 2797 2798 if (tx_size > (1 << 22)) { 2799 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2800 "==> nxge_tdc_sizes: Tx size" 2801 " limited to 4M")); 2802 return (NXGE_ERROR); 2803 } 2804 2805 if (cr_size < 0x2000) 2806 cr_size = 0x2000; 2807 } 2808 #endif 2809 2810 sizes->threshhold = threshhold; 2811 sizes->tx_size = tx_size; 2812 sizes->cr_size = cr_size; 2813 2814 return (NXGE_OK); 2815 } 2816 /* 2817 * nxge_alloc_txb 2818 * 2819 * Allocate buffers for a TDC. 2820 * 2821 * Arguments: 2822 * nxgep 2823 * channel The channel to map into our kernel space.
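 *
 * The sizes come from nxge_tdc_sizes() above: the buffer pool is
 * nxge_tx_ring_size times the bcopy() threshhold, and the
 * completion ring is nxge_tx_ring_size * sizeof (tx_desc_t) plus
 * a mailbox; on N2/NIU the buffer pool must be a power of two no
 * larger than 4M.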
2824 * 2825 * Notes: 2826 * 2827 * NPI function calls: 2828 * 2829 * NXGE function calls: 2830 * 2831 * Registers accessed: 2832 * 2833 * Context: 2834 * 2835 * Taking apart: 2836 * 2837 * Open questions: 2838 * 2839 */ 2840 nxge_status_t 2841 nxge_alloc_txb( 2842 p_nxge_t nxgep, 2843 int channel) 2844 { 2845 nxge_dma_common_t **dma_buf_p; 2846 nxge_dma_common_t **dma_cntl_p; 2847 uint32_t *num_chunks; 2848 nxge_status_t status = NXGE_OK; 2849 2850 nxge_tdc_sizes_t sizes; 2851 2852 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb")); 2853 2854 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 2855 return (NXGE_ERROR); 2856 2857 /* 2858 * Allocate memory for transmit buffers and descriptor rings. 2859 * Replace these allocation functions with the interface functions 2860 * provided by the partition manager if/when they are available. 2861 */ 2862 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2863 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 2864 2865 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2866 2867 /* 2868 * Allocate memory for the transmit buffer pool. The pool is 2869 * sized by nxge_tdc_sizes() above: tx_size bytes of buffer 2870 * space, carved into chunks whose block size is the bcopy() 2871 * threshhold, plus cr_size bytes for the completion ring 2872 * and mailbox. 2873 */ 2874 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2875 "sizes: tx: %ld, cr:%ld, th:%ld", 2876 sizes.tx_size, sizes.cr_size, sizes.threshhold)); 2877 2878 *num_chunks = 0; 2879 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 2880 sizes.tx_size, sizes.threshhold, num_chunks); 2881 if (status != NXGE_OK) { 2882 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 2883 return (status); 2884 } 2885 2886 /* 2887 * Allocate memory for descriptor rings and mailbox. 2888 */ 2889 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 2890 sizes.cr_size); 2891 if (status != NXGE_OK) { 2892 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 2893 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 2894 return (status); 2895 } 2896 2897 return (NXGE_OK); 2898 } 2899 2900 void 2901 nxge_free_txb( 2902 p_nxge_t nxgep, 2903 int channel) 2904 { 2905 nxge_dma_common_t *data; 2906 nxge_dma_common_t *control; 2907 uint32_t num_chunks; 2908 2909 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 2910 2911 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2912 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2913 nxge_free_tx_buf_dma(nxgep, data, num_chunks); 2914 2915 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2916 nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 2917 2918 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2919 nxge_free_tx_cntl_dma(nxgep, control); 2920 2921 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2922 2923 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2924 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2925 2926 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 2927 } 2928 2929 /* 2930 * nxge_alloc_tx_mem_pool 2931 * 2932 * This function allocates all of the per-port TDC control data structures. 2933 * The per-channel (TDC) data structures are allocated when needed.
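 *
 * Concretely, this allocates the transmit buffer and control
 * pool descriptors, the per-channel chunk counts, the transmit
 * ring array, and the mailbox array; nxge_free_tx_mem_pool()
 * below frees them all again.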
2934 * 2935 * Arguments: 2936 * nxgep 2937 * 2938 * Notes: 2939 * 2940 * Context: 2941 * Any domain 2942 */ 2943 nxge_status_t 2944 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2945 { 2946 nxge_hw_pt_cfg_t *p_cfgp; 2947 nxge_dma_pool_t *dma_poolp; 2948 nxge_dma_common_t **dma_buf_p; 2949 nxge_dma_pool_t *dma_cntl_poolp; 2950 nxge_dma_common_t **dma_cntl_p; 2951 uint32_t *num_chunks; /* per dma */ 2952 int tdc_max; 2953 2954 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2955 2956 p_cfgp = &nxgep->pt_config.hw_config; 2957 tdc_max = NXGE_MAX_TDCS; 2958 2959 /* 2960 * Allocate memory for each transmit DMA channel. 2961 */ 2962 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2963 KM_SLEEP); 2964 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2965 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2966 2967 dma_cntl_poolp = (p_nxge_dma_pool_t) 2968 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2969 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2970 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2971 2972 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2973 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2974 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2975 "set to default %d", 2976 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2977 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2978 } 2979 2980 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2981 /* 2982 * N2/NIU has limitation on the descriptor sizes (contiguous 2983 * memory allocation on data buffers to 4M (contig_mem_alloc) 2984 * and little endian for control buffers (must use the ddi/dki mem alloc 2985 * function). The transmit ring is limited to 8K (includes the 2986 * mailbox). 2987 */ 2988 if (nxgep->niu_type == N2_NIU) { 2989 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2990 (!ISP2(nxge_tx_ring_size))) { 2991 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2992 } 2993 } 2994 #endif 2995 2996 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2997 2998 num_chunks = (uint32_t *)KMEM_ZALLOC( 2999 sizeof (uint32_t) * tdc_max, KM_SLEEP); 3000 3001 dma_poolp->ndmas = p_cfgp->tdc.owned; 3002 dma_poolp->num_chunks = num_chunks; 3003 dma_poolp->dma_buf_pool_p = dma_buf_p; 3004 nxgep->tx_buf_pool_p = dma_poolp; 3005 3006 dma_poolp->buf_allocated = B_TRUE; 3007 3008 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 3009 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 3010 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 3011 3012 dma_cntl_poolp->buf_allocated = B_TRUE; 3013 3014 nxgep->tx_rings = 3015 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 3016 nxgep->tx_rings->rings = 3017 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 3018 nxgep->tx_mbox_areas_p = 3019 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 3020 nxgep->tx_mbox_areas_p->txmbox_areas_p = 3021 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 3022 3023 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 3024 3025 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3026 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 3027 tdc_max, dma_poolp->ndmas)); 3028 3029 return (NXGE_OK); 3030 } 3031 3032 nxge_status_t 3033 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 3034 p_nxge_dma_common_t *dmap, size_t alloc_size, 3035 size_t block_size, uint32_t *num_chunks) 3036 { 3037 p_nxge_dma_common_t tx_dmap; 3038 nxge_status_t status = NXGE_OK; 3039 size_t total_alloc_size; 3040 size_t allocated = 0; 3041 int i, size_index, array_size; 3042 3043 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 3044 3045 tx_dmap = (p_nxge_dma_common_t) 3046 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * 
NXGE_DMA_BLOCK, 3047 KM_SLEEP); 3048 3049 total_alloc_size = alloc_size; 3050 i = 0; 3051 size_index = 0; 3052 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3053 while ((size_index < array_size) && 3054 (alloc_sizes[size_index] < alloc_size)) 3055 size_index++; 3056 if (size_index >= array_size) { 3057 size_index = array_size - 1; 3058 } 3059 3060 while ((allocated < total_alloc_size) && 3061 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3062 3063 tx_dmap[i].dma_chunk_index = i; 3064 tx_dmap[i].block_size = block_size; 3065 tx_dmap[i].alength = alloc_sizes[size_index]; 3066 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3067 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3068 tx_dmap[i].dma_channel = dma_channel; 3069 tx_dmap[i].contig_alloc_type = B_FALSE; 3070 tx_dmap[i].kmem_alloc_type = B_FALSE; 3071 3072 /* 3073 * N2/NIU: data buffers must be contiguous as the driver 3074 * needs to call Hypervisor api to set up 3075 * logical pages. 3076 */ 3077 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3078 tx_dmap[i].contig_alloc_type = B_TRUE; 3079 } 3080 3081 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3082 &nxge_tx_dma_attr, 3083 tx_dmap[i].alength, 3084 &nxge_dev_buf_dma_acc_attr, 3085 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3086 (p_nxge_dma_common_t)(&tx_dmap[i])); 3087 if (status != NXGE_OK) { 3088 size_index--; 3089 } else { 3090 i++; 3091 allocated += alloc_sizes[size_index]; 3092 } 3093 } 3094 3095 if (allocated < total_alloc_size) { 3096 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3097 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 3098 "allocated 0x%x requested 0x%x", 3099 dma_channel, 3100 allocated, total_alloc_size)); 3101 status = NXGE_ERROR; 3102 goto nxge_alloc_tx_mem_fail1; 3103 } 3104 3105 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3106 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3107 "allocated 0x%x requested 0x%x", 3108 dma_channel, 3109 allocated, total_alloc_size)); 3110 3111 *num_chunks = i; 3112 *dmap = tx_dmap; 3113 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3114 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3115 *dmap, i)); 3116 goto nxge_alloc_tx_mem_exit; 3117 3118 nxge_alloc_tx_mem_fail1: 3119 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3120 3121 nxge_alloc_tx_mem_exit: 3122 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3123 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3124 3125 return (status); 3126 } 3127 3128 /*ARGSUSED*/ 3129 static void 3130 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3131 uint32_t num_chunks) 3132 { 3133 int i; 3134 3135 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3136 3137 if (dmap == 0) 3138 return; 3139 3140 for (i = 0; i < num_chunks; i++) { 3141 nxge_dma_mem_free(dmap++); 3142 } 3143 3144 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3145 } 3146 3147 /*ARGSUSED*/ 3148 nxge_status_t 3149 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3150 p_nxge_dma_common_t *dmap, size_t size) 3151 { 3152 p_nxge_dma_common_t tx_dmap; 3153 nxge_status_t status = NXGE_OK; 3154 3155 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3156 tx_dmap = (p_nxge_dma_common_t) 3157 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3158 3159 tx_dmap->contig_alloc_type = B_FALSE; 3160 tx_dmap->kmem_alloc_type = B_FALSE; 3161 3162 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3163 &nxge_desc_dma_attr, 3164 size, 3165 &nxge_dev_desc_dma_acc_attr, 3166 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3167 tx_dmap); 3168 if (status != NXGE_OK) { 3169 
goto nxge_alloc_tx_cntl_dma_fail1; 3170 } 3171 3172 *dmap = tx_dmap; 3173 goto nxge_alloc_tx_cntl_dma_exit; 3174 3175 nxge_alloc_tx_cntl_dma_fail1: 3176 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3177 3178 nxge_alloc_tx_cntl_dma_exit: 3179 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3180 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3181 3182 return (status); 3183 } 3184 3185 /*ARGSUSED*/ 3186 static void 3187 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3188 { 3189 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3190 3191 if (dmap == 0) 3192 return; 3193 3194 nxge_dma_mem_free(dmap); 3195 3196 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3197 } 3198 3199 /* 3200 * nxge_free_tx_mem_pool 3201 * 3202 * This function frees all of the per-port TDC control data structures. 3203 * The per-channel (TDC) data structures are freed when the channel 3204 * is stopped. 3205 * 3206 * Arguments: 3207 * nxgep 3208 * 3209 * Notes: 3210 * 3211 * Context: 3212 * Any domain 3213 */ 3214 static void 3215 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3216 { 3217 int tdc_max = NXGE_MAX_TDCS; 3218 3219 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3220 3221 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3222 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3223 "<== nxge_free_tx_mem_pool " 3224 "(null tx buf pool or buf not allocated")); 3225 return; 3226 } 3227 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3228 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3229 "<== nxge_free_tx_mem_pool " 3230 "(null tx cntl buf pool or cntl buf not allocated")); 3231 return; 3232 } 3233 3234 /* 1. Free the mailboxes. */ 3235 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3236 sizeof (p_tx_mbox_t) * tdc_max); 3237 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3238 3239 nxgep->tx_mbox_areas_p = 0; 3240 3241 /* 2. Free the transmit ring arrays. */ 3242 KMEM_FREE(nxgep->tx_rings->rings, 3243 sizeof (p_tx_ring_t) * tdc_max); 3244 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3245 3246 nxgep->tx_rings = 0; 3247 3248 /* 3. Free the completion ring data structures. */ 3249 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3250 sizeof (p_nxge_dma_common_t) * tdc_max); 3251 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3252 3253 nxgep->tx_cntl_pool_p = 0; 3254 3255 /* 4. Free the data ring data structures. */ 3256 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3257 sizeof (uint32_t) * tdc_max); 3258 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3259 sizeof (p_nxge_dma_common_t) * tdc_max); 3260 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3261 3262 nxgep->tx_buf_pool_p = 0; 3263 3264 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3265 } 3266 3267 /*ARGSUSED*/ 3268 static nxge_status_t 3269 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3270 struct ddi_dma_attr *dma_attrp, 3271 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3272 p_nxge_dma_common_t dma_p) 3273 { 3274 caddr_t kaddrp; 3275 int ddi_status = DDI_SUCCESS; 3276 boolean_t contig_alloc_type; 3277 boolean_t kmem_alloc_type; 3278 3279 contig_alloc_type = dma_p->contig_alloc_type; 3280 3281 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3282 /* 3283 * contig_alloc_type for contiguous memory only allowed 3284 * for N2/NIU. 
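 *
 * Three allocation strategies are dispatched below: the default
 * ddi_dma_mem_alloc() path, a kmem_alloc() path taken when
 * kmem_alloc_type is set, and (on sun4v) a contig_mem_alloc()
 * path for N2/NIU. Each path binds the memory to the DMA handle
 * and requires that the bind yield exactly one cookie.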
3285 */ 3286 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3287 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3288 dma_p->contig_alloc_type)); 3289 return (NXGE_ERROR | NXGE_DDI_FAILED); 3290 } 3291 3292 dma_p->dma_handle = NULL; 3293 dma_p->acc_handle = NULL; 3294 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3295 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3296 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3297 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3298 if (ddi_status != DDI_SUCCESS) { 3299 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3300 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3301 return (NXGE_ERROR | NXGE_DDI_FAILED); 3302 } 3303 3304 kmem_alloc_type = dma_p->kmem_alloc_type; 3305 3306 switch (contig_alloc_type) { 3307 case B_FALSE: 3308 switch (kmem_alloc_type) { 3309 case B_FALSE: 3310 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3311 length, 3312 acc_attr_p, 3313 xfer_flags, 3314 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3315 &dma_p->acc_handle); 3316 if (ddi_status != DDI_SUCCESS) { 3317 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3318 "nxge_dma_mem_alloc: " 3319 "ddi_dma_mem_alloc failed")); 3320 ddi_dma_free_handle(&dma_p->dma_handle); 3321 dma_p->dma_handle = NULL; 3322 return (NXGE_ERROR | NXGE_DDI_FAILED); 3323 } 3324 if (dma_p->alength < length) { 3325 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3326 "nxge_dma_mem_alloc:di_dma_mem_alloc " 3327 "< length.")); 3328 ddi_dma_mem_free(&dma_p->acc_handle); 3329 ddi_dma_free_handle(&dma_p->dma_handle); 3330 dma_p->acc_handle = NULL; 3331 dma_p->dma_handle = NULL; 3332 return (NXGE_ERROR); 3333 } 3334 3335 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3336 NULL, 3337 kaddrp, dma_p->alength, xfer_flags, 3338 DDI_DMA_DONTWAIT, 3339 0, &dma_p->dma_cookie, &dma_p->ncookies); 3340 if (ddi_status != DDI_DMA_MAPPED) { 3341 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3342 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3343 "failed " 3344 "(staus 0x%x ncookies %d.)", ddi_status, 3345 dma_p->ncookies)); 3346 if (dma_p->acc_handle) { 3347 ddi_dma_mem_free(&dma_p->acc_handle); 3348 dma_p->acc_handle = NULL; 3349 } 3350 ddi_dma_free_handle(&dma_p->dma_handle); 3351 dma_p->dma_handle = NULL; 3352 return (NXGE_ERROR | NXGE_DDI_FAILED); 3353 } 3354 3355 if (dma_p->ncookies != 1) { 3356 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3357 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3358 "> 1 cookie" 3359 "(staus 0x%x ncookies %d.)", ddi_status, 3360 dma_p->ncookies)); 3361 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3362 if (dma_p->acc_handle) { 3363 ddi_dma_mem_free(&dma_p->acc_handle); 3364 dma_p->acc_handle = NULL; 3365 } 3366 ddi_dma_free_handle(&dma_p->dma_handle); 3367 dma_p->dma_handle = NULL; 3368 dma_p->acc_handle = NULL; 3369 return (NXGE_ERROR); 3370 } 3371 break; 3372 3373 case B_TRUE: 3374 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3375 if (kaddrp == NULL) { 3376 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3377 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3378 "kmem alloc failed")); 3379 return (NXGE_ERROR); 3380 } 3381 3382 dma_p->alength = length; 3383 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3384 NULL, kaddrp, dma_p->alength, xfer_flags, 3385 DDI_DMA_DONTWAIT, 0, 3386 &dma_p->dma_cookie, &dma_p->ncookies); 3387 if (ddi_status != DDI_DMA_MAPPED) { 3388 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3389 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3390 "(kmem_alloc) failed kaddrp $%p length %d " 3391 "(staus 0x%x (%d) ncookies %d.)", 3392 kaddrp, length, 3393 ddi_status, ddi_status, dma_p->ncookies)); 3394 KMEM_FREE(kaddrp, length); 3395 
dma_p->acc_handle = NULL; 3396 ddi_dma_free_handle(&dma_p->dma_handle); 3397 dma_p->dma_handle = NULL; 3398 dma_p->kaddrp = NULL; 3399 return (NXGE_ERROR | NXGE_DDI_FAILED); 3400 } 3401 3402 if (dma_p->ncookies != 1) { 3403 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3404 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3405 "(kmem_alloc) > 1 cookie" 3406 "(staus 0x%x ncookies %d.)", ddi_status, 3407 dma_p->ncookies)); 3408 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3409 KMEM_FREE(kaddrp, length); 3410 ddi_dma_free_handle(&dma_p->dma_handle); 3411 dma_p->dma_handle = NULL; 3412 dma_p->acc_handle = NULL; 3413 dma_p->kaddrp = NULL; 3414 return (NXGE_ERROR); 3415 } 3416 3417 dma_p->kaddrp = kaddrp; 3418 3419 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3420 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3421 "kaddr $%p alength %d", 3422 dma_p, 3423 kaddrp, 3424 dma_p->alength)); 3425 break; 3426 } 3427 break; 3428 3429 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3430 case B_TRUE: 3431 kaddrp = (caddr_t)contig_mem_alloc(length); 3432 if (kaddrp == NULL) { 3433 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3434 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3435 ddi_dma_free_handle(&dma_p->dma_handle); 3436 return (NXGE_ERROR | NXGE_DDI_FAILED); 3437 } 3438 3439 dma_p->alength = length; 3440 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3441 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3442 &dma_p->dma_cookie, &dma_p->ncookies); 3443 if (ddi_status != DDI_DMA_MAPPED) { 3444 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3445 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 3446 "(status 0x%x ncookies %d.)", ddi_status, 3447 dma_p->ncookies)); 3448 3449 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3450 "==> nxge_dma_mem_alloc: (not mapped)" 3451 "length %lu (0x%x) " 3452 "free contig kaddrp $%p " 3453 "va_to_pa $%p", 3454 length, length, 3455 kaddrp, 3456 va_to_pa(kaddrp))); 3457 3458 3459 contig_mem_free((void *)kaddrp, length); 3460 ddi_dma_free_handle(&dma_p->dma_handle); 3461 3462 dma_p->dma_handle = NULL; 3463 dma_p->acc_handle = NULL; 3464 dma_p->alength = NULL; 3465 dma_p->kaddrp = NULL; 3466 3467 return (NXGE_ERROR | NXGE_DDI_FAILED); 3468 } 3469 3470 if (dma_p->ncookies != 1 || 3471 (dma_p->dma_cookie.dmac_laddress == NULL)) { 3472 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3473 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 3474 "cookie or " 3475 "dmac_laddress is NULL $%p size %d " 3476 " (status 0x%x ncookies %d.)", 3477 ddi_status, 3478 dma_p->dma_cookie.dmac_laddress, 3479 dma_p->dma_cookie.dmac_size, 3480 dma_p->ncookies)); 3481 3482 contig_mem_free((void *)kaddrp, length); 3483 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3484 ddi_dma_free_handle(&dma_p->dma_handle); 3485 3486 dma_p->alength = 0; 3487 dma_p->dma_handle = NULL; 3488 dma_p->acc_handle = NULL; 3489 dma_p->kaddrp = NULL; 3490 3491 return (NXGE_ERROR | NXGE_DDI_FAILED); 3492 } 3493 break; 3494 3495 #else 3496 case B_TRUE: 3497 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3498 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 3499 return (NXGE_ERROR | NXGE_DDI_FAILED); 3500 #endif 3501 } 3502 3503 dma_p->kaddrp = kaddrp; 3504 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3505 dma_p->alength - RXBUF_64B_ALIGNED; 3506 #if defined(__i386) 3507 dma_p->ioaddr_pp = 3508 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 3509 #else 3510 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3511 #endif 3512 dma_p->last_ioaddr_pp = 3513 #if defined(__i386) 3514 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress + 3515 #else 3516 
(unsigned char *)dma_p->dma_cookie.dmac_laddress + 3517 #endif 3518 dma_p->alength - RXBUF_64B_ALIGNED; 3519 3520 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 3521 3522 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3523 dma_p->orig_ioaddr_pp = 3524 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3525 dma_p->orig_alength = length; 3526 dma_p->orig_kaddrp = kaddrp; 3527 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 3528 #endif 3529 3530 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 3531 "dma buffer allocated: dma_p $%p " 3532 "return dmac_ladress from cookie $%p cookie dmac_size %d " 3533 "dma_p->ioaddr_p $%p " 3534 "dma_p->orig_ioaddr_p $%p " 3535 "orig_vatopa $%p " 3536 "alength %d (0x%x) " 3537 "kaddrp $%p " 3538 "length %d (0x%x)", 3539 dma_p, 3540 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 3541 dma_p->ioaddr_pp, 3542 dma_p->orig_ioaddr_pp, 3543 dma_p->orig_vatopa, 3544 dma_p->alength, dma_p->alength, 3545 kaddrp, 3546 length, length)); 3547 3548 return (NXGE_OK); 3549 } 3550 3551 static void 3552 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 3553 { 3554 if (dma_p->dma_handle != NULL) { 3555 if (dma_p->ncookies) { 3556 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3557 dma_p->ncookies = 0; 3558 } 3559 ddi_dma_free_handle(&dma_p->dma_handle); 3560 dma_p->dma_handle = NULL; 3561 } 3562 3563 if (dma_p->acc_handle != NULL) { 3564 ddi_dma_mem_free(&dma_p->acc_handle); 3565 dma_p->acc_handle = NULL; 3566 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3567 } 3568 3569 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3570 if (dma_p->contig_alloc_type && 3571 dma_p->orig_kaddrp && dma_p->orig_alength) { 3572 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3573 "kaddrp $%p (orig_kaddrp $%p)" 3574 "mem type %d ", 3575 "orig_alength %d " 3576 "alength 0x%x (%d)", 3577 dma_p->kaddrp, 3578 dma_p->orig_kaddrp, 3579 dma_p->contig_alloc_type, 3580 dma_p->orig_alength, 3581 dma_p->alength, dma_p->alength)); 3582 3583 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 3584 dma_p->orig_alength = NULL; 3585 dma_p->orig_kaddrp = NULL; 3586 dma_p->contig_alloc_type = B_FALSE; 3587 } 3588 #endif 3589 dma_p->kaddrp = NULL; 3590 dma_p->alength = NULL; 3591 } 3592 3593 static void 3594 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p) 3595 { 3596 uint64_t kaddr; 3597 uint32_t buf_size; 3598 3599 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf")); 3600 3601 if (dma_p->dma_handle != NULL) { 3602 if (dma_p->ncookies) { 3603 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3604 dma_p->ncookies = 0; 3605 } 3606 ddi_dma_free_handle(&dma_p->dma_handle); 3607 dma_p->dma_handle = NULL; 3608 } 3609 3610 if (dma_p->acc_handle != NULL) { 3611 ddi_dma_mem_free(&dma_p->acc_handle); 3612 dma_p->acc_handle = NULL; 3613 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3614 } 3615 3616 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3617 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d", 3618 dma_p, 3619 dma_p->buf_alloc_state)); 3620 3621 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) { 3622 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3623 "<== nxge_dma_free_rx_data_buf: " 3624 "outstanding data buffers")); 3625 return; 3626 } 3627 3628 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3629 if (dma_p->contig_alloc_type && 3630 dma_p->orig_kaddrp && dma_p->orig_alength) { 3631 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: " 3632 "kaddrp $%p (orig_kaddrp $%p)" 3633 "mem type %d ", 3634 "orig_alength %d " 3635 "alength 0x%x (%d)", 3636 dma_p->kaddrp, 3637 dma_p->orig_kaddrp, 
3638 dma_p->contig_alloc_type, 3639 dma_p->orig_alength, 3640 dma_p->alength, dma_p->alength)); 3641 3642 kaddr = (uint64_t)dma_p->orig_kaddrp; 3643 buf_size = dma_p->orig_alength; 3644 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size); 3645 dma_p->orig_alength = NULL; 3646 dma_p->orig_kaddrp = NULL; 3647 dma_p->contig_alloc_type = B_FALSE; 3648 dma_p->kaddrp = NULL; 3649 dma_p->alength = NULL; 3650 return; 3651 } 3652 #endif 3653 3654 if (dma_p->kmem_alloc_type) { 3655 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3656 "nxge_dma_free_rx_data_buf: free kmem " 3657 "kaddrp $%p (orig_kaddrp $%p)" 3658 "alloc type %d " 3659 "orig_alength %d " 3660 "alength 0x%x (%d)", 3661 dma_p->kaddrp, 3662 dma_p->orig_kaddrp, 3663 dma_p->kmem_alloc_type, 3664 dma_p->orig_alength, 3665 dma_p->alength, dma_p->alength)); 3666 #if defined(__i386) 3667 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp; 3668 #else 3669 kaddr = (uint64_t)dma_p->kaddrp; 3670 #endif 3671 buf_size = dma_p->orig_alength; 3672 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3673 "nxge_dma_free_rx_data_buf: free dmap $%p " 3674 "kaddr $%p buf_size %d", 3675 dma_p, 3676 kaddr, buf_size)); 3677 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size); 3678 dma_p->alength = 0; 3679 dma_p->orig_alength = 0; 3680 dma_p->kaddrp = NULL; 3681 dma_p->kmem_alloc_type = B_FALSE; 3682 } 3683 3684 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf")); 3685 } 3686 3687 /* 3688 * nxge_m_start() -- start transmitting and receiving. 3689 * 3690 * This function is called by the MAC layer when the first 3691 * stream is opened, to prepare the hardware for sending 3692 * and receiving packets. 3693 */ 3694 static int 3695 nxge_m_start(void *arg) 3696 { 3697 p_nxge_t nxgep = (p_nxge_t)arg; 3698 3699 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 3700 3701 /* 3702 * Are we already started? 3703 */ 3704 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 3705 return (0); 3706 } 3707 3708 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) { 3709 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 3710 } 3711 3712 /* 3713 * Make sure RX MAC is disabled while we initialize. 3714 */ 3715 if (!isLDOMguest(nxgep)) { 3716 (void) nxge_rx_mac_disable(nxgep); 3717 } 3718 3719 /* 3720 * Grab the global lock. 3721 */ 3722 MUTEX_ENTER(nxgep->genlock); 3723 3724 /* 3725 * Initialize the driver and hardware. 3726 */ 3727 if (nxge_init(nxgep) != NXGE_OK) { 3728 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3729 "<== nxge_m_start: initialization failed")); 3730 MUTEX_EXIT(nxgep->genlock); 3731 return (EIO); 3732 } 3733 3734 /* 3735 * Start the timer to check for system errors and tx hangs. 3736 */ 3737 if (!isLDOMguest(nxgep)) 3738 nxgep->nxge_timerid = nxge_start_timer(nxgep, 3739 nxge_check_hw_state, NXGE_CHECK_TIMER); 3740 #if defined(sun4v) 3741 else 3742 nxge_hio_start_timer(nxgep); 3743 #endif 3744 3745 nxgep->link_notify = B_TRUE; 3746 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 3747 3748 /* 3749 * Let the global lock go, since we are initialized. 3750 */ 3751 MUTEX_EXIT(nxgep->genlock); 3752 3753 /* 3754 * Let the MAC start receiving packets, now that 3755 * we are initialized. 3756 */ 3757 if (!isLDOMguest(nxgep)) { 3758 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 3759 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3760 "<== nxge_m_start: enable of RX mac failed")); 3761 return (EIO); 3762 } 3763 3764 /* 3765 * Enable hardware interrupts. 3766 */ 3767 nxge_intr_hw_enable(nxgep); 3768 } 3769 #if defined(sun4v) 3770 else { 3771 /* 3772 * In a guest domain we enable the RDCs and their interrupts 3773 * as the last step.
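 * nxge_hio_rdc_enable() turns the RDCs on, and
 * nxge_hio_rdc_intr_arm() then arms their interrupts.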
3774 */ 3775 if (nxge_hio_rdc_enable(nxgep) != NXGE_OK) { 3776 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3777 "<== nxge_m_start: enable of RDCs failed")); 3778 return (EIO); 3779 } 3780 3781 if (nxge_hio_rdc_intr_arm(nxgep, B_TRUE) != NXGE_OK) { 3782 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3783 "<== nxge_m_start: intrs enable for RDCs failed")); 3784 return (EIO); 3785 } 3786 } 3787 #endif 3788 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3789 return (0); 3790 } 3791 3792 static boolean_t 3793 nxge_check_groups_stopped(p_nxge_t nxgep) 3794 { 3795 int i; 3796 3797 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 3798 if (nxgep->rx_hio_groups[i].started) 3799 return (B_FALSE); 3800 } 3801 3802 return (B_TRUE); 3803 } 3804 3805 /* 3806 * nxge_m_stop(): stop transmitting and receiving. 3807 */ 3808 static void 3809 nxge_m_stop(void *arg) 3810 { 3811 p_nxge_t nxgep = (p_nxge_t)arg; 3812 boolean_t groups_stopped; 3813 3814 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3815 3816 /* 3817 * Are the groups stopped? 3818 */ 3819 groups_stopped = nxge_check_groups_stopped(nxgep); 3820 ASSERT(groups_stopped == B_TRUE); 3821 if (!groups_stopped) { 3822 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n", 3823 nxgep->instance); 3824 return; 3825 } 3826 3827 if (!isLDOMguest(nxgep)) { 3828 /* 3829 * Disable the RX mac. 3830 */ 3831 (void) nxge_rx_mac_disable(nxgep); 3832 3833 /* 3834 * Wait for the IPP to drain. 3835 */ 3836 (void) nxge_ipp_drain(nxgep); 3837 3838 /* 3839 * Disable hardware interrupts. 3840 */ 3841 nxge_intr_hw_disable(nxgep); 3842 } 3843 #if defined(sun4v) 3844 else { 3845 (void) nxge_hio_rdc_intr_arm(nxgep, B_FALSE); 3846 } 3847 #endif 3848 3849 /* 3850 * Grab the global lock. 3851 */ 3852 MUTEX_ENTER(nxgep->genlock); 3853 3854 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3855 if (nxgep->nxge_timerid) { 3856 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3857 nxgep->nxge_timerid = 0; 3858 } 3859 3860 /* 3861 * Clean up. 3862 */ 3863 nxge_uninit(nxgep); 3864 3865 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3866 3867 /* 3868 * Let go of the global lock. 
	 */
	MUTEX_EXIT(nxgep->genlock);
	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
}

static int
nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	struct ether_addr addrp;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_m_multicst: add %d", add));

	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
	if (add) {
		if (nxge_add_mcast_addr(nxgep, &addrp)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_multicst: add multicast failed"));
			return (EINVAL);
		}
	} else {
		if (nxge_del_mcast_addr(nxgep, &addrp)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_multicst: del multicast failed"));
			return (EINVAL);
		}
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));

	return (0);
}

static int
nxge_m_promisc(void *arg, boolean_t on)
{
	p_nxge_t nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_m_promisc: on %d", on));

	if (nxge_set_promisc(nxgep, on)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_promisc: set promisc failed"));
		return (EINVAL);
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "<== nxge_m_promisc: on %d", on));

	return (0);
}

static void
nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	struct iocblk *iocp;
	boolean_t need_privilege;
	int err;
	int cmd;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));

	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
		return;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;
	case LB_SET_MODE:
		break;

	case NXGE_GET_MII:
	case NXGE_PUT_MII:
	case NXGE_GET64:
	case NXGE_PUT64:
	case NXGE_GET_TX_RING_SZ:
	case NXGE_GET_TX_DESC:
	case NXGE_TX_SIDE_RESET:
	case NXGE_RX_SIDE_RESET:
	case NXGE_GLOBAL_RESET:
	case NXGE_RESET_MAC:
	case NXGE_TX_REGS_DUMP:
	case NXGE_RX_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
	case NXGE_VIR_INT_REGS_DUMP:
	case NXGE_PUT_TCAM:
	case NXGE_GET_TCAM:
	case NXGE_RTRACE:
	case NXGE_RDUMP:
		need_privilege = B_FALSE;
		break;
	case NXGE_INJECT_ERR:
		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
		nxge_err_inject(nxgep, wq, mp);
		break;
	}

	if (need_privilege) {
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_ioctl: no priv"));
			return;
		}
	}

	switch (cmd) {
	case LB_GET_MODE:
	case LB_SET_MODE:
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
		break;

	case NXGE_GET_MII:
	case NXGE_PUT_MII:
	case NXGE_PUT_TCAM:
	case NXGE_GET_TCAM:
	case NXGE_GET64:
	case NXGE_PUT64:
	case NXGE_GET_TX_RING_SZ:
	case NXGE_GET_TX_DESC:
	case NXGE_TX_SIDE_RESET:
	case NXGE_RX_SIDE_RESET:
	case NXGE_GLOBAL_RESET:
	case NXGE_RESET_MAC:
	case NXGE_TX_REGS_DUMP:
	case NXGE_RX_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
	case NXGE_VIR_INT_REGS_DUMP:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_ioctl: cmd 0x%x", cmd));
		nxge_hw_ioctl(nxgep, wq, mp, iocp);
		break;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
}

extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);

void
nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory)
{
	p_nxge_mmac_stats_t mmac_stats;
	int i;
	nxge_mmac_t *mmac_info;

	mmac_info = &nxgep->nxge_mmac_info;

	mmac_stats = &nxgep->statsp->mmac_stats;
	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;

	for (i = 0; i < ETHERADDRL; i++) {
		if (factory) {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->factory_mac_pool[slot][
			    (ETHERADDRL-1) - i];
		} else {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->mac_pool[slot].addr[
			    (ETHERADDRL - 1) - i];
		}
	}
}
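
/*
 * A note on the copy above: the pool entries are exposed through the
 * kstat in reverse octet order -- pool byte (ETHERADDRL - 1) becomes
 * ether_addr_octet[0], and pool byte 0 becomes the last octet.
 */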

/*
 * nxge_altmac_set() -- Set an alternate MAC address
 */
static int
nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot,
    int rdctbl, boolean_t usetbl)
{
	uint8_t addrn;
	uint8_t portn;
	npi_mac_addr_t altmac;
	hostinfo_t mac_rdc;
	p_nxge_class_pt_cfg_t clscfgp;

	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);

	portn = nxgep->mac.portnum;
	addrn = (uint8_t)slot - 1;

	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET,
	    nxgep->function_num, addrn, &altmac) != NPI_SUCCESS)
		return (EIO);

	/*
	 * Set the rdc table number for the host info entry
	 * for this mac address slot.
	 */
	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
	mac_rdc.value = 0;
	if (usetbl)
		mac_rdc.bits.w0.rdc_tbl_num = rdctbl;
	else
		mac_rdc.bits.w0.rdc_tbl_num =
		    clscfgp->mac_host_info[addrn].rdctbl;
	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;

	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
		return (EIO);
	}

	/*
	 * Enable comparison with the alternate MAC address.
	 * While the first alternate addr is enabled by bit 1 of register
	 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
	 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
	 * accordingly before calling npi_mac_altaddr_entry.
	 */
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (npi_mac_altaddr_enable(nxgep->npi_handle,
	    nxgep->function_num, addrn) != NPI_SUCCESS) {
		return (EIO);
	}

	return (0);
}
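
/*
 * Illustration (hypothetical address, not from the hardware): for
 * 00:14:4f:a8:3c:09, nxge_altmac_set() above programs the comparator
 * entry as w2 = 0x0014, w1 = 0x4fa8 and w0 = 0x3c09, i.e. maddr[0]
 * occupies the high byte of w2.  Likewise, slot 1 is enabled as
 * addrn 0 on an XMAC port but as addrn 1 on a BMAC port.
 */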

/*
 * nxge_m_mmac_add_g() -- find an unused address slot, set the address
 * value to the one specified, enable the port to start filtering on
 * the new MAC address.  Returns 0 on success.
 */
int
nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
    boolean_t usetbl)
{
	p_nxge_t nxgep = arg;
	int slot;
	nxge_mmac_t *mmac_info;
	int err;
	nxge_status_t status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (mmac_info->naddrfree == 0) {
		mutex_exit(nxgep->genlock);
		return (ENOSPC);
	}

	/*
	 * Search for the first available slot.  Because naddrfree
	 * is not zero, we are guaranteed to find one.
	 * Each of the first two ports of Neptune has 16 alternate
	 * MAC slots, but only the first 7 (of 15) slots have assigned
	 * factory MAC addresses.  We first search among the slots without
	 * bundled factory MACs.  If we fail to find one in that range,
	 * then we search the slots with bundled factory MACs.  A factory
	 * MAC will be wasted while the slot is used with a user MAC
	 * address.  But the slot could be used by a factory MAC again
	 * after calling nxge_m_mmac_remove and nxge_m_mmac_reserve.
	 */
	for (slot = 0; slot <= mmac_info->num_mmac; slot++) {
		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
			break;
	}

	ASSERT(slot <= mmac_info->num_mmac);

	if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl,
	    usetbl)) != 0) {
		mutex_exit(nxgep->genlock);
		return (err);
	}

	bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
	mmac_info->naddrfree--;
	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);

	mutex_exit(nxgep->genlock);
	return (0);
}

/*
 * Remove the specified mac address and update the HW not to filter
 * the mac address anymore.
 */
int
nxge_m_mmac_remove(void *arg, int slot)
{
	p_nxge_t nxgep = arg;
	nxge_mmac_t *mmac_info;
	uint8_t addrn;
	uint8_t portn;
	int err = 0;
	nxge_status_t status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}

	portn = nxgep->mac.portnum;
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
		    == NPI_SUCCESS) {
			mmac_info->naddrfree++;
			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
			/*
			 * Regardless of whether the MAC we just stopped
			 * filtering is a user addr or a factory addr, we
			 * must set the MMAC_VENDOR_ADDR flag if this slot
			 * has an associated factory MAC to indicate that
			 * a factory MAC is available.
			 */
			if (slot <= mmac_info->num_factory_mmac) {
				mmac_info->mac_pool[slot].flags
				    |= MMAC_VENDOR_ADDR;
			}
			/*
			 * Clear mac_pool[slot].addr so that kstat shows 0
			 * alternate MAC address if the slot is not used.
			 * (But nxge_m_mmac_get returns the factory MAC even
			 * when the slot is not used!)
			 */
			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
		} else {
			err = EIO;
		}
	} else {
		err = EINVAL;
	}

	mutex_exit(nxgep->genlock);
	return (err);
}

/*
 * The callback to query all the factory addresses.  naddr must be the same as
 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and
 * mcm_addr is the space allocated to keep all the addresses, whose size is
 * naddr * MAXMACADDRLEN.
 */
static void
nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr)
{
	nxge_t *nxgep = arg;
	nxge_mmac_t *mmac_info;
	int i;

	mutex_enter(nxgep->genlock);

	mmac_info = &nxgep->nxge_mmac_info;
	ASSERT(naddr == mmac_info->num_factory_mmac);

	for (i = 0; i < naddr; i++) {
		bcopy(mmac_info->factory_mac_pool[i + 1],
		    addr + i * MAXMACADDRLEN, ETHERADDRL);
	}

	mutex_exit(nxgep->genlock);
}
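
/*
 * Note that the factory addresses above are laid out on MAXMACADDRLEN
 * boundaries even though only ETHERADDRL bytes of each entry are
 * copied, so address i always starts at byte offset i * MAXMACADDRLEN
 * of the caller-supplied buffer.
 */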

static boolean_t
nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	nxge_t *nxgep = arg;
	uint32_t *txflags = cap_data;

	switch (cap) {
	case MAC_CAPAB_HCKSUM:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload));
		if (nxge_cksum_offload <= 1) {
			*txflags = HCKSUM_INET_PARTIAL;
		}
		break;

	case MAC_CAPAB_MULTIFACTADDR: {
		mac_capab_multifactaddr_t *mfacp = cap_data;

		mutex_enter(nxgep->genlock);
		mfacp->mcm_naddr = nxgep->nxge_mmac_info.num_factory_mmac;
		mfacp->mcm_getaddr = nxge_m_getfactaddr;
		mutex_exit(nxgep->genlock);
		break;
	}

	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *cap_lso = cap_data;

		if (nxgep->soft_lso_enable) {
			if (nxge_cksum_offload <= 1) {
				cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
				if (nxge_lso_max > NXGE_LSO_MAXLEN) {
					nxge_lso_max = NXGE_LSO_MAXLEN;
				}
				cap_lso->lso_basic_tcp_ipv4.lso_max =
				    nxge_lso_max;
			}
			break;
		} else {
			return (B_FALSE);
		}
	}

	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *cap_rings = cap_data;
		p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;

		mutex_enter(nxgep->genlock);
		if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
			cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC;
			cap_rings->mr_rnum = p_cfgp->max_rdcs;
			cap_rings->mr_rget = nxge_fill_ring;
			cap_rings->mr_gnum = p_cfgp->max_rdc_grpids;
			cap_rings->mr_gget = nxge_hio_group_get;
			cap_rings->mr_gaddring = nxge_group_add_ring;
			cap_rings->mr_gremring = nxge_group_rem_ring;

			NXGE_DEBUG_MSG((nxgep, RX_CTL,
			    "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]",
			    p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids));
		} else {
			cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC;
			cap_rings->mr_rnum = p_cfgp->tdc.count;
			cap_rings->mr_rget = nxge_fill_ring;
			if (isLDOMservice(nxgep)) {
				/* share capable */
				/* Do not report the default ring: hence -1 */
				cap_rings->mr_gnum =
				    NXGE_MAX_TDC_GROUPS / nxgep->nports - 1;
			} else {
				cap_rings->mr_gnum = 0;
			}

			cap_rings->mr_gget = nxge_hio_group_get;
			cap_rings->mr_gaddring = nxge_group_add_ring;
			cap_rings->mr_gremring = nxge_group_rem_ring;

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_m_getcapab: tx rings # of rings %d",
			    p_cfgp->tdc.count));
		}
		mutex_exit(nxgep->genlock);
		break;
	}

#if defined(sun4v)
	case MAC_CAPAB_SHARES: {
		mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data;

		/*
		 * Only the service domain driver responds to
		 * this capability request.
		 */
		mutex_enter(nxgep->genlock);
		if (isLDOMservice(nxgep)) {
			mshares->ms_snum = 3;
			mshares->ms_handle = (void *)nxgep;
			mshares->ms_salloc = nxge_hio_share_alloc;
			mshares->ms_sfree = nxge_hio_share_free;
			mshares->ms_sadd = nxge_hio_share_add_group;
			mshares->ms_sremove = nxge_hio_share_rem_group;
			mshares->ms_squery = nxge_hio_share_query;
			mshares->ms_sbind = nxge_hio_share_bind;
			mshares->ms_sunbind = nxge_hio_share_unbind;
			mutex_exit(nxgep->genlock);
		} else {
			mutex_exit(nxgep->genlock);
			return (B_FALSE);
		}
		break;
	}
#endif
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}
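
/*
 * A worked example of the TX group arithmetic above, with hypothetical
 * numbers: in a service domain on a 2-port adapter with
 * NXGE_MAX_TDC_GROUPS of 8, mr_gnum is 8 / 2 - 1 = 3, because the
 * default group is not reported to the MAC layer; this matches the
 * three shares (ms_snum = 3) advertised by MAC_CAPAB_SHARES.
 */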

static boolean_t
nxge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * callback functions for set/get of properties
 */
static int
nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	nxge_t *nxgep = barg;
	p_nxge_param_t param_arr;
	p_nxge_stats_t statsp;
	int err = 0;
	uint8_t val;
	uint32_t cur_mtu, new_mtu, old_framesize;
	link_flowctrl_t fl;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));
	param_arr = nxgep->param_arr;
	statsp = nxgep->statsp;
	mutex_enter(nxgep->genlock);
	if (statsp->port_stats.lb_mode != nxge_lb_normal &&
	    nxge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: loopback mode: read only"));
		mutex_exit(nxgep->genlock);
		return (EBUSY);
	}

	val = *(uint8_t *)pr_val;
	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		nxgep->param_en_1000fdx = val;
		param_arr[param_anar_1000fdx].value = val;

		goto reprogram;

	case MAC_PROP_EN_100FDX_CAP:
		nxgep->param_en_100fdx = val;
		param_arr[param_anar_100fdx].value = val;

		goto reprogram;

	case MAC_PROP_EN_10FDX_CAP:
		nxgep->param_en_10fdx = val;
		param_arr[param_anar_10fdx].value = val;

		goto reprogram;

	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = EINVAL; /* cannot set read-only properties */
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: read only property %d",
		    pr_num));
		break;

	case MAC_PROP_AUTONEG:
		param_arr[param_autoneg].value = val;

		goto reprogram;

	case MAC_PROP_MTU:
		cur_mtu = nxgep->mac.default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
		    new_mtu, nxgep->mac.is_jumbo));

		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}
		if (new_mtu < NXGE_DEFAULT_MTU ||
		    new_mtu > NXGE_MAXIMUM_MTU) {
			err = EINVAL;
			break;
		}

		if ((new_mtu > NXGE_DEFAULT_MTU) &&
		    !nxgep->mac.is_jumbo) {
			err = EINVAL;
			break;
		}

		old_framesize = (uint32_t)nxgep->mac.maxframesize;
		nxgep->mac.maxframesize = (uint16_t)
		    (new_mtu + NXGE_EHEADER_VLAN_CRC);
		if (nxge_mac_set_framesize(nxgep)) {
			nxgep->mac.maxframesize =
			    (uint16_t)old_framesize;
			err = EINVAL;
			break;
		}

		err = mac_maxsdu_update(nxgep->mach, new_mtu);
		if (err) {
			nxgep->mac.maxframesize =
			    (uint16_t)old_framesize;
			err = EINVAL;
			break;
		}

		nxgep->mac.default_mtu = new_mtu;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: set MTU: %d maxframe %d",
		    new_mtu, nxgep->mac.maxframesize));
		break;

	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = EINVAL;
			break;

		case LINK_FLOWCTRL_NONE:
			param_arr[param_anar_pause].value = 0;
			break;

		case LINK_FLOWCTRL_RX:
			param_arr[param_anar_pause].value = 1;
			break;

		case LINK_FLOWCTRL_TX:
		case LINK_FLOWCTRL_BI:
			err = EINVAL;
			break;
		}

reprogram:
		if (err == 0) {
			if (!nxge_param_link_update(nxgep)) {
				err = EINVAL;
			}
		}
		break;
	case MAC_PROP_PRIVATE:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: private property"));
		err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize,
		    pr_val);
		break;

	default:
		err = ENOTSUP;
		break;
	}

	mutex_exit(nxgep->genlock);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "<== nxge_m_setprop (return %d)", err));
	return (err);
}
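
/*
 * MTU arithmetic for reference, assuming NXGE_EHEADER_VLAN_CRC covers
 * the 14-byte Ethernet header, the 4-byte VLAN tag and the 4-byte CRC
 * (22 bytes in all): setting a 9000-byte MTU on a jumbo-enabled port
 * yields a maxframesize of 9022.  The new MTU must lie within
 * [NXGE_DEFAULT_MTU, NXGE_MAXIMUM_MTU] and can only be changed while
 * the MAC is stopped.
 */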

static int
nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
{
	nxge_t *nxgep = barg;
	p_nxge_param_t param_arr = nxgep->param_arr;
	p_nxge_stats_t statsp = nxgep->statsp;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t tmp = 0;
	link_state_t ls;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "==> nxge_m_getprop: pr_num %d", pr_num));

	if (pr_valsize == 0)
		return (EINVAL);

	*perm = MAC_PROP_PERM_RW;

	if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) {
		err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val);
		return (err);
	}

	bzero(pr_val, pr_valsize);
	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_getprop: duplex mode %d",
		    *(uint8_t *)pr_val));
		break;

	case MAC_PROP_SPEED:
		if (pr_valsize < sizeof (uint64_t))
			return (EINVAL);
		*perm = MAC_PROP_PERM_READ;
		tmp = statsp->mac_stats.link_speed * 1000000ull;
		bcopy(&tmp, pr_val, sizeof (tmp));
		break;

	case MAC_PROP_STATUS:
		if (pr_valsize < sizeof (link_state_t))
			return (EINVAL);
		*perm = MAC_PROP_PERM_READ;
		if (!statsp->mac_stats.link_up)
			ls = LINK_STATE_DOWN;
		else
			ls = LINK_STATE_UP;
		bcopy(&ls, pr_val, sizeof (ls));
		break;

	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val =
		    param_arr[param_autoneg].value;
		break;

	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (link_flowctrl_t))
			return (EINVAL);

		fl = LINK_FLOWCTRL_NONE;
		if (param_arr[param_anar_pause].value) {
			fl = LINK_FLOWCTRL_RX;
		}
		bcopy(&fl, pr_val, sizeof (fl));
		break;

	case MAC_PROP_ADV_1000FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)pr_val =
		    param_arr[param_anar_1000fdx].value;
		break;

	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = nxgep->param_en_1000fdx;
		break;

	case MAC_PROP_ADV_100FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)pr_val =
		    param_arr[param_anar_100fdx].value;
		break;

	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = nxgep->param_en_100fdx;
		break;

	case MAC_PROP_ADV_10FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)pr_val =
		    param_arr[param_anar_10fdx].value;
		break;

	case MAC_PROP_EN_10FDX_CAP:
		*(uint8_t *)pr_val = nxgep->param_en_10fdx;
		break;

	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
		err = ENOTSUP;
		break;

	case MAC_PROP_PRIVATE:
		err = nxge_get_priv_prop(nxgep, pr_name, pr_flags,
		    pr_valsize, pr_val, perm);
		break;

	case MAC_PROP_MTU: {
		mac_propval_range_t range;

		if (!(pr_flags & MAC_PROP_POSSIBLE))
			return (ENOTSUP);
		if (pr_valsize < sizeof (mac_propval_range_t))
			return (EINVAL);
		range.mpr_count = 1;
		range.mpr_type = MAC_PROPVAL_UINT32;
		range.range_uint32[0].mpur_min =
		    range.range_uint32[0].mpur_max = NXGE_DEFAULT_MTU;
		if (nxgep->mac.is_jumbo)
			range.range_uint32[0].mpur_max =
			    NXGE_MAXIMUM_MTU;
		bcopy(&range, pr_val, sizeof (range));
		break;
	}
	default:
		err = EINVAL;
		break;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop"));

	return (err);
}

/* ARGSUSED */
static int
nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	p_nxge_param_t param_arr = nxgep->param_arr;
	int err = 0;
	long result;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "==> nxge_set_priv_prop: name %s", pr_name));

	if (strcmp(pr_name, "_accept_jumbo") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s "
		    "pr_val %s result %d "
		    "param %d is_jumbo %d",
		    pr_name, pr_val, result,
		    param_arr[param_accept_jumbo].value,
		    nxgep->mac.is_jumbo));

		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			if (nxgep->mac.is_jumbo ==
			    (uint32_t)result) {
				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
				    "no change (%d %d)",
				    nxgep->mac.is_jumbo,
				    result));
				return (0);
			}
		}

		param_arr[param_accept_jumbo].value = result;
		nxgep->mac.is_jumbo = B_FALSE;
		if (result) {
			nxgep->mac.is_jumbo = B_TRUE;
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d",
		    pr_name, result, nxgep->mac.is_jumbo));

		return (err);
	}

	/* Blanking */
	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
		err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
		    (char *)pr_val,
		    (caddr_t)&param_arr[param_rxdma_intr_time]);
		if (err) {
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "unable to set (%s)", pr_name));
			err = EINVAL;
		} else {
			err = 0;
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "set (%s)", pr_name));
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
		err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL,
		    (char *)pr_val,
		    (caddr_t)&param_arr[param_rxdma_intr_pkts]);
		if (err) {
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "unable to set (%s)", pr_name));
			err = EINVAL;
		} else {
			err = 0;
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "<== nxge_set_priv_prop: "
			    "set (%s)", pr_name));
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d)",
		    pr_name, result));

		return (err);
	}

	/* Classification */
	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}
	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}
	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}
	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}
	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
		if (pr_val == NULL) {
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "==> nxge_set_priv_prop: name %s (null)", pr_name));
			err = EINVAL;
			return (err);
		}

		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s "
		    "(lso %d pr_val %s value %d)",
		    pr_name, nxgep->soft_lso_enable, pr_val, result));

		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			if (nxgep->soft_lso_enable == (uint32_t)result) {
				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
				    "no change (%d %d)",
				    nxgep->soft_lso_enable, result));
				return (0);
			}
		}

		nxgep->soft_lso_enable = (int)result;

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d)",
		    pr_name, result));

		return (err);
	}
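
	/*
	 * For illustration, a private property such as the one above is
	 * typically set from userland with dladm(1M), e.g.
	 *
	 *	dladm set-linkprop -p _soft_lso_enable=1 nxge0
	 *
	 * (hypothetical invocation); the value string then arrives here
	 * through nxge_m_setprop(MAC_PROP_PRIVATE).
	 */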

	/*
	 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the
	 * following code to be executed.
	 */
	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_anar_10gfdx]);
		return (err);
	}
	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_anar_pause]);
		return (err);
	}

	return (EINVAL);
}

static int
nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val, uint_t *perm)
{
	p_nxge_param_t param_arr = nxgep->param_arr;
	char valstr[MAXNAMELEN];
	int err = EINVAL;
	uint_t strsize;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "==> nxge_get_priv_prop: property %s", pr_name));

	/* function number */
	if (strcmp(pr_name, "_function_number") == 0) {
		if (is_default)
			return (ENOTSUP);
		*perm = MAC_PROP_PERM_READ;
		(void) snprintf(valstr, sizeof (valstr), "%d",
		    nxgep->function_num);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %d valstr %s)",
		    pr_name, nxgep->function_num, valstr));

		err = 0;
		goto done;
	}

	/* Neptune firmware version */
	if (strcmp(pr_name, "_fw_version") == 0) {
		if (is_default)
			return (ENOTSUP);
		*perm = MAC_PROP_PERM_READ;
		(void) snprintf(valstr, sizeof (valstr), "%s",
		    nxgep->vpd_info.ver);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %s valstr %s)",
		    pr_name, nxgep->vpd_info.ver, valstr));

		err = 0;
		goto done;
	}

	/* port PHY mode */
	if (strcmp(pr_name, "_port_mode") == 0) {
		if (is_default)
			return (ENOTSUP);
		*perm = MAC_PROP_PERM_READ;
		switch (nxgep->mac.portmode) {
		case PORT_1G_COPPER:
			(void) snprintf(valstr, sizeof (valstr), "1G copper %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_FIBER:
			(void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_COPPER:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G copper %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_FIBER:
			(void) snprintf(valstr, sizeof (valstr), "10G fiber %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_SERDES:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G serdes %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_SERDES:
			(void) snprintf(valstr, sizeof (valstr), "1G serdes %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_TN1010:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G TN1010 copper %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_TN1010:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G TN1010 copper %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_RGMII_FIBER:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_HSP_MODE:
			(void) snprintf(valstr, sizeof (valstr),
			    "phy not present[hot swappable]");
			break;
		default:
			(void) snprintf(valstr, sizeof (valstr), "unknown %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %s)",
		    pr_name, valstr));

		err = 0;
		goto done;
	}

	/* Hot swappable PHY */
	if (strcmp(pr_name, "_hot_swap_phy") == 0) {
		if (is_default)
			return (ENOTSUP);
		*perm = MAC_PROP_PERM_READ;
		(void) snprintf(valstr, sizeof (valstr), "%s",
		    nxgep->hot_swappable_phy ?
		    "yes" : "no");

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %d valstr %s)",
		    pr_name, nxgep->hot_swappable_phy, valstr));

		err = 0;
		goto done;
	}

	/* accept jumbo */
	if (strcmp(pr_name, "_accept_jumbo") == 0) {
		if (is_default)
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
		else
			(void) snprintf(valstr, sizeof (valstr),
			    "%d", nxgep->mac.is_jumbo);
		err = 0;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d (%d, %d))",
		    pr_name,
		    (uint32_t)param_arr[param_accept_jumbo].value,
		    nxgep->mac.is_jumbo,
		    nxge_jumbo_enable));

		goto done;
	}

	/* Receive Interrupt Blanking Parameters */
	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
		err = 0;
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr),
			    "%d", RXDMA_RCR_TO_DEFAULT);
			goto done;
		}

		(void) snprintf(valstr, sizeof (valstr), "%d",
		    nxgep->intr_timeout);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d)",
		    pr_name,
		    (uint32_t)nxgep->intr_timeout));
		goto done;
	}

	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
		err = 0;
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr),
			    "%d", RXDMA_RCR_PTHRES_DEFAULT);
			goto done;
		}
		(void) snprintf(valstr, sizeof (valstr), "%d",
		    nxgep->intr_threshold);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d)",
		    pr_name, (uint32_t)nxgep->intr_threshold));

		goto done;
	}

	/* Classification and Load Distribution Configuration */
	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_tcp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_udp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}
	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_ah].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_sctp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_tcp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_udp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_ah].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_sctp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	/* Software LSO */
	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
			err = 0;
			goto done;
		}
		(void) snprintf(valstr, sizeof (valstr),
		    "%d", nxgep->soft_lso_enable);
		err = 0;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d)",
		    pr_name, nxgep->soft_lso_enable));

		goto done;
	}
	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
		err = 0;
		if (is_default ||
		    nxgep->param_arr[param_anar_10gfdx].value != 0) {
			(void) snprintf(valstr, sizeof (valstr), "%d", 1);
			goto done;
		} else {
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
			goto done;
		}
	}
	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		err = 0;
		if (is_default ||
		    nxgep->param_arr[param_anar_pause].value != 0) {
			(void) snprintf(valstr, sizeof (valstr), "%d", 1);
			goto done;
		} else {
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
			goto done;
		}
	}

done:
	if (err == 0) {
		strsize = (uint_t)strlen(valstr);
		if (pr_valsize < strsize) {
			err = ENOBUFS;
		} else {
			(void) strlcpy(pr_val, valstr, pr_valsize);
		}
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "<== nxge_get_priv_prop: return %d", err));
	return (err);
}

/*
 * Module loading and removing entry points.
 */

DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach,
    nodev, NULL, D_MP, NULL, nxge_quiesce);

#define	NXGE_DESC_VER	"Sun NIU 10Gb Ethernet"

/*
 * Module linkage information for the kernel.
 */
static struct modldrv nxge_modldrv = {
	&mod_driverops,
	NXGE_DESC_VER,
	&nxge_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *) &nxge_modldrv, NULL
};

int
_init(void)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
	mac_init_ops(&nxge_dev_ops, "nxge");
	status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
	if (status != 0) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "failed to init device soft state"));
		goto _init_exit;
	}
	status = mod_install(&modlinkage);
	if (status != 0) {
		ddi_soft_state_fini(&nxge_list);
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
		goto _init_exit;
	}

	MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);

_init_exit:
	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));

	return (status);
}

int
_fini(void)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));

	if (nxge_mblks_pending)
		return (EBUSY);

	status = mod_remove(&modlinkage);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((NULL, MOD_CTL,
		    "Module removal failed 0x%08x",
		    status));
		goto _fini_exit;
	}

	mac_fini_ops(&nxge_dev_ops);

	ddi_soft_state_fini(&nxge_list);

	MUTEX_DESTROY(&nxge_common_lock);

_fini_exit:
	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));

	return (status);
}

int
_info(struct modinfo *modinfop)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
	status = mod_info(&modlinkage, modinfop);
	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));

	return (status);
}

/*ARGSUSED*/
static int
nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
{
	p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t nxgep = rhp->nxgep;
	uint32_t channel;
	p_tx_ring_t ring;

	channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
	ring = nxgep->tx_rings->rings[channel];

	MUTEX_ENTER(&ring->lock);
	ring->tx_ring_handle = rhp->ring_handle;
	MUTEX_EXIT(&ring->lock);

	return (0);
}

static void
nxge_tx_ring_stop(mac_ring_driver_t rdriver)
{
	p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t nxgep = rhp->nxgep;
	uint32_t channel;
	p_tx_ring_t ring;

	channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
	ring = nxgep->tx_rings->rings[channel];

	MUTEX_ENTER(&ring->lock);
	ring->tx_ring_handle = (mac_ring_handle_t)NULL;
	MUTEX_EXIT(&ring->lock);
}

static int
nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
{
	p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t nxgep = rhp->nxgep;
	uint32_t channel;
	p_rx_rcr_ring_t ring;
	int i;

	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
	ring = nxgep->rx_rcr_rings->rcr_rings[channel];

	MUTEX_ENTER(&ring->lock);

	if (nxgep->rx_channel_started[channel] == B_TRUE) {
		MUTEX_EXIT(&ring->lock);
		return (0);
	}

	/* set rcr_ring */
	for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
		if ((nxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
		    (nxgep->ldgvp->ldvp[i].channel == channel)) {
			ring->ldvp = &nxgep->ldgvp->ldvp[i];
			ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp;
		}
	}

	nxgep->rx_channel_started[channel] = B_TRUE;
	ring->rcr_mac_handle = rhp->ring_handle;
	ring->rcr_gen_num = mr_gen_num;
	MUTEX_EXIT(&ring->lock);

	return (0);
}

static void
nxge_rx_ring_stop(mac_ring_driver_t rdriver)
{
	p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t nxgep = rhp->nxgep;
	uint32_t channel;
	p_rx_rcr_ring_t ring;

	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
	ring = nxgep->rx_rcr_rings->rcr_rings[channel];

	MUTEX_ENTER(&ring->lock);
	nxgep->rx_channel_started[channel] = B_FALSE;
	ring->rcr_mac_handle = NULL;
	MUTEX_EXIT(&ring->lock);
}
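
/*
 * In the ring start/stop entry points above, the MAC layer's ring
 * index is translated to a device channel by adding the partition's
 * base channel; e.g. with a hypothetical start_rdc of 4, RX ring
 * index 2 operates on RDC channel 6.
 */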

/*
 * Callback function for the MAC layer to register all rings.
 */
static void
nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_fill_ring 0x%x index %d", rtype, index));

	switch (rtype) {
	case MAC_RING_TYPE_TX: {
		p_nxge_ring_handle_t rhandlep;

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d",
		    rtype, index, p_cfgp->tdc.count));

		ASSERT((index >= 0) && (index < p_cfgp->tdc.count));
		rhandlep = &nxgep->tx_ring_handles[index];
		rhandlep->nxgep = nxgep;
		rhandlep->index = index;
		rhandlep->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rhandlep;
		infop->mri_start = nxge_tx_ring_start;
		infop->mri_stop = nxge_tx_ring_stop;
		infop->mri_tx = nxge_tx_ring_send;

		break;
	}
	case MAC_RING_TYPE_RX: {
		p_nxge_ring_handle_t rhandlep;
		int nxge_rindex;
		mac_intr_t nxge_mac_intr;

		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d",
		    rtype, index, p_cfgp->max_rdcs));

		/*
		 * 'index' is the ring index within the group.
		 * Find the ring index in the nxge instance.
		 */
		nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index);

		ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs));
		rhandlep = &nxgep->rx_ring_handles[nxge_rindex];
		rhandlep->nxgep = nxgep;
		rhandlep->index = nxge_rindex;
		rhandlep->ring_handle = rh;

		/*
		 * Entrypoint to enable interrupt (disable poll) and
		 * disable interrupt (enable poll).
		 */
		nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep;
		nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll;
		nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll;
		infop->mri_driver = (mac_ring_driver_t)rhandlep;
		infop->mri_start = nxge_rx_ring_start;
		infop->mri_stop = nxge_rx_ring_stop;
		infop->mri_intr = nxge_mac_intr; /* ??? */
		infop->mri_poll = nxge_rx_poll;

		break;
	}
	default:
		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x",
	    rtype));
}
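
/*
 * Note the deliberate inversion above: the MAC layer's mi_enable entry
 * point (enable interrupts) maps to nxge_disable_poll(), and
 * mi_disable (disable interrupts) maps to nxge_enable_poll(), since
 * leaving polling mode is what re-arms the ring's interrupt.
 */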

static void
nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
    mac_ring_type_t type)
{
	nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
	nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
	nxge_t *nxge;
	nxge_grp_t *grp;
	nxge_rdc_grp_t *rdc_grp;
	uint16_t channel;	/* device-wide ring id */
	int dev_gindex;
	int rv;

	nxge = rgroup->nxgep;

	switch (type) {
	case MAC_RING_TYPE_TX:
		/*
		 * nxge_grp_dc_add takes a channel number which is a
		 * "device" ring ID.
		 */
		channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;

		/*
		 * Remove the ring from the default group
		 */
		if (rgroup->gindex != 0) {
			(void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
		}

		/*
		 * nxge->tx_set.group[] is an array of groups indexed by
		 * a "port" group ID.
		 */
		grp = nxge->tx_set.group[rgroup->gindex];
		rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
		if (rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_group_add_ring: nxge_grp_dc_add failed"));
		}
		break;

	case MAC_RING_TYPE_RX:
		/*
		 * nxge->rx_set.group[] is an array of groups indexed by
		 * a "port" group ID.
		 */
		grp = nxge->rx_set.group[rgroup->gindex];

		dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
		    rgroup->gindex;
		rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];

		/*
		 * nxge_grp_dc_add takes a channel number which is a
		 * "device" ring ID.
		 */
		channel = nxge->pt_config.hw_config.start_rdc + rhandle->index;
		rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel);
		if (rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_group_add_ring: nxge_grp_dc_add failed"));
		}

		rdc_grp->map |= (1 << channel);
		rdc_grp->max_rdcs++;

		(void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
		break;
	}
}
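
/*
 * rdc_grp->map above is a bitmap of the channels owned by the group:
 * adding hypothetical channels 4 and 6, for example, leaves
 * map = (1 << 4) | (1 << 6) = 0x50 and max_rdcs = 2.
 */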

static void
nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
    mac_ring_type_t type)
{
	nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
	nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
	nxge_t *nxge;
	uint16_t channel;	/* device-wide ring id */
	nxge_rdc_grp_t *rdc_grp;
	int dev_gindex;

	nxge = rgroup->nxgep;

	switch (type) {
	case MAC_RING_TYPE_TX:
		dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid +
		    rgroup->gindex;
		channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
		nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

		/*
		 * Add the ring back to the default group
		 */
		if (rgroup->gindex != 0) {
			nxge_grp_t *grp;
			grp = nxge->tx_set.group[0];
			(void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
		}
		break;

	case MAC_RING_TYPE_RX:
		dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
		    rgroup->gindex;
		rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
		channel = rdc_grp->start_rdc + rhandle->index;
		nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);

		rdc_grp->map &= ~(1 << channel);
		rdc_grp->max_rdcs--;

		(void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
		break;
	}
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs(p_nxge_t nxgep)
{
	int intr_types;
	int type = 0;
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));

	nxgep->nxge_intr_type.intr_registered = B_FALSE;
	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
	nxgep->nxge_intr_type.msi_intx_cnt = 0;
	nxgep->nxge_intr_type.intr_added = 0;
	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
	nxgep->nxge_intr_type.intr_type = 0;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	} else if (nxge_msi_enable) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
	    != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nxgep->nxge_intr_type.intr_types = intr_types;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * Solaris MSI-X is not supported yet; use MSI for now.
	 * nxge_msi_enable:
	 *	1 - MSI		2 - MSI-X	others - FIXED
	 */
	switch (nxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "use fixed (intx emulation) type %08x",
		    type));
		break;

	case 2:
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
		break;

	case 1:
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
	}

	nxgep->nxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    nxgep->nxge_intr_type.niu_msi_enable) {
		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_add_intrs: "
			    " nxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "interrupts registered : type %d", type));
			nxgep->nxge_intr_type.intr_registered = B_TRUE;

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "\nAdded advanced nxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!nxgep->nxge_intr_type.intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
		    "failed to register interrupts"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
	return (status);
}
case DDI_INTR_TYPE_FIXED: /* 0x1 */ 5939 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 5940 5941 default: 5942 return (NXGE_ERROR); 5943 } 5944 } 5945 5946 5947 /*ARGSUSED*/ 5948 static nxge_status_t 5949 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 5950 { 5951 dev_info_t *dip = nxgep->dip; 5952 p_nxge_ldg_t ldgp; 5953 p_nxge_intr_t intrp; 5954 uint_t *inthandler; 5955 void *arg1, *arg2; 5956 int behavior; 5957 int nintrs, navail, nrequest; 5958 int nactual, nrequired; 5959 int inum = 0; 5960 int x, y; 5961 int ddi_status = DDI_SUCCESS; 5962 nxge_status_t status = NXGE_OK; 5963 5964 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 5965 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5966 intrp->start_inum = 0; 5967 5968 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 5969 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 5970 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5971 "ddi_intr_get_nintrs() failed, status: 0x%x, " 5972 "nintrs: %d", ddi_status, nintrs)); 5973 return (NXGE_ERROR | NXGE_DDI_FAILED); 5974 } 5975 5976 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 5977 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 5978 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5979 "ddi_intr_get_navail() failed, status: 0x%x, " 5980 "navail: %d", ddi_status, navail)); 5981 return (NXGE_ERROR | NXGE_DDI_FAILED); 5982 } 5983 5984 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5985 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 5986 nintrs, navail)); 5987 5988 /* PSARC/2007/453 MSI-X interrupt limit override */ 5989 if (int_type == DDI_INTR_TYPE_MSIX) { 5990 nrequest = nxge_create_msi_property(nxgep); 5991 if (nrequest < navail) { 5992 navail = nrequest; 5993 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5994 "nxge_add_intrs_adv_type: nintrs %d " 5995 "navail %d (nrequest %d)", 5996 nintrs, navail, nrequest)); 5997 } 5998 } 5999 6000 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 6001 /* The MSI count must be a power of 2; round navail down to the largest of 16, 8, 4 or 2 whose bit is set, else to 1 (e.g. navail = 12 becomes 8). */ 6002 if ((navail & 16) == 16) { 6003 navail = 16; 6004 } else if ((navail & 8) == 8) { 6005 navail = 8; 6006 } else if ((navail & 4) == 4) { 6007 navail = 4; 6008 } else if ((navail & 2) == 2) { 6009 navail = 2; 6010 } else { 6011 navail = 1; 6012 } 6013 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6014 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 6015 "navail %d", nintrs, navail)); 6016 } 6017 6018 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ?
DDI_INTR_ALLOC_STRICT : 6019 DDI_INTR_ALLOC_NORMAL); 6020 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 6021 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 6022 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 6023 navail, &nactual, behavior); 6024 if (ddi_status != DDI_SUCCESS || nactual == 0) { 6025 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6026 " ddi_intr_alloc() failed: %d", 6027 ddi_status)); 6028 kmem_free(intrp->htable, intrp->intr_size); 6029 return (NXGE_ERROR | NXGE_DDI_FAILED); 6030 } 6031 6032 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 6033 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 6034 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6035 " ddi_intr_get_pri() failed: %d", 6036 ddi_status)); 6037 /* Free already allocated interrupts */ 6038 for (y = 0; y < nactual; y++) { 6039 (void) ddi_intr_free(intrp->htable[y]); 6040 } 6041 6042 kmem_free(intrp->htable, intrp->intr_size); 6043 return (NXGE_ERROR | NXGE_DDI_FAILED); 6044 } 6045 6046 nrequired = 0; 6047 switch (nxgep->niu_type) { 6048 default: 6049 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 6050 break; 6051 6052 case N2_NIU: 6053 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 6054 break; 6055 } 6056 6057 if (status != NXGE_OK) { 6058 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6059 "nxge_add_intrs_adv_type: nxge_ldgv_init " 6060 "failed: 0x%x", status)); 6061 /* Free already allocated interrupts */ 6062 for (y = 0; y < nactual; y++) { 6063 (void) ddi_intr_free(intrp->htable[y]); 6064 } 6065 6066 kmem_free(intrp->htable, intrp->intr_size); 6067 return (status); 6068 } 6069 6070 ldgp = nxgep->ldgvp->ldgp; 6071 for (x = 0; x < nrequired; x++, ldgp++) { 6072 ldgp->vector = (uint8_t)x; 6073 ldgp->intdata = SID_DATA(ldgp->func, x); 6074 arg1 = ldgp->ldvp; 6075 arg2 = nxgep; 6076 if (ldgp->nldvs == 1) { 6077 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 6078 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6079 "nxge_add_intrs_adv_type: " 6080 "arg1 0x%x arg2 0x%x: " 6081 "1-1 int handler (entry %d intdata 0x%x)\n", 6082 arg1, arg2, 6083 x, ldgp->intdata)); 6084 } else if (ldgp->nldvs > 1) { 6085 inthandler = (uint_t *)ldgp->sys_intr_handler; 6086 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6087 "nxge_add_intrs_adv_type: " 6088 "arg1 0x%x arg2 0x%x: " 6089 "nldvs %d int handler " 6090 "(entry %d intdata 0x%x)\n", 6091 arg1, arg2, 6092 ldgp->nldvs, x, ldgp->intdata)); 6093 } 6094 6095 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6096 "==> nxge_add_intrs_adv_type: ddi_intr_add_handler(inum) #%d " 6097 "htable 0x%llx", x, intrp->htable[x])); 6098 6099 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6100 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6101 != DDI_SUCCESS) { 6102 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6103 "==> nxge_add_intrs_adv_type: failed #%d " 6104 "status 0x%x", x, ddi_status)); 6105 for (y = 0; y < intrp->intr_added; y++) { 6106 (void) ddi_intr_remove_handler( 6107 intrp->htable[y]); 6108 } 6109 /* Free already allocated intr */ 6110 for (y = 0; y < nactual; y++) { 6111 (void) ddi_intr_free(intrp->htable[y]); 6112 } 6113 kmem_free(intrp->htable, intrp->intr_size); 6114 6115 (void) nxge_ldgv_uninit(nxgep); 6116 6117 return (NXGE_ERROR | NXGE_DDI_FAILED); 6118 } 6119 intrp->intr_added++; 6120 } 6121 6122 intrp->msi_intx_cnt = nactual; 6123 6124 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6125 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 6126 navail, nactual, 6127 intrp->msi_intx_cnt, 6128 intrp->intr_added)); 6129 6130 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6131 6132 (void)
nxge_intr_ldgv_init(nxgep); 6133 6134 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type")); 6135 6136 return (status); 6137 } 6138 6139 /*ARGSUSED*/ 6140 static nxge_status_t 6141 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type) 6142 { 6143 dev_info_t *dip = nxgep->dip; 6144 p_nxge_ldg_t ldgp; 6145 p_nxge_intr_t intrp; 6146 uint_t *inthandler; 6147 void *arg1, *arg2; 6148 int behavior; 6149 int nintrs, navail; 6150 int nactual, nrequired; 6151 int inum = 0; 6152 int x, y; 6153 int ddi_status = DDI_SUCCESS; 6154 nxge_status_t status = NXGE_OK; 6155 6156 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix")); 6157 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6158 intrp->start_inum = 0; 6159 6160 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 6161 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 6162 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6163 "ddi_intr_get_nintrs() failed, status: 0x%x, " 6164 "nintrs: %d", ddi_status, nintrs)); 6165 return (NXGE_ERROR | NXGE_DDI_FAILED); 6166 } 6167 6168 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 6169 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 6170 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6171 "ddi_intr_get_navail() failed, status: 0x%x, " 6172 "navail: %d", ddi_status, navail)); 6173 return (NXGE_ERROR | NXGE_DDI_FAILED); 6174 } 6175 6176 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6177 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 6178 nintrs, navail)); 6179 6180 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 6181 DDI_INTR_ALLOC_NORMAL); 6182 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 6183 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 6184 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 6185 navail, &nactual, behavior); 6186 if (ddi_status != DDI_SUCCESS || nactual == 0) { 6187 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6188 " ddi_intr_alloc() failed: %d", 6189 ddi_status)); 6190 kmem_free(intrp->htable, intrp->intr_size); 6191 return (NXGE_ERROR | NXGE_DDI_FAILED); 6192 } 6193 6194 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 6195 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 6196 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6197 " ddi_intr_get_pri() failed: %d", 6198 ddi_status)); 6199 /* Free already allocated interrupts */ 6200 for (y = 0; y < nactual; y++) { 6201 (void) ddi_intr_free(intrp->htable[y]); 6202 } 6203 6204 kmem_free(intrp->htable, intrp->intr_size); 6205 return (NXGE_ERROR | NXGE_DDI_FAILED); 6206 } 6207 6208 nrequired = 0; 6209 switch (nxgep->niu_type) { 6210 default: 6211 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 6212 break; 6213 6214 case N2_NIU: 6215 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 6216 break; 6217 } 6218 6219 if (status != NXGE_OK) { 6220 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6221 "nxge_add_intrs_adv_type_fix: nxge_ldgv_init " 6222 "failed: 0x%x", status)); 6223 /* Free already allocated interrupts */ 6224 for (y = 0; y < nactual; y++) { 6225 (void) ddi_intr_free(intrp->htable[y]); 6226 } 6227 6228 kmem_free(intrp->htable, intrp->intr_size); 6229 return (status); 6230 } 6231 6232 ldgp = nxgep->ldgvp->ldgp; 6233 for (x = 0; x < nrequired; x++, ldgp++) { 6234 ldgp->vector = (uint8_t)x; 6235 if (nxgep->niu_type != N2_NIU) { 6236 ldgp->intdata = SID_DATA(ldgp->func, x); 6237 } 6238 6239 arg1 = ldgp->ldvp; 6240 arg2 = nxgep; 6241 if (ldgp->nldvs == 1) { 6242 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 6243 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6244 "nxge_add_intrs_adv_type_fix: " 6245
"1-1 int handler(%d) ldg %d ldv %d " 6246 "arg1 $%p arg2 $%p\n", 6247 x, ldgp->ldg, ldgp->ldvp->ldv, 6248 arg1, arg2)); 6249 } else if (ldgp->nldvs > 1) { 6250 inthandler = (uint_t *)ldgp->sys_intr_handler; 6251 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6252 "nxge_add_intrs_adv_type_fix: " 6253 "shared ldv %d int handler(%d) ldv %d ldg %d" 6254 "arg1 0x%016llx arg2 0x%016llx\n", 6255 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6256 arg1, arg2)); 6257 } 6258 6259 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6260 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6261 != DDI_SUCCESS) { 6262 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6263 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6264 "status 0x%x", x, ddi_status)); 6265 for (y = 0; y < intrp->intr_added; y++) { 6266 (void) ddi_intr_remove_handler( 6267 intrp->htable[y]); 6268 } 6269 for (y = 0; y < nactual; y++) { 6270 (void) ddi_intr_free(intrp->htable[y]); 6271 } 6272 /* Free already allocated intr */ 6273 kmem_free(intrp->htable, intrp->intr_size); 6274 6275 (void) nxge_ldgv_uninit(nxgep); 6276 6277 return (NXGE_ERROR | NXGE_DDI_FAILED); 6278 } 6279 intrp->intr_added++; 6280 } 6281 6282 intrp->msi_intx_cnt = nactual; 6283 6284 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6285 6286 status = nxge_intr_ldgv_init(nxgep); 6287 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6288 6289 return (status); 6290 } 6291 6292 static void 6293 nxge_remove_intrs(p_nxge_t nxgep) 6294 { 6295 int i, inum; 6296 p_nxge_intr_t intrp; 6297 6298 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6299 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6300 if (!intrp->intr_registered) { 6301 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6302 "<== nxge_remove_intrs: interrupts not registered")); 6303 return; 6304 } 6305 6306 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 6307 6308 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6309 (void) ddi_intr_block_disable(intrp->htable, 6310 intrp->intr_added); 6311 } else { 6312 for (i = 0; i < intrp->intr_added; i++) { 6313 (void) ddi_intr_disable(intrp->htable[i]); 6314 } 6315 } 6316 6317 for (inum = 0; inum < intrp->intr_added; inum++) { 6318 if (intrp->htable[inum]) { 6319 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6320 } 6321 } 6322 6323 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6324 if (intrp->htable[inum]) { 6325 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6326 "nxge_remove_intrs: ddi_intr_free inum %d " 6327 "msi_intx_cnt %d intr_added %d", 6328 inum, 6329 intrp->msi_intx_cnt, 6330 intrp->intr_added)); 6331 6332 (void) ddi_intr_free(intrp->htable[inum]); 6333 } 6334 } 6335 6336 kmem_free(intrp->htable, intrp->intr_size); 6337 intrp->intr_registered = B_FALSE; 6338 intrp->intr_enabled = B_FALSE; 6339 intrp->msi_intx_cnt = 0; 6340 intrp->intr_added = 0; 6341 6342 (void) nxge_ldgv_uninit(nxgep); 6343 6344 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6345 "#msix-request"); 6346 6347 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6348 } 6349 6350 /*ARGSUSED*/ 6351 static void 6352 nxge_intrs_enable(p_nxge_t nxgep) 6353 { 6354 p_nxge_intr_t intrp; 6355 int i; 6356 int status; 6357 6358 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6359 6360 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6361 6362 if (!intrp->intr_registered) { 6363 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6364 "interrupts are not registered")); 6365 return; 6366 } 6367 6368 if (intrp->intr_enabled) { 6369 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6370 "<== 
nxge_intrs_enable: already enabled")); 6371 return; 6372 } 6373 6374 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6375 status = ddi_intr_block_enable(intrp->htable, 6376 intrp->intr_added); 6377 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6378 "block enable - status 0x%x total inums #%d\n", 6379 status, intrp->intr_added)); 6380 /* Mark enabled on success so the guard above also works for block-enabled interrupts (mirrors the per-vector loop below). */ if (status == DDI_SUCCESS) { intrp->intr_enabled = B_TRUE; } } else { 6381 for (i = 0; i < intrp->intr_added; i++) { 6382 status = ddi_intr_enable(intrp->htable[i]); 6383 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6384 "ddi_intr_enable:enable - status 0x%x " 6385 "total inums %d enable inum #%d\n", 6386 status, intrp->intr_added, i)); 6387 if (status == DDI_SUCCESS) { 6388 intrp->intr_enabled = B_TRUE; 6389 } 6390 } 6391 } 6392 6393 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable")); 6394 } 6395 6396 /*ARGSUSED*/ 6397 static void 6398 nxge_intrs_disable(p_nxge_t nxgep) 6399 { 6400 p_nxge_intr_t intrp; 6401 int i; 6402 6403 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable")); 6404 6405 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6406 6407 if (!intrp->intr_registered) { 6408 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: " 6409 "interrupts are not registered")); 6410 return; 6411 } 6412 6413 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6414 (void) ddi_intr_block_disable(intrp->htable, 6415 intrp->intr_added); 6416 } else { 6417 for (i = 0; i < intrp->intr_added; i++) { 6418 (void) ddi_intr_disable(intrp->htable[i]); 6419 } 6420 } 6421 6422 intrp->intr_enabled = B_FALSE; 6423 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable")); 6424 } 6425 6426 static nxge_status_t 6427 nxge_mac_register(p_nxge_t nxgep) 6428 { 6429 mac_register_t *macp; 6430 int status; 6431 6432 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register")); 6433 6434 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 6435 return (NXGE_ERROR); 6436 6437 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 6438 macp->m_driver = nxgep; 6439 macp->m_dip = nxgep->dip; 6440 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; 6441 macp->m_callbacks = &nxge_m_callbacks; 6442 macp->m_min_sdu = 0; 6443 nxgep->mac.default_mtu = nxgep->mac.maxframesize - 6444 NXGE_EHEADER_VLAN_CRC; 6445 macp->m_max_sdu = nxgep->mac.default_mtu; 6446 macp->m_margin = VLAN_TAGSZ; 6447 macp->m_priv_props = nxge_priv_props; 6448 macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS; 6449 macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1 | MAC_VIRT_SERIALIZE; 6450 6451 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 6452 "==> nxge_mac_register: instance %d " 6453 "max_sdu %d margin %d maxframe %d (header %d)", 6454 nxgep->instance, 6455 macp->m_max_sdu, macp->m_margin, 6456 nxgep->mac.maxframesize, 6457 NXGE_EHEADER_VLAN_CRC)); 6458 6459 status = mac_register(macp, &nxgep->mach); 6460 mac_free(macp); 6461 6462 if (status != 0) { 6463 cmn_err(CE_WARN, 6464 "!nxge_mac_register failed (status %d instance %d)", 6465 status, nxgep->instance); 6466 return (NXGE_ERROR); 6467 } 6468 6469 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success " 6470 "(instance %d)", nxgep->instance)); 6471 6472 return (NXGE_OK); 6473 } 6474 6475 void 6476 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp) 6477 { 6478 ssize_t size; 6479 mblk_t *nmp; 6480 uint8_t blk_id; 6481 uint8_t chan; 6482 uint32_t err_id; 6483 err_inject_t *eip; 6484 6485 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject")); 6486 6487 size = 1024; 6488 nmp = mp->b_cont; 6489 eip = (err_inject_t *)nmp->b_rptr; 6490 blk_id = eip->blk_id; 6491 err_id = eip->err_id; 6492 chan = eip->chan; 6493 cmn_err(CE_NOTE, "!blk_id = 
0x%x\n", blk_id); 6494 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id); 6495 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 6496 switch (blk_id) { 6497 case MAC_BLK_ID: 6498 break; 6499 case TXMAC_BLK_ID: 6500 break; 6501 case RXMAC_BLK_ID: 6502 break; 6503 case MIF_BLK_ID: 6504 break; 6505 case IPP_BLK_ID: 6506 nxge_ipp_inject_err(nxgep, err_id); 6507 break; 6508 case TXC_BLK_ID: 6509 nxge_txc_inject_err(nxgep, err_id); 6510 break; 6511 case TXDMA_BLK_ID: 6512 nxge_txdma_inject_err(nxgep, err_id, chan); 6513 break; 6514 case RXDMA_BLK_ID: 6515 nxge_rxdma_inject_err(nxgep, err_id, chan); 6516 break; 6517 case ZCP_BLK_ID: 6518 nxge_zcp_inject_err(nxgep, err_id); 6519 break; 6520 case ESPC_BLK_ID: 6521 break; 6522 case FFLP_BLK_ID: 6523 break; 6524 case PHY_BLK_ID: 6525 break; 6526 case ETHER_SERDES_BLK_ID: 6527 break; 6528 case PCIE_SERDES_BLK_ID: 6529 break; 6530 case VIR_BLK_ID: 6531 break; 6532 } 6533 6534 nmp->b_wptr = nmp->b_rptr + size; 6535 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 6536 6537 miocack(wq, mp, (int)size, 0); 6538 } 6539 6540 static int 6541 nxge_init_common_dev(p_nxge_t nxgep) 6542 { 6543 p_nxge_hw_list_t hw_p; 6544 dev_info_t *p_dip; 6545 6546 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 6547 6548 p_dip = nxgep->p_dip; 6549 MUTEX_ENTER(&nxge_common_lock); 6550 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6551 "==> nxge_init_common_dev:func # %d", 6552 nxgep->function_num)); 6553 /* 6554 * Loop through existing per neptune hardware list. 6555 */ 6556 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6557 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6558 "==> nxge_init_common_device:func # %d " 6559 "hw_p $%p parent dip $%p", 6560 nxgep->function_num, 6561 hw_p, 6562 p_dip)); 6563 if (hw_p->parent_devp == p_dip) { 6564 nxgep->nxge_hw_p = hw_p; 6565 hw_p->ndevs++; 6566 hw_p->nxge_p[nxgep->function_num] = nxgep; 6567 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6568 "==> nxge_init_common_device:func # %d " 6569 "hw_p $%p parent dip $%p " 6570 "ndevs %d (found)", 6571 nxgep->function_num, 6572 hw_p, 6573 p_dip, 6574 hw_p->ndevs)); 6575 break; 6576 } 6577 } 6578 6579 if (hw_p == NULL) { 6580 6581 char **prop_val; 6582 uint_t prop_len; 6583 int i; 6584 6585 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6586 "==> nxge_init_common_device:func # %d " 6587 "parent dip $%p (new)", 6588 nxgep->function_num, 6589 p_dip)); 6590 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 6591 hw_p->parent_devp = p_dip; 6592 hw_p->magic = NXGE_NEPTUNE_MAGIC; 6593 nxgep->nxge_hw_p = hw_p; 6594 hw_p->ndevs++; 6595 hw_p->nxge_p[nxgep->function_num] = nxgep; 6596 hw_p->next = nxge_hw_list; 6597 if (nxgep->niu_type == N2_NIU) { 6598 hw_p->niu_type = N2_NIU; 6599 hw_p->platform_type = P_NEPTUNE_NIU; 6600 } else { 6601 hw_p->niu_type = NIU_TYPE_NONE; 6602 hw_p->platform_type = P_NEPTUNE_NONE; 6603 } 6604 6605 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 6606 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 6607 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 6608 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 6609 6610 nxge_hw_list = hw_p; 6611 6612 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0, 6613 "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) { 6614 for (i = 0; i < prop_len; i++) { 6615 if ((strcmp((caddr_t)prop_val[i], 6616 NXGE_ROCK_COMPATIBLE) == 0)) { 6617 hw_p->platform_type = P_NEPTUNE_ROCK; 6618 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6619 "ROCK hw_p->platform_type %d", 6620 hw_p->platform_type)); 6621 break; 6622 } 6623 
NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6624 "nxge_init_common_dev: read compatible" 6625 " property[%d] val[%s]", 6626 i, (caddr_t)prop_val[i])); 6627 } 6628 /* prop_val is only valid when the lookup succeeded. */ ddi_prop_free(prop_val); 6629 } 6630 6631 6632 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list); 6633 } 6634 6635 MUTEX_EXIT(&nxge_common_lock); 6636 6637 nxgep->platform_type = hw_p->platform_type; 6638 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d", 6639 nxgep->platform_type)); 6640 if (nxgep->niu_type != N2_NIU) { 6641 nxgep->niu_type = hw_p->niu_type; 6642 } 6643 6644 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6645 "==> nxge_init_common_device (nxge_hw_list) $%p", 6646 nxge_hw_list)); 6647 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device")); 6648 6649 return (NXGE_OK); 6650 } 6651 6652 static void 6653 nxge_uninit_common_dev(p_nxge_t nxgep) 6654 { 6655 p_nxge_hw_list_t hw_p, h_hw_p; 6656 p_nxge_dma_pt_cfg_t p_dma_cfgp; 6657 p_nxge_hw_pt_cfg_t p_cfgp; 6658 dev_info_t *p_dip; 6659 6660 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device")); 6661 if (nxgep->nxge_hw_p == NULL) { 6662 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6663 "<== nxge_uninit_common_device (no common)")); 6664 return; 6665 } 6666 6667 MUTEX_ENTER(&nxge_common_lock); 6668 h_hw_p = nxge_hw_list; 6669 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6670 p_dip = hw_p->parent_devp; 6671 if (nxgep->nxge_hw_p == hw_p && 6672 p_dip == nxgep->p_dip && 6673 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC && 6674 hw_p->magic == NXGE_NEPTUNE_MAGIC) { 6675 6676 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6677 "==> nxge_uninit_common_device:func # %d " 6678 "hw_p $%p parent dip $%p " 6679 "ndevs %d (found)", 6680 nxgep->function_num, 6681 hw_p, 6682 p_dip, 6683 hw_p->ndevs)); 6684 6685 /* 6686 * Release the RDC table, a shared resource 6687 * of the nxge hardware. The RDC table was 6688 * assigned to this instance of nxge in 6689 * nxge_use_cfg_dma_config(). 6690 */ 6691 if (!isLDOMguest(nxgep)) { 6692 p_dma_cfgp = 6693 (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 6694 p_cfgp = 6695 (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config; 6696 (void) nxge_fzc_rdc_tbl_unbind(nxgep, 6697 p_cfgp->def_mac_rxdma_grpid); 6698 6699 /* Cleanup any outstanding groups. 
*/ 6700 nxge_grp_cleanup(nxgep); 6701 } 6702 6703 if (hw_p->ndevs) { 6704 hw_p->ndevs--; 6705 } 6706 hw_p->nxge_p[nxgep->function_num] = NULL; 6707 if (!hw_p->ndevs) { 6708 MUTEX_DESTROY(&hw_p->nxge_vlan_lock); 6709 MUTEX_DESTROY(&hw_p->nxge_tcam_lock); 6710 MUTEX_DESTROY(&hw_p->nxge_cfg_lock); 6711 MUTEX_DESTROY(&hw_p->nxge_mdio_lock); 6712 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6713 "==> nxge_uninit_common_device: " 6714 "func # %d " 6715 "hw_p $%p parent dip $%p " 6716 "ndevs %d (last)", 6717 nxgep->function_num, 6718 hw_p, 6719 p_dip, 6720 hw_p->ndevs)); 6721 6722 nxge_hio_uninit(nxgep); 6723 6724 if (hw_p == nxge_hw_list) { 6725 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6726 "==> nxge_uninit_common_device:" 6727 "remove head func # %d " 6728 "hw_p $%p parent dip $%p " 6729 "ndevs %d (head)", 6730 nxgep->function_num, 6731 hw_p, 6732 p_dip, 6733 hw_p->ndevs)); 6734 nxge_hw_list = hw_p->next; 6735 } else { 6736 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6737 "==> nxge_uninit_common_device:" 6738 "remove middle func # %d " 6739 "hw_p $%p parent dip $%p " 6740 "ndevs %d (middle)", 6741 nxgep->function_num, 6742 hw_p, 6743 p_dip, 6744 hw_p->ndevs)); 6745 h_hw_p->next = hw_p->next; 6746 } 6747 6748 nxgep->nxge_hw_p = NULL; 6749 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t)); 6750 } 6751 break; 6752 } else { 6753 h_hw_p = hw_p; 6754 } 6755 } 6756 6757 MUTEX_EXIT(&nxge_common_lock); 6758 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6759 "==> nxge_uninit_common_device (nxge_hw_list) $%p", 6760 nxge_hw_list)); 6761 6762 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device")); 6763 } 6764 6765 /* 6766 * Determines the number of ports from the niu_type or the platform type. 6767 * Returns the number of ports, or returns zero on failure. 6768 */ 6769 6770 int 6771 nxge_get_nports(p_nxge_t nxgep) 6772 { 6773 int nports = 0; 6774 6775 switch (nxgep->niu_type) { 6776 case N2_NIU: 6777 case NEPTUNE_2_10GF: 6778 nports = 2; 6779 break; 6780 case NEPTUNE_4_1GC: 6781 case NEPTUNE_2_10GF_2_1GC: 6782 case NEPTUNE_1_10GF_3_1GC: 6783 case NEPTUNE_1_1GC_1_10GF_2_1GC: 6784 case NEPTUNE_2_10GF_2_1GRF: 6785 nports = 4; 6786 break; 6787 default: 6788 switch (nxgep->platform_type) { 6789 case P_NEPTUNE_NIU: 6790 case P_NEPTUNE_ATLAS_2PORT: 6791 nports = 2; 6792 break; 6793 case P_NEPTUNE_ATLAS_4PORT: 6794 case P_NEPTUNE_MARAMBA_P0: 6795 case P_NEPTUNE_MARAMBA_P1: 6796 case P_NEPTUNE_ROCK: 6797 case P_NEPTUNE_ALONSO: 6798 nports = 4; 6799 break; 6800 default: 6801 break; 6802 } 6803 break; 6804 } 6805 6806 return (nports); 6807 } 6808 6809 /* 6810 * The following two functions are to support 6811 * PSARC/2007/453 MSI-X interrupt limit override. 6812 */ 6813 static int 6814 nxge_create_msi_property(p_nxge_t nxgep) 6815 { 6816 int nmsi; 6817 extern int ncpus; 6818 6819 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property")); 6820 6821 switch (nxgep->mac.portmode) { 6822 case PORT_10G_COPPER: 6823 case PORT_10G_FIBER: 6824 case PORT_10G_TN1010: 6825 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 6826 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 6827 /* 6828 * The maximum number of MSI-X vectors requested is 8. 6829 * If the number of CPUs is less than 8, one vector 6830 * per CPU is requested instead (the default behavior).
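* For example (an editorial illustration): with the default request of * NXGE_MSIX_REQUEST_10G (8) on a 4-CPU system, nmsi is clamped to ncpus * and only 4 MSI-X vectors are requested.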
6831 */ 6832 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6833 "==>nxge_create_msi_property (10G): nxge_msix_10g_intrs %d", 6834 nxge_msix_10g_intrs)); 6835 if ((nxge_msix_10g_intrs == 0) || 6836 (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) { 6837 nmsi = NXGE_MSIX_REQUEST_10G; 6838 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6839 "==>nxge_create_msi_property (10G): reset to 8")); 6840 } else { 6841 nmsi = nxge_msix_10g_intrs; 6842 } 6843 6844 /* 6845 * If the number of interrupts requested is 8 (the default), 6846 * the CPU-count check below still applies and may 6847 * lower the request further. 6848 */ 6849 if ((nmsi == NXGE_MSIX_REQUEST_10G) && 6850 (ncpus < nmsi)) { 6851 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6852 "==>nxge_create_msi_property (10G): reset to ncpus")); 6853 nmsi = ncpus; 6854 } 6855 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6856 "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)", 6857 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6858 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6859 break; 6860 6861 default: 6862 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 6863 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 6864 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6865 "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d", 6866 nxge_msix_1g_intrs)); 6867 if ((nxge_msix_1g_intrs == 0) || 6868 (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) { 6869 nmsi = NXGE_MSIX_REQUEST_1G; 6870 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6871 "==>nxge_create_msi_property (1G): reset to 2")); 6872 } else { 6873 nmsi = nxge_msix_1g_intrs; 6874 } 6875 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6876 "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)", 6877 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6878 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6879 break; 6880 } 6881 6882 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property")); 6883 return (nmsi); 6884 } 6885 6886 /* ARGSUSED */ 6887 static int 6888 nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize, 6889 void *pr_val) 6890 { 6891 int err = 0; 6892 link_flowctrl_t fl; 6893 6894 switch (pr_num) { 6895 case MAC_PROP_AUTONEG: 6896 *(uint8_t *)pr_val = 1; 6897 break; 6898 case MAC_PROP_FLOWCTRL: 6899 if (pr_valsize < sizeof (link_flowctrl_t)) 6900 return (EINVAL); 6901 fl = LINK_FLOWCTRL_RX; 6902 bcopy(&fl, pr_val, sizeof (fl)); 6903 break; 6904 case MAC_PROP_ADV_1000FDX_CAP: 6905 case MAC_PROP_EN_1000FDX_CAP: 6906 *(uint8_t *)pr_val = 1; 6907 break; 6908 case MAC_PROP_ADV_100FDX_CAP: 6909 case MAC_PROP_EN_100FDX_CAP: 6910 *(uint8_t *)pr_val = 1; 6911 break; 6912 default: 6913 err = ENOTSUP; 6914 break; 6915 } 6916 return (err); 6917 } 6918 6919 6920 /* 6921 * The following is a software workaround for the Neptune hardware's 6922 * interrupt bug: the hardware may generate spurious interrupts after 6923 * an interrupt handler has been removed. 
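* The workaround, nxge_niu_peu_reset() below, sets the PIM, GLU and NIU * reset bits (bits 29, 30 and 31) of the port-logic register at PCI * config offset 0x98 and then waits for the reset to take effect.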
6924 */ 6925 #define NXGE_PCI_PORT_LOGIC_OFFSET 0x98 6926 #define NXGE_PIM_RESET (1ULL << 29) 6927 #define NXGE_GLU_RESET (1ULL << 30) 6928 #define NXGE_NIU_RESET (1ULL << 31) 6929 #define NXGE_PCI_RESET_ALL (NXGE_PIM_RESET | \ 6930 NXGE_GLU_RESET | \ 6931 NXGE_NIU_RESET) 6932 6933 #define NXGE_WAIT_QUITE_TIME 200000 6934 #define NXGE_WAIT_QUITE_RETRY 40 6935 #define NXGE_PCI_RESET_WAIT 1000000 /* one second */ 6936 6937 static void 6938 nxge_niu_peu_reset(p_nxge_t nxgep) 6939 { 6940 uint32_t rvalue; 6941 p_nxge_hw_list_t hw_p; 6942 p_nxge_t fnxgep; 6943 int i, j; 6944 6945 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset")); 6946 if ((hw_p = nxgep->nxge_hw_p) == NULL) { 6947 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6948 "==> nxge_niu_peu_reset: NULL hardware pointer")); 6949 return; 6950 } 6951 6952 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6953 "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d", 6954 hw_p->flags, nxgep->nxge_link_poll_timerid, 6955 nxgep->nxge_timerid)); 6956 6957 MUTEX_ENTER(&hw_p->nxge_cfg_lock); 6958 /* 6959 * Make sure the other instances on the same hardware have 6960 * stopped sending PIOs and are in a quiescent state. 6961 */ 6962 for (i = 0; i < NXGE_MAX_PORTS; i++) { 6963 fnxgep = hw_p->nxge_p[i]; 6964 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6965 "==> nxge_niu_peu_reset: checking entry %d " 6966 "nxgep $%p", i, fnxgep)); 6967 #ifdef NXGE_DEBUG 6968 if (fnxgep) { 6969 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6970 "==> nxge_niu_peu_reset: entry %d (function %d) " 6971 "link timer id %d hw timer id %d", 6972 i, fnxgep->function_num, 6973 fnxgep->nxge_link_poll_timerid, 6974 fnxgep->nxge_timerid)); 6975 } 6976 #endif 6977 if (fnxgep && fnxgep != nxgep && 6978 (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) { 6979 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6980 "==> nxge_niu_peu_reset: checking $%p " 6981 "(function %d) timer ids", 6982 fnxgep, fnxgep->function_num)); 6983 for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) { 6984 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6985 "==> nxge_niu_peu_reset: waiting")); 6986 NXGE_DELAY(NXGE_WAIT_QUITE_TIME); 6987 if (!fnxgep->nxge_timerid && 6988 !fnxgep->nxge_link_poll_timerid) { 6989 break; 6990 } 6991 } 6992 NXGE_DELAY(NXGE_WAIT_QUITE_TIME); 6993 if (fnxgep->nxge_timerid || 6994 fnxgep->nxge_link_poll_timerid) { 6995 MUTEX_EXIT(&hw_p->nxge_cfg_lock); 6996 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6997 "<== nxge_niu_peu_reset: cannot reset " 6998 "hardware (devices are still in use)")); 6999 return; 7000 } 7001 } 7002 } 7003 7004 if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) { 7005 hw_p->flags |= COMMON_RESET_NIU_PCI; 7006 rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh, 7007 NXGE_PCI_PORT_LOGIC_OFFSET); 7008 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 7009 "nxge_niu_peu_reset: read offset 0x%x (%d) " 7010 "(data 0x%x)", 7011 NXGE_PCI_PORT_LOGIC_OFFSET, 7012 NXGE_PCI_PORT_LOGIC_OFFSET, 7013 rvalue)); 7014 7015 rvalue |= NXGE_PCI_RESET_ALL; 7016 pci_config_put32(nxgep->dev_regs->nxge_pciregh, 7017 NXGE_PCI_PORT_LOGIC_OFFSET, rvalue); 7018 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 7019 "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x", 7020 rvalue)); 7021 7022 NXGE_DELAY(NXGE_PCI_RESET_WAIT); 7023 } 7024 7025 MUTEX_EXIT(&hw_p->nxge_cfg_lock); 7026 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset")); 7027 } 7028 7029 static void 7030 nxge_set_pci_replay_timeout(p_nxge_t nxgep) 7031 { 7032 p_dev_regs_t dev_regs; 7033 uint32_t value; 7034 7035 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout")); 
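/* * Editorial note on the arithmetic below: with the default * nxge_replay_timeout of 0xc and PCI_REPLAY_TIMEOUT_SHIFT of 14, the * read-modify-write ORs 0xc << 14 == 0x30000 into the config word at * PCI_REPLAY_TIMEOUT_CFG_OFFSET (0xb8). Because the update is an OR, * bits already set in the timeout field are left set rather than * cleared first. */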
7036 7037 if (!nxge_set_replay_timer) { 7038 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7039 "==> nxge_set_pci_replay_timeout: will not change " 7040 "the timeout")); 7041 return; 7042 } 7043 7044 dev_regs = nxgep->dev_regs; 7045 7046 /* Check the handles before they are dereferenced in the debug output below. */ 7047 if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) { 7048 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7049 "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or " 7050 "no PCI handle", 7051 dev_regs)); 7052 return; 7053 } 7054 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7055 "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p", 7056 dev_regs, dev_regs->nxge_pciregh)); 7057 value = (pci_config_get32(dev_regs->nxge_pciregh, 7058 PCI_REPLAY_TIMEOUT_CFG_OFFSET) | 7059 (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT)); 7060 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7061 "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x " 7062 "(timeout value to set 0x%x at offset 0x%x) value 0x%x", 7063 pci_config_get32(dev_regs->nxge_pciregh, 7064 PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout, 7065 PCI_REPLAY_TIMEOUT_CFG_OFFSET, value)); 7066 7067 pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET, 7068 value); 7069 7070 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7071 "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x", 7072 pci_config_get32(dev_regs->nxge_pciregh, 7073 PCI_REPLAY_TIMEOUT_CFG_OFFSET))); 7074 7075 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout")); 7076 } 7077 7078 /* 7079 * quiesce(9E) entry point. 7080 * 7081 * This function is called when the system is single-threaded at high 7082 * PIL with preemption disabled. Therefore, this function must not 7083 * block. 7084 * 7085 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 7086 * DDI_FAILURE indicates an error condition and should almost never happen. 7087 */ 7088 static int 7089 nxge_quiesce(dev_info_t *dip) 7090 { 7091 int instance = ddi_get_instance(dip); 7092 p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 7093 7094 if (nxgep == NULL) 7095 return (DDI_FAILURE); 7096 7097 /* Turn off debugging */ 7098 nxge_debug_level = NO_DEBUG; 7099 nxgep->nxge_debug_level = NO_DEBUG; 7100 npi_debug_level = NO_DEBUG; 7101 7102 /* 7103 * Stop the link monitor only when linkchkmode is interrupt based. 7104 */ 7105 if (nxgep->mac.linkchkmode == LINKCHK_INTR) { 7106 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 7107 } 7108 7109 (void) nxge_intr_hw_disable(nxgep); 7110 7111 /* 7112 * Reset the receive MAC side. 7113 */ 7114 (void) nxge_rx_mac_disable(nxgep); 7115 7116 /* Disable and soft reset the IPP */ 7117 if (!isLDOMguest(nxgep)) 7118 (void) nxge_ipp_disable(nxgep); 7119 7120 /* 7121 * Reset the transmit/receive DMA side. 7122 */ 7123 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 7124 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 7125 7126 /* 7127 * Reset the transmit MAC side. 7128 */ 7129 (void) nxge_tx_mac_disable(nxgep); 7130 7131 return (DDI_SUCCESS); 7132 }
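/* * Editorial sketch (illustrative only, not driver code): the RDC group * "map" field updated by nxge_group_add_ring() and nxge_group_rem_ring() * above is a per-group bitmap of device RDC channels, for example: * rdc_grp->map |= (1 << 4); map == 0x10, one channel bound * rdc_grp->map |= (1 << 6); map == 0x50, channels 4 and 6 * rdc_grp->map &= ~(1 << 4); map == 0x40, channel 4 released * with max_rdcs incremented or decremented in step, and the new map * pushed to hardware via nxge_init_fzc_rdc_tbl(). */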