1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * SunOs MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver. 29 */ 30 #include <sys/nxge/nxge_impl.h> 31 #include <sys/nxge/nxge_hio.h> 32 #include <sys/nxge/nxge_rxdma.h> 33 #include <sys/pcie.h> 34 35 uint32_t nxge_use_partition = 0; /* debug partition flag */ 36 uint32_t nxge_dma_obp_props_only = 1; /* use obp published props */ 37 uint32_t nxge_use_rdc_intr = 1; /* debug to assign rdc intr */ 38 /* 39 * PSARC/2007/453 MSI-X interrupt limit override 40 */ 41 uint32_t nxge_msi_enable = 2; 42 43 /* 44 * Software workaround for a Neptune (PCI-E) 45 * hardware interrupt bug which the hardware 46 * may generate spurious interrupts after the 47 * device interrupt handler was removed. If this flag 48 * is enabled, the driver will reset the 49 * hardware when devices are being detached. 50 */ 51 uint32_t nxge_peu_reset_enable = 0; 52 53 /* 54 * Software workaround for the hardware 55 * checksum bugs that affect packet transmission 56 * and receive: 57 * 58 * Usage of nxge_cksum_offload: 59 * 60 * (1) nxge_cksum_offload = 0 (default): 61 * - transmits packets: 62 * TCP: uses the hardware checksum feature. 63 * UDP: driver will compute the software checksum 64 * based on the partial checksum computed 65 * by the IP layer. 66 * - receives packets 67 * TCP: marks packets checksum flags based on hardware result. 68 * UDP: will not mark checksum flags. 69 * 70 * (2) nxge_cksum_offload = 1: 71 * - transmit packets: 72 * TCP/UDP: uses the hardware checksum feature. 73 * - receives packets 74 * TCP/UDP: marks packet checksum flags based on hardware result. 75 * 76 * (3) nxge_cksum_offload = 2: 77 * - The driver will not register its checksum capability. 78 * Checksum for both TCP and UDP will be computed 79 * by the stack. 80 * - The software LSO is not allowed in this case. 81 * 82 * (4) nxge_cksum_offload > 2: 83 * - Will be treated as it is set to 2 84 * (stack will compute the checksum). 85 * 86 * (5) If the hardware bug is fixed, this workaround 87 * needs to be updated accordingly to reflect 88 * the new hardware revision. 89 */ 90 uint32_t nxge_cksum_offload = 0; 91 92 /* 93 * Globals: tunable parameters (/etc/system or adb) 94 * 95 */ 96 uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT; 97 uint32_t nxge_rbr_spare_size = 0; 98 uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT; 99 uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT; 100 boolean_t nxge_no_msg = B_TRUE; /* control message display */ 101 uint32_t nxge_no_link_notify = 0; /* control DL_NOTIFY */ 102 uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX; 103 uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN; 104 uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN; 105 uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU; 106 nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL; 107 108 /* MAX LSO size */ 109 #define NXGE_LSO_MAXLEN 65535 110 uint32_t nxge_lso_max = NXGE_LSO_MAXLEN; 111 112 113 /* 114 * Add tunable to reduce the amount of time spent in the 115 * ISR doing Rx Processing. 116 */ 117 uint32_t nxge_max_rx_pkts = 1024; 118 119 /* 120 * Tunables to manage the receive buffer blocks. 121 * 122 * nxge_rx_threshold_hi: copy all buffers. 123 * nxge_rx_bcopy_size_type: receive buffer block size type. 124 * nxge_rx_threshold_lo: copy only up to tunable block size type. 125 */ 126 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6; 127 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0; 128 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3; 129 130 /* Use kmem_alloc() to allocate data buffers. */ 131 #if defined(__sparc) 132 uint32_t nxge_use_kmem_alloc = 1; 133 #elif defined(__i386) 134 uint32_t nxge_use_kmem_alloc = 0; 135 #else 136 uint32_t nxge_use_kmem_alloc = 1; 137 #endif 138 139 rtrace_t npi_rtracebuf; 140 141 /* 142 * The hardware sometimes fails to allow enough time for the link partner 143 * to send an acknowledgement for packets that the hardware sent to it. The 144 * hardware resends the packets earlier than it should be in those instances. 145 * This behavior caused some switches to acknowledge the wrong packets 146 * and it triggered the fatal error. 147 * This software workaround is to set the replay timer to a value 148 * suggested by the hardware team. 149 * 150 * PCI config space replay timer register: 151 * The following replay timeout value is 0xc 152 * for bit 14:18. 153 */ 154 #define PCI_REPLAY_TIMEOUT_CFG_OFFSET 0xb8 155 #define PCI_REPLAY_TIMEOUT_SHIFT 14 156 157 uint32_t nxge_set_replay_timer = 1; 158 uint32_t nxge_replay_timeout = 0xc; 159 160 /* 161 * The transmit serialization sometimes causes 162 * longer sleep before calling the driver transmit 163 * function as it sleeps longer than it should. 164 * The performace group suggests that a time wait tunable 165 * can be used to set the maximum wait time when needed 166 * and the default is set to 1 tick. 167 */ 168 uint32_t nxge_tx_serial_maxsleep = 1; 169 170 #if defined(sun4v) 171 /* 172 * Hypervisor N2/NIU services information. 173 */ 174 static hsvc_info_t niu_hsvc = { 175 HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER, 176 NIU_MINOR_VER, "nxge" 177 }; 178 179 static int nxge_hsvc_register(p_nxge_t); 180 #endif 181 182 /* 183 * Function Prototypes 184 */ 185 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t); 186 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t); 187 static void nxge_unattach(p_nxge_t); 188 static int nxge_quiesce(dev_info_t *); 189 190 #if NXGE_PROPERTY 191 static void nxge_remove_hard_properties(p_nxge_t); 192 #endif 193 194 /* 195 * These two functions are required by nxge_hio.c 196 */ 197 extern int nxge_m_mmac_remove(void *arg, int slot); 198 extern void nxge_grp_cleanup(p_nxge_t nxge); 199 200 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t); 201 202 static nxge_status_t nxge_setup_mutexes(p_nxge_t); 203 static void nxge_destroy_mutexes(p_nxge_t); 204 205 static nxge_status_t nxge_map_regs(p_nxge_t nxgep); 206 static void nxge_unmap_regs(p_nxge_t nxgep); 207 #ifdef NXGE_DEBUG 208 static void nxge_test_map_regs(p_nxge_t nxgep); 209 #endif 210 211 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep); 212 static void nxge_remove_intrs(p_nxge_t nxgep); 213 214 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep); 215 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t); 216 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t); 217 static void nxge_intrs_enable(p_nxge_t nxgep); 218 static void nxge_intrs_disable(p_nxge_t nxgep); 219 220 static void nxge_suspend(p_nxge_t); 221 static nxge_status_t nxge_resume(p_nxge_t); 222 223 static nxge_status_t nxge_setup_dev(p_nxge_t); 224 static void nxge_destroy_dev(p_nxge_t); 225 226 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t); 227 static void nxge_free_mem_pool(p_nxge_t); 228 229 nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t); 230 static void nxge_free_rx_mem_pool(p_nxge_t); 231 232 nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t); 233 static void nxge_free_tx_mem_pool(p_nxge_t); 234 235 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t, 236 struct ddi_dma_attr *, 237 size_t, ddi_device_acc_attr_t *, uint_t, 238 p_nxge_dma_common_t); 239 240 static void nxge_dma_mem_free(p_nxge_dma_common_t); 241 static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t); 242 243 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t, 244 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 245 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 246 247 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t, 248 p_nxge_dma_common_t *, size_t); 249 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 250 251 extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t, 252 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 253 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 254 255 extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t, 256 p_nxge_dma_common_t *, 257 size_t); 258 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 259 260 static int nxge_init_common_dev(p_nxge_t); 261 static void nxge_uninit_common_dev(p_nxge_t); 262 extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *, 263 char *, caddr_t); 264 #if defined(sun4v) 265 extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep); 266 extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm); 267 #endif 268 269 /* 270 * The next declarations are for the GLDv3 interface. 271 */ 272 static int nxge_m_start(void *); 273 static void nxge_m_stop(void *); 274 static int nxge_m_multicst(void *, boolean_t, const uint8_t *); 275 static int nxge_m_promisc(void *, boolean_t); 276 static void nxge_m_ioctl(void *, queue_t *, mblk_t *); 277 nxge_status_t nxge_mac_register(p_nxge_t); 278 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr, 279 int slot, int rdctbl, boolean_t usetbl); 280 void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, 281 boolean_t factory); 282 283 static void nxge_m_getfactaddr(void *, uint_t, uint8_t *); 284 static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *); 285 static int nxge_m_setprop(void *, const char *, mac_prop_id_t, 286 uint_t, const void *); 287 static int nxge_m_getprop(void *, const char *, mac_prop_id_t, 288 uint_t, uint_t, void *, uint_t *); 289 static int nxge_set_priv_prop(nxge_t *, const char *, uint_t, 290 const void *); 291 static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t, 292 void *, uint_t *); 293 static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *); 294 static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int, 295 mac_ring_info_t *, mac_ring_handle_t); 296 static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t, 297 mac_ring_type_t); 298 static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t, 299 mac_ring_type_t); 300 301 static void nxge_niu_peu_reset(p_nxge_t nxgep); 302 static void nxge_set_pci_replay_timeout(nxge_t *); 303 304 mac_priv_prop_t nxge_priv_props[] = { 305 {"_adv_10gfdx_cap", MAC_PROP_PERM_RW}, 306 {"_adv_pause_cap", MAC_PROP_PERM_RW}, 307 {"_function_number", MAC_PROP_PERM_READ}, 308 {"_fw_version", MAC_PROP_PERM_READ}, 309 {"_port_mode", MAC_PROP_PERM_READ}, 310 {"_hot_swap_phy", MAC_PROP_PERM_READ}, 311 {"_rxdma_intr_time", MAC_PROP_PERM_RW}, 312 {"_rxdma_intr_pkts", MAC_PROP_PERM_RW}, 313 {"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW}, 314 {"_class_opt_ipv4_udp", MAC_PROP_PERM_RW}, 315 {"_class_opt_ipv4_ah", MAC_PROP_PERM_RW}, 316 {"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW}, 317 {"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW}, 318 {"_class_opt_ipv6_udp", MAC_PROP_PERM_RW}, 319 {"_class_opt_ipv6_ah", MAC_PROP_PERM_RW}, 320 {"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}, 321 {"_soft_lso_enable", MAC_PROP_PERM_RW} 322 }; 323 324 #define NXGE_MAX_PRIV_PROPS \ 325 (sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t)) 326 327 #define NXGE_NEPTUNE_MAGIC 0x4E584745UL 328 #define MAX_DUMP_SZ 256 329 330 #define NXGE_M_CALLBACK_FLAGS \ 331 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP) 332 333 mac_callbacks_t nxge_m_callbacks = { 334 NXGE_M_CALLBACK_FLAGS, 335 nxge_m_stat, 336 nxge_m_start, 337 nxge_m_stop, 338 nxge_m_promisc, 339 nxge_m_multicst, 340 NULL, 341 NULL, 342 nxge_m_ioctl, 343 nxge_m_getcapab, 344 NULL, 345 NULL, 346 nxge_m_setprop, 347 nxge_m_getprop 348 }; 349 350 void 351 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *); 352 353 /* PSARC/2007/453 MSI-X interrupt limit override. */ 354 #define NXGE_MSIX_REQUEST_10G 8 355 #define NXGE_MSIX_REQUEST_1G 2 356 static int nxge_create_msi_property(p_nxge_t); 357 /* 358 * For applications that care about the 359 * latency, it was requested by PAE and the 360 * customers that the driver has tunables that 361 * allow the user to tune it to a higher number 362 * interrupts to spread the interrupts among 363 * multiple channels. The DDI framework limits 364 * the maximum number of MSI-X resources to allocate 365 * to 8 (ddi_msix_alloc_limit). If more than 8 366 * is set, ddi_msix_alloc_limit must be set accordingly. 367 * The default number of MSI interrupts are set to 368 * 8 for 10G and 2 for 1G link. 369 */ 370 #define NXGE_MSIX_MAX_ALLOWED 32 371 uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G; 372 uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G; 373 374 /* 375 * These global variables control the message 376 * output. 377 */ 378 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG; 379 uint64_t nxge_debug_level; 380 381 /* 382 * This list contains the instance structures for the Neptune 383 * devices present in the system. The lock exists to guarantee 384 * mutually exclusive access to the list. 385 */ 386 void *nxge_list = NULL; 387 void *nxge_hw_list = NULL; 388 nxge_os_mutex_t nxge_common_lock; 389 nxge_os_mutex_t nxgedebuglock; 390 391 extern uint64_t npi_debug_level; 392 393 extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *); 394 extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *); 395 extern nxge_status_t nxge_ldgv_uninit(p_nxge_t); 396 extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t); 397 extern void nxge_fm_init(p_nxge_t, 398 ddi_device_acc_attr_t *, 399 ddi_device_acc_attr_t *, 400 ddi_dma_attr_t *); 401 extern void nxge_fm_fini(p_nxge_t); 402 extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t); 403 404 /* 405 * Count used to maintain the number of buffers being used 406 * by Neptune instances and loaned up to the upper layers. 407 */ 408 uint32_t nxge_mblks_pending = 0; 409 410 /* 411 * Device register access attributes for PIO. 412 */ 413 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = { 414 DDI_DEVICE_ATTR_V0, 415 DDI_STRUCTURE_LE_ACC, 416 DDI_STRICTORDER_ACC, 417 }; 418 419 /* 420 * Device descriptor access attributes for DMA. 421 */ 422 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = { 423 DDI_DEVICE_ATTR_V0, 424 DDI_STRUCTURE_LE_ACC, 425 DDI_STRICTORDER_ACC 426 }; 427 428 /* 429 * Device buffer access attributes for DMA. 430 */ 431 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = { 432 DDI_DEVICE_ATTR_V0, 433 DDI_STRUCTURE_BE_ACC, 434 DDI_STRICTORDER_ACC 435 }; 436 437 ddi_dma_attr_t nxge_desc_dma_attr = { 438 DMA_ATTR_V0, /* version number. */ 439 0, /* low address */ 440 0xffffffffffffffff, /* high address */ 441 0xffffffffffffffff, /* address counter max */ 442 #ifndef NIU_PA_WORKAROUND 443 0x100000, /* alignment */ 444 #else 445 0x2000, 446 #endif 447 0xfc00fc, /* dlim_burstsizes */ 448 0x1, /* minimum transfer size */ 449 0xffffffffffffffff, /* maximum transfer size */ 450 0xffffffffffffffff, /* maximum segment size */ 451 1, /* scatter/gather list length */ 452 (unsigned int) 1, /* granularity */ 453 0 /* attribute flags */ 454 }; 455 456 ddi_dma_attr_t nxge_tx_dma_attr = { 457 DMA_ATTR_V0, /* version number. */ 458 0, /* low address */ 459 0xffffffffffffffff, /* high address */ 460 0xffffffffffffffff, /* address counter max */ 461 #if defined(_BIG_ENDIAN) 462 0x2000, /* alignment */ 463 #else 464 0x1000, /* alignment */ 465 #endif 466 0xfc00fc, /* dlim_burstsizes */ 467 0x1, /* minimum transfer size */ 468 0xffffffffffffffff, /* maximum transfer size */ 469 0xffffffffffffffff, /* maximum segment size */ 470 5, /* scatter/gather list length */ 471 (unsigned int) 1, /* granularity */ 472 0 /* attribute flags */ 473 }; 474 475 ddi_dma_attr_t nxge_rx_dma_attr = { 476 DMA_ATTR_V0, /* version number. */ 477 0, /* low address */ 478 0xffffffffffffffff, /* high address */ 479 0xffffffffffffffff, /* address counter max */ 480 0x2000, /* alignment */ 481 0xfc00fc, /* dlim_burstsizes */ 482 0x1, /* minimum transfer size */ 483 0xffffffffffffffff, /* maximum transfer size */ 484 0xffffffffffffffff, /* maximum segment size */ 485 1, /* scatter/gather list length */ 486 (unsigned int) 1, /* granularity */ 487 DDI_DMA_RELAXED_ORDERING /* attribute flags */ 488 }; 489 490 ddi_dma_lim_t nxge_dma_limits = { 491 (uint_t)0, /* dlim_addr_lo */ 492 (uint_t)0xffffffff, /* dlim_addr_hi */ 493 (uint_t)0xffffffff, /* dlim_cntr_max */ 494 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */ 495 0x1, /* dlim_minxfer */ 496 1024 /* dlim_speed */ 497 }; 498 499 dma_method_t nxge_force_dma = DVMA; 500 501 /* 502 * dma chunk sizes. 503 * 504 * Try to allocate the largest possible size 505 * so that fewer number of dma chunks would be managed 506 */ 507 #ifdef NIU_PA_WORKAROUND 508 size_t alloc_sizes [] = {0x2000}; 509 #else 510 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000, 511 0x10000, 0x20000, 0x40000, 0x80000, 512 0x100000, 0x200000, 0x400000, 0x800000, 513 0x1000000, 0x2000000, 0x4000000}; 514 #endif 515 516 /* 517 * Translate "dev_t" to a pointer to the associated "dev_info_t". 518 */ 519 520 extern void nxge_get_environs(nxge_t *); 521 522 static int 523 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 524 { 525 p_nxge_t nxgep = NULL; 526 int instance; 527 int status = DDI_SUCCESS; 528 uint8_t portn; 529 nxge_mmac_t *mmac_info; 530 531 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach")); 532 533 /* 534 * Get the device instance since we'll need to setup 535 * or retrieve a soft state for this instance. 536 */ 537 instance = ddi_get_instance(dip); 538 539 switch (cmd) { 540 case DDI_ATTACH: 541 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH")); 542 break; 543 544 case DDI_RESUME: 545 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME")); 546 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 547 if (nxgep == NULL) { 548 status = DDI_FAILURE; 549 break; 550 } 551 if (nxgep->dip != dip) { 552 status = DDI_FAILURE; 553 break; 554 } 555 if (nxgep->suspended == DDI_PM_SUSPEND) { 556 status = ddi_dev_is_needed(nxgep->dip, 0, 1); 557 } else { 558 status = nxge_resume(nxgep); 559 } 560 goto nxge_attach_exit; 561 562 case DDI_PM_RESUME: 563 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME")); 564 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 565 if (nxgep == NULL) { 566 status = DDI_FAILURE; 567 break; 568 } 569 if (nxgep->dip != dip) { 570 status = DDI_FAILURE; 571 break; 572 } 573 status = nxge_resume(nxgep); 574 goto nxge_attach_exit; 575 576 default: 577 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown")); 578 status = DDI_FAILURE; 579 goto nxge_attach_exit; 580 } 581 582 583 if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) { 584 status = DDI_FAILURE; 585 goto nxge_attach_exit; 586 } 587 588 nxgep = ddi_get_soft_state(nxge_list, instance); 589 if (nxgep == NULL) { 590 status = NXGE_ERROR; 591 goto nxge_attach_fail2; 592 } 593 594 nxgep->nxge_magic = NXGE_MAGIC; 595 596 nxgep->drv_state = 0; 597 nxgep->dip = dip; 598 nxgep->instance = instance; 599 nxgep->p_dip = ddi_get_parent(dip); 600 nxgep->nxge_debug_level = nxge_debug_level; 601 npi_debug_level = nxge_debug_level; 602 603 /* Are we a guest running in a Hybrid I/O environment? */ 604 nxge_get_environs(nxgep); 605 606 status = nxge_map_regs(nxgep); 607 608 if (status != NXGE_OK) { 609 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed")); 610 goto nxge_attach_fail3; 611 } 612 613 nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, 614 &nxge_dev_desc_dma_acc_attr, 615 &nxge_rx_dma_attr); 616 617 /* Create & initialize the per-Neptune data structure */ 618 /* (even if we're a guest). */ 619 status = nxge_init_common_dev(nxgep); 620 if (status != NXGE_OK) { 621 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 622 "nxge_init_common_dev failed")); 623 goto nxge_attach_fail4; 624 } 625 626 /* 627 * Software workaround: set the replay timer. 628 */ 629 if (nxgep->niu_type != N2_NIU) { 630 nxge_set_pci_replay_timeout(nxgep); 631 } 632 633 #if defined(sun4v) 634 /* This is required by nxge_hio_init(), which follows. */ 635 if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS) 636 goto nxge_attach_fail4; 637 #endif 638 639 if ((status = nxge_hio_init(nxgep)) != NXGE_OK) { 640 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 641 "nxge_hio_init failed")); 642 goto nxge_attach_fail4; 643 } 644 645 if (nxgep->niu_type == NEPTUNE_2_10GF) { 646 if (nxgep->function_num > 1) { 647 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported" 648 " function %d. Only functions 0 and 1 are " 649 "supported for this card.", nxgep->function_num)); 650 status = NXGE_ERROR; 651 goto nxge_attach_fail4; 652 } 653 } 654 655 if (isLDOMguest(nxgep)) { 656 /* 657 * Use the function number here. 658 */ 659 nxgep->mac.portnum = nxgep->function_num; 660 nxgep->mac.porttype = PORT_TYPE_LOGICAL; 661 662 /* XXX We'll set the MAC address counts to 1 for now. */ 663 mmac_info = &nxgep->nxge_mmac_info; 664 mmac_info->num_mmac = 1; 665 mmac_info->naddrfree = 1; 666 } else { 667 portn = NXGE_GET_PORT_NUM(nxgep->function_num); 668 nxgep->mac.portnum = portn; 669 if ((portn == 0) || (portn == 1)) 670 nxgep->mac.porttype = PORT_TYPE_XMAC; 671 else 672 nxgep->mac.porttype = PORT_TYPE_BMAC; 673 /* 674 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC) 675 * internally, the rest 2 ports use BMAC (1G "Big" MAC). 676 * The two types of MACs have different characterizations. 677 */ 678 mmac_info = &nxgep->nxge_mmac_info; 679 if (nxgep->function_num < 2) { 680 mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY; 681 mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY; 682 } else { 683 mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY; 684 mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY; 685 } 686 } 687 /* 688 * Setup the Ndd parameters for the this instance. 689 */ 690 nxge_init_param(nxgep); 691 692 /* 693 * Setup Register Tracing Buffer. 694 */ 695 npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf); 696 697 /* init stats ptr */ 698 nxge_init_statsp(nxgep); 699 700 /* 701 * Copy the vpd info from eeprom to a local data 702 * structure, and then check its validity. 703 */ 704 if (!isLDOMguest(nxgep)) { 705 int *regp; 706 uint_t reglen; 707 int rv; 708 709 nxge_vpd_info_get(nxgep); 710 711 /* Find the NIU config handle. */ 712 rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 713 ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS, 714 "reg", ®p, ®len); 715 716 if (rv != DDI_PROP_SUCCESS) { 717 goto nxge_attach_fail5; 718 } 719 /* 720 * The address_hi, that is the first int, in the reg 721 * property consists of config handle, but need to remove 722 * the bits 28-31 which are OBP specific info. 723 */ 724 nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF; 725 ddi_prop_free(regp); 726 } 727 728 /* 729 * Set the defaults for the MTU size. 730 */ 731 nxge_hw_id_init(nxgep); 732 733 if (isLDOMguest(nxgep)) { 734 uchar_t *prop_val; 735 uint_t prop_len; 736 uint32_t max_frame_size; 737 738 extern void nxge_get_logical_props(p_nxge_t); 739 740 nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR; 741 nxgep->mac.portmode = PORT_LOGICAL; 742 (void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip, 743 "phy-type", "virtual transceiver"); 744 745 nxgep->nports = 1; 746 nxgep->board_ver = 0; /* XXX What? */ 747 748 /* 749 * local-mac-address property gives us info on which 750 * specific MAC address the Hybrid resource is associated 751 * with. 752 */ 753 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0, 754 "local-mac-address", &prop_val, 755 &prop_len) != DDI_PROP_SUCCESS) { 756 goto nxge_attach_fail5; 757 } 758 if (prop_len != ETHERADDRL) { 759 ddi_prop_free(prop_val); 760 goto nxge_attach_fail5; 761 } 762 ether_copy(prop_val, nxgep->hio_mac_addr); 763 ddi_prop_free(prop_val); 764 nxge_get_logical_props(nxgep); 765 766 /* 767 * Enable Jumbo property based on the "max-frame-size" 768 * property value. 769 */ 770 max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY, 771 nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 772 "max-frame-size", NXGE_MTU_DEFAULT_MAX); 773 if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) && 774 (max_frame_size <= TX_JUMBO_MTU)) { 775 nxgep->mac.is_jumbo = B_TRUE; 776 nxgep->mac.maxframesize = (uint16_t)max_frame_size; 777 nxgep->mac.default_mtu = nxgep->mac.maxframesize - 778 NXGE_EHEADER_VLAN_CRC; 779 } 780 } else { 781 status = nxge_xcvr_find(nxgep); 782 783 if (status != NXGE_OK) { 784 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: " 785 " Couldn't determine card type" 786 " .... exit ")); 787 goto nxge_attach_fail5; 788 } 789 790 status = nxge_get_config_properties(nxgep); 791 792 if (status != NXGE_OK) { 793 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 794 "get_hw create failed")); 795 goto nxge_attach_fail; 796 } 797 } 798 799 /* 800 * Setup the Kstats for the driver. 801 */ 802 nxge_setup_kstats(nxgep); 803 804 if (!isLDOMguest(nxgep)) 805 nxge_setup_param(nxgep); 806 807 status = nxge_setup_system_dma_pages(nxgep); 808 if (status != NXGE_OK) { 809 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed")); 810 goto nxge_attach_fail; 811 } 812 813 814 if (!isLDOMguest(nxgep)) 815 nxge_hw_init_niu_common(nxgep); 816 817 status = nxge_setup_mutexes(nxgep); 818 if (status != NXGE_OK) { 819 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed")); 820 goto nxge_attach_fail; 821 } 822 823 #if defined(sun4v) 824 if (isLDOMguest(nxgep)) { 825 /* Find our VR & channel sets. */ 826 status = nxge_hio_vr_add(nxgep); 827 if (status != DDI_SUCCESS) { 828 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 829 "nxge_hio_vr_add failed")); 830 (void) hsvc_unregister(&nxgep->niu_hsvc); 831 nxgep->niu_hsvc_available = B_FALSE; 832 goto nxge_attach_fail; 833 } 834 goto nxge_attach_exit; 835 } 836 #endif 837 838 status = nxge_setup_dev(nxgep); 839 if (status != DDI_SUCCESS) { 840 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed")); 841 goto nxge_attach_fail; 842 } 843 844 status = nxge_add_intrs(nxgep); 845 if (status != DDI_SUCCESS) { 846 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed")); 847 goto nxge_attach_fail; 848 } 849 850 /* If a guest, register with vio_net instead. */ 851 if ((status = nxge_mac_register(nxgep)) != NXGE_OK) { 852 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 853 "unable to register to mac layer (%d)", status)); 854 goto nxge_attach_fail; 855 } 856 857 mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN); 858 859 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 860 "registered to mac (instance %d)", instance)); 861 862 /* nxge_link_monitor calls xcvr.check_link recursively */ 863 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 864 865 goto nxge_attach_exit; 866 867 nxge_attach_fail: 868 nxge_unattach(nxgep); 869 goto nxge_attach_fail1; 870 871 nxge_attach_fail5: 872 /* 873 * Tear down the ndd parameters setup. 874 */ 875 nxge_destroy_param(nxgep); 876 877 /* 878 * Tear down the kstat setup. 879 */ 880 nxge_destroy_kstats(nxgep); 881 882 nxge_attach_fail4: 883 if (nxgep->nxge_hw_p) { 884 nxge_uninit_common_dev(nxgep); 885 nxgep->nxge_hw_p = NULL; 886 } 887 888 nxge_attach_fail3: 889 /* 890 * Unmap the register setup. 891 */ 892 nxge_unmap_regs(nxgep); 893 894 nxge_fm_fini(nxgep); 895 896 nxge_attach_fail2: 897 ddi_soft_state_free(nxge_list, nxgep->instance); 898 899 nxge_attach_fail1: 900 if (status != NXGE_OK) 901 status = (NXGE_ERROR | NXGE_DDI_FAILED); 902 nxgep = NULL; 903 904 nxge_attach_exit: 905 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x", 906 status)); 907 908 return (status); 909 } 910 911 static int 912 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 913 { 914 int status = DDI_SUCCESS; 915 int instance; 916 p_nxge_t nxgep = NULL; 917 918 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach")); 919 instance = ddi_get_instance(dip); 920 nxgep = ddi_get_soft_state(nxge_list, instance); 921 if (nxgep == NULL) { 922 status = DDI_FAILURE; 923 goto nxge_detach_exit; 924 } 925 926 switch (cmd) { 927 case DDI_DETACH: 928 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH")); 929 break; 930 931 case DDI_PM_SUSPEND: 932 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND")); 933 nxgep->suspended = DDI_PM_SUSPEND; 934 nxge_suspend(nxgep); 935 break; 936 937 case DDI_SUSPEND: 938 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND")); 939 if (nxgep->suspended != DDI_PM_SUSPEND) { 940 nxgep->suspended = DDI_SUSPEND; 941 nxge_suspend(nxgep); 942 } 943 break; 944 945 default: 946 status = DDI_FAILURE; 947 } 948 949 if (cmd != DDI_DETACH) 950 goto nxge_detach_exit; 951 952 /* 953 * Stop the xcvr polling. 954 */ 955 nxgep->suspended = cmd; 956 957 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 958 959 if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) { 960 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 961 "<== nxge_detach status = 0x%08X", status)); 962 return (DDI_FAILURE); 963 } 964 965 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 966 "<== nxge_detach (mac_unregister) status = 0x%08X", status)); 967 968 nxge_unattach(nxgep); 969 nxgep = NULL; 970 971 nxge_detach_exit: 972 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X", 973 status)); 974 975 return (status); 976 } 977 978 static void 979 nxge_unattach(p_nxge_t nxgep) 980 { 981 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach")); 982 983 if (nxgep == NULL || nxgep->dev_regs == NULL) { 984 return; 985 } 986 987 nxgep->nxge_magic = 0; 988 989 if (nxgep->nxge_timerid) { 990 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 991 nxgep->nxge_timerid = 0; 992 } 993 994 /* 995 * If this flag is set, it will affect the Neptune 996 * only. 997 */ 998 if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) { 999 nxge_niu_peu_reset(nxgep); 1000 } 1001 1002 #if defined(sun4v) 1003 if (isLDOMguest(nxgep)) { 1004 (void) nxge_hio_vr_release(nxgep); 1005 } 1006 #endif 1007 1008 if (nxgep->nxge_hw_p) { 1009 nxge_uninit_common_dev(nxgep); 1010 nxgep->nxge_hw_p = NULL; 1011 } 1012 1013 #if defined(sun4v) 1014 if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) { 1015 (void) hsvc_unregister(&nxgep->niu_hsvc); 1016 nxgep->niu_hsvc_available = B_FALSE; 1017 } 1018 #endif 1019 /* 1020 * Stop any further interrupts. 1021 */ 1022 nxge_remove_intrs(nxgep); 1023 1024 /* 1025 * Stop the device and free resources. 1026 */ 1027 if (!isLDOMguest(nxgep)) { 1028 nxge_destroy_dev(nxgep); 1029 } 1030 1031 /* 1032 * Tear down the ndd parameters setup. 1033 */ 1034 nxge_destroy_param(nxgep); 1035 1036 /* 1037 * Tear down the kstat setup. 1038 */ 1039 nxge_destroy_kstats(nxgep); 1040 1041 /* 1042 * Destroy all mutexes. 1043 */ 1044 nxge_destroy_mutexes(nxgep); 1045 1046 /* 1047 * Remove the list of ndd parameters which 1048 * were setup during attach. 1049 */ 1050 if (nxgep->dip) { 1051 NXGE_DEBUG_MSG((nxgep, OBP_CTL, 1052 " nxge_unattach: remove all properties")); 1053 1054 (void) ddi_prop_remove_all(nxgep->dip); 1055 } 1056 1057 #if NXGE_PROPERTY 1058 nxge_remove_hard_properties(nxgep); 1059 #endif 1060 1061 /* 1062 * Unmap the register setup. 1063 */ 1064 nxge_unmap_regs(nxgep); 1065 1066 nxge_fm_fini(nxgep); 1067 1068 ddi_soft_state_free(nxge_list, nxgep->instance); 1069 1070 NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach")); 1071 } 1072 1073 #if defined(sun4v) 1074 int 1075 nxge_hsvc_register(nxge_t *nxgep) 1076 { 1077 nxge_status_t status; 1078 1079 if (nxgep->niu_type == N2_NIU) { 1080 nxgep->niu_hsvc_available = B_FALSE; 1081 bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t)); 1082 if ((status = hsvc_register(&nxgep->niu_hsvc, 1083 &nxgep->niu_min_ver)) != 0) { 1084 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1085 "nxge_attach: %s: cannot negotiate " 1086 "hypervisor services revision %d group: 0x%lx " 1087 "major: 0x%lx minor: 0x%lx errno: %d", 1088 niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev, 1089 niu_hsvc.hsvc_group, niu_hsvc.hsvc_major, 1090 niu_hsvc.hsvc_minor, status)); 1091 return (DDI_FAILURE); 1092 } 1093 nxgep->niu_hsvc_available = B_TRUE; 1094 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1095 "NIU Hypervisor service enabled")); 1096 } 1097 1098 return (DDI_SUCCESS); 1099 } 1100 #endif 1101 1102 static char n2_siu_name[] = "niu"; 1103 1104 static nxge_status_t 1105 nxge_map_regs(p_nxge_t nxgep) 1106 { 1107 int ddi_status = DDI_SUCCESS; 1108 p_dev_regs_t dev_regs; 1109 char buf[MAXPATHLEN + 1]; 1110 char *devname; 1111 #ifdef NXGE_DEBUG 1112 char *sysname; 1113 #endif 1114 off_t regsize; 1115 nxge_status_t status = NXGE_OK; 1116 #if !defined(_BIG_ENDIAN) 1117 off_t pci_offset; 1118 uint16_t pcie_devctl; 1119 #endif 1120 1121 if (isLDOMguest(nxgep)) { 1122 return (nxge_guest_regs_map(nxgep)); 1123 } 1124 1125 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs")); 1126 nxgep->dev_regs = NULL; 1127 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP); 1128 dev_regs->nxge_regh = NULL; 1129 dev_regs->nxge_pciregh = NULL; 1130 dev_regs->nxge_msix_regh = NULL; 1131 dev_regs->nxge_vir_regh = NULL; 1132 dev_regs->nxge_vir2_regh = NULL; 1133 nxgep->niu_type = NIU_TYPE_NONE; 1134 1135 devname = ddi_pathname(nxgep->dip, buf); 1136 ASSERT(strlen(devname) > 0); 1137 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1138 "nxge_map_regs: pathname devname %s", devname)); 1139 1140 /* 1141 * The driver is running on a N2-NIU system if devname is something 1142 * like "/niu@80/network@0" 1143 */ 1144 if (strstr(devname, n2_siu_name)) { 1145 /* N2/NIU */ 1146 nxgep->niu_type = N2_NIU; 1147 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1148 "nxge_map_regs: N2/NIU devname %s", devname)); 1149 /* get function number */ 1150 nxgep->function_num = 1151 (devname[strlen(devname) -1] == '1' ? 1 : 0); 1152 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1153 "nxge_map_regs: N2/NIU function number %d", 1154 nxgep->function_num)); 1155 } else { 1156 int *prop_val; 1157 uint_t prop_len; 1158 uint8_t func_num; 1159 1160 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 1161 0, "reg", 1162 &prop_val, &prop_len) != DDI_PROP_SUCCESS) { 1163 NXGE_DEBUG_MSG((nxgep, VPD_CTL, 1164 "Reg property not found")); 1165 ddi_status = DDI_FAILURE; 1166 goto nxge_map_regs_fail0; 1167 1168 } else { 1169 func_num = (prop_val[0] >> 8) & 0x7; 1170 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1171 "Reg property found: fun # %d", 1172 func_num)); 1173 nxgep->function_num = func_num; 1174 if (isLDOMguest(nxgep)) { 1175 nxgep->function_num /= 2; 1176 return (NXGE_OK); 1177 } 1178 ddi_prop_free(prop_val); 1179 } 1180 } 1181 1182 switch (nxgep->niu_type) { 1183 default: 1184 (void) ddi_dev_regsize(nxgep->dip, 0, ®size); 1185 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1186 "nxge_map_regs: pci config size 0x%x", regsize)); 1187 1188 ddi_status = ddi_regs_map_setup(nxgep->dip, 0, 1189 (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0, 1190 &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh); 1191 if (ddi_status != DDI_SUCCESS) { 1192 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1193 "ddi_map_regs, nxge bus config regs failed")); 1194 goto nxge_map_regs_fail0; 1195 } 1196 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1197 "nxge_map_reg: PCI config addr 0x%0llx " 1198 " handle 0x%0llx", dev_regs->nxge_pciregp, 1199 dev_regs->nxge_pciregh)); 1200 /* 1201 * IMP IMP 1202 * workaround for bit swapping bug in HW 1203 * which ends up in no-snoop = yes 1204 * resulting, in DMA not synched properly 1205 */ 1206 #if !defined(_BIG_ENDIAN) 1207 /* workarounds for x86 systems */ 1208 pci_offset = 0x80 + PCIE_DEVCTL; 1209 pcie_devctl = pci_config_get16(dev_regs->nxge_pciregh, 1210 pci_offset); 1211 pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP; 1212 pcie_devctl |= PCIE_DEVCTL_RO_EN; 1213 pci_config_put16(dev_regs->nxge_pciregh, pci_offset, 1214 pcie_devctl); 1215 #endif 1216 1217 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 1218 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1219 "nxge_map_regs: pio size 0x%x", regsize)); 1220 /* set up the device mapped register */ 1221 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1222 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1223 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1224 if (ddi_status != DDI_SUCCESS) { 1225 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1226 "ddi_map_regs for Neptune global reg failed")); 1227 goto nxge_map_regs_fail1; 1228 } 1229 1230 /* set up the msi/msi-x mapped register */ 1231 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1232 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1233 "nxge_map_regs: msix size 0x%x", regsize)); 1234 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1235 (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0, 1236 &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh); 1237 if (ddi_status != DDI_SUCCESS) { 1238 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1239 "ddi_map_regs for msi reg failed")); 1240 goto nxge_map_regs_fail2; 1241 } 1242 1243 /* set up the vio region mapped register */ 1244 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1245 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1246 "nxge_map_regs: vio size 0x%x", regsize)); 1247 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1248 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1249 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1250 1251 if (ddi_status != DDI_SUCCESS) { 1252 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1253 "ddi_map_regs for nxge vio reg failed")); 1254 goto nxge_map_regs_fail3; 1255 } 1256 nxgep->dev_regs = dev_regs; 1257 1258 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh); 1259 NPI_PCI_ADD_HANDLE_SET(nxgep, 1260 (npi_reg_ptr_t)dev_regs->nxge_pciregp); 1261 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh); 1262 NPI_MSI_ADD_HANDLE_SET(nxgep, 1263 (npi_reg_ptr_t)dev_regs->nxge_msix_regp); 1264 1265 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1266 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1267 1268 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1269 NPI_REG_ADD_HANDLE_SET(nxgep, 1270 (npi_reg_ptr_t)dev_regs->nxge_regp); 1271 1272 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1273 NPI_VREG_ADD_HANDLE_SET(nxgep, 1274 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1275 1276 break; 1277 1278 case N2_NIU: 1279 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU")); 1280 /* 1281 * Set up the device mapped register (FWARC 2006/556) 1282 * (changed back to 1: reg starts at 1!) 1283 */ 1284 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 1285 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1286 "nxge_map_regs: dev size 0x%x", regsize)); 1287 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1288 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1289 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1290 1291 if (ddi_status != DDI_SUCCESS) { 1292 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1293 "ddi_map_regs for N2/NIU, global reg failed ")); 1294 goto nxge_map_regs_fail1; 1295 } 1296 1297 /* set up the first vio region mapped register */ 1298 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1299 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1300 "nxge_map_regs: vio (1) size 0x%x", regsize)); 1301 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1302 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1303 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1304 1305 if (ddi_status != DDI_SUCCESS) { 1306 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1307 "ddi_map_regs for nxge vio reg failed")); 1308 goto nxge_map_regs_fail2; 1309 } 1310 /* set up the second vio region mapped register */ 1311 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1312 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1313 "nxge_map_regs: vio (3) size 0x%x", regsize)); 1314 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1315 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0, 1316 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh); 1317 1318 if (ddi_status != DDI_SUCCESS) { 1319 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1320 "ddi_map_regs for nxge vio2 reg failed")); 1321 goto nxge_map_regs_fail3; 1322 } 1323 nxgep->dev_regs = dev_regs; 1324 1325 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1326 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1327 1328 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1329 NPI_REG_ADD_HANDLE_SET(nxgep, 1330 (npi_reg_ptr_t)dev_regs->nxge_regp); 1331 1332 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1333 NPI_VREG_ADD_HANDLE_SET(nxgep, 1334 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1335 1336 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh); 1337 NPI_V2REG_ADD_HANDLE_SET(nxgep, 1338 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp); 1339 1340 break; 1341 } 1342 1343 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx " 1344 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh)); 1345 1346 goto nxge_map_regs_exit; 1347 nxge_map_regs_fail3: 1348 if (dev_regs->nxge_msix_regh) { 1349 ddi_regs_map_free(&dev_regs->nxge_msix_regh); 1350 } 1351 if (dev_regs->nxge_vir_regh) { 1352 ddi_regs_map_free(&dev_regs->nxge_regh); 1353 } 1354 nxge_map_regs_fail2: 1355 if (dev_regs->nxge_regh) { 1356 ddi_regs_map_free(&dev_regs->nxge_regh); 1357 } 1358 nxge_map_regs_fail1: 1359 if (dev_regs->nxge_pciregh) { 1360 ddi_regs_map_free(&dev_regs->nxge_pciregh); 1361 } 1362 nxge_map_regs_fail0: 1363 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory")); 1364 kmem_free(dev_regs, sizeof (dev_regs_t)); 1365 1366 nxge_map_regs_exit: 1367 if (ddi_status != DDI_SUCCESS) 1368 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1369 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs")); 1370 return (status); 1371 } 1372 1373 static void 1374 nxge_unmap_regs(p_nxge_t nxgep) 1375 { 1376 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs")); 1377 1378 if (isLDOMguest(nxgep)) { 1379 nxge_guest_regs_map_free(nxgep); 1380 return; 1381 } 1382 1383 if (nxgep->dev_regs) { 1384 if (nxgep->dev_regs->nxge_pciregh) { 1385 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1386 "==> nxge_unmap_regs: bus")); 1387 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh); 1388 nxgep->dev_regs->nxge_pciregh = NULL; 1389 } 1390 if (nxgep->dev_regs->nxge_regh) { 1391 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1392 "==> nxge_unmap_regs: device registers")); 1393 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1394 nxgep->dev_regs->nxge_regh = NULL; 1395 } 1396 if (nxgep->dev_regs->nxge_msix_regh) { 1397 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1398 "==> nxge_unmap_regs: device interrupts")); 1399 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); 1400 nxgep->dev_regs->nxge_msix_regh = NULL; 1401 } 1402 if (nxgep->dev_regs->nxge_vir_regh) { 1403 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1404 "==> nxge_unmap_regs: vio region")); 1405 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); 1406 nxgep->dev_regs->nxge_vir_regh = NULL; 1407 } 1408 if (nxgep->dev_regs->nxge_vir2_regh) { 1409 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1410 "==> nxge_unmap_regs: vio2 region")); 1411 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); 1412 nxgep->dev_regs->nxge_vir2_regh = NULL; 1413 } 1414 1415 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); 1416 nxgep->dev_regs = NULL; 1417 } 1418 1419 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); 1420 } 1421 1422 static nxge_status_t 1423 nxge_setup_mutexes(p_nxge_t nxgep) 1424 { 1425 int ddi_status = DDI_SUCCESS; 1426 nxge_status_t status = NXGE_OK; 1427 nxge_classify_t *classify_ptr; 1428 int partition; 1429 1430 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); 1431 1432 /* 1433 * Get the interrupt cookie so the mutexes can be 1434 * Initialized. 1435 */ 1436 if (isLDOMguest(nxgep)) { 1437 nxgep->interrupt_cookie = 0; 1438 } else { 1439 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, 1440 &nxgep->interrupt_cookie); 1441 1442 if (ddi_status != DDI_SUCCESS) { 1443 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1444 "<== nxge_setup_mutexes: failed 0x%x", 1445 ddi_status)); 1446 goto nxge_setup_mutexes_exit; 1447 } 1448 } 1449 1450 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL); 1451 MUTEX_INIT(&nxgep->poll_lock, NULL, 1452 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1453 1454 /* 1455 * Initialize mutexes for this device. 1456 */ 1457 MUTEX_INIT(nxgep->genlock, NULL, 1458 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1459 MUTEX_INIT(&nxgep->ouraddr_lock, NULL, 1460 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1461 MUTEX_INIT(&nxgep->mif_lock, NULL, 1462 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1463 MUTEX_INIT(&nxgep->group_lock, NULL, 1464 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1465 RW_INIT(&nxgep->filter_lock, NULL, 1466 RW_DRIVER, (void *)nxgep->interrupt_cookie); 1467 1468 classify_ptr = &nxgep->classifier; 1469 /* 1470 * FFLP Mutexes are never used in interrupt context 1471 * as fflp operation can take very long time to 1472 * complete and hence not suitable to invoke from interrupt 1473 * handlers. 1474 */ 1475 MUTEX_INIT(&classify_ptr->tcam_lock, NULL, 1476 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1477 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1478 MUTEX_INIT(&classify_ptr->fcram_lock, NULL, 1479 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1480 for (partition = 0; partition < MAX_PARTITION; partition++) { 1481 MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL, 1482 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1483 } 1484 } 1485 1486 nxge_setup_mutexes_exit: 1487 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1488 "<== nxge_setup_mutexes status = %x", status)); 1489 1490 if (ddi_status != DDI_SUCCESS) 1491 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1492 1493 return (status); 1494 } 1495 1496 static void 1497 nxge_destroy_mutexes(p_nxge_t nxgep) 1498 { 1499 int partition; 1500 nxge_classify_t *classify_ptr; 1501 1502 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes")); 1503 RW_DESTROY(&nxgep->filter_lock); 1504 MUTEX_DESTROY(&nxgep->group_lock); 1505 MUTEX_DESTROY(&nxgep->mif_lock); 1506 MUTEX_DESTROY(&nxgep->ouraddr_lock); 1507 MUTEX_DESTROY(nxgep->genlock); 1508 1509 classify_ptr = &nxgep->classifier; 1510 MUTEX_DESTROY(&classify_ptr->tcam_lock); 1511 1512 /* Destroy all polling resources. */ 1513 MUTEX_DESTROY(&nxgep->poll_lock); 1514 cv_destroy(&nxgep->poll_cv); 1515 1516 /* free data structures, based on HW type */ 1517 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1518 MUTEX_DESTROY(&classify_ptr->fcram_lock); 1519 for (partition = 0; partition < MAX_PARTITION; partition++) { 1520 MUTEX_DESTROY(&classify_ptr->hash_lock[partition]); 1521 } 1522 } 1523 1524 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes")); 1525 } 1526 1527 nxge_status_t 1528 nxge_init(p_nxge_t nxgep) 1529 { 1530 nxge_status_t status = NXGE_OK; 1531 1532 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init")); 1533 1534 if (nxgep->drv_state & STATE_HW_INITIALIZED) { 1535 return (status); 1536 } 1537 1538 /* 1539 * Allocate system memory for the receive/transmit buffer blocks 1540 * and receive/transmit descriptor rings. 1541 */ 1542 status = nxge_alloc_mem_pool(nxgep); 1543 if (status != NXGE_OK) { 1544 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n")); 1545 goto nxge_init_fail1; 1546 } 1547 1548 if (!isLDOMguest(nxgep)) { 1549 /* 1550 * Initialize and enable the TXC registers. 1551 * (Globally enable the Tx controller, 1552 * enable the port, configure the dma channel bitmap, 1553 * configure the max burst size). 1554 */ 1555 status = nxge_txc_init(nxgep); 1556 if (status != NXGE_OK) { 1557 NXGE_ERROR_MSG((nxgep, 1558 NXGE_ERR_CTL, "init txc failed\n")); 1559 goto nxge_init_fail2; 1560 } 1561 } 1562 1563 /* 1564 * Initialize and enable TXDMA channels. 1565 */ 1566 status = nxge_init_txdma_channels(nxgep); 1567 if (status != NXGE_OK) { 1568 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n")); 1569 goto nxge_init_fail3; 1570 } 1571 1572 /* 1573 * Initialize and enable RXDMA channels. 1574 */ 1575 status = nxge_init_rxdma_channels(nxgep); 1576 if (status != NXGE_OK) { 1577 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n")); 1578 goto nxge_init_fail4; 1579 } 1580 1581 /* 1582 * The guest domain is now done. 1583 */ 1584 if (isLDOMguest(nxgep)) { 1585 nxgep->drv_state |= STATE_HW_INITIALIZED; 1586 goto nxge_init_exit; 1587 } 1588 1589 /* 1590 * Initialize TCAM and FCRAM (Neptune). 1591 */ 1592 status = nxge_classify_init(nxgep); 1593 if (status != NXGE_OK) { 1594 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n")); 1595 goto nxge_init_fail5; 1596 } 1597 1598 /* 1599 * Initialize ZCP 1600 */ 1601 status = nxge_zcp_init(nxgep); 1602 if (status != NXGE_OK) { 1603 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n")); 1604 goto nxge_init_fail5; 1605 } 1606 1607 /* 1608 * Initialize IPP. 1609 */ 1610 status = nxge_ipp_init(nxgep); 1611 if (status != NXGE_OK) { 1612 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n")); 1613 goto nxge_init_fail5; 1614 } 1615 1616 /* 1617 * Initialize the MAC block. 1618 */ 1619 status = nxge_mac_init(nxgep); 1620 if (status != NXGE_OK) { 1621 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n")); 1622 goto nxge_init_fail5; 1623 } 1624 1625 /* 1626 * Enable the interrrupts for DDI. 1627 */ 1628 nxge_intrs_enable(nxgep); 1629 1630 nxgep->drv_state |= STATE_HW_INITIALIZED; 1631 1632 goto nxge_init_exit; 1633 1634 nxge_init_fail5: 1635 nxge_uninit_rxdma_channels(nxgep); 1636 nxge_init_fail4: 1637 nxge_uninit_txdma_channels(nxgep); 1638 nxge_init_fail3: 1639 if (!isLDOMguest(nxgep)) { 1640 (void) nxge_txc_uninit(nxgep); 1641 } 1642 nxge_init_fail2: 1643 nxge_free_mem_pool(nxgep); 1644 nxge_init_fail1: 1645 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1646 "<== nxge_init status (failed) = 0x%08x", status)); 1647 return (status); 1648 1649 nxge_init_exit: 1650 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x", 1651 status)); 1652 return (status); 1653 } 1654 1655 1656 timeout_id_t 1657 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec) 1658 { 1659 if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) { 1660 return (timeout(func, (caddr_t)nxgep, 1661 drv_usectohz(1000 * msec))); 1662 } 1663 return (NULL); 1664 } 1665 1666 /*ARGSUSED*/ 1667 void 1668 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid) 1669 { 1670 if (timerid) { 1671 (void) untimeout(timerid); 1672 } 1673 } 1674 1675 void 1676 nxge_uninit(p_nxge_t nxgep) 1677 { 1678 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit")); 1679 1680 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1681 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1682 "==> nxge_uninit: not initialized")); 1683 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1684 "<== nxge_uninit")); 1685 return; 1686 } 1687 1688 if (!isLDOMguest(nxgep)) { 1689 /* 1690 * Reset the receive MAC side. 1691 */ 1692 (void) nxge_rx_mac_disable(nxgep); 1693 1694 /* 1695 * Drain the IPP. 1696 */ 1697 (void) nxge_ipp_drain(nxgep); 1698 } 1699 1700 /* stop timer */ 1701 if (nxgep->nxge_timerid) { 1702 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 1703 nxgep->nxge_timerid = 0; 1704 } 1705 1706 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1707 (void) nxge_intr_hw_disable(nxgep); 1708 1709 1710 /* Disable and soft reset the IPP */ 1711 if (!isLDOMguest(nxgep)) 1712 (void) nxge_ipp_disable(nxgep); 1713 1714 /* Free classification resources */ 1715 (void) nxge_classify_uninit(nxgep); 1716 1717 /* 1718 * Reset the transmit/receive DMA side. 1719 */ 1720 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 1721 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1722 1723 nxge_uninit_txdma_channels(nxgep); 1724 nxge_uninit_rxdma_channels(nxgep); 1725 1726 /* 1727 * Reset the transmit MAC side. 1728 */ 1729 (void) nxge_tx_mac_disable(nxgep); 1730 1731 nxge_free_mem_pool(nxgep); 1732 1733 /* 1734 * Start the timer if the reset flag is not set. 1735 * If this reset flag is set, the link monitor 1736 * will not be started in order to stop furthur bus 1737 * activities coming from this interface. 1738 * The driver will start the monitor function 1739 * if the interface was initialized again later. 1740 */ 1741 if (!nxge_peu_reset_enable) { 1742 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1743 } 1744 1745 nxgep->drv_state &= ~STATE_HW_INITIALIZED; 1746 1747 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: " 1748 "nxge_mblks_pending %d", nxge_mblks_pending)); 1749 } 1750 1751 void 1752 nxge_get64(p_nxge_t nxgep, p_mblk_t mp) 1753 { 1754 uint64_t reg; 1755 uint64_t regdata; 1756 int i, retry; 1757 1758 bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t)); 1759 regdata = 0; 1760 retry = 1; 1761 1762 for (i = 0; i < retry; i++) { 1763 NXGE_REG_RD64(nxgep->npi_handle, reg, ®data); 1764 } 1765 bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t)); 1766 } 1767 1768 void 1769 nxge_put64(p_nxge_t nxgep, p_mblk_t mp) 1770 { 1771 uint64_t reg; 1772 uint64_t buf[2]; 1773 1774 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t)); 1775 reg = buf[0]; 1776 1777 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]); 1778 } 1779 1780 /*ARGSUSED*/ 1781 /*VARARGS*/ 1782 void 1783 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...) 1784 { 1785 char msg_buffer[1048]; 1786 char prefix_buffer[32]; 1787 int instance; 1788 uint64_t debug_level; 1789 int cmn_level = CE_CONT; 1790 va_list ap; 1791 1792 if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) { 1793 /* In case a developer has changed nxge_debug_level. */ 1794 if (nxgep->nxge_debug_level != nxge_debug_level) 1795 nxgep->nxge_debug_level = nxge_debug_level; 1796 } 1797 1798 debug_level = (nxgep == NULL) ? nxge_debug_level : 1799 nxgep->nxge_debug_level; 1800 1801 if ((level & debug_level) || 1802 (level == NXGE_NOTE) || 1803 (level == NXGE_ERR_CTL)) { 1804 /* do the msg processing */ 1805 MUTEX_ENTER(&nxgedebuglock); 1806 1807 if ((level & NXGE_NOTE)) { 1808 cmn_level = CE_NOTE; 1809 } 1810 1811 if (level & NXGE_ERR_CTL) { 1812 cmn_level = CE_WARN; 1813 } 1814 1815 va_start(ap, fmt); 1816 (void) vsprintf(msg_buffer, fmt, ap); 1817 va_end(ap); 1818 if (nxgep == NULL) { 1819 instance = -1; 1820 (void) sprintf(prefix_buffer, "%s :", "nxge"); 1821 } else { 1822 instance = nxgep->instance; 1823 (void) sprintf(prefix_buffer, 1824 "%s%d :", "nxge", instance); 1825 } 1826 1827 MUTEX_EXIT(&nxgedebuglock); 1828 cmn_err(cmn_level, "!%s %s\n", 1829 prefix_buffer, msg_buffer); 1830 1831 } 1832 } 1833 1834 char * 1835 nxge_dump_packet(char *addr, int size) 1836 { 1837 uchar_t *ap = (uchar_t *)addr; 1838 int i; 1839 static char etherbuf[1024]; 1840 char *cp = etherbuf; 1841 char digits[] = "0123456789abcdef"; 1842 1843 if (!size) 1844 size = 60; 1845 1846 if (size > MAX_DUMP_SZ) { 1847 /* Dump the leading bytes */ 1848 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1849 if (*ap > 0x0f) 1850 *cp++ = digits[*ap >> 4]; 1851 *cp++ = digits[*ap++ & 0xf]; 1852 *cp++ = ':'; 1853 } 1854 for (i = 0; i < 20; i++) 1855 *cp++ = '.'; 1856 /* Dump the last MAX_DUMP_SZ/2 bytes */ 1857 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2)); 1858 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1859 if (*ap > 0x0f) 1860 *cp++ = digits[*ap >> 4]; 1861 *cp++ = digits[*ap++ & 0xf]; 1862 *cp++ = ':'; 1863 } 1864 } else { 1865 for (i = 0; i < size; i++) { 1866 if (*ap > 0x0f) 1867 *cp++ = digits[*ap >> 4]; 1868 *cp++ = digits[*ap++ & 0xf]; 1869 *cp++ = ':'; 1870 } 1871 } 1872 *--cp = 0; 1873 return (etherbuf); 1874 } 1875 1876 #ifdef NXGE_DEBUG 1877 static void 1878 nxge_test_map_regs(p_nxge_t nxgep) 1879 { 1880 ddi_acc_handle_t cfg_handle; 1881 p_pci_cfg_t cfg_ptr; 1882 ddi_acc_handle_t dev_handle; 1883 char *dev_ptr; 1884 ddi_acc_handle_t pci_config_handle; 1885 uint32_t regval; 1886 int i; 1887 1888 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs")); 1889 1890 dev_handle = nxgep->dev_regs->nxge_regh; 1891 dev_ptr = (char *)nxgep->dev_regs->nxge_regp; 1892 1893 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1894 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1895 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1896 1897 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1898 "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr)); 1899 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1900 "Neptune PCI cfg_ptr vendor id ptr 0x%llx", 1901 &cfg_ptr->vendorid)); 1902 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1903 "\tvendorid 0x%x devid 0x%x", 1904 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0), 1905 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0))); 1906 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1907 "PCI BAR: base 0x%x base14 0x%x base 18 0x%x " 1908 "bar1c 0x%x", 1909 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0), 1910 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0), 1911 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0), 1912 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0))); 1913 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1914 "\nNeptune PCI BAR: base20 0x%x base24 0x%x " 1915 "base 28 0x%x bar2c 0x%x\n", 1916 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0), 1917 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0), 1918 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0), 1919 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0))); 1920 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1921 "\nNeptune PCI BAR: base30 0x%x\n", 1922 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); 1923 1924 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1925 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1926 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1927 "first 0x%llx second 0x%llx third 0x%llx " 1928 "last 0x%llx ", 1929 NXGE_PIO_READ64(dev_handle, 1930 (uint64_t *)(dev_ptr + 0), 0), 1931 NXGE_PIO_READ64(dev_handle, 1932 (uint64_t *)(dev_ptr + 8), 0), 1933 NXGE_PIO_READ64(dev_handle, 1934 (uint64_t *)(dev_ptr + 16), 0), 1935 NXGE_PIO_READ64(cfg_handle, 1936 (uint64_t *)(dev_ptr + 24), 0))); 1937 } 1938 } 1939 1940 #endif 1941 1942 static void 1943 nxge_suspend(p_nxge_t nxgep) 1944 { 1945 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); 1946 1947 nxge_intrs_disable(nxgep); 1948 nxge_destroy_dev(nxgep); 1949 1950 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); 1951 } 1952 1953 static nxge_status_t 1954 nxge_resume(p_nxge_t nxgep) 1955 { 1956 nxge_status_t status = NXGE_OK; 1957 1958 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); 1959 1960 nxgep->suspended = DDI_RESUME; 1961 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1962 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1963 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 1964 (void) nxge_rx_mac_enable(nxgep); 1965 (void) nxge_tx_mac_enable(nxgep); 1966 nxge_intrs_enable(nxgep); 1967 nxgep->suspended = 0; 1968 1969 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1970 "<== nxge_resume status = 0x%x", status)); 1971 return (status); 1972 } 1973 1974 static nxge_status_t 1975 nxge_setup_dev(p_nxge_t nxgep) 1976 { 1977 nxge_status_t status = NXGE_OK; 1978 1979 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 1980 nxgep->mac.portnum)); 1981 1982 status = nxge_link_init(nxgep); 1983 1984 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 1985 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1986 "port%d Bad register acc handle", nxgep->mac.portnum)); 1987 status = NXGE_ERROR; 1988 } 1989 1990 if (status != NXGE_OK) { 1991 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1992 " nxge_setup_dev status " 1993 "(xcvr init 0x%08x)", status)); 1994 goto nxge_setup_dev_exit; 1995 } 1996 1997 nxge_setup_dev_exit: 1998 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1999 "<== nxge_setup_dev port %d status = 0x%08x", 2000 nxgep->mac.portnum, status)); 2001 2002 return (status); 2003 } 2004 2005 static void 2006 nxge_destroy_dev(p_nxge_t nxgep) 2007 { 2008 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 2009 2010 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 2011 2012 (void) nxge_hw_stop(nxgep); 2013 2014 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 2015 } 2016 2017 static nxge_status_t 2018 nxge_setup_system_dma_pages(p_nxge_t nxgep) 2019 { 2020 int ddi_status = DDI_SUCCESS; 2021 uint_t count; 2022 ddi_dma_cookie_t cookie; 2023 uint_t iommu_pagesize; 2024 nxge_status_t status = NXGE_OK; 2025 2026 NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 2027 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 2028 if (nxgep->niu_type != N2_NIU) { 2029 iommu_pagesize = dvma_pagesize(nxgep->dip); 2030 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2031 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2032 " default_block_size %d iommu_pagesize %d", 2033 nxgep->sys_page_sz, 2034 ddi_ptob(nxgep->dip, (ulong_t)1), 2035 nxgep->rx_default_block_size, 2036 iommu_pagesize)); 2037 2038 if (iommu_pagesize != 0) { 2039 if (nxgep->sys_page_sz == iommu_pagesize) { 2040 if (iommu_pagesize > 0x4000) 2041 nxgep->sys_page_sz = 0x4000; 2042 } else { 2043 if (nxgep->sys_page_sz > iommu_pagesize) 2044 nxgep->sys_page_sz = iommu_pagesize; 2045 } 2046 } 2047 } 2048 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2049 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2050 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2051 "default_block_size %d page mask %d", 2052 nxgep->sys_page_sz, 2053 ddi_ptob(nxgep->dip, (ulong_t)1), 2054 nxgep->rx_default_block_size, 2055 nxgep->sys_page_mask)); 2056 2057 2058 switch (nxgep->sys_page_sz) { 2059 default: 2060 nxgep->sys_page_sz = 0x1000; 2061 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2062 nxgep->rx_default_block_size = 0x1000; 2063 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2064 break; 2065 case 0x1000: 2066 nxgep->rx_default_block_size = 0x1000; 2067 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2068 break; 2069 case 0x2000: 2070 nxgep->rx_default_block_size = 0x2000; 2071 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2072 break; 2073 case 0x4000: 2074 nxgep->rx_default_block_size = 0x4000; 2075 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 2076 break; 2077 case 0x8000: 2078 nxgep->rx_default_block_size = 0x8000; 2079 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 2080 break; 2081 } 2082 2083 #ifndef USE_RX_BIG_BUF 2084 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 2085 #else 2086 nxgep->rx_default_block_size = 0x2000; 2087 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2088 #endif 2089 /* 2090 * Get the system DMA burst size. 2091 */ 2092 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2093 DDI_DMA_DONTWAIT, 0, 2094 &nxgep->dmasparehandle); 2095 if (ddi_status != DDI_SUCCESS) { 2096 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2097 "ddi_dma_alloc_handle: failed " 2098 " status 0x%x", ddi_status)); 2099 goto nxge_get_soft_properties_exit; 2100 } 2101 2102 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2103 (caddr_t)nxgep->dmasparehandle, 2104 sizeof (nxgep->dmasparehandle), 2105 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2106 DDI_DMA_DONTWAIT, 0, 2107 &cookie, &count); 2108 if (ddi_status != DDI_DMA_MAPPED) { 2109 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2110 "Binding spare handle to find system" 2111 " burstsize failed.")); 2112 ddi_status = DDI_FAILURE; 2113 goto nxge_get_soft_properties_fail1; 2114 } 2115 2116 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2117 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2118 2119 nxge_get_soft_properties_fail1: 2120 ddi_dma_free_handle(&nxgep->dmasparehandle); 2121 2122 nxge_get_soft_properties_exit: 2123 2124 if (ddi_status != DDI_SUCCESS) 2125 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2126 2127 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2128 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2129 return (status); 2130 } 2131 2132 static nxge_status_t 2133 nxge_alloc_mem_pool(p_nxge_t nxgep) 2134 { 2135 nxge_status_t status = NXGE_OK; 2136 2137 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2138 2139 status = nxge_alloc_rx_mem_pool(nxgep); 2140 if (status != NXGE_OK) { 2141 return (NXGE_ERROR); 2142 } 2143 2144 status = nxge_alloc_tx_mem_pool(nxgep); 2145 if (status != NXGE_OK) { 2146 nxge_free_rx_mem_pool(nxgep); 2147 return (NXGE_ERROR); 2148 } 2149 2150 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2151 return (NXGE_OK); 2152 } 2153 2154 static void 2155 nxge_free_mem_pool(p_nxge_t nxgep) 2156 { 2157 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2158 2159 nxge_free_rx_mem_pool(nxgep); 2160 nxge_free_tx_mem_pool(nxgep); 2161 2162 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 2163 } 2164 2165 nxge_status_t 2166 nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 2167 { 2168 uint32_t rdc_max; 2169 p_nxge_dma_pt_cfg_t p_all_cfgp; 2170 p_nxge_hw_pt_cfg_t p_cfgp; 2171 p_nxge_dma_pool_t dma_poolp; 2172 p_nxge_dma_common_t *dma_buf_p; 2173 p_nxge_dma_pool_t dma_cntl_poolp; 2174 p_nxge_dma_common_t *dma_cntl_p; 2175 uint32_t *num_chunks; /* per dma */ 2176 nxge_status_t status = NXGE_OK; 2177 2178 uint32_t nxge_port_rbr_size; 2179 uint32_t nxge_port_rbr_spare_size; 2180 uint32_t nxge_port_rcr_size; 2181 uint32_t rx_cntl_alloc_size; 2182 2183 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 2184 2185 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2186 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2187 rdc_max = NXGE_MAX_RDCS; 2188 2189 /* 2190 * Allocate memory for the common DMA data structures. 2191 */ 2192 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2193 KM_SLEEP); 2194 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2195 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2196 2197 dma_cntl_poolp = (p_nxge_dma_pool_t) 2198 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2199 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2200 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2201 2202 num_chunks = (uint32_t *)KMEM_ZALLOC( 2203 sizeof (uint32_t) * rdc_max, KM_SLEEP); 2204 2205 /* 2206 * Assume that each DMA channel will be configured with 2207 * the default block size. 2208 * rbr block counts are modulo the batch count (16). 2209 */ 2210 nxge_port_rbr_size = p_all_cfgp->rbr_size; 2211 nxge_port_rcr_size = p_all_cfgp->rcr_size; 2212 2213 if (!nxge_port_rbr_size) { 2214 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 2215 } 2216 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 2217 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2218 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 2219 } 2220 2221 p_all_cfgp->rbr_size = nxge_port_rbr_size; 2222 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 2223 2224 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 2225 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2226 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 2227 } 2228 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 2229 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2230 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 2231 "set to default %d", 2232 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 2233 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 2234 } 2235 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 2236 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2237 "nxge_alloc_rx_mem_pool: RCR too high %d, " 2238 "set to default %d", 2239 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 2240 nxge_port_rcr_size = RCR_DEFAULT_MAX; 2241 } 2242 2243 /* 2244 * N2/NIU has limitation on the descriptor sizes (contiguous 2245 * memory allocation on data buffers to 4M (contig_mem_alloc) 2246 * and little endian for control buffers (must use the ddi/dki mem alloc 2247 * function). 2248 */ 2249 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2250 if (nxgep->niu_type == N2_NIU) { 2251 nxge_port_rbr_spare_size = 0; 2252 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2253 (!ISP2(nxge_port_rbr_size))) { 2254 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2255 } 2256 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2257 (!ISP2(nxge_port_rcr_size))) { 2258 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2259 } 2260 } 2261 #endif 2262 2263 /* 2264 * Addresses of receive block ring, receive completion ring and the 2265 * mailbox must be all cache-aligned (64 bytes). 2266 */ 2267 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2268 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2269 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2270 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2271 2272 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2273 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2274 "nxge_port_rcr_size = %d " 2275 "rx_cntl_alloc_size = %d", 2276 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2277 nxge_port_rcr_size, 2278 rx_cntl_alloc_size)); 2279 2280 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2281 if (nxgep->niu_type == N2_NIU) { 2282 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2283 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2284 2285 if (!ISP2(rx_buf_alloc_size)) { 2286 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2287 "==> nxge_alloc_rx_mem_pool: " 2288 " must be power of 2")); 2289 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2290 goto nxge_alloc_rx_mem_pool_exit; 2291 } 2292 2293 if (rx_buf_alloc_size > (1 << 22)) { 2294 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2295 "==> nxge_alloc_rx_mem_pool: " 2296 " limit size to 4M")); 2297 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2298 goto nxge_alloc_rx_mem_pool_exit; 2299 } 2300 2301 if (rx_cntl_alloc_size < 0x2000) { 2302 rx_cntl_alloc_size = 0x2000; 2303 } 2304 } 2305 #endif 2306 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2307 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2308 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2309 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2310 2311 dma_poolp->ndmas = p_cfgp->max_rdcs; 2312 dma_poolp->num_chunks = num_chunks; 2313 dma_poolp->buf_allocated = B_TRUE; 2314 nxgep->rx_buf_pool_p = dma_poolp; 2315 dma_poolp->dma_buf_pool_p = dma_buf_p; 2316 2317 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2318 dma_cntl_poolp->buf_allocated = B_TRUE; 2319 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2320 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2321 2322 /* Allocate the receive rings, too. */ 2323 nxgep->rx_rbr_rings = 2324 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2325 nxgep->rx_rbr_rings->rbr_rings = 2326 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2327 nxgep->rx_rcr_rings = 2328 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2329 nxgep->rx_rcr_rings->rcr_rings = 2330 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2331 nxgep->rx_mbox_areas_p = 2332 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2333 nxgep->rx_mbox_areas_p->rxmbox_areas = 2334 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2335 2336 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2337 p_cfgp->max_rdcs; 2338 2339 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2340 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2341 2342 nxge_alloc_rx_mem_pool_exit: 2343 return (status); 2344 } 2345 2346 /* 2347 * nxge_alloc_rxb 2348 * 2349 * Allocate buffers for an RDC. 2350 * 2351 * Arguments: 2352 * nxgep 2353 * channel The channel to map into our kernel space. 2354 * 2355 * Notes: 2356 * 2357 * NPI function calls: 2358 * 2359 * NXGE function calls: 2360 * 2361 * Registers accessed: 2362 * 2363 * Context: 2364 * 2365 * Taking apart: 2366 * 2367 * Open questions: 2368 * 2369 */ 2370 nxge_status_t 2371 nxge_alloc_rxb( 2372 p_nxge_t nxgep, 2373 int channel) 2374 { 2375 size_t rx_buf_alloc_size; 2376 nxge_status_t status = NXGE_OK; 2377 2378 nxge_dma_common_t **data; 2379 nxge_dma_common_t **control; 2380 uint32_t *num_chunks; 2381 2382 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2383 2384 /* 2385 * Allocate memory for the receive buffers and descriptor rings. 2386 * Replace these allocation functions with the interface functions 2387 * provided by the partition manager if/when they are available. 2388 */ 2389 2390 /* 2391 * Allocate memory for the receive buffer blocks. 2392 */ 2393 rx_buf_alloc_size = (nxgep->rx_default_block_size * 2394 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); 2395 2396 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2397 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; 2398 2399 if ((status = nxge_alloc_rx_buf_dma( 2400 nxgep, channel, data, rx_buf_alloc_size, 2401 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { 2402 return (status); 2403 } 2404 2405 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " 2406 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); 2407 2408 /* 2409 * Allocate memory for descriptor rings and mailbox. 2410 */ 2411 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2412 2413 if ((status = nxge_alloc_rx_cntl_dma( 2414 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 2415 != NXGE_OK) { 2416 nxge_free_rx_cntl_dma(nxgep, *control); 2417 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 2418 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 2419 return (status); 2420 } 2421 2422 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2423 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2424 2425 return (status); 2426 } 2427 2428 void 2429 nxge_free_rxb( 2430 p_nxge_t nxgep, 2431 int channel) 2432 { 2433 nxge_dma_common_t *data; 2434 nxge_dma_common_t *control; 2435 uint32_t num_chunks; 2436 2437 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2438 2439 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2440 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 2441 nxge_free_rx_buf_dma(nxgep, data, num_chunks); 2442 2443 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2444 nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 2445 2446 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2447 nxge_free_rx_cntl_dma(nxgep, control); 2448 2449 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2450 2451 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2452 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2453 2454 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_alloc_rbb")); 2455 } 2456 2457 static void 2458 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2459 { 2460 int rdc_max = NXGE_MAX_RDCS; 2461 2462 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2463 2464 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 2465 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2466 "<== nxge_free_rx_mem_pool " 2467 "(null rx buf pool or buf not allocated")); 2468 return; 2469 } 2470 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 2471 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2472 "<== nxge_free_rx_mem_pool " 2473 "(null rx cntl buf pool or cntl buf not allocated")); 2474 return; 2475 } 2476 2477 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2478 sizeof (p_nxge_dma_common_t) * rdc_max); 2479 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2480 2481 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2482 sizeof (uint32_t) * rdc_max); 2483 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2484 sizeof (p_nxge_dma_common_t) * rdc_max); 2485 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2486 2487 nxgep->rx_buf_pool_p = 0; 2488 nxgep->rx_cntl_pool_p = 0; 2489 2490 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2491 sizeof (p_rx_rbr_ring_t) * rdc_max); 2492 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2493 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2494 sizeof (p_rx_rcr_ring_t) * rdc_max); 2495 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2496 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2497 sizeof (p_rx_mbox_t) * rdc_max); 2498 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2499 2500 nxgep->rx_rbr_rings = 0; 2501 nxgep->rx_rcr_rings = 0; 2502 nxgep->rx_mbox_areas_p = 0; 2503 2504 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2505 } 2506 2507 2508 static nxge_status_t 2509 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2510 p_nxge_dma_common_t *dmap, 2511 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2512 { 2513 p_nxge_dma_common_t rx_dmap; 2514 nxge_status_t status = NXGE_OK; 2515 size_t total_alloc_size; 2516 size_t allocated = 0; 2517 int i, size_index, array_size; 2518 boolean_t use_kmem_alloc = B_FALSE; 2519 2520 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2521 2522 rx_dmap = (p_nxge_dma_common_t) 2523 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2524 KM_SLEEP); 2525 2526 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2527 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2528 dma_channel, alloc_size, block_size, dmap)); 2529 2530 total_alloc_size = alloc_size; 2531 2532 #if defined(RX_USE_RECLAIM_POST) 2533 total_alloc_size = alloc_size + alloc_size/4; 2534 #endif 2535 2536 i = 0; 2537 size_index = 0; 2538 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2539 while ((size_index < array_size) && 2540 (alloc_sizes[size_index] < alloc_size)) 2541 size_index++; 2542 if (size_index >= array_size) { 2543 size_index = array_size - 1; 2544 } 2545 2546 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2547 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2548 use_kmem_alloc = B_TRUE; 2549 #if defined(__i386) || defined(__amd64) 2550 size_index = 0; 2551 #endif 2552 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2553 "==> nxge_alloc_rx_buf_dma: " 2554 "Neptune use kmem_alloc() - size_index %d", 2555 size_index)); 2556 } 2557 2558 while ((allocated < total_alloc_size) && 2559 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2560 rx_dmap[i].dma_chunk_index = i; 2561 rx_dmap[i].block_size = block_size; 2562 rx_dmap[i].alength = alloc_sizes[size_index]; 2563 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2564 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2565 rx_dmap[i].dma_channel = dma_channel; 2566 rx_dmap[i].contig_alloc_type = B_FALSE; 2567 rx_dmap[i].kmem_alloc_type = B_FALSE; 2568 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2569 2570 /* 2571 * N2/NIU: data buffers must be contiguous as the driver 2572 * needs to call Hypervisor api to set up 2573 * logical pages. 2574 */ 2575 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2576 rx_dmap[i].contig_alloc_type = B_TRUE; 2577 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2578 } else if (use_kmem_alloc) { 2579 /* For Neptune, use kmem_alloc */ 2580 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2581 "==> nxge_alloc_rx_buf_dma: " 2582 "Neptune use kmem_alloc()")); 2583 rx_dmap[i].kmem_alloc_type = B_TRUE; 2584 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2585 } 2586 2587 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2588 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2589 "i %d nblocks %d alength %d", 2590 dma_channel, i, &rx_dmap[i], block_size, 2591 i, rx_dmap[i].nblocks, 2592 rx_dmap[i].alength)); 2593 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2594 &nxge_rx_dma_attr, 2595 rx_dmap[i].alength, 2596 &nxge_dev_buf_dma_acc_attr, 2597 DDI_DMA_READ | DDI_DMA_STREAMING, 2598 (p_nxge_dma_common_t)(&rx_dmap[i])); 2599 if (status != NXGE_OK) { 2600 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2601 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2602 "dma %d size_index %d size requested %d", 2603 dma_channel, 2604 size_index, 2605 rx_dmap[i].alength)); 2606 size_index--; 2607 } else { 2608 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2609 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2610 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2611 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2612 "buf_alloc_state %d alloc_type %d", 2613 dma_channel, 2614 &rx_dmap[i], 2615 rx_dmap[i].kaddrp, 2616 rx_dmap[i].alength, 2617 rx_dmap[i].buf_alloc_state, 2618 rx_dmap[i].buf_alloc_type)); 2619 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2620 " alloc_rx_buf_dma allocated rdc %d " 2621 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2622 dma_channel, i, rx_dmap[i].alength, 2623 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2624 rx_dmap[i].kaddrp)); 2625 i++; 2626 allocated += alloc_sizes[size_index]; 2627 } 2628 } 2629 2630 if (allocated < total_alloc_size) { 2631 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2632 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2633 "allocated 0x%x requested 0x%x", 2634 dma_channel, 2635 allocated, total_alloc_size)); 2636 status = NXGE_ERROR; 2637 goto nxge_alloc_rx_mem_fail1; 2638 } 2639 2640 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2641 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2642 "allocated 0x%x requested 0x%x", 2643 dma_channel, 2644 allocated, total_alloc_size)); 2645 2646 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2647 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2648 dma_channel, i)); 2649 *num_chunks = i; 2650 *dmap = rx_dmap; 2651 2652 goto nxge_alloc_rx_mem_exit; 2653 2654 nxge_alloc_rx_mem_fail1: 2655 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2656 2657 nxge_alloc_rx_mem_exit: 2658 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2659 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2660 2661 return (status); 2662 } 2663 2664 /*ARGSUSED*/ 2665 static void 2666 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2667 uint32_t num_chunks) 2668 { 2669 int i; 2670 2671 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2672 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2673 2674 if (dmap == 0) 2675 return; 2676 2677 for (i = 0; i < num_chunks; i++) { 2678 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2679 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2680 i, dmap)); 2681 nxge_dma_free_rx_data_buf(dmap++); 2682 } 2683 2684 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2685 } 2686 2687 /*ARGSUSED*/ 2688 static nxge_status_t 2689 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2690 p_nxge_dma_common_t *dmap, size_t size) 2691 { 2692 p_nxge_dma_common_t rx_dmap; 2693 nxge_status_t status = NXGE_OK; 2694 2695 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2696 2697 rx_dmap = (p_nxge_dma_common_t) 2698 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2699 2700 rx_dmap->contig_alloc_type = B_FALSE; 2701 rx_dmap->kmem_alloc_type = B_FALSE; 2702 2703 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2704 &nxge_desc_dma_attr, 2705 size, 2706 &nxge_dev_desc_dma_acc_attr, 2707 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2708 rx_dmap); 2709 if (status != NXGE_OK) { 2710 goto nxge_alloc_rx_cntl_dma_fail1; 2711 } 2712 2713 *dmap = rx_dmap; 2714 goto nxge_alloc_rx_cntl_dma_exit; 2715 2716 nxge_alloc_rx_cntl_dma_fail1: 2717 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2718 2719 nxge_alloc_rx_cntl_dma_exit: 2720 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2721 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2722 2723 return (status); 2724 } 2725 2726 /*ARGSUSED*/ 2727 static void 2728 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2729 { 2730 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2731 2732 if (dmap == 0) 2733 return; 2734 2735 nxge_dma_mem_free(dmap); 2736 2737 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2738 } 2739 2740 typedef struct { 2741 size_t tx_size; 2742 size_t cr_size; 2743 size_t threshhold; 2744 } nxge_tdc_sizes_t; 2745 2746 static 2747 nxge_status_t 2748 nxge_tdc_sizes( 2749 nxge_t *nxgep, 2750 nxge_tdc_sizes_t *sizes) 2751 { 2752 uint32_t threshhold; /* The bcopy() threshhold */ 2753 size_t tx_size; /* Transmit buffer size */ 2754 size_t cr_size; /* Completion ring size */ 2755 2756 /* 2757 * Assume that each DMA channel will be configured with the 2758 * default transmit buffer size for copying transmit data. 2759 * (If a packet is bigger than this, it will not be copied.) 2760 */ 2761 if (nxgep->niu_type == N2_NIU) { 2762 threshhold = TX_BCOPY_SIZE; 2763 } else { 2764 threshhold = nxge_bcopy_thresh; 2765 } 2766 tx_size = nxge_tx_ring_size * threshhold; 2767 2768 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 2769 cr_size += sizeof (txdma_mailbox_t); 2770 2771 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2772 if (nxgep->niu_type == N2_NIU) { 2773 if (!ISP2(tx_size)) { 2774 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2775 "==> nxge_tdc_sizes: Tx size" 2776 " must be power of 2")); 2777 return (NXGE_ERROR); 2778 } 2779 2780 if (tx_size > (1 << 22)) { 2781 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2782 "==> nxge_tdc_sizes: Tx size" 2783 " limited to 4M")); 2784 return (NXGE_ERROR); 2785 } 2786 2787 if (cr_size < 0x2000) 2788 cr_size = 0x2000; 2789 } 2790 #endif 2791 2792 sizes->threshhold = threshhold; 2793 sizes->tx_size = tx_size; 2794 sizes->cr_size = cr_size; 2795 2796 return (NXGE_OK); 2797 } 2798 /* 2799 * nxge_alloc_txb 2800 * 2801 * Allocate buffers for an TDC. 2802 * 2803 * Arguments: 2804 * nxgep 2805 * channel The channel to map into our kernel space. 2806 * 2807 * Notes: 2808 * 2809 * NPI function calls: 2810 * 2811 * NXGE function calls: 2812 * 2813 * Registers accessed: 2814 * 2815 * Context: 2816 * 2817 * Taking apart: 2818 * 2819 * Open questions: 2820 * 2821 */ 2822 nxge_status_t 2823 nxge_alloc_txb( 2824 p_nxge_t nxgep, 2825 int channel) 2826 { 2827 nxge_dma_common_t **dma_buf_p; 2828 nxge_dma_common_t **dma_cntl_p; 2829 uint32_t *num_chunks; 2830 nxge_status_t status = NXGE_OK; 2831 2832 nxge_tdc_sizes_t sizes; 2833 2834 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tbb")); 2835 2836 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 2837 return (NXGE_ERROR); 2838 2839 /* 2840 * Allocate memory for transmit buffers and descriptor rings. 2841 * Replace these allocation functions with the interface functions 2842 * provided by the partition manager Real Soon Now. 2843 */ 2844 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2845 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 2846 2847 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2848 2849 /* 2850 * Allocate memory for transmit buffers and descriptor rings. 2851 * Replace allocation functions with interface functions provided 2852 * by the partition manager when it is available. 2853 * 2854 * Allocate memory for the transmit buffer pool. 2855 */ 2856 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2857 "sizes: tx: %ld, cr:%ld, th:%ld", 2858 sizes.tx_size, sizes.cr_size, sizes.threshhold)); 2859 2860 *num_chunks = 0; 2861 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 2862 sizes.tx_size, sizes.threshhold, num_chunks); 2863 if (status != NXGE_OK) { 2864 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 2865 return (status); 2866 } 2867 2868 /* 2869 * Allocate memory for descriptor rings and mailbox. 2870 */ 2871 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 2872 sizes.cr_size); 2873 if (status != NXGE_OK) { 2874 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 2875 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 2876 return (status); 2877 } 2878 2879 return (NXGE_OK); 2880 } 2881 2882 void 2883 nxge_free_txb( 2884 p_nxge_t nxgep, 2885 int channel) 2886 { 2887 nxge_dma_common_t *data; 2888 nxge_dma_common_t *control; 2889 uint32_t num_chunks; 2890 2891 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 2892 2893 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2894 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2895 nxge_free_tx_buf_dma(nxgep, data, num_chunks); 2896 2897 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2898 nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 2899 2900 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2901 nxge_free_tx_cntl_dma(nxgep, control); 2902 2903 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2904 2905 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2906 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2907 2908 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 2909 } 2910 2911 /* 2912 * nxge_alloc_tx_mem_pool 2913 * 2914 * This function allocates all of the per-port TDC control data structures. 2915 * The per-channel (TDC) data structures are allocated when needed. 2916 * 2917 * Arguments: 2918 * nxgep 2919 * 2920 * Notes: 2921 * 2922 * Context: 2923 * Any domain 2924 */ 2925 nxge_status_t 2926 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2927 { 2928 nxge_hw_pt_cfg_t *p_cfgp; 2929 nxge_dma_pool_t *dma_poolp; 2930 nxge_dma_common_t **dma_buf_p; 2931 nxge_dma_pool_t *dma_cntl_poolp; 2932 nxge_dma_common_t **dma_cntl_p; 2933 uint32_t *num_chunks; /* per dma */ 2934 int tdc_max; 2935 2936 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2937 2938 p_cfgp = &nxgep->pt_config.hw_config; 2939 tdc_max = NXGE_MAX_TDCS; 2940 2941 /* 2942 * Allocate memory for each transmit DMA channel. 2943 */ 2944 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2945 KM_SLEEP); 2946 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2947 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2948 2949 dma_cntl_poolp = (p_nxge_dma_pool_t) 2950 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2951 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2952 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2953 2954 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2955 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2956 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2957 "set to default %d", 2958 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2959 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2960 } 2961 2962 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2963 /* 2964 * N2/NIU has limitation on the descriptor sizes (contiguous 2965 * memory allocation on data buffers to 4M (contig_mem_alloc) 2966 * and little endian for control buffers (must use the ddi/dki mem alloc 2967 * function). The transmit ring is limited to 8K (includes the 2968 * mailbox). 2969 */ 2970 if (nxgep->niu_type == N2_NIU) { 2971 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2972 (!ISP2(nxge_tx_ring_size))) { 2973 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2974 } 2975 } 2976 #endif 2977 2978 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2979 2980 num_chunks = (uint32_t *)KMEM_ZALLOC( 2981 sizeof (uint32_t) * tdc_max, KM_SLEEP); 2982 2983 dma_poolp->ndmas = p_cfgp->tdc.owned; 2984 dma_poolp->num_chunks = num_chunks; 2985 dma_poolp->dma_buf_pool_p = dma_buf_p; 2986 nxgep->tx_buf_pool_p = dma_poolp; 2987 2988 dma_poolp->buf_allocated = B_TRUE; 2989 2990 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 2991 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2992 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2993 2994 dma_cntl_poolp->buf_allocated = B_TRUE; 2995 2996 nxgep->tx_rings = 2997 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 2998 nxgep->tx_rings->rings = 2999 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 3000 nxgep->tx_mbox_areas_p = 3001 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 3002 nxgep->tx_mbox_areas_p->txmbox_areas_p = 3003 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 3004 3005 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 3006 3007 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3008 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 3009 tdc_max, dma_poolp->ndmas)); 3010 3011 return (NXGE_OK); 3012 } 3013 3014 nxge_status_t 3015 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 3016 p_nxge_dma_common_t *dmap, size_t alloc_size, 3017 size_t block_size, uint32_t *num_chunks) 3018 { 3019 p_nxge_dma_common_t tx_dmap; 3020 nxge_status_t status = NXGE_OK; 3021 size_t total_alloc_size; 3022 size_t allocated = 0; 3023 int i, size_index, array_size; 3024 3025 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 3026 3027 tx_dmap = (p_nxge_dma_common_t) 3028 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 3029 KM_SLEEP); 3030 3031 total_alloc_size = alloc_size; 3032 i = 0; 3033 size_index = 0; 3034 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3035 while ((size_index < array_size) && 3036 (alloc_sizes[size_index] < alloc_size)) 3037 size_index++; 3038 if (size_index >= array_size) { 3039 size_index = array_size - 1; 3040 } 3041 3042 while ((allocated < total_alloc_size) && 3043 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3044 3045 tx_dmap[i].dma_chunk_index = i; 3046 tx_dmap[i].block_size = block_size; 3047 tx_dmap[i].alength = alloc_sizes[size_index]; 3048 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3049 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3050 tx_dmap[i].dma_channel = dma_channel; 3051 tx_dmap[i].contig_alloc_type = B_FALSE; 3052 tx_dmap[i].kmem_alloc_type = B_FALSE; 3053 3054 /* 3055 * N2/NIU: data buffers must be contiguous as the driver 3056 * needs to call Hypervisor api to set up 3057 * logical pages. 3058 */ 3059 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3060 tx_dmap[i].contig_alloc_type = B_TRUE; 3061 } 3062 3063 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3064 &nxge_tx_dma_attr, 3065 tx_dmap[i].alength, 3066 &nxge_dev_buf_dma_acc_attr, 3067 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3068 (p_nxge_dma_common_t)(&tx_dmap[i])); 3069 if (status != NXGE_OK) { 3070 size_index--; 3071 } else { 3072 i++; 3073 allocated += alloc_sizes[size_index]; 3074 } 3075 } 3076 3077 if (allocated < total_alloc_size) { 3078 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3079 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 3080 "allocated 0x%x requested 0x%x", 3081 dma_channel, 3082 allocated, total_alloc_size)); 3083 status = NXGE_ERROR; 3084 goto nxge_alloc_tx_mem_fail1; 3085 } 3086 3087 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3088 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3089 "allocated 0x%x requested 0x%x", 3090 dma_channel, 3091 allocated, total_alloc_size)); 3092 3093 *num_chunks = i; 3094 *dmap = tx_dmap; 3095 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3096 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3097 *dmap, i)); 3098 goto nxge_alloc_tx_mem_exit; 3099 3100 nxge_alloc_tx_mem_fail1: 3101 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3102 3103 nxge_alloc_tx_mem_exit: 3104 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3105 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3106 3107 return (status); 3108 } 3109 3110 /*ARGSUSED*/ 3111 static void 3112 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3113 uint32_t num_chunks) 3114 { 3115 int i; 3116 3117 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3118 3119 if (dmap == 0) 3120 return; 3121 3122 for (i = 0; i < num_chunks; i++) { 3123 nxge_dma_mem_free(dmap++); 3124 } 3125 3126 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3127 } 3128 3129 /*ARGSUSED*/ 3130 nxge_status_t 3131 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3132 p_nxge_dma_common_t *dmap, size_t size) 3133 { 3134 p_nxge_dma_common_t tx_dmap; 3135 nxge_status_t status = NXGE_OK; 3136 3137 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3138 tx_dmap = (p_nxge_dma_common_t) 3139 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3140 3141 tx_dmap->contig_alloc_type = B_FALSE; 3142 tx_dmap->kmem_alloc_type = B_FALSE; 3143 3144 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3145 &nxge_desc_dma_attr, 3146 size, 3147 &nxge_dev_desc_dma_acc_attr, 3148 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3149 tx_dmap); 3150 if (status != NXGE_OK) { 3151 goto nxge_alloc_tx_cntl_dma_fail1; 3152 } 3153 3154 *dmap = tx_dmap; 3155 goto nxge_alloc_tx_cntl_dma_exit; 3156 3157 nxge_alloc_tx_cntl_dma_fail1: 3158 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3159 3160 nxge_alloc_tx_cntl_dma_exit: 3161 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3162 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3163 3164 return (status); 3165 } 3166 3167 /*ARGSUSED*/ 3168 static void 3169 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3170 { 3171 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3172 3173 if (dmap == 0) 3174 return; 3175 3176 nxge_dma_mem_free(dmap); 3177 3178 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3179 } 3180 3181 /* 3182 * nxge_free_tx_mem_pool 3183 * 3184 * This function frees all of the per-port TDC control data structures. 3185 * The per-channel (TDC) data structures are freed when the channel 3186 * is stopped. 3187 * 3188 * Arguments: 3189 * nxgep 3190 * 3191 * Notes: 3192 * 3193 * Context: 3194 * Any domain 3195 */ 3196 static void 3197 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3198 { 3199 int tdc_max = NXGE_MAX_TDCS; 3200 3201 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3202 3203 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3204 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3205 "<== nxge_free_tx_mem_pool " 3206 "(null tx buf pool or buf not allocated")); 3207 return; 3208 } 3209 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3210 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3211 "<== nxge_free_tx_mem_pool " 3212 "(null tx cntl buf pool or cntl buf not allocated")); 3213 return; 3214 } 3215 3216 /* 1. Free the mailboxes. */ 3217 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3218 sizeof (p_tx_mbox_t) * tdc_max); 3219 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3220 3221 nxgep->tx_mbox_areas_p = 0; 3222 3223 /* 2. Free the transmit ring arrays. */ 3224 KMEM_FREE(nxgep->tx_rings->rings, 3225 sizeof (p_tx_ring_t) * tdc_max); 3226 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3227 3228 nxgep->tx_rings = 0; 3229 3230 /* 3. Free the completion ring data structures. */ 3231 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3232 sizeof (p_nxge_dma_common_t) * tdc_max); 3233 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3234 3235 nxgep->tx_cntl_pool_p = 0; 3236 3237 /* 4. Free the data ring data structures. */ 3238 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3239 sizeof (uint32_t) * tdc_max); 3240 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3241 sizeof (p_nxge_dma_common_t) * tdc_max); 3242 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3243 3244 nxgep->tx_buf_pool_p = 0; 3245 3246 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3247 } 3248 3249 /*ARGSUSED*/ 3250 static nxge_status_t 3251 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3252 struct ddi_dma_attr *dma_attrp, 3253 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3254 p_nxge_dma_common_t dma_p) 3255 { 3256 caddr_t kaddrp; 3257 int ddi_status = DDI_SUCCESS; 3258 boolean_t contig_alloc_type; 3259 boolean_t kmem_alloc_type; 3260 3261 contig_alloc_type = dma_p->contig_alloc_type; 3262 3263 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3264 /* 3265 * contig_alloc_type for contiguous memory only allowed 3266 * for N2/NIU. 3267 */ 3268 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3269 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3270 dma_p->contig_alloc_type)); 3271 return (NXGE_ERROR | NXGE_DDI_FAILED); 3272 } 3273 3274 dma_p->dma_handle = NULL; 3275 dma_p->acc_handle = NULL; 3276 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3277 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3278 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3279 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3280 if (ddi_status != DDI_SUCCESS) { 3281 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3282 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3283 return (NXGE_ERROR | NXGE_DDI_FAILED); 3284 } 3285 3286 kmem_alloc_type = dma_p->kmem_alloc_type; 3287 3288 switch (contig_alloc_type) { 3289 case B_FALSE: 3290 switch (kmem_alloc_type) { 3291 case B_FALSE: 3292 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3293 length, 3294 acc_attr_p, 3295 xfer_flags, 3296 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3297 &dma_p->acc_handle); 3298 if (ddi_status != DDI_SUCCESS) { 3299 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3300 "nxge_dma_mem_alloc: " 3301 "ddi_dma_mem_alloc failed")); 3302 ddi_dma_free_handle(&dma_p->dma_handle); 3303 dma_p->dma_handle = NULL; 3304 return (NXGE_ERROR | NXGE_DDI_FAILED); 3305 } 3306 if (dma_p->alength < length) { 3307 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3308 "nxge_dma_mem_alloc:di_dma_mem_alloc " 3309 "< length.")); 3310 ddi_dma_mem_free(&dma_p->acc_handle); 3311 ddi_dma_free_handle(&dma_p->dma_handle); 3312 dma_p->acc_handle = NULL; 3313 dma_p->dma_handle = NULL; 3314 return (NXGE_ERROR); 3315 } 3316 3317 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3318 NULL, 3319 kaddrp, dma_p->alength, xfer_flags, 3320 DDI_DMA_DONTWAIT, 3321 0, &dma_p->dma_cookie, &dma_p->ncookies); 3322 if (ddi_status != DDI_DMA_MAPPED) { 3323 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3324 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3325 "failed " 3326 "(staus 0x%x ncookies %d.)", ddi_status, 3327 dma_p->ncookies)); 3328 if (dma_p->acc_handle) { 3329 ddi_dma_mem_free(&dma_p->acc_handle); 3330 dma_p->acc_handle = NULL; 3331 } 3332 ddi_dma_free_handle(&dma_p->dma_handle); 3333 dma_p->dma_handle = NULL; 3334 return (NXGE_ERROR | NXGE_DDI_FAILED); 3335 } 3336 3337 if (dma_p->ncookies != 1) { 3338 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3339 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3340 "> 1 cookie" 3341 "(staus 0x%x ncookies %d.)", ddi_status, 3342 dma_p->ncookies)); 3343 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3344 if (dma_p->acc_handle) { 3345 ddi_dma_mem_free(&dma_p->acc_handle); 3346 dma_p->acc_handle = NULL; 3347 } 3348 ddi_dma_free_handle(&dma_p->dma_handle); 3349 dma_p->dma_handle = NULL; 3350 dma_p->acc_handle = NULL; 3351 return (NXGE_ERROR); 3352 } 3353 break; 3354 3355 case B_TRUE: 3356 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3357 if (kaddrp == NULL) { 3358 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3359 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3360 "kmem alloc failed")); 3361 return (NXGE_ERROR); 3362 } 3363 3364 dma_p->alength = length; 3365 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3366 NULL, kaddrp, dma_p->alength, xfer_flags, 3367 DDI_DMA_DONTWAIT, 0, 3368 &dma_p->dma_cookie, &dma_p->ncookies); 3369 if (ddi_status != DDI_DMA_MAPPED) { 3370 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3371 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3372 "(kmem_alloc) failed kaddrp $%p length %d " 3373 "(staus 0x%x (%d) ncookies %d.)", 3374 kaddrp, length, 3375 ddi_status, ddi_status, dma_p->ncookies)); 3376 KMEM_FREE(kaddrp, length); 3377 dma_p->acc_handle = NULL; 3378 ddi_dma_free_handle(&dma_p->dma_handle); 3379 dma_p->dma_handle = NULL; 3380 dma_p->kaddrp = NULL; 3381 return (NXGE_ERROR | NXGE_DDI_FAILED); 3382 } 3383 3384 if (dma_p->ncookies != 1) { 3385 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3386 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3387 "(kmem_alloc) > 1 cookie" 3388 "(staus 0x%x ncookies %d.)", ddi_status, 3389 dma_p->ncookies)); 3390 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3391 KMEM_FREE(kaddrp, length); 3392 ddi_dma_free_handle(&dma_p->dma_handle); 3393 dma_p->dma_handle = NULL; 3394 dma_p->acc_handle = NULL; 3395 dma_p->kaddrp = NULL; 3396 return (NXGE_ERROR); 3397 } 3398 3399 dma_p->kaddrp = kaddrp; 3400 3401 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3402 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3403 "kaddr $%p alength %d", 3404 dma_p, 3405 kaddrp, 3406 dma_p->alength)); 3407 break; 3408 } 3409 break; 3410 3411 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3412 case B_TRUE: 3413 kaddrp = (caddr_t)contig_mem_alloc(length); 3414 if (kaddrp == NULL) { 3415 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3416 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3417 ddi_dma_free_handle(&dma_p->dma_handle); 3418 return (NXGE_ERROR | NXGE_DDI_FAILED); 3419 } 3420 3421 dma_p->alength = length; 3422 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3423 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3424 &dma_p->dma_cookie, &dma_p->ncookies); 3425 if (ddi_status != DDI_DMA_MAPPED) { 3426 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3427 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 3428 "(status 0x%x ncookies %d.)", ddi_status, 3429 dma_p->ncookies)); 3430 3431 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3432 "==> nxge_dma_mem_alloc: (not mapped)" 3433 "length %lu (0x%x) " 3434 "free contig kaddrp $%p " 3435 "va_to_pa $%p", 3436 length, length, 3437 kaddrp, 3438 va_to_pa(kaddrp))); 3439 3440 3441 contig_mem_free((void *)kaddrp, length); 3442 ddi_dma_free_handle(&dma_p->dma_handle); 3443 3444 dma_p->dma_handle = NULL; 3445 dma_p->acc_handle = NULL; 3446 dma_p->alength = NULL; 3447 dma_p->kaddrp = NULL; 3448 3449 return (NXGE_ERROR | NXGE_DDI_FAILED); 3450 } 3451 3452 if (dma_p->ncookies != 1 || 3453 (dma_p->dma_cookie.dmac_laddress == NULL)) { 3454 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3455 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 3456 "cookie or " 3457 "dmac_laddress is NULL $%p size %d " 3458 " (status 0x%x ncookies %d.)", 3459 ddi_status, 3460 dma_p->dma_cookie.dmac_laddress, 3461 dma_p->dma_cookie.dmac_size, 3462 dma_p->ncookies)); 3463 3464 contig_mem_free((void *)kaddrp, length); 3465 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3466 ddi_dma_free_handle(&dma_p->dma_handle); 3467 3468 dma_p->alength = 0; 3469 dma_p->dma_handle = NULL; 3470 dma_p->acc_handle = NULL; 3471 dma_p->kaddrp = NULL; 3472 3473 return (NXGE_ERROR | NXGE_DDI_FAILED); 3474 } 3475 break; 3476 3477 #else 3478 case B_TRUE: 3479 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3480 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 3481 return (NXGE_ERROR | NXGE_DDI_FAILED); 3482 #endif 3483 } 3484 3485 dma_p->kaddrp = kaddrp; 3486 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3487 dma_p->alength - RXBUF_64B_ALIGNED; 3488 #if defined(__i386) 3489 dma_p->ioaddr_pp = 3490 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 3491 #else 3492 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3493 #endif 3494 dma_p->last_ioaddr_pp = 3495 #if defined(__i386) 3496 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress + 3497 #else 3498 (unsigned char *)dma_p->dma_cookie.dmac_laddress + 3499 #endif 3500 dma_p->alength - RXBUF_64B_ALIGNED; 3501 3502 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 3503 3504 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3505 dma_p->orig_ioaddr_pp = 3506 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3507 dma_p->orig_alength = length; 3508 dma_p->orig_kaddrp = kaddrp; 3509 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 3510 #endif 3511 3512 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 3513 "dma buffer allocated: dma_p $%p " 3514 "return dmac_ladress from cookie $%p cookie dmac_size %d " 3515 "dma_p->ioaddr_p $%p " 3516 "dma_p->orig_ioaddr_p $%p " 3517 "orig_vatopa $%p " 3518 "alength %d (0x%x) " 3519 "kaddrp $%p " 3520 "length %d (0x%x)", 3521 dma_p, 3522 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 3523 dma_p->ioaddr_pp, 3524 dma_p->orig_ioaddr_pp, 3525 dma_p->orig_vatopa, 3526 dma_p->alength, dma_p->alength, 3527 kaddrp, 3528 length, length)); 3529 3530 return (NXGE_OK); 3531 } 3532 3533 static void 3534 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 3535 { 3536 if (dma_p->dma_handle != NULL) { 3537 if (dma_p->ncookies) { 3538 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3539 dma_p->ncookies = 0; 3540 } 3541 ddi_dma_free_handle(&dma_p->dma_handle); 3542 dma_p->dma_handle = NULL; 3543 } 3544 3545 if (dma_p->acc_handle != NULL) { 3546 ddi_dma_mem_free(&dma_p->acc_handle); 3547 dma_p->acc_handle = NULL; 3548 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3549 } 3550 3551 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3552 if (dma_p->contig_alloc_type && 3553 dma_p->orig_kaddrp && dma_p->orig_alength) { 3554 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3555 "kaddrp $%p (orig_kaddrp $%p)" 3556 "mem type %d ", 3557 "orig_alength %d " 3558 "alength 0x%x (%d)", 3559 dma_p->kaddrp, 3560 dma_p->orig_kaddrp, 3561 dma_p->contig_alloc_type, 3562 dma_p->orig_alength, 3563 dma_p->alength, dma_p->alength)); 3564 3565 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 3566 dma_p->orig_alength = NULL; 3567 dma_p->orig_kaddrp = NULL; 3568 dma_p->contig_alloc_type = B_FALSE; 3569 } 3570 #endif 3571 dma_p->kaddrp = NULL; 3572 dma_p->alength = NULL; 3573 } 3574 3575 static void 3576 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p) 3577 { 3578 uint64_t kaddr; 3579 uint32_t buf_size; 3580 3581 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf")); 3582 3583 if (dma_p->dma_handle != NULL) { 3584 if (dma_p->ncookies) { 3585 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3586 dma_p->ncookies = 0; 3587 } 3588 ddi_dma_free_handle(&dma_p->dma_handle); 3589 dma_p->dma_handle = NULL; 3590 } 3591 3592 if (dma_p->acc_handle != NULL) { 3593 ddi_dma_mem_free(&dma_p->acc_handle); 3594 dma_p->acc_handle = NULL; 3595 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3596 } 3597 3598 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3599 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d", 3600 dma_p, 3601 dma_p->buf_alloc_state)); 3602 3603 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) { 3604 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3605 "<== nxge_dma_free_rx_data_buf: " 3606 "outstanding data buffers")); 3607 return; 3608 } 3609 3610 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3611 if (dma_p->contig_alloc_type && 3612 dma_p->orig_kaddrp && dma_p->orig_alength) { 3613 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: " 3614 "kaddrp $%p (orig_kaddrp $%p)" 3615 "mem type %d ", 3616 "orig_alength %d " 3617 "alength 0x%x (%d)", 3618 dma_p->kaddrp, 3619 dma_p->orig_kaddrp, 3620 dma_p->contig_alloc_type, 3621 dma_p->orig_alength, 3622 dma_p->alength, dma_p->alength)); 3623 3624 kaddr = (uint64_t)dma_p->orig_kaddrp; 3625 buf_size = dma_p->orig_alength; 3626 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size); 3627 dma_p->orig_alength = NULL; 3628 dma_p->orig_kaddrp = NULL; 3629 dma_p->contig_alloc_type = B_FALSE; 3630 dma_p->kaddrp = NULL; 3631 dma_p->alength = NULL; 3632 return; 3633 } 3634 #endif 3635 3636 if (dma_p->kmem_alloc_type) { 3637 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3638 "nxge_dma_free_rx_data_buf: free kmem " 3639 "kaddrp $%p (orig_kaddrp $%p)" 3640 "alloc type %d " 3641 "orig_alength %d " 3642 "alength 0x%x (%d)", 3643 dma_p->kaddrp, 3644 dma_p->orig_kaddrp, 3645 dma_p->kmem_alloc_type, 3646 dma_p->orig_alength, 3647 dma_p->alength, dma_p->alength)); 3648 #if defined(__i386) 3649 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp; 3650 #else 3651 kaddr = (uint64_t)dma_p->kaddrp; 3652 #endif 3653 buf_size = dma_p->orig_alength; 3654 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3655 "nxge_dma_free_rx_data_buf: free dmap $%p " 3656 "kaddr $%p buf_size %d", 3657 dma_p, 3658 kaddr, buf_size)); 3659 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size); 3660 dma_p->alength = 0; 3661 dma_p->orig_alength = 0; 3662 dma_p->kaddrp = NULL; 3663 dma_p->kmem_alloc_type = B_FALSE; 3664 } 3665 3666 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf")); 3667 } 3668 3669 /* 3670 * nxge_m_start() -- start transmitting and receiving. 3671 * 3672 * This function is called by the MAC layer when the first 3673 * stream is open to prepare the hardware ready for sending 3674 * and transmitting packets. 3675 */ 3676 static int 3677 nxge_m_start(void *arg) 3678 { 3679 p_nxge_t nxgep = (p_nxge_t)arg; 3680 3681 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 3682 3683 /* 3684 * Are we already started? 3685 */ 3686 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 3687 return (0); 3688 } 3689 3690 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) { 3691 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 3692 } 3693 3694 /* 3695 * Make sure RX MAC is disabled while we initialize. 3696 */ 3697 if (!isLDOMguest(nxgep)) { 3698 (void) nxge_rx_mac_disable(nxgep); 3699 } 3700 3701 /* 3702 * Grab the global lock. 3703 */ 3704 MUTEX_ENTER(nxgep->genlock); 3705 3706 /* 3707 * Initialize the driver and hardware. 3708 */ 3709 if (nxge_init(nxgep) != NXGE_OK) { 3710 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3711 "<== nxge_m_start: initialization failed")); 3712 MUTEX_EXIT(nxgep->genlock); 3713 return (EIO); 3714 } 3715 3716 /* 3717 * Start timer to check the system error and tx hangs 3718 */ 3719 if (!isLDOMguest(nxgep)) 3720 nxgep->nxge_timerid = nxge_start_timer(nxgep, 3721 nxge_check_hw_state, NXGE_CHECK_TIMER); 3722 #if defined(sun4v) 3723 else 3724 nxge_hio_start_timer(nxgep); 3725 #endif 3726 3727 nxgep->link_notify = B_TRUE; 3728 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 3729 3730 /* 3731 * Let the global lock go, since we are intialized. 3732 */ 3733 MUTEX_EXIT(nxgep->genlock); 3734 3735 /* 3736 * Let the MAC start receiving packets, now that 3737 * we are initialized. 3738 */ 3739 if (!isLDOMguest(nxgep)) { 3740 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 3741 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3742 "<== nxge_m_start: enable of RX mac failed")); 3743 return (EIO); 3744 } 3745 3746 /* 3747 * Enable hardware interrupts. 3748 */ 3749 nxge_intr_hw_enable(nxgep); 3750 } 3751 #if defined(sun4v) 3752 else { 3753 /* 3754 * In guest domain we enable RDCs and their interrupts as 3755 * the last step. 3756 */ 3757 if (nxge_hio_rdc_enable(nxgep) != NXGE_OK) { 3758 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3759 "<== nxge_m_start: enable of RDCs failed")); 3760 return (EIO); 3761 } 3762 3763 if (nxge_hio_rdc_intr_arm(nxgep, B_TRUE) != NXGE_OK) { 3764 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3765 "<== nxge_m_start: intrs enable for RDCs failed")); 3766 return (EIO); 3767 } 3768 } 3769 #endif 3770 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3771 return (0); 3772 } 3773 3774 static boolean_t 3775 nxge_check_groups_stopped(p_nxge_t nxgep) 3776 { 3777 int i; 3778 3779 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 3780 if (nxgep->rx_hio_groups[i].started) 3781 return (B_FALSE); 3782 } 3783 3784 return (B_TRUE); 3785 } 3786 3787 /* 3788 * nxge_m_stop(): stop transmitting and receiving. 3789 */ 3790 static void 3791 nxge_m_stop(void *arg) 3792 { 3793 p_nxge_t nxgep = (p_nxge_t)arg; 3794 boolean_t groups_stopped; 3795 3796 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3797 3798 /* 3799 * Are the groups stopped? 3800 */ 3801 groups_stopped = nxge_check_groups_stopped(nxgep); 3802 ASSERT(groups_stopped == B_TRUE); 3803 if (!groups_stopped) { 3804 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n", 3805 nxgep->instance); 3806 return; 3807 } 3808 3809 if (!isLDOMguest(nxgep)) { 3810 /* 3811 * Disable the RX mac. 3812 */ 3813 (void) nxge_rx_mac_disable(nxgep); 3814 3815 /* 3816 * Wait for the IPP to drain. 3817 */ 3818 (void) nxge_ipp_drain(nxgep); 3819 3820 /* 3821 * Disable hardware interrupts. 3822 */ 3823 nxge_intr_hw_disable(nxgep); 3824 } 3825 #if defined(sun4v) 3826 else { 3827 (void) nxge_hio_rdc_intr_arm(nxgep, B_FALSE); 3828 } 3829 #endif 3830 3831 /* 3832 * Grab the global lock. 3833 */ 3834 MUTEX_ENTER(nxgep->genlock); 3835 3836 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3837 if (nxgep->nxge_timerid) { 3838 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3839 nxgep->nxge_timerid = 0; 3840 } 3841 3842 /* 3843 * Clean up. 3844 */ 3845 nxge_uninit(nxgep); 3846 3847 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3848 3849 /* 3850 * Let go of the global lock. 3851 */ 3852 MUTEX_EXIT(nxgep->genlock); 3853 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3854 } 3855 3856 static int 3857 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3858 { 3859 p_nxge_t nxgep = (p_nxge_t)arg; 3860 struct ether_addr addrp; 3861 3862 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3863 "==> nxge_m_multicst: add %d", add)); 3864 3865 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3866 if (add) { 3867 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3868 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3869 "<== nxge_m_multicst: add multicast failed")); 3870 return (EINVAL); 3871 } 3872 } else { 3873 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3874 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3875 "<== nxge_m_multicst: del multicast failed")); 3876 return (EINVAL); 3877 } 3878 } 3879 3880 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3881 3882 return (0); 3883 } 3884 3885 static int 3886 nxge_m_promisc(void *arg, boolean_t on) 3887 { 3888 p_nxge_t nxgep = (p_nxge_t)arg; 3889 3890 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3891 "==> nxge_m_promisc: on %d", on)); 3892 3893 if (nxge_set_promisc(nxgep, on)) { 3894 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3895 "<== nxge_m_promisc: set promisc failed")); 3896 return (EINVAL); 3897 } 3898 3899 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3900 "<== nxge_m_promisc: on %d", on)); 3901 3902 return (0); 3903 } 3904 3905 static void 3906 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3907 { 3908 p_nxge_t nxgep = (p_nxge_t)arg; 3909 struct iocblk *iocp; 3910 boolean_t need_privilege; 3911 int err; 3912 int cmd; 3913 3914 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3915 3916 iocp = (struct iocblk *)mp->b_rptr; 3917 iocp->ioc_error = 0; 3918 need_privilege = B_TRUE; 3919 cmd = iocp->ioc_cmd; 3920 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3921 switch (cmd) { 3922 default: 3923 miocnak(wq, mp, 0, EINVAL); 3924 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3925 return; 3926 3927 case LB_GET_INFO_SIZE: 3928 case LB_GET_INFO: 3929 case LB_GET_MODE: 3930 need_privilege = B_FALSE; 3931 break; 3932 case LB_SET_MODE: 3933 break; 3934 3935 3936 case NXGE_GET_MII: 3937 case NXGE_PUT_MII: 3938 case NXGE_GET64: 3939 case NXGE_PUT64: 3940 case NXGE_GET_TX_RING_SZ: 3941 case NXGE_GET_TX_DESC: 3942 case NXGE_TX_SIDE_RESET: 3943 case NXGE_RX_SIDE_RESET: 3944 case NXGE_GLOBAL_RESET: 3945 case NXGE_RESET_MAC: 3946 case NXGE_TX_REGS_DUMP: 3947 case NXGE_RX_REGS_DUMP: 3948 case NXGE_INT_REGS_DUMP: 3949 case NXGE_VIR_INT_REGS_DUMP: 3950 case NXGE_PUT_TCAM: 3951 case NXGE_GET_TCAM: 3952 case NXGE_RTRACE: 3953 case NXGE_RDUMP: 3954 3955 need_privilege = B_FALSE; 3956 break; 3957 case NXGE_INJECT_ERR: 3958 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3959 nxge_err_inject(nxgep, wq, mp); 3960 break; 3961 } 3962 3963 if (need_privilege) { 3964 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3965 if (err != 0) { 3966 miocnak(wq, mp, 0, err); 3967 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3968 "<== nxge_m_ioctl: no priv")); 3969 return; 3970 } 3971 } 3972 3973 switch (cmd) { 3974 3975 case LB_GET_MODE: 3976 case LB_SET_MODE: 3977 case LB_GET_INFO_SIZE: 3978 case LB_GET_INFO: 3979 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3980 break; 3981 3982 case NXGE_GET_MII: 3983 case NXGE_PUT_MII: 3984 case NXGE_PUT_TCAM: 3985 case NXGE_GET_TCAM: 3986 case NXGE_GET64: 3987 case NXGE_PUT64: 3988 case NXGE_GET_TX_RING_SZ: 3989 case NXGE_GET_TX_DESC: 3990 case NXGE_TX_SIDE_RESET: 3991 case NXGE_RX_SIDE_RESET: 3992 case NXGE_GLOBAL_RESET: 3993 case NXGE_RESET_MAC: 3994 case NXGE_TX_REGS_DUMP: 3995 case NXGE_RX_REGS_DUMP: 3996 case NXGE_INT_REGS_DUMP: 3997 case NXGE_VIR_INT_REGS_DUMP: 3998 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3999 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 4000 nxge_hw_ioctl(nxgep, wq, mp, iocp); 4001 break; 4002 } 4003 4004 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 4005 } 4006 4007 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 4008 4009 void 4010 nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory) 4011 { 4012 p_nxge_mmac_stats_t mmac_stats; 4013 int i; 4014 nxge_mmac_t *mmac_info; 4015 4016 mmac_info = &nxgep->nxge_mmac_info; 4017 4018 mmac_stats = &nxgep->statsp->mmac_stats; 4019 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 4020 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 4021 4022 for (i = 0; i < ETHERADDRL; i++) { 4023 if (factory) { 4024 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4025 = mmac_info->factory_mac_pool[slot][ 4026 (ETHERADDRL-1) - i]; 4027 } else { 4028 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4029 = mmac_info->mac_pool[slot].addr[ 4030 (ETHERADDRL - 1) - i]; 4031 } 4032 } 4033 } 4034 4035 /* 4036 * nxge_altmac_set() -- Set an alternate MAC address 4037 */ 4038 static int 4039 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot, 4040 int rdctbl, boolean_t usetbl) 4041 { 4042 uint8_t addrn; 4043 uint8_t portn; 4044 npi_mac_addr_t altmac; 4045 hostinfo_t mac_rdc; 4046 p_nxge_class_pt_cfg_t clscfgp; 4047 4048 4049 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 4050 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 4051 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 4052 4053 portn = nxgep->mac.portnum; 4054 addrn = (uint8_t)slot - 1; 4055 4056 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, 4057 nxgep->function_num, addrn, &altmac) != NPI_SUCCESS) 4058 return (EIO); 4059 4060 /* 4061 * Set the rdc table number for the host info entry 4062 * for this mac address slot. 4063 */ 4064 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 4065 mac_rdc.value = 0; 4066 if (usetbl) 4067 mac_rdc.bits.w0.rdc_tbl_num = rdctbl; 4068 else 4069 mac_rdc.bits.w0.rdc_tbl_num = 4070 clscfgp->mac_host_info[addrn].rdctbl; 4071 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 4072 4073 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 4074 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 4075 return (EIO); 4076 } 4077 4078 /* 4079 * Enable comparison with the alternate MAC address. 4080 * While the first alternate addr is enabled by bit 1 of register 4081 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 4082 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 4083 * accordingly before calling npi_mac_altaddr_entry. 4084 */ 4085 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4086 addrn = (uint8_t)slot - 1; 4087 else 4088 addrn = (uint8_t)slot; 4089 4090 if (npi_mac_altaddr_enable(nxgep->npi_handle, 4091 nxgep->function_num, addrn) != NPI_SUCCESS) { 4092 return (EIO); 4093 } 4094 4095 return (0); 4096 } 4097 4098 /* 4099 * nxeg_m_mmac_add_g() - find an unused address slot, set the address 4100 * value to the one specified, enable the port to start filtering on 4101 * the new MAC address. Returns 0 on success. 4102 */ 4103 int 4104 nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl, 4105 boolean_t usetbl) 4106 { 4107 p_nxge_t nxgep = arg; 4108 int slot; 4109 nxge_mmac_t *mmac_info; 4110 int err; 4111 nxge_status_t status; 4112 4113 mutex_enter(nxgep->genlock); 4114 4115 /* 4116 * Make sure that nxge is initialized, if _start() has 4117 * not been called. 4118 */ 4119 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4120 status = nxge_init(nxgep); 4121 if (status != NXGE_OK) { 4122 mutex_exit(nxgep->genlock); 4123 return (ENXIO); 4124 } 4125 } 4126 4127 mmac_info = &nxgep->nxge_mmac_info; 4128 if (mmac_info->naddrfree == 0) { 4129 mutex_exit(nxgep->genlock); 4130 return (ENOSPC); 4131 } 4132 4133 /* 4134 * Search for the first available slot. Because naddrfree 4135 * is not zero, we are guaranteed to find one. 4136 * Each of the first two ports of Neptune has 16 alternate 4137 * MAC slots but only the first 7 (of 15) slots have assigned factory 4138 * MAC addresses. We first search among the slots without bundled 4139 * factory MACs. If we fail to find one in that range, then we 4140 * search the slots with bundled factory MACs. A factory MAC 4141 * will be wasted while the slot is used with a user MAC address. 4142 * But the slot could be used by factory MAC again after calling 4143 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 4144 */ 4145 for (slot = 0; slot <= mmac_info->num_mmac; slot++) { 4146 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4147 break; 4148 } 4149 4150 ASSERT(slot <= mmac_info->num_mmac); 4151 4152 if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl, 4153 usetbl)) != 0) { 4154 mutex_exit(nxgep->genlock); 4155 return (err); 4156 } 4157 4158 bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 4159 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 4160 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4161 mmac_info->naddrfree--; 4162 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4163 4164 mutex_exit(nxgep->genlock); 4165 return (0); 4166 } 4167 4168 /* 4169 * Remove the specified mac address and update the HW not to filter 4170 * the mac address anymore. 4171 */ 4172 int 4173 nxge_m_mmac_remove(void *arg, int slot) 4174 { 4175 p_nxge_t nxgep = arg; 4176 nxge_mmac_t *mmac_info; 4177 uint8_t addrn; 4178 uint8_t portn; 4179 int err = 0; 4180 nxge_status_t status; 4181 4182 mutex_enter(nxgep->genlock); 4183 4184 /* 4185 * Make sure that nxge is initialized, if _start() has 4186 * not been called. 4187 */ 4188 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4189 status = nxge_init(nxgep); 4190 if (status != NXGE_OK) { 4191 mutex_exit(nxgep->genlock); 4192 return (ENXIO); 4193 } 4194 } 4195 4196 mmac_info = &nxgep->nxge_mmac_info; 4197 if (slot < 1 || slot > mmac_info->num_mmac) { 4198 mutex_exit(nxgep->genlock); 4199 return (EINVAL); 4200 } 4201 4202 portn = nxgep->mac.portnum; 4203 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4204 addrn = (uint8_t)slot - 1; 4205 else 4206 addrn = (uint8_t)slot; 4207 4208 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4209 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 4210 == NPI_SUCCESS) { 4211 mmac_info->naddrfree++; 4212 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 4213 /* 4214 * Regardless if the MAC we just stopped filtering 4215 * is a user addr or a facory addr, we must set 4216 * the MMAC_VENDOR_ADDR flag if this slot has an 4217 * associated factory MAC to indicate that a factory 4218 * MAC is available. 4219 */ 4220 if (slot <= mmac_info->num_factory_mmac) { 4221 mmac_info->mac_pool[slot].flags 4222 |= MMAC_VENDOR_ADDR; 4223 } 4224 /* 4225 * Clear mac_pool[slot].addr so that kstat shows 0 4226 * alternate MAC address if the slot is not used. 4227 * (But nxge_m_mmac_get returns the factory MAC even 4228 * when the slot is not used!) 4229 */ 4230 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 4231 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4232 } else { 4233 err = EIO; 4234 } 4235 } else { 4236 err = EINVAL; 4237 } 4238 4239 mutex_exit(nxgep->genlock); 4240 return (err); 4241 } 4242 4243 /* 4244 * The callback to query all the factory addresses. naddr must be the same as 4245 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and 4246 * mcm_addr is the space allocated for keep all the addresses, whose size is 4247 * naddr * MAXMACADDRLEN. 4248 */ 4249 static void 4250 nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr) 4251 { 4252 nxge_t *nxgep = arg; 4253 nxge_mmac_t *mmac_info; 4254 int i; 4255 4256 mutex_enter(nxgep->genlock); 4257 4258 mmac_info = &nxgep->nxge_mmac_info; 4259 ASSERT(naddr == mmac_info->num_factory_mmac); 4260 4261 for (i = 0; i < naddr; i++) { 4262 bcopy(mmac_info->factory_mac_pool[i + 1], 4263 addr + i * MAXMACADDRLEN, ETHERADDRL); 4264 } 4265 4266 mutex_exit(nxgep->genlock); 4267 } 4268 4269 4270 static boolean_t 4271 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4272 { 4273 nxge_t *nxgep = arg; 4274 uint32_t *txflags = cap_data; 4275 4276 switch (cap) { 4277 case MAC_CAPAB_HCKSUM: 4278 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4279 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload)); 4280 if (nxge_cksum_offload <= 1) { 4281 *txflags = HCKSUM_INET_PARTIAL; 4282 } 4283 break; 4284 4285 case MAC_CAPAB_MULTIFACTADDR: { 4286 mac_capab_multifactaddr_t *mfacp = cap_data; 4287 4288 if (!isLDOMguest(nxgep)) { 4289 mutex_enter(nxgep->genlock); 4290 mfacp->mcm_naddr = 4291 nxgep->nxge_mmac_info.num_factory_mmac; 4292 mfacp->mcm_getaddr = nxge_m_getfactaddr; 4293 mutex_exit(nxgep->genlock); 4294 } 4295 break; 4296 } 4297 4298 case MAC_CAPAB_LSO: { 4299 mac_capab_lso_t *cap_lso = cap_data; 4300 4301 if (nxgep->soft_lso_enable) { 4302 if (nxge_cksum_offload <= 1) { 4303 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4304 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 4305 nxge_lso_max = NXGE_LSO_MAXLEN; 4306 } 4307 cap_lso->lso_basic_tcp_ipv4.lso_max = 4308 nxge_lso_max; 4309 } 4310 break; 4311 } else { 4312 return (B_FALSE); 4313 } 4314 } 4315 4316 case MAC_CAPAB_RINGS: { 4317 mac_capab_rings_t *cap_rings = cap_data; 4318 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 4319 4320 mutex_enter(nxgep->genlock); 4321 if (cap_rings->mr_type == MAC_RING_TYPE_RX) { 4322 if (isLDOMguest(nxgep)) { 4323 cap_rings->mr_group_type = 4324 MAC_GROUP_TYPE_STATIC; 4325 cap_rings->mr_rnum = 4326 NXGE_HIO_SHARE_MAX_CHANNELS; 4327 cap_rings->mr_rget = nxge_fill_ring; 4328 cap_rings->mr_gnum = 1; 4329 cap_rings->mr_gget = nxge_hio_group_get; 4330 cap_rings->mr_gaddring = NULL; 4331 cap_rings->mr_gremring = NULL; 4332 } else { 4333 /* 4334 * Service Domain. 4335 */ 4336 cap_rings->mr_group_type = 4337 MAC_GROUP_TYPE_DYNAMIC; 4338 cap_rings->mr_rnum = p_cfgp->max_rdcs; 4339 cap_rings->mr_rget = nxge_fill_ring; 4340 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids; 4341 cap_rings->mr_gget = nxge_hio_group_get; 4342 cap_rings->mr_gaddring = nxge_group_add_ring; 4343 cap_rings->mr_gremring = nxge_group_rem_ring; 4344 } 4345 4346 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4347 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]", 4348 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids)); 4349 } else { 4350 /* 4351 * TX Rings. 4352 */ 4353 if (isLDOMguest(nxgep)) { 4354 cap_rings->mr_group_type = 4355 MAC_GROUP_TYPE_STATIC; 4356 cap_rings->mr_rnum = 4357 NXGE_HIO_SHARE_MAX_CHANNELS; 4358 cap_rings->mr_rget = nxge_fill_ring; 4359 cap_rings->mr_gnum = 0; 4360 cap_rings->mr_gget = NULL; 4361 cap_rings->mr_gaddring = NULL; 4362 cap_rings->mr_gremring = NULL; 4363 } else { 4364 /* 4365 * Service Domain. 4366 */ 4367 cap_rings->mr_group_type = 4368 MAC_GROUP_TYPE_DYNAMIC; 4369 cap_rings->mr_rnum = p_cfgp->tdc.count; 4370 cap_rings->mr_rget = nxge_fill_ring; 4371 4372 /* 4373 * Share capable. 4374 * 4375 * Do not report the default group: hence -1 4376 */ 4377 cap_rings->mr_gnum = 4378 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1; 4379 cap_rings->mr_gget = nxge_hio_group_get; 4380 cap_rings->mr_gaddring = nxge_group_add_ring; 4381 cap_rings->mr_gremring = nxge_group_rem_ring; 4382 } 4383 4384 NXGE_DEBUG_MSG((nxgep, TX_CTL, 4385 "==> nxge_m_getcapab: tx rings # of rings %d", 4386 p_cfgp->tdc.count)); 4387 } 4388 mutex_exit(nxgep->genlock); 4389 break; 4390 } 4391 4392 #if defined(sun4v) 4393 case MAC_CAPAB_SHARES: { 4394 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4395 4396 /* 4397 * Only the service domain driver responds to 4398 * this capability request. 4399 */ 4400 mutex_enter(nxgep->genlock); 4401 if (isLDOMservice(nxgep)) { 4402 mshares->ms_snum = 3; 4403 mshares->ms_handle = (void *)nxgep; 4404 mshares->ms_salloc = nxge_hio_share_alloc; 4405 mshares->ms_sfree = nxge_hio_share_free; 4406 mshares->ms_sadd = nxge_hio_share_add_group; 4407 mshares->ms_sremove = nxge_hio_share_rem_group; 4408 mshares->ms_squery = nxge_hio_share_query; 4409 mshares->ms_sbind = nxge_hio_share_bind; 4410 mshares->ms_sunbind = nxge_hio_share_unbind; 4411 mutex_exit(nxgep->genlock); 4412 } else { 4413 mutex_exit(nxgep->genlock); 4414 return (B_FALSE); 4415 } 4416 break; 4417 } 4418 #endif 4419 default: 4420 return (B_FALSE); 4421 } 4422 return (B_TRUE); 4423 } 4424 4425 static boolean_t 4426 nxge_param_locked(mac_prop_id_t pr_num) 4427 { 4428 /* 4429 * All adv_* parameters are locked (read-only) while 4430 * the device is in any sort of loopback mode ... 4431 */ 4432 switch (pr_num) { 4433 case MAC_PROP_ADV_1000FDX_CAP: 4434 case MAC_PROP_EN_1000FDX_CAP: 4435 case MAC_PROP_ADV_1000HDX_CAP: 4436 case MAC_PROP_EN_1000HDX_CAP: 4437 case MAC_PROP_ADV_100FDX_CAP: 4438 case MAC_PROP_EN_100FDX_CAP: 4439 case MAC_PROP_ADV_100HDX_CAP: 4440 case MAC_PROP_EN_100HDX_CAP: 4441 case MAC_PROP_ADV_10FDX_CAP: 4442 case MAC_PROP_EN_10FDX_CAP: 4443 case MAC_PROP_ADV_10HDX_CAP: 4444 case MAC_PROP_EN_10HDX_CAP: 4445 case MAC_PROP_AUTONEG: 4446 case MAC_PROP_FLOWCTRL: 4447 return (B_TRUE); 4448 } 4449 return (B_FALSE); 4450 } 4451 4452 /* 4453 * callback functions for set/get of properties 4454 */ 4455 static int 4456 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4457 uint_t pr_valsize, const void *pr_val) 4458 { 4459 nxge_t *nxgep = barg; 4460 p_nxge_param_t param_arr; 4461 p_nxge_stats_t statsp; 4462 int err = 0; 4463 uint8_t val; 4464 uint32_t cur_mtu, new_mtu, old_framesize; 4465 link_flowctrl_t fl; 4466 4467 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4468 param_arr = nxgep->param_arr; 4469 statsp = nxgep->statsp; 4470 mutex_enter(nxgep->genlock); 4471 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4472 nxge_param_locked(pr_num)) { 4473 /* 4474 * All adv_* parameters are locked (read-only) 4475 * while the device is in any sort of loopback mode. 4476 */ 4477 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4478 "==> nxge_m_setprop: loopback mode: read only")); 4479 mutex_exit(nxgep->genlock); 4480 return (EBUSY); 4481 } 4482 4483 val = *(uint8_t *)pr_val; 4484 switch (pr_num) { 4485 case MAC_PROP_EN_1000FDX_CAP: 4486 nxgep->param_en_1000fdx = val; 4487 param_arr[param_anar_1000fdx].value = val; 4488 4489 goto reprogram; 4490 4491 case MAC_PROP_EN_100FDX_CAP: 4492 nxgep->param_en_100fdx = val; 4493 param_arr[param_anar_100fdx].value = val; 4494 4495 goto reprogram; 4496 4497 case MAC_PROP_EN_10FDX_CAP: 4498 nxgep->param_en_10fdx = val; 4499 param_arr[param_anar_10fdx].value = val; 4500 4501 goto reprogram; 4502 4503 case MAC_PROP_EN_1000HDX_CAP: 4504 case MAC_PROP_EN_100HDX_CAP: 4505 case MAC_PROP_EN_10HDX_CAP: 4506 case MAC_PROP_ADV_1000FDX_CAP: 4507 case MAC_PROP_ADV_1000HDX_CAP: 4508 case MAC_PROP_ADV_100FDX_CAP: 4509 case MAC_PROP_ADV_100HDX_CAP: 4510 case MAC_PROP_ADV_10FDX_CAP: 4511 case MAC_PROP_ADV_10HDX_CAP: 4512 case MAC_PROP_STATUS: 4513 case MAC_PROP_SPEED: 4514 case MAC_PROP_DUPLEX: 4515 err = EINVAL; /* cannot set read-only properties */ 4516 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4517 "==> nxge_m_setprop: read only property %d", 4518 pr_num)); 4519 break; 4520 4521 case MAC_PROP_AUTONEG: 4522 param_arr[param_autoneg].value = val; 4523 4524 goto reprogram; 4525 4526 case MAC_PROP_MTU: 4527 cur_mtu = nxgep->mac.default_mtu; 4528 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4529 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4530 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4531 new_mtu, nxgep->mac.is_jumbo)); 4532 4533 if (new_mtu == cur_mtu) { 4534 err = 0; 4535 break; 4536 } 4537 4538 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4539 err = EBUSY; 4540 break; 4541 } 4542 4543 if ((new_mtu < NXGE_DEFAULT_MTU) || 4544 (new_mtu > NXGE_MAXIMUM_MTU)) { 4545 err = EINVAL; 4546 break; 4547 } 4548 4549 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4550 nxgep->mac.maxframesize = (uint16_t) 4551 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4552 if (nxge_mac_set_framesize(nxgep)) { 4553 nxgep->mac.maxframesize = 4554 (uint16_t)old_framesize; 4555 err = EINVAL; 4556 break; 4557 } 4558 4559 err = mac_maxsdu_update(nxgep->mach, new_mtu); 4560 if (err) { 4561 nxgep->mac.maxframesize = 4562 (uint16_t)old_framesize; 4563 err = EINVAL; 4564 break; 4565 } 4566 4567 nxgep->mac.default_mtu = new_mtu; 4568 if (new_mtu > NXGE_DEFAULT_MTU) 4569 nxgep->mac.is_jumbo = B_TRUE; 4570 else 4571 nxgep->mac.is_jumbo = B_FALSE; 4572 4573 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4574 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4575 new_mtu, nxgep->mac.maxframesize)); 4576 break; 4577 4578 case MAC_PROP_FLOWCTRL: 4579 bcopy(pr_val, &fl, sizeof (fl)); 4580 switch (fl) { 4581 default: 4582 err = EINVAL; 4583 break; 4584 4585 case LINK_FLOWCTRL_NONE: 4586 param_arr[param_anar_pause].value = 0; 4587 break; 4588 4589 case LINK_FLOWCTRL_RX: 4590 param_arr[param_anar_pause].value = 1; 4591 break; 4592 4593 case LINK_FLOWCTRL_TX: 4594 case LINK_FLOWCTRL_BI: 4595 err = EINVAL; 4596 break; 4597 } 4598 4599 reprogram: 4600 if (err == 0) { 4601 if (!nxge_param_link_update(nxgep)) { 4602 err = EINVAL; 4603 } 4604 } 4605 break; 4606 case MAC_PROP_PRIVATE: 4607 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4608 "==> nxge_m_setprop: private property")); 4609 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 4610 pr_val); 4611 break; 4612 4613 default: 4614 err = ENOTSUP; 4615 break; 4616 } 4617 4618 mutex_exit(nxgep->genlock); 4619 4620 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4621 "<== nxge_m_setprop (return %d)", err)); 4622 return (err); 4623 } 4624 4625 static int 4626 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4627 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 4628 { 4629 nxge_t *nxgep = barg; 4630 p_nxge_param_t param_arr = nxgep->param_arr; 4631 p_nxge_stats_t statsp = nxgep->statsp; 4632 int err = 0; 4633 link_flowctrl_t fl; 4634 uint64_t tmp = 0; 4635 link_state_t ls; 4636 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4637 4638 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4639 "==> nxge_m_getprop: pr_num %d", pr_num)); 4640 4641 if (pr_valsize == 0) 4642 return (EINVAL); 4643 4644 *perm = MAC_PROP_PERM_RW; 4645 4646 if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) { 4647 err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4648 return (err); 4649 } 4650 4651 bzero(pr_val, pr_valsize); 4652 switch (pr_num) { 4653 case MAC_PROP_DUPLEX: 4654 *perm = MAC_PROP_PERM_READ; 4655 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4656 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4657 "==> nxge_m_getprop: duplex mode %d", 4658 *(uint8_t *)pr_val)); 4659 break; 4660 4661 case MAC_PROP_SPEED: 4662 if (pr_valsize < sizeof (uint64_t)) 4663 return (EINVAL); 4664 *perm = MAC_PROP_PERM_READ; 4665 tmp = statsp->mac_stats.link_speed * 1000000ull; 4666 bcopy(&tmp, pr_val, sizeof (tmp)); 4667 break; 4668 4669 case MAC_PROP_STATUS: 4670 if (pr_valsize < sizeof (link_state_t)) 4671 return (EINVAL); 4672 *perm = MAC_PROP_PERM_READ; 4673 if (!statsp->mac_stats.link_up) 4674 ls = LINK_STATE_DOWN; 4675 else 4676 ls = LINK_STATE_UP; 4677 bcopy(&ls, pr_val, sizeof (ls)); 4678 break; 4679 4680 case MAC_PROP_AUTONEG: 4681 *(uint8_t *)pr_val = 4682 param_arr[param_autoneg].value; 4683 break; 4684 4685 case MAC_PROP_FLOWCTRL: 4686 if (pr_valsize < sizeof (link_flowctrl_t)) 4687 return (EINVAL); 4688 4689 fl = LINK_FLOWCTRL_NONE; 4690 if (param_arr[param_anar_pause].value) { 4691 fl = LINK_FLOWCTRL_RX; 4692 } 4693 bcopy(&fl, pr_val, sizeof (fl)); 4694 break; 4695 4696 case MAC_PROP_ADV_1000FDX_CAP: 4697 *perm = MAC_PROP_PERM_READ; 4698 *(uint8_t *)pr_val = 4699 param_arr[param_anar_1000fdx].value; 4700 break; 4701 4702 case MAC_PROP_EN_1000FDX_CAP: 4703 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4704 break; 4705 4706 case MAC_PROP_ADV_100FDX_CAP: 4707 *perm = MAC_PROP_PERM_READ; 4708 *(uint8_t *)pr_val = 4709 param_arr[param_anar_100fdx].value; 4710 break; 4711 4712 case MAC_PROP_EN_100FDX_CAP: 4713 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4714 break; 4715 4716 case MAC_PROP_ADV_10FDX_CAP: 4717 *perm = MAC_PROP_PERM_READ; 4718 *(uint8_t *)pr_val = 4719 param_arr[param_anar_10fdx].value; 4720 break; 4721 4722 case MAC_PROP_EN_10FDX_CAP: 4723 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4724 break; 4725 4726 case MAC_PROP_EN_1000HDX_CAP: 4727 case MAC_PROP_EN_100HDX_CAP: 4728 case MAC_PROP_EN_10HDX_CAP: 4729 case MAC_PROP_ADV_1000HDX_CAP: 4730 case MAC_PROP_ADV_100HDX_CAP: 4731 case MAC_PROP_ADV_10HDX_CAP: 4732 err = ENOTSUP; 4733 break; 4734 4735 case MAC_PROP_PRIVATE: 4736 err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4737 pr_valsize, pr_val, perm); 4738 break; 4739 4740 case MAC_PROP_MTU: { 4741 mac_propval_range_t range; 4742 4743 if (!(pr_flags & MAC_PROP_POSSIBLE)) 4744 return (ENOTSUP); 4745 if (pr_valsize < sizeof (mac_propval_range_t)) 4746 return (EINVAL); 4747 range.mpr_count = 1; 4748 range.mpr_type = MAC_PROPVAL_UINT32; 4749 range.range_uint32[0].mpur_min = 4750 range.range_uint32[0].mpur_max = NXGE_DEFAULT_MTU; 4751 range.range_uint32[0].mpur_max = NXGE_MAXIMUM_MTU; 4752 bcopy(&range, pr_val, sizeof (range)); 4753 break; 4754 } 4755 default: 4756 err = EINVAL; 4757 break; 4758 } 4759 4760 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4761 4762 return (err); 4763 } 4764 4765 /* ARGSUSED */ 4766 static int 4767 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4768 const void *pr_val) 4769 { 4770 p_nxge_param_t param_arr = nxgep->param_arr; 4771 int err = 0; 4772 long result; 4773 4774 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4775 "==> nxge_set_priv_prop: name %s", pr_name)); 4776 4777 /* Blanking */ 4778 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4779 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4780 (char *)pr_val, 4781 (caddr_t)¶m_arr[param_rxdma_intr_time]); 4782 if (err) { 4783 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4784 "<== nxge_set_priv_prop: " 4785 "unable to set (%s)", pr_name)); 4786 err = EINVAL; 4787 } else { 4788 err = 0; 4789 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4790 "<== nxge_set_priv_prop: " 4791 "set (%s)", pr_name)); 4792 } 4793 4794 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4795 "<== nxge_set_priv_prop: name %s (value %d)", 4796 pr_name, result)); 4797 4798 return (err); 4799 } 4800 4801 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4802 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4803 (char *)pr_val, 4804 (caddr_t)¶m_arr[param_rxdma_intr_pkts]); 4805 if (err) { 4806 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4807 "<== nxge_set_priv_prop: " 4808 "unable to set (%s)", pr_name)); 4809 err = EINVAL; 4810 } else { 4811 err = 0; 4812 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4813 "<== nxge_set_priv_prop: " 4814 "set (%s)", pr_name)); 4815 } 4816 4817 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4818 "<== nxge_set_priv_prop: name %s (value %d)", 4819 pr_name, result)); 4820 4821 return (err); 4822 } 4823 4824 /* Classification */ 4825 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4826 if (pr_val == NULL) { 4827 err = EINVAL; 4828 return (err); 4829 } 4830 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4831 4832 err = nxge_param_set_ip_opt(nxgep, NULL, 4833 NULL, (char *)pr_val, 4834 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 4835 4836 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4837 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4838 pr_name, result)); 4839 4840 return (err); 4841 } 4842 4843 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 4844 if (pr_val == NULL) { 4845 err = EINVAL; 4846 return (err); 4847 } 4848 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4849 4850 err = nxge_param_set_ip_opt(nxgep, NULL, 4851 NULL, (char *)pr_val, 4852 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 4853 4854 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4855 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4856 pr_name, result)); 4857 4858 return (err); 4859 } 4860 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 4861 if (pr_val == NULL) { 4862 err = EINVAL; 4863 return (err); 4864 } 4865 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4866 4867 err = nxge_param_set_ip_opt(nxgep, NULL, 4868 NULL, (char *)pr_val, 4869 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 4870 4871 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4872 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4873 pr_name, result)); 4874 4875 return (err); 4876 } 4877 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 4878 if (pr_val == NULL) { 4879 err = EINVAL; 4880 return (err); 4881 } 4882 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4883 4884 err = nxge_param_set_ip_opt(nxgep, NULL, 4885 NULL, (char *)pr_val, 4886 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 4887 4888 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4889 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4890 pr_name, result)); 4891 4892 return (err); 4893 } 4894 4895 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 4896 if (pr_val == NULL) { 4897 err = EINVAL; 4898 return (err); 4899 } 4900 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4901 4902 err = nxge_param_set_ip_opt(nxgep, NULL, 4903 NULL, (char *)pr_val, 4904 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 4905 4906 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4907 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4908 pr_name, result)); 4909 4910 return (err); 4911 } 4912 4913 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 4914 if (pr_val == NULL) { 4915 err = EINVAL; 4916 return (err); 4917 } 4918 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4919 4920 err = nxge_param_set_ip_opt(nxgep, NULL, 4921 NULL, (char *)pr_val, 4922 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 4923 4924 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4925 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4926 pr_name, result)); 4927 4928 return (err); 4929 } 4930 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 4931 if (pr_val == NULL) { 4932 err = EINVAL; 4933 return (err); 4934 } 4935 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4936 4937 err = nxge_param_set_ip_opt(nxgep, NULL, 4938 NULL, (char *)pr_val, 4939 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 4940 4941 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4942 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4943 pr_name, result)); 4944 4945 return (err); 4946 } 4947 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 4948 if (pr_val == NULL) { 4949 err = EINVAL; 4950 return (err); 4951 } 4952 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4953 4954 err = nxge_param_set_ip_opt(nxgep, NULL, 4955 NULL, (char *)pr_val, 4956 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 4957 4958 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4959 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4960 pr_name, result)); 4961 4962 return (err); 4963 } 4964 4965 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 4966 if (pr_val == NULL) { 4967 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4968 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 4969 err = EINVAL; 4970 return (err); 4971 } 4972 4973 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4974 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4975 "<== nxge_set_priv_prop: name %s " 4976 "(lso %d pr_val %s value %d)", 4977 pr_name, nxgep->soft_lso_enable, pr_val, result)); 4978 4979 if (result > 1 || result < 0) { 4980 err = EINVAL; 4981 } else { 4982 if (nxgep->soft_lso_enable == (uint32_t)result) { 4983 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4984 "no change (%d %d)", 4985 nxgep->soft_lso_enable, result)); 4986 return (0); 4987 } 4988 } 4989 4990 nxgep->soft_lso_enable = (int)result; 4991 4992 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4993 "<== nxge_set_priv_prop: name %s (value %d)", 4994 pr_name, result)); 4995 4996 return (err); 4997 } 4998 /* 4999 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the 5000 * following code to be executed. 5001 */ 5002 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5003 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5004 (caddr_t)¶m_arr[param_anar_10gfdx]); 5005 return (err); 5006 } 5007 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5008 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5009 (caddr_t)¶m_arr[param_anar_pause]); 5010 return (err); 5011 } 5012 5013 return (EINVAL); 5014 } 5015 5016 static int 5017 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags, 5018 uint_t pr_valsize, void *pr_val, uint_t *perm) 5019 { 5020 p_nxge_param_t param_arr = nxgep->param_arr; 5021 char valstr[MAXNAMELEN]; 5022 int err = EINVAL; 5023 uint_t strsize; 5024 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 5025 5026 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5027 "==> nxge_get_priv_prop: property %s", pr_name)); 5028 5029 /* function number */ 5030 if (strcmp(pr_name, "_function_number") == 0) { 5031 if (is_default) 5032 return (ENOTSUP); 5033 *perm = MAC_PROP_PERM_READ; 5034 (void) snprintf(valstr, sizeof (valstr), "%d", 5035 nxgep->function_num); 5036 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5037 "==> nxge_get_priv_prop: name %s " 5038 "(value %d valstr %s)", 5039 pr_name, nxgep->function_num, valstr)); 5040 5041 err = 0; 5042 goto done; 5043 } 5044 5045 /* Neptune firmware version */ 5046 if (strcmp(pr_name, "_fw_version") == 0) { 5047 if (is_default) 5048 return (ENOTSUP); 5049 *perm = MAC_PROP_PERM_READ; 5050 (void) snprintf(valstr, sizeof (valstr), "%s", 5051 nxgep->vpd_info.ver); 5052 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5053 "==> nxge_get_priv_prop: name %s " 5054 "(value %d valstr %s)", 5055 pr_name, nxgep->vpd_info.ver, valstr)); 5056 5057 err = 0; 5058 goto done; 5059 } 5060 5061 /* port PHY mode */ 5062 if (strcmp(pr_name, "_port_mode") == 0) { 5063 if (is_default) 5064 return (ENOTSUP); 5065 *perm = MAC_PROP_PERM_READ; 5066 switch (nxgep->mac.portmode) { 5067 case PORT_1G_COPPER: 5068 (void) snprintf(valstr, sizeof (valstr), "1G copper %s", 5069 nxgep->hot_swappable_phy ? 5070 "[Hot Swappable]" : ""); 5071 break; 5072 case PORT_1G_FIBER: 5073 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s", 5074 nxgep->hot_swappable_phy ? 5075 "[hot swappable]" : ""); 5076 break; 5077 case PORT_10G_COPPER: 5078 (void) snprintf(valstr, sizeof (valstr), 5079 "10G copper %s", 5080 nxgep->hot_swappable_phy ? 5081 "[hot swappable]" : ""); 5082 break; 5083 case PORT_10G_FIBER: 5084 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s", 5085 nxgep->hot_swappable_phy ? 5086 "[hot swappable]" : ""); 5087 break; 5088 case PORT_10G_SERDES: 5089 (void) snprintf(valstr, sizeof (valstr), 5090 "10G serdes %s", nxgep->hot_swappable_phy ? 5091 "[hot swappable]" : ""); 5092 break; 5093 case PORT_1G_SERDES: 5094 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s", 5095 nxgep->hot_swappable_phy ? 5096 "[hot swappable]" : ""); 5097 break; 5098 case PORT_1G_TN1010: 5099 (void) snprintf(valstr, sizeof (valstr), 5100 "1G TN1010 copper %s", nxgep->hot_swappable_phy ? 5101 "[hot swappable]" : ""); 5102 break; 5103 case PORT_10G_TN1010: 5104 (void) snprintf(valstr, sizeof (valstr), 5105 "10G TN1010 copper %s", nxgep->hot_swappable_phy ? 5106 "[hot swappable]" : ""); 5107 break; 5108 case PORT_1G_RGMII_FIBER: 5109 (void) snprintf(valstr, sizeof (valstr), 5110 "1G rgmii fiber %s", nxgep->hot_swappable_phy ? 5111 "[hot swappable]" : ""); 5112 break; 5113 case PORT_HSP_MODE: 5114 (void) snprintf(valstr, sizeof (valstr), 5115 "phy not present[hot swappable]"); 5116 break; 5117 default: 5118 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5119 nxgep->hot_swappable_phy ? 5120 "[hot swappable]" : ""); 5121 break; 5122 } 5123 5124 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5125 "==> nxge_get_priv_prop: name %s (value %s)", 5126 pr_name, valstr)); 5127 5128 err = 0; 5129 goto done; 5130 } 5131 5132 /* Hot swappable PHY */ 5133 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5134 if (is_default) 5135 return (ENOTSUP); 5136 *perm = MAC_PROP_PERM_READ; 5137 (void) snprintf(valstr, sizeof (valstr), "%s", 5138 nxgep->hot_swappable_phy ? 5139 "yes" : "no"); 5140 5141 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5142 "==> nxge_get_priv_prop: name %s " 5143 "(value %d valstr %s)", 5144 pr_name, nxgep->hot_swappable_phy, valstr)); 5145 5146 err = 0; 5147 goto done; 5148 } 5149 5150 5151 /* Receive Interrupt Blanking Parameters */ 5152 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5153 err = 0; 5154 if (is_default) { 5155 (void) snprintf(valstr, sizeof (valstr), 5156 "%d", RXDMA_RCR_TO_DEFAULT); 5157 goto done; 5158 } 5159 5160 (void) snprintf(valstr, sizeof (valstr), "%d", 5161 nxgep->intr_timeout); 5162 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5163 "==> nxge_get_priv_prop: name %s (value %d)", 5164 pr_name, 5165 (uint32_t)nxgep->intr_timeout)); 5166 goto done; 5167 } 5168 5169 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5170 err = 0; 5171 if (is_default) { 5172 (void) snprintf(valstr, sizeof (valstr), 5173 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5174 goto done; 5175 } 5176 (void) snprintf(valstr, sizeof (valstr), "%d", 5177 nxgep->intr_threshold); 5178 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5179 "==> nxge_get_priv_prop: name %s (value %d)", 5180 pr_name, (uint32_t)nxgep->intr_threshold)); 5181 5182 goto done; 5183 } 5184 5185 /* Classification and Load Distribution Configuration */ 5186 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5187 if (is_default) { 5188 (void) snprintf(valstr, sizeof (valstr), "%x", 5189 NXGE_CLASS_FLOW_GEN_SERVER); 5190 err = 0; 5191 goto done; 5192 } 5193 err = nxge_dld_get_ip_opt(nxgep, 5194 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5195 5196 (void) snprintf(valstr, sizeof (valstr), "%x", 5197 (int)param_arr[param_class_opt_ipv4_tcp].value); 5198 5199 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5200 "==> nxge_get_priv_prop: %s", valstr)); 5201 goto done; 5202 } 5203 5204 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5205 if (is_default) { 5206 (void) snprintf(valstr, sizeof (valstr), "%x", 5207 NXGE_CLASS_FLOW_GEN_SERVER); 5208 err = 0; 5209 goto done; 5210 } 5211 err = nxge_dld_get_ip_opt(nxgep, 5212 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5213 5214 (void) snprintf(valstr, sizeof (valstr), "%x", 5215 (int)param_arr[param_class_opt_ipv4_udp].value); 5216 5217 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5218 "==> nxge_get_priv_prop: %s", valstr)); 5219 goto done; 5220 } 5221 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5222 if (is_default) { 5223 (void) snprintf(valstr, sizeof (valstr), "%x", 5224 NXGE_CLASS_FLOW_GEN_SERVER); 5225 err = 0; 5226 goto done; 5227 } 5228 err = nxge_dld_get_ip_opt(nxgep, 5229 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 5230 5231 (void) snprintf(valstr, sizeof (valstr), "%x", 5232 (int)param_arr[param_class_opt_ipv4_ah].value); 5233 5234 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5235 "==> nxge_get_priv_prop: %s", valstr)); 5236 goto done; 5237 } 5238 5239 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5240 if (is_default) { 5241 (void) snprintf(valstr, sizeof (valstr), "%x", 5242 NXGE_CLASS_FLOW_GEN_SERVER); 5243 err = 0; 5244 goto done; 5245 } 5246 err = nxge_dld_get_ip_opt(nxgep, 5247 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 5248 5249 (void) snprintf(valstr, sizeof (valstr), "%x", 5250 (int)param_arr[param_class_opt_ipv4_sctp].value); 5251 5252 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5253 "==> nxge_get_priv_prop: %s", valstr)); 5254 goto done; 5255 } 5256 5257 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5258 if (is_default) { 5259 (void) snprintf(valstr, sizeof (valstr), "%x", 5260 NXGE_CLASS_FLOW_GEN_SERVER); 5261 err = 0; 5262 goto done; 5263 } 5264 err = nxge_dld_get_ip_opt(nxgep, 5265 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 5266 5267 (void) snprintf(valstr, sizeof (valstr), "%x", 5268 (int)param_arr[param_class_opt_ipv6_tcp].value); 5269 5270 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5271 "==> nxge_get_priv_prop: %s", valstr)); 5272 goto done; 5273 } 5274 5275 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5276 if (is_default) { 5277 (void) snprintf(valstr, sizeof (valstr), "%x", 5278 NXGE_CLASS_FLOW_GEN_SERVER); 5279 err = 0; 5280 goto done; 5281 } 5282 err = nxge_dld_get_ip_opt(nxgep, 5283 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 5284 5285 (void) snprintf(valstr, sizeof (valstr), "%x", 5286 (int)param_arr[param_class_opt_ipv6_udp].value); 5287 5288 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5289 "==> nxge_get_priv_prop: %s", valstr)); 5290 goto done; 5291 } 5292 5293 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5294 if (is_default) { 5295 (void) snprintf(valstr, sizeof (valstr), "%x", 5296 NXGE_CLASS_FLOW_GEN_SERVER); 5297 err = 0; 5298 goto done; 5299 } 5300 err = nxge_dld_get_ip_opt(nxgep, 5301 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 5302 5303 (void) snprintf(valstr, sizeof (valstr), "%x", 5304 (int)param_arr[param_class_opt_ipv6_ah].value); 5305 5306 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5307 "==> nxge_get_priv_prop: %s", valstr)); 5308 goto done; 5309 } 5310 5311 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5312 if (is_default) { 5313 (void) snprintf(valstr, sizeof (valstr), "%x", 5314 NXGE_CLASS_FLOW_GEN_SERVER); 5315 err = 0; 5316 goto done; 5317 } 5318 err = nxge_dld_get_ip_opt(nxgep, 5319 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 5320 5321 (void) snprintf(valstr, sizeof (valstr), "%x", 5322 (int)param_arr[param_class_opt_ipv6_sctp].value); 5323 5324 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5325 "==> nxge_get_priv_prop: %s", valstr)); 5326 goto done; 5327 } 5328 5329 /* Software LSO */ 5330 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5331 if (is_default) { 5332 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5333 err = 0; 5334 goto done; 5335 } 5336 (void) snprintf(valstr, sizeof (valstr), 5337 "%d", nxgep->soft_lso_enable); 5338 err = 0; 5339 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5340 "==> nxge_get_priv_prop: name %s (value %d)", 5341 pr_name, nxgep->soft_lso_enable)); 5342 5343 goto done; 5344 } 5345 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5346 err = 0; 5347 if (is_default || 5348 nxgep->param_arr[param_anar_10gfdx].value != 0) { 5349 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5350 goto done; 5351 } else { 5352 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5353 goto done; 5354 } 5355 } 5356 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5357 err = 0; 5358 if (is_default || 5359 nxgep->param_arr[param_anar_pause].value != 0) { 5360 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5361 goto done; 5362 } else { 5363 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5364 goto done; 5365 } 5366 } 5367 5368 done: 5369 if (err == 0) { 5370 strsize = (uint_t)strlen(valstr); 5371 if (pr_valsize < strsize) { 5372 err = ENOBUFS; 5373 } else { 5374 (void) strlcpy(pr_val, valstr, pr_valsize); 5375 } 5376 } 5377 5378 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5379 "<== nxge_get_priv_prop: return %d", err)); 5380 return (err); 5381 } 5382 5383 /* 5384 * Module loading and removing entry points. 5385 */ 5386 5387 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach, 5388 nodev, NULL, D_MP, NULL, nxge_quiesce); 5389 5390 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5391 5392 /* 5393 * Module linkage information for the kernel. 5394 */ 5395 static struct modldrv nxge_modldrv = { 5396 &mod_driverops, 5397 NXGE_DESC_VER, 5398 &nxge_dev_ops 5399 }; 5400 5401 static struct modlinkage modlinkage = { 5402 MODREV_1, (void *) &nxge_modldrv, NULL 5403 }; 5404 5405 int 5406 _init(void) 5407 { 5408 int status; 5409 5410 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 5411 5412 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5413 5414 mac_init_ops(&nxge_dev_ops, "nxge"); 5415 5416 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5417 if (status != 0) { 5418 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5419 "failed to init device soft state")); 5420 goto _init_exit; 5421 } 5422 5423 status = mod_install(&modlinkage); 5424 if (status != 0) { 5425 ddi_soft_state_fini(&nxge_list); 5426 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5427 goto _init_exit; 5428 } 5429 5430 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5431 5432 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5433 return (status); 5434 5435 _init_exit: 5436 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5437 MUTEX_DESTROY(&nxgedebuglock); 5438 return (status); 5439 } 5440 5441 int 5442 _fini(void) 5443 { 5444 int status; 5445 5446 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5447 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5448 5449 if (nxge_mblks_pending) 5450 return (EBUSY); 5451 5452 status = mod_remove(&modlinkage); 5453 if (status != DDI_SUCCESS) { 5454 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5455 "Module removal failed 0x%08x", 5456 status)); 5457 goto _fini_exit; 5458 } 5459 5460 mac_fini_ops(&nxge_dev_ops); 5461 5462 ddi_soft_state_fini(&nxge_list); 5463 5464 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5465 5466 MUTEX_DESTROY(&nxge_common_lock); 5467 MUTEX_DESTROY(&nxgedebuglock); 5468 return (status); 5469 5470 _fini_exit: 5471 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5472 return (status); 5473 } 5474 5475 int 5476 _info(struct modinfo *modinfop) 5477 { 5478 int status; 5479 5480 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5481 status = mod_info(&modlinkage, modinfop); 5482 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5483 5484 return (status); 5485 } 5486 5487 /*ARGSUSED*/ 5488 static int 5489 nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5490 { 5491 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5492 p_nxge_t nxgep = rhp->nxgep; 5493 uint32_t channel; 5494 p_tx_ring_t ring; 5495 5496 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5497 ring = nxgep->tx_rings->rings[channel]; 5498 5499 MUTEX_ENTER(&ring->lock); 5500 ring->tx_ring_handle = rhp->ring_handle; 5501 MUTEX_EXIT(&ring->lock); 5502 5503 return (0); 5504 } 5505 5506 static void 5507 nxge_tx_ring_stop(mac_ring_driver_t rdriver) 5508 { 5509 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5510 p_nxge_t nxgep = rhp->nxgep; 5511 uint32_t channel; 5512 p_tx_ring_t ring; 5513 5514 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5515 ring = nxgep->tx_rings->rings[channel]; 5516 5517 MUTEX_ENTER(&ring->lock); 5518 ring->tx_ring_handle = (mac_ring_handle_t)NULL; 5519 MUTEX_EXIT(&ring->lock); 5520 } 5521 5522 static int 5523 nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5524 { 5525 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5526 p_nxge_t nxgep = rhp->nxgep; 5527 uint32_t channel; 5528 p_rx_rcr_ring_t ring; 5529 int i; 5530 5531 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index; 5532 ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 5533 5534 MUTEX_ENTER(&ring->lock); 5535 5536 if (nxgep->rx_channel_started[channel] == B_TRUE) { 5537 MUTEX_EXIT(&ring->lock); 5538 return (0); 5539 } 5540 5541 /* set rcr_ring */ 5542 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) { 5543 if ((nxgep->ldgvp->ldvp[i].is_rxdma == 1) && 5544 (nxgep->ldgvp->ldvp[i].channel == channel)) { 5545 ring->ldvp = &nxgep->ldgvp->ldvp[i]; 5546 ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp; 5547 } 5548 } 5549 5550 nxgep->rx_channel_started[channel] = B_TRUE; 5551 ring->rcr_mac_handle = rhp->ring_handle; 5552 ring->rcr_gen_num = mr_gen_num; 5553 MUTEX_EXIT(&ring->lock); 5554 5555 return (0); 5556 } 5557 5558 static void 5559 nxge_rx_ring_stop(mac_ring_driver_t rdriver) 5560 { 5561 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5562 p_nxge_t nxgep = rhp->nxgep; 5563 uint32_t channel; 5564 p_rx_rcr_ring_t ring; 5565 5566 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index; 5567 ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 5568 5569 MUTEX_ENTER(&ring->lock); 5570 nxgep->rx_channel_started[channel] = B_FALSE; 5571 ring->rcr_mac_handle = NULL; 5572 MUTEX_EXIT(&ring->lock); 5573 } 5574 5575 /* 5576 * Callback funtion for MAC layer to register all rings. 5577 */ 5578 static void 5579 nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index, 5580 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh) 5581 { 5582 p_nxge_t nxgep = (p_nxge_t)arg; 5583 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 5584 5585 NXGE_DEBUG_MSG((nxgep, TX_CTL, 5586 "==> nxge_fill_ring 0x%x index %d", rtype, index)); 5587 5588 switch (rtype) { 5589 case MAC_RING_TYPE_TX: { 5590 p_nxge_ring_handle_t rhandlep; 5591 5592 NXGE_DEBUG_MSG((nxgep, TX_CTL, 5593 "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d", 5594 rtype, index, p_cfgp->tdc.count)); 5595 5596 ASSERT((index >= 0) && (index < p_cfgp->tdc.count)); 5597 rhandlep = &nxgep->tx_ring_handles[index]; 5598 rhandlep->nxgep = nxgep; 5599 rhandlep->index = index; 5600 rhandlep->ring_handle = rh; 5601 5602 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5603 infop->mri_start = nxge_tx_ring_start; 5604 infop->mri_stop = nxge_tx_ring_stop; 5605 infop->mri_tx = nxge_tx_ring_send; 5606 5607 break; 5608 } 5609 case MAC_RING_TYPE_RX: { 5610 p_nxge_ring_handle_t rhandlep; 5611 int nxge_rindex; 5612 mac_intr_t nxge_mac_intr; 5613 5614 NXGE_DEBUG_MSG((nxgep, RX_CTL, 5615 "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d", 5616 rtype, index, p_cfgp->max_rdcs)); 5617 5618 /* 5619 * 'index' is the ring index within the group. 5620 * Find the ring index in the nxge instance. 5621 */ 5622 nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index); 5623 5624 ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs)); 5625 rhandlep = &nxgep->rx_ring_handles[nxge_rindex]; 5626 rhandlep->nxgep = nxgep; 5627 rhandlep->index = nxge_rindex; 5628 rhandlep->ring_handle = rh; 5629 5630 /* 5631 * Entrypoint to enable interrupt (disable poll) and 5632 * disable interrupt (enable poll). 5633 */ 5634 nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep; 5635 nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll; 5636 nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll; 5637 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5638 infop->mri_start = nxge_rx_ring_start; 5639 infop->mri_stop = nxge_rx_ring_stop; 5640 infop->mri_intr = nxge_mac_intr; /* ??? */ 5641 infop->mri_poll = nxge_rx_poll; 5642 5643 break; 5644 } 5645 default: 5646 break; 5647 } 5648 5649 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x", 5650 rtype)); 5651 } 5652 5653 static void 5654 nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5655 mac_ring_type_t type) 5656 { 5657 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5658 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5659 nxge_t *nxge; 5660 nxge_grp_t *grp; 5661 nxge_rdc_grp_t *rdc_grp; 5662 uint16_t channel; /* device-wise ring id */ 5663 int dev_gindex; 5664 int rv; 5665 5666 nxge = rgroup->nxgep; 5667 5668 switch (type) { 5669 case MAC_RING_TYPE_TX: 5670 /* 5671 * nxge_grp_dc_add takes a channel number which is a 5672 * "devise" ring ID. 5673 */ 5674 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5675 5676 /* 5677 * Remove the ring from the default group 5678 */ 5679 if (rgroup->gindex != 0) { 5680 (void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5681 } 5682 5683 /* 5684 * nxge->tx_set.group[] is an array of groups indexed by 5685 * a "port" group ID. 5686 */ 5687 grp = nxge->tx_set.group[rgroup->gindex]; 5688 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5689 if (rv != 0) { 5690 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5691 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5692 } 5693 break; 5694 5695 case MAC_RING_TYPE_RX: 5696 /* 5697 * nxge->rx_set.group[] is an array of groups indexed by 5698 * a "port" group ID. 5699 */ 5700 grp = nxge->rx_set.group[rgroup->gindex]; 5701 5702 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5703 rgroup->gindex; 5704 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5705 5706 /* 5707 * nxge_grp_dc_add takes a channel number which is a 5708 * "devise" ring ID. 5709 */ 5710 channel = nxge->pt_config.hw_config.start_rdc + rhandle->index; 5711 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel); 5712 if (rv != 0) { 5713 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5714 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5715 } 5716 5717 rdc_grp->map |= (1 << channel); 5718 rdc_grp->max_rdcs++; 5719 5720 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl); 5721 break; 5722 } 5723 } 5724 5725 static void 5726 nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5727 mac_ring_type_t type) 5728 { 5729 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5730 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5731 nxge_t *nxge; 5732 uint16_t channel; /* device-wise ring id */ 5733 nxge_rdc_grp_t *rdc_grp; 5734 int dev_gindex; 5735 5736 nxge = rgroup->nxgep; 5737 5738 switch (type) { 5739 case MAC_RING_TYPE_TX: 5740 dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid + 5741 rgroup->gindex; 5742 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5743 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5744 5745 /* 5746 * Add the ring back to the default group 5747 */ 5748 if (rgroup->gindex != 0) { 5749 nxge_grp_t *grp; 5750 grp = nxge->tx_set.group[0]; 5751 (void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5752 } 5753 break; 5754 5755 case MAC_RING_TYPE_RX: 5756 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5757 rgroup->gindex; 5758 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5759 channel = rdc_grp->start_rdc + rhandle->index; 5760 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 5761 5762 rdc_grp->map &= ~(1 << channel); 5763 rdc_grp->max_rdcs--; 5764 5765 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl); 5766 break; 5767 } 5768 } 5769 5770 5771 /*ARGSUSED*/ 5772 static nxge_status_t 5773 nxge_add_intrs(p_nxge_t nxgep) 5774 { 5775 5776 int intr_types; 5777 int type = 0; 5778 int ddi_status = DDI_SUCCESS; 5779 nxge_status_t status = NXGE_OK; 5780 5781 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 5782 5783 nxgep->nxge_intr_type.intr_registered = B_FALSE; 5784 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 5785 nxgep->nxge_intr_type.msi_intx_cnt = 0; 5786 nxgep->nxge_intr_type.intr_added = 0; 5787 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 5788 nxgep->nxge_intr_type.intr_type = 0; 5789 5790 if (nxgep->niu_type == N2_NIU) { 5791 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5792 } else if (nxge_msi_enable) { 5793 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5794 } 5795 5796 /* Get the supported interrupt types */ 5797 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 5798 != DDI_SUCCESS) { 5799 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 5800 "ddi_intr_get_supported_types failed: status 0x%08x", 5801 ddi_status)); 5802 return (NXGE_ERROR | NXGE_DDI_FAILED); 5803 } 5804 nxgep->nxge_intr_type.intr_types = intr_types; 5805 5806 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5807 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5808 5809 /* 5810 * Solaris MSIX is not supported yet. use MSI for now. 5811 * nxge_msi_enable (1): 5812 * 1 - MSI 2 - MSI-X others - FIXED 5813 */ 5814 switch (nxge_msi_enable) { 5815 default: 5816 type = DDI_INTR_TYPE_FIXED; 5817 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5818 "use fixed (intx emulation) type %08x", 5819 type)); 5820 break; 5821 5822 case 2: 5823 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5824 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5825 if (intr_types & DDI_INTR_TYPE_MSIX) { 5826 type = DDI_INTR_TYPE_MSIX; 5827 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5828 "ddi_intr_get_supported_types: MSIX 0x%08x", 5829 type)); 5830 } else if (intr_types & DDI_INTR_TYPE_MSI) { 5831 type = DDI_INTR_TYPE_MSI; 5832 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5833 "ddi_intr_get_supported_types: MSI 0x%08x", 5834 type)); 5835 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5836 type = DDI_INTR_TYPE_FIXED; 5837 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5838 "ddi_intr_get_supported_types: MSXED0x%08x", 5839 type)); 5840 } 5841 break; 5842 5843 case 1: 5844 if (intr_types & DDI_INTR_TYPE_MSI) { 5845 type = DDI_INTR_TYPE_MSI; 5846 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5847 "ddi_intr_get_supported_types: MSI 0x%08x", 5848 type)); 5849 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 5850 type = DDI_INTR_TYPE_MSIX; 5851 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5852 "ddi_intr_get_supported_types: MSIX 0x%08x", 5853 type)); 5854 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5855 type = DDI_INTR_TYPE_FIXED; 5856 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5857 "ddi_intr_get_supported_types: MSXED0x%08x", 5858 type)); 5859 } 5860 } 5861 5862 nxgep->nxge_intr_type.intr_type = type; 5863 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 5864 type == DDI_INTR_TYPE_FIXED) && 5865 nxgep->nxge_intr_type.niu_msi_enable) { 5866 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 5867 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5868 " nxge_add_intrs: " 5869 " nxge_add_intrs_adv failed: status 0x%08x", 5870 status)); 5871 return (status); 5872 } else { 5873 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5874 "interrupts registered : type %d", type)); 5875 nxgep->nxge_intr_type.intr_registered = B_TRUE; 5876 5877 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5878 "\nAdded advanced nxge add_intr_adv " 5879 "intr type 0x%x\n", type)); 5880 5881 return (status); 5882 } 5883 } 5884 5885 if (!nxgep->nxge_intr_type.intr_registered) { 5886 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 5887 "failed to register interrupts")); 5888 return (NXGE_ERROR | NXGE_DDI_FAILED); 5889 } 5890 5891 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 5892 return (status); 5893 } 5894 5895 static nxge_status_t 5896 nxge_add_intrs_adv(p_nxge_t nxgep) 5897 { 5898 int intr_type; 5899 p_nxge_intr_t intrp; 5900 5901 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 5902 5903 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5904 intr_type = intrp->intr_type; 5905 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 5906 intr_type)); 5907 5908 switch (intr_type) { 5909 case DDI_INTR_TYPE_MSI: /* 0x2 */ 5910 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 5911 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 5912 5913 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 5914 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 5915 5916 default: 5917 return (NXGE_ERROR); 5918 } 5919 } 5920 5921 5922 /*ARGSUSED*/ 5923 static nxge_status_t 5924 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 5925 { 5926 dev_info_t *dip = nxgep->dip; 5927 p_nxge_ldg_t ldgp; 5928 p_nxge_intr_t intrp; 5929 uint_t *inthandler; 5930 void *arg1, *arg2; 5931 int behavior; 5932 int nintrs, navail, nrequest; 5933 int nactual, nrequired; 5934 int inum = 0; 5935 int x, y; 5936 int ddi_status = DDI_SUCCESS; 5937 nxge_status_t status = NXGE_OK; 5938 5939 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 5940 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5941 intrp->start_inum = 0; 5942 5943 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 5944 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 5945 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5946 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 5947 "nintrs: %d", ddi_status, nintrs)); 5948 return (NXGE_ERROR | NXGE_DDI_FAILED); 5949 } 5950 5951 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 5952 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 5953 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5954 "ddi_intr_get_navail() failed, status: 0x%x%, " 5955 "nintrs: %d", ddi_status, navail)); 5956 return (NXGE_ERROR | NXGE_DDI_FAILED); 5957 } 5958 5959 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5960 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 5961 nintrs, navail)); 5962 5963 /* PSARC/2007/453 MSI-X interrupt limit override */ 5964 if (int_type == DDI_INTR_TYPE_MSIX) { 5965 nrequest = nxge_create_msi_property(nxgep); 5966 if (nrequest < navail) { 5967 navail = nrequest; 5968 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5969 "nxge_add_intrs_adv_type: nintrs %d " 5970 "navail %d (nrequest %d)", 5971 nintrs, navail, nrequest)); 5972 } 5973 } 5974 5975 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 5976 /* MSI must be power of 2 */ 5977 if ((navail & 16) == 16) { 5978 navail = 16; 5979 } else if ((navail & 8) == 8) { 5980 navail = 8; 5981 } else if ((navail & 4) == 4) { 5982 navail = 4; 5983 } else if ((navail & 2) == 2) { 5984 navail = 2; 5985 } else { 5986 navail = 1; 5987 } 5988 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5989 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 5990 "navail %d", nintrs, navail)); 5991 } 5992 5993 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 5994 DDI_INTR_ALLOC_NORMAL); 5995 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 5996 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 5997 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 5998 navail, &nactual, behavior); 5999 if (ddi_status != DDI_SUCCESS || nactual == 0) { 6000 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6001 " ddi_intr_alloc() failed: %d", 6002 ddi_status)); 6003 kmem_free(intrp->htable, intrp->intr_size); 6004 return (NXGE_ERROR | NXGE_DDI_FAILED); 6005 } 6006 6007 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 6008 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 6009 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6010 " ddi_intr_get_pri() failed: %d", 6011 ddi_status)); 6012 /* Free already allocated interrupts */ 6013 for (y = 0; y < nactual; y++) { 6014 (void) ddi_intr_free(intrp->htable[y]); 6015 } 6016 6017 kmem_free(intrp->htable, intrp->intr_size); 6018 return (NXGE_ERROR | NXGE_DDI_FAILED); 6019 } 6020 6021 nrequired = 0; 6022 switch (nxgep->niu_type) { 6023 default: 6024 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 6025 break; 6026 6027 case N2_NIU: 6028 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 6029 break; 6030 } 6031 6032 if (status != NXGE_OK) { 6033 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6034 "nxge_add_intrs_adv_typ:nxge_ldgv_init " 6035 "failed: 0x%x", status)); 6036 /* Free already allocated interrupts */ 6037 for (y = 0; y < nactual; y++) { 6038 (void) ddi_intr_free(intrp->htable[y]); 6039 } 6040 6041 kmem_free(intrp->htable, intrp->intr_size); 6042 return (status); 6043 } 6044 6045 ldgp = nxgep->ldgvp->ldgp; 6046 for (x = 0; x < nrequired; x++, ldgp++) { 6047 ldgp->vector = (uint8_t)x; 6048 ldgp->intdata = SID_DATA(ldgp->func, x); 6049 arg1 = ldgp->ldvp; 6050 arg2 = nxgep; 6051 if (ldgp->nldvs == 1) { 6052 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 6053 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6054 "nxge_add_intrs_adv_type: " 6055 "arg1 0x%x arg2 0x%x: " 6056 "1-1 int handler (entry %d intdata 0x%x)\n", 6057 arg1, arg2, 6058 x, ldgp->intdata)); 6059 } else if (ldgp->nldvs > 1) { 6060 inthandler = (uint_t *)ldgp->sys_intr_handler; 6061 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6062 "nxge_add_intrs_adv_type: " 6063 "arg1 0x%x arg2 0x%x: " 6064 "nldevs %d int handler " 6065 "(entry %d intdata 0x%x)\n", 6066 arg1, arg2, 6067 ldgp->nldvs, x, ldgp->intdata)); 6068 } 6069 6070 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6071 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 6072 "htable 0x%llx", x, intrp->htable[x])); 6073 6074 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6075 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6076 != DDI_SUCCESS) { 6077 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6078 "==> nxge_add_intrs_adv_type: failed #%d " 6079 "status 0x%x", x, ddi_status)); 6080 for (y = 0; y < intrp->intr_added; y++) { 6081 (void) ddi_intr_remove_handler( 6082 intrp->htable[y]); 6083 } 6084 /* Free already allocated intr */ 6085 for (y = 0; y < nactual; y++) { 6086 (void) ddi_intr_free(intrp->htable[y]); 6087 } 6088 kmem_free(intrp->htable, intrp->intr_size); 6089 6090 (void) nxge_ldgv_uninit(nxgep); 6091 6092 return (NXGE_ERROR | NXGE_DDI_FAILED); 6093 } 6094 intrp->intr_added++; 6095 } 6096 6097 intrp->msi_intx_cnt = nactual; 6098 6099 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6100 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 6101 navail, nactual, 6102 intrp->msi_intx_cnt, 6103 intrp->intr_added)); 6104 6105 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6106 6107 (void) nxge_intr_ldgv_init(nxgep); 6108 6109 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type")); 6110 6111 return (status); 6112 } 6113 6114 /*ARGSUSED*/ 6115 static nxge_status_t 6116 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type) 6117 { 6118 dev_info_t *dip = nxgep->dip; 6119 p_nxge_ldg_t ldgp; 6120 p_nxge_intr_t intrp; 6121 uint_t *inthandler; 6122 void *arg1, *arg2; 6123 int behavior; 6124 int nintrs, navail; 6125 int nactual, nrequired; 6126 int inum = 0; 6127 int x, y; 6128 int ddi_status = DDI_SUCCESS; 6129 nxge_status_t status = NXGE_OK; 6130 6131 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix")); 6132 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6133 intrp->start_inum = 0; 6134 6135 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 6136 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 6137 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6138 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 6139 "nintrs: %d", status, nintrs)); 6140 return (NXGE_ERROR | NXGE_DDI_FAILED); 6141 } 6142 6143 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 6144 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 6145 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6146 "ddi_intr_get_navail() failed, status: 0x%x%, " 6147 "nintrs: %d", ddi_status, navail)); 6148 return (NXGE_ERROR | NXGE_DDI_FAILED); 6149 } 6150 6151 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6152 "ddi_intr_get_navail() returned: nintrs %d, naavail %d", 6153 nintrs, navail)); 6154 6155 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 6156 DDI_INTR_ALLOC_NORMAL); 6157 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 6158 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 6159 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 6160 navail, &nactual, behavior); 6161 if (ddi_status != DDI_SUCCESS || nactual == 0) { 6162 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6163 " ddi_intr_alloc() failed: %d", 6164 ddi_status)); 6165 kmem_free(intrp->htable, intrp->intr_size); 6166 return (NXGE_ERROR | NXGE_DDI_FAILED); 6167 } 6168 6169 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 6170 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 6171 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6172 " ddi_intr_get_pri() failed: %d", 6173 ddi_status)); 6174 /* Free already allocated interrupts */ 6175 for (y = 0; y < nactual; y++) { 6176 (void) ddi_intr_free(intrp->htable[y]); 6177 } 6178 6179 kmem_free(intrp->htable, intrp->intr_size); 6180 return (NXGE_ERROR | NXGE_DDI_FAILED); 6181 } 6182 6183 nrequired = 0; 6184 switch (nxgep->niu_type) { 6185 default: 6186 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 6187 break; 6188 6189 case N2_NIU: 6190 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 6191 break; 6192 } 6193 6194 if (status != NXGE_OK) { 6195 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6196 "nxge_add_intrs_adv_type_fix:nxge_ldgv_init " 6197 "failed: 0x%x", status)); 6198 /* Free already allocated interrupts */ 6199 for (y = 0; y < nactual; y++) { 6200 (void) ddi_intr_free(intrp->htable[y]); 6201 } 6202 6203 kmem_free(intrp->htable, intrp->intr_size); 6204 return (status); 6205 } 6206 6207 ldgp = nxgep->ldgvp->ldgp; 6208 for (x = 0; x < nrequired; x++, ldgp++) { 6209 ldgp->vector = (uint8_t)x; 6210 if (nxgep->niu_type != N2_NIU) { 6211 ldgp->intdata = SID_DATA(ldgp->func, x); 6212 } 6213 6214 arg1 = ldgp->ldvp; 6215 arg2 = nxgep; 6216 if (ldgp->nldvs == 1) { 6217 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 6218 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6219 "nxge_add_intrs_adv_type_fix: " 6220 "1-1 int handler(%d) ldg %d ldv %d " 6221 "arg1 $%p arg2 $%p\n", 6222 x, ldgp->ldg, ldgp->ldvp->ldv, 6223 arg1, arg2)); 6224 } else if (ldgp->nldvs > 1) { 6225 inthandler = (uint_t *)ldgp->sys_intr_handler; 6226 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6227 "nxge_add_intrs_adv_type_fix: " 6228 "shared ldv %d int handler(%d) ldv %d ldg %d" 6229 "arg1 0x%016llx arg2 0x%016llx\n", 6230 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6231 arg1, arg2)); 6232 } 6233 6234 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6235 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6236 != DDI_SUCCESS) { 6237 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6238 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6239 "status 0x%x", x, ddi_status)); 6240 for (y = 0; y < intrp->intr_added; y++) { 6241 (void) ddi_intr_remove_handler( 6242 intrp->htable[y]); 6243 } 6244 for (y = 0; y < nactual; y++) { 6245 (void) ddi_intr_free(intrp->htable[y]); 6246 } 6247 /* Free already allocated intr */ 6248 kmem_free(intrp->htable, intrp->intr_size); 6249 6250 (void) nxge_ldgv_uninit(nxgep); 6251 6252 return (NXGE_ERROR | NXGE_DDI_FAILED); 6253 } 6254 intrp->intr_added++; 6255 } 6256 6257 intrp->msi_intx_cnt = nactual; 6258 6259 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6260 6261 status = nxge_intr_ldgv_init(nxgep); 6262 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6263 6264 return (status); 6265 } 6266 6267 static void 6268 nxge_remove_intrs(p_nxge_t nxgep) 6269 { 6270 int i, inum; 6271 p_nxge_intr_t intrp; 6272 6273 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6274 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6275 if (!intrp->intr_registered) { 6276 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6277 "<== nxge_remove_intrs: interrupts not registered")); 6278 return; 6279 } 6280 6281 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 6282 6283 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6284 (void) ddi_intr_block_disable(intrp->htable, 6285 intrp->intr_added); 6286 } else { 6287 for (i = 0; i < intrp->intr_added; i++) { 6288 (void) ddi_intr_disable(intrp->htable[i]); 6289 } 6290 } 6291 6292 for (inum = 0; inum < intrp->intr_added; inum++) { 6293 if (intrp->htable[inum]) { 6294 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6295 } 6296 } 6297 6298 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6299 if (intrp->htable[inum]) { 6300 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6301 "nxge_remove_intrs: ddi_intr_free inum %d " 6302 "msi_intx_cnt %d intr_added %d", 6303 inum, 6304 intrp->msi_intx_cnt, 6305 intrp->intr_added)); 6306 6307 (void) ddi_intr_free(intrp->htable[inum]); 6308 } 6309 } 6310 6311 kmem_free(intrp->htable, intrp->intr_size); 6312 intrp->intr_registered = B_FALSE; 6313 intrp->intr_enabled = B_FALSE; 6314 intrp->msi_intx_cnt = 0; 6315 intrp->intr_added = 0; 6316 6317 (void) nxge_ldgv_uninit(nxgep); 6318 6319 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6320 "#msix-request"); 6321 6322 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6323 } 6324 6325 /*ARGSUSED*/ 6326 static void 6327 nxge_intrs_enable(p_nxge_t nxgep) 6328 { 6329 p_nxge_intr_t intrp; 6330 int i; 6331 int status; 6332 6333 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6334 6335 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6336 6337 if (!intrp->intr_registered) { 6338 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6339 "interrupts are not registered")); 6340 return; 6341 } 6342 6343 if (intrp->intr_enabled) { 6344 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6345 "<== nxge_intrs_enable: already enabled")); 6346 return; 6347 } 6348 6349 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6350 status = ddi_intr_block_enable(intrp->htable, 6351 intrp->intr_added); 6352 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6353 "block enable - status 0x%x total inums #%d\n", 6354 status, intrp->intr_added)); 6355 } else { 6356 for (i = 0; i < intrp->intr_added; i++) { 6357 status = ddi_intr_enable(intrp->htable[i]); 6358 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6359 "ddi_intr_enable:enable - status 0x%x " 6360 "total inums %d enable inum #%d\n", 6361 status, intrp->intr_added, i)); 6362 if (status == DDI_SUCCESS) { 6363 intrp->intr_enabled = B_TRUE; 6364 } 6365 } 6366 } 6367 6368 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable")); 6369 } 6370 6371 /*ARGSUSED*/ 6372 static void 6373 nxge_intrs_disable(p_nxge_t nxgep) 6374 { 6375 p_nxge_intr_t intrp; 6376 int i; 6377 6378 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable")); 6379 6380 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6381 6382 if (!intrp->intr_registered) { 6383 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: " 6384 "interrupts are not registered")); 6385 return; 6386 } 6387 6388 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6389 (void) ddi_intr_block_disable(intrp->htable, 6390 intrp->intr_added); 6391 } else { 6392 for (i = 0; i < intrp->intr_added; i++) { 6393 (void) ddi_intr_disable(intrp->htable[i]); 6394 } 6395 } 6396 6397 intrp->intr_enabled = B_FALSE; 6398 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable")); 6399 } 6400 6401 nxge_status_t 6402 nxge_mac_register(p_nxge_t nxgep) 6403 { 6404 mac_register_t *macp; 6405 int status; 6406 6407 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register")); 6408 6409 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 6410 return (NXGE_ERROR); 6411 6412 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 6413 macp->m_driver = nxgep; 6414 macp->m_dip = nxgep->dip; 6415 if (!isLDOMguest(nxgep)) { 6416 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; 6417 } else { 6418 macp->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP); 6419 macp->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP); 6420 (void) memset(macp->m_src_addr, 0xff, sizeof (MAXMACADDRLEN)); 6421 } 6422 macp->m_callbacks = &nxge_m_callbacks; 6423 macp->m_min_sdu = 0; 6424 nxgep->mac.default_mtu = nxgep->mac.maxframesize - 6425 NXGE_EHEADER_VLAN_CRC; 6426 macp->m_max_sdu = nxgep->mac.default_mtu; 6427 macp->m_margin = VLAN_TAGSZ; 6428 macp->m_priv_props = nxge_priv_props; 6429 macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS; 6430 if (isLDOMguest(nxgep)) { 6431 macp->m_v12n = MAC_VIRT_LEVEL1 | MAC_VIRT_SERIALIZE; 6432 } else { 6433 macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1 | \ 6434 MAC_VIRT_SERIALIZE; 6435 } 6436 6437 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 6438 "==> nxge_mac_register: instance %d " 6439 "max_sdu %d margin %d maxframe %d (header %d)", 6440 nxgep->instance, 6441 macp->m_max_sdu, macp->m_margin, 6442 nxgep->mac.maxframesize, 6443 NXGE_EHEADER_VLAN_CRC)); 6444 6445 status = mac_register(macp, &nxgep->mach); 6446 if (isLDOMguest(nxgep)) { 6447 KMEM_FREE(macp->m_src_addr, MAXMACADDRLEN); 6448 KMEM_FREE(macp->m_dst_addr, MAXMACADDRLEN); 6449 } 6450 mac_free(macp); 6451 6452 if (status != 0) { 6453 cmn_err(CE_WARN, 6454 "!nxge_mac_register failed (status %d instance %d)", 6455 status, nxgep->instance); 6456 return (NXGE_ERROR); 6457 } 6458 6459 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success " 6460 "(instance %d)", nxgep->instance)); 6461 6462 return (NXGE_OK); 6463 } 6464 6465 void 6466 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp) 6467 { 6468 ssize_t size; 6469 mblk_t *nmp; 6470 uint8_t blk_id; 6471 uint8_t chan; 6472 uint32_t err_id; 6473 err_inject_t *eip; 6474 6475 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject")); 6476 6477 size = 1024; 6478 nmp = mp->b_cont; 6479 eip = (err_inject_t *)nmp->b_rptr; 6480 blk_id = eip->blk_id; 6481 err_id = eip->err_id; 6482 chan = eip->chan; 6483 cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id); 6484 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id); 6485 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 6486 switch (blk_id) { 6487 case MAC_BLK_ID: 6488 break; 6489 case TXMAC_BLK_ID: 6490 break; 6491 case RXMAC_BLK_ID: 6492 break; 6493 case MIF_BLK_ID: 6494 break; 6495 case IPP_BLK_ID: 6496 nxge_ipp_inject_err(nxgep, err_id); 6497 break; 6498 case TXC_BLK_ID: 6499 nxge_txc_inject_err(nxgep, err_id); 6500 break; 6501 case TXDMA_BLK_ID: 6502 nxge_txdma_inject_err(nxgep, err_id, chan); 6503 break; 6504 case RXDMA_BLK_ID: 6505 nxge_rxdma_inject_err(nxgep, err_id, chan); 6506 break; 6507 case ZCP_BLK_ID: 6508 nxge_zcp_inject_err(nxgep, err_id); 6509 break; 6510 case ESPC_BLK_ID: 6511 break; 6512 case FFLP_BLK_ID: 6513 break; 6514 case PHY_BLK_ID: 6515 break; 6516 case ETHER_SERDES_BLK_ID: 6517 break; 6518 case PCIE_SERDES_BLK_ID: 6519 break; 6520 case VIR_BLK_ID: 6521 break; 6522 } 6523 6524 nmp->b_wptr = nmp->b_rptr + size; 6525 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 6526 6527 miocack(wq, mp, (int)size, 0); 6528 } 6529 6530 static int 6531 nxge_init_common_dev(p_nxge_t nxgep) 6532 { 6533 p_nxge_hw_list_t hw_p; 6534 dev_info_t *p_dip; 6535 6536 ASSERT(nxgep != NULL); 6537 6538 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 6539 6540 p_dip = nxgep->p_dip; 6541 MUTEX_ENTER(&nxge_common_lock); 6542 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6543 "==> nxge_init_common_dev:func # %d", 6544 nxgep->function_num)); 6545 /* 6546 * Loop through existing per neptune hardware list. 6547 */ 6548 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6549 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6550 "==> nxge_init_common_device:func # %d " 6551 "hw_p $%p parent dip $%p", 6552 nxgep->function_num, 6553 hw_p, 6554 p_dip)); 6555 if (hw_p->parent_devp == p_dip) { 6556 nxgep->nxge_hw_p = hw_p; 6557 hw_p->ndevs++; 6558 hw_p->nxge_p[nxgep->function_num] = nxgep; 6559 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6560 "==> nxge_init_common_device:func # %d " 6561 "hw_p $%p parent dip $%p " 6562 "ndevs %d (found)", 6563 nxgep->function_num, 6564 hw_p, 6565 p_dip, 6566 hw_p->ndevs)); 6567 break; 6568 } 6569 } 6570 6571 if (hw_p == NULL) { 6572 6573 char **prop_val; 6574 uint_t prop_len; 6575 int i; 6576 6577 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6578 "==> nxge_init_common_device:func # %d " 6579 "parent dip $%p (new)", 6580 nxgep->function_num, 6581 p_dip)); 6582 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 6583 hw_p->parent_devp = p_dip; 6584 hw_p->magic = NXGE_NEPTUNE_MAGIC; 6585 nxgep->nxge_hw_p = hw_p; 6586 hw_p->ndevs++; 6587 hw_p->nxge_p[nxgep->function_num] = nxgep; 6588 hw_p->next = nxge_hw_list; 6589 if (nxgep->niu_type == N2_NIU) { 6590 hw_p->niu_type = N2_NIU; 6591 hw_p->platform_type = P_NEPTUNE_NIU; 6592 } else { 6593 hw_p->niu_type = NIU_TYPE_NONE; 6594 hw_p->platform_type = P_NEPTUNE_NONE; 6595 } 6596 6597 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 6598 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 6599 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 6600 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 6601 6602 nxge_hw_list = hw_p; 6603 6604 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0, 6605 "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) { 6606 for (i = 0; i < prop_len; i++) { 6607 if ((strcmp((caddr_t)prop_val[i], 6608 NXGE_ROCK_COMPATIBLE) == 0)) { 6609 hw_p->platform_type = P_NEPTUNE_ROCK; 6610 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6611 "ROCK hw_p->platform_type %d", 6612 hw_p->platform_type)); 6613 break; 6614 } 6615 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6616 "nxge_init_common_dev: read compatible" 6617 " property[%d] val[%s]", 6618 i, (caddr_t)prop_val[i])); 6619 } 6620 } 6621 6622 ddi_prop_free(prop_val); 6623 6624 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list); 6625 } 6626 6627 MUTEX_EXIT(&nxge_common_lock); 6628 6629 nxgep->platform_type = hw_p->platform_type; 6630 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d", 6631 nxgep->platform_type)); 6632 if (nxgep->niu_type != N2_NIU) { 6633 nxgep->niu_type = hw_p->niu_type; 6634 } 6635 6636 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6637 "==> nxge_init_common_device (nxge_hw_list) $%p", 6638 nxge_hw_list)); 6639 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device")); 6640 6641 return (NXGE_OK); 6642 } 6643 6644 static void 6645 nxge_uninit_common_dev(p_nxge_t nxgep) 6646 { 6647 p_nxge_hw_list_t hw_p, h_hw_p; 6648 p_nxge_dma_pt_cfg_t p_dma_cfgp; 6649 p_nxge_hw_pt_cfg_t p_cfgp; 6650 dev_info_t *p_dip; 6651 6652 ASSERT(nxgep != NULL); 6653 6654 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device")); 6655 if (nxgep->nxge_hw_p == NULL) { 6656 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6657 "<== nxge_uninit_common_device (no common)")); 6658 return; 6659 } 6660 6661 MUTEX_ENTER(&nxge_common_lock); 6662 h_hw_p = nxge_hw_list; 6663 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6664 p_dip = hw_p->parent_devp; 6665 if (nxgep->nxge_hw_p == hw_p && 6666 p_dip == nxgep->p_dip && 6667 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC && 6668 hw_p->magic == NXGE_NEPTUNE_MAGIC) { 6669 6670 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6671 "==> nxge_uninit_common_device:func # %d " 6672 "hw_p $%p parent dip $%p " 6673 "ndevs %d (found)", 6674 nxgep->function_num, 6675 hw_p, 6676 p_dip, 6677 hw_p->ndevs)); 6678 6679 /* 6680 * Release the RDC table, a shared resoruce 6681 * of the nxge hardware. The RDC table was 6682 * assigned to this instance of nxge in 6683 * nxge_use_cfg_dma_config(). 6684 */ 6685 if (!isLDOMguest(nxgep)) { 6686 p_dma_cfgp = 6687 (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 6688 p_cfgp = 6689 (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config; 6690 (void) nxge_fzc_rdc_tbl_unbind(nxgep, 6691 p_cfgp->def_mac_rxdma_grpid); 6692 6693 /* Cleanup any outstanding groups. */ 6694 nxge_grp_cleanup(nxgep); 6695 } 6696 6697 if (hw_p->ndevs) { 6698 hw_p->ndevs--; 6699 } 6700 hw_p->nxge_p[nxgep->function_num] = NULL; 6701 if (!hw_p->ndevs) { 6702 MUTEX_DESTROY(&hw_p->nxge_vlan_lock); 6703 MUTEX_DESTROY(&hw_p->nxge_tcam_lock); 6704 MUTEX_DESTROY(&hw_p->nxge_cfg_lock); 6705 MUTEX_DESTROY(&hw_p->nxge_mdio_lock); 6706 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6707 "==> nxge_uninit_common_device: " 6708 "func # %d " 6709 "hw_p $%p parent dip $%p " 6710 "ndevs %d (last)", 6711 nxgep->function_num, 6712 hw_p, 6713 p_dip, 6714 hw_p->ndevs)); 6715 6716 nxge_hio_uninit(nxgep); 6717 6718 if (hw_p == nxge_hw_list) { 6719 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6720 "==> nxge_uninit_common_device:" 6721 "remove head func # %d " 6722 "hw_p $%p parent dip $%p " 6723 "ndevs %d (head)", 6724 nxgep->function_num, 6725 hw_p, 6726 p_dip, 6727 hw_p->ndevs)); 6728 nxge_hw_list = hw_p->next; 6729 } else { 6730 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6731 "==> nxge_uninit_common_device:" 6732 "remove middle func # %d " 6733 "hw_p $%p parent dip $%p " 6734 "ndevs %d (middle)", 6735 nxgep->function_num, 6736 hw_p, 6737 p_dip, 6738 hw_p->ndevs)); 6739 h_hw_p->next = hw_p->next; 6740 } 6741 6742 nxgep->nxge_hw_p = NULL; 6743 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t)); 6744 } 6745 break; 6746 } else { 6747 h_hw_p = hw_p; 6748 } 6749 } 6750 6751 MUTEX_EXIT(&nxge_common_lock); 6752 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6753 "==> nxge_uninit_common_device (nxge_hw_list) $%p", 6754 nxge_hw_list)); 6755 6756 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<= nxge_uninit_common_device")); 6757 } 6758 6759 /* 6760 * Determines the number of ports from the niu_type or the platform type. 6761 * Returns the number of ports, or returns zero on failure. 6762 */ 6763 6764 int 6765 nxge_get_nports(p_nxge_t nxgep) 6766 { 6767 int nports = 0; 6768 6769 switch (nxgep->niu_type) { 6770 case N2_NIU: 6771 case NEPTUNE_2_10GF: 6772 nports = 2; 6773 break; 6774 case NEPTUNE_4_1GC: 6775 case NEPTUNE_2_10GF_2_1GC: 6776 case NEPTUNE_1_10GF_3_1GC: 6777 case NEPTUNE_1_1GC_1_10GF_2_1GC: 6778 case NEPTUNE_2_10GF_2_1GRF: 6779 nports = 4; 6780 break; 6781 default: 6782 switch (nxgep->platform_type) { 6783 case P_NEPTUNE_NIU: 6784 case P_NEPTUNE_ATLAS_2PORT: 6785 nports = 2; 6786 break; 6787 case P_NEPTUNE_ATLAS_4PORT: 6788 case P_NEPTUNE_MARAMBA_P0: 6789 case P_NEPTUNE_MARAMBA_P1: 6790 case P_NEPTUNE_ROCK: 6791 case P_NEPTUNE_ALONSO: 6792 nports = 4; 6793 break; 6794 default: 6795 break; 6796 } 6797 break; 6798 } 6799 6800 return (nports); 6801 } 6802 6803 /* 6804 * The following two functions are to support 6805 * PSARC/2007/453 MSI-X interrupt limit override. 6806 */ 6807 static int 6808 nxge_create_msi_property(p_nxge_t nxgep) 6809 { 6810 int nmsi; 6811 extern int ncpus; 6812 6813 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property")); 6814 6815 switch (nxgep->mac.portmode) { 6816 case PORT_10G_COPPER: 6817 case PORT_10G_FIBER: 6818 case PORT_10G_TN1010: 6819 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 6820 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 6821 /* 6822 * The maximum MSI-X requested will be 8. 6823 * If the # of CPUs is less than 8, we will request 6824 * # MSI-X based on the # of CPUs (default). 6825 */ 6826 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6827 "==>nxge_create_msi_property (10G): nxge_msix_10g_intrs %d", 6828 nxge_msix_10g_intrs)); 6829 if ((nxge_msix_10g_intrs == 0) || 6830 (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) { 6831 nmsi = NXGE_MSIX_REQUEST_10G; 6832 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6833 "==>nxge_create_msi_property (10G): reset to 8")); 6834 } else { 6835 nmsi = nxge_msix_10g_intrs; 6836 } 6837 6838 /* 6839 * If # of interrupts requested is 8 (default), 6840 * the checking of the number of cpus will be 6841 * be maintained. 6842 */ 6843 if ((nmsi == NXGE_MSIX_REQUEST_10G) && 6844 (ncpus < nmsi)) { 6845 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6846 "==>nxge_create_msi_property (10G): reset to 8")); 6847 nmsi = ncpus; 6848 } 6849 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6850 "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)", 6851 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6852 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6853 break; 6854 6855 default: 6856 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 6857 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 6858 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6859 "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d", 6860 nxge_msix_1g_intrs)); 6861 if ((nxge_msix_1g_intrs == 0) || 6862 (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) { 6863 nmsi = NXGE_MSIX_REQUEST_1G; 6864 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6865 "==>nxge_create_msi_property (1G): reset to 2")); 6866 } else { 6867 nmsi = nxge_msix_1g_intrs; 6868 } 6869 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6870 "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)", 6871 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6872 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6873 break; 6874 } 6875 6876 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property")); 6877 return (nmsi); 6878 } 6879 6880 /* ARGSUSED */ 6881 static int 6882 nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize, 6883 void *pr_val) 6884 { 6885 int err = 0; 6886 link_flowctrl_t fl; 6887 6888 switch (pr_num) { 6889 case MAC_PROP_AUTONEG: 6890 *(uint8_t *)pr_val = 1; 6891 break; 6892 case MAC_PROP_FLOWCTRL: 6893 if (pr_valsize < sizeof (link_flowctrl_t)) 6894 return (EINVAL); 6895 fl = LINK_FLOWCTRL_RX; 6896 bcopy(&fl, pr_val, sizeof (fl)); 6897 break; 6898 case MAC_PROP_ADV_1000FDX_CAP: 6899 case MAC_PROP_EN_1000FDX_CAP: 6900 *(uint8_t *)pr_val = 1; 6901 break; 6902 case MAC_PROP_ADV_100FDX_CAP: 6903 case MAC_PROP_EN_100FDX_CAP: 6904 *(uint8_t *)pr_val = 1; 6905 break; 6906 default: 6907 err = ENOTSUP; 6908 break; 6909 } 6910 return (err); 6911 } 6912 6913 6914 /* 6915 * The following is a software around for the Neptune hardware's 6916 * interrupt bugs; The Neptune hardware may generate spurious interrupts when 6917 * an interrupr handler is removed. 6918 */ 6919 #define NXGE_PCI_PORT_LOGIC_OFFSET 0x98 6920 #define NXGE_PIM_RESET (1ULL << 29) 6921 #define NXGE_GLU_RESET (1ULL << 30) 6922 #define NXGE_NIU_RESET (1ULL << 31) 6923 #define NXGE_PCI_RESET_ALL (NXGE_PIM_RESET | \ 6924 NXGE_GLU_RESET | \ 6925 NXGE_NIU_RESET) 6926 6927 #define NXGE_WAIT_QUITE_TIME 200000 6928 #define NXGE_WAIT_QUITE_RETRY 40 6929 #define NXGE_PCI_RESET_WAIT 1000000 /* one second */ 6930 6931 static void 6932 nxge_niu_peu_reset(p_nxge_t nxgep) 6933 { 6934 uint32_t rvalue; 6935 p_nxge_hw_list_t hw_p; 6936 p_nxge_t fnxgep; 6937 int i, j; 6938 6939 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset")); 6940 if ((hw_p = nxgep->nxge_hw_p) == NULL) { 6941 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6942 "==> nxge_niu_peu_reset: NULL hardware pointer")); 6943 return; 6944 } 6945 6946 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6947 "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d", 6948 hw_p->flags, nxgep->nxge_link_poll_timerid, 6949 nxgep->nxge_timerid)); 6950 6951 MUTEX_ENTER(&hw_p->nxge_cfg_lock); 6952 /* 6953 * Make sure other instances from the same hardware 6954 * stop sending PIO and in quiescent state. 6955 */ 6956 for (i = 0; i < NXGE_MAX_PORTS; i++) { 6957 fnxgep = hw_p->nxge_p[i]; 6958 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6959 "==> nxge_niu_peu_reset: checking entry %d " 6960 "nxgep $%p", i, fnxgep)); 6961 #ifdef NXGE_DEBUG 6962 if (fnxgep) { 6963 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6964 "==> nxge_niu_peu_reset: entry %d (function %d) " 6965 "link timer id %d hw timer id %d", 6966 i, fnxgep->function_num, 6967 fnxgep->nxge_link_poll_timerid, 6968 fnxgep->nxge_timerid)); 6969 } 6970 #endif 6971 if (fnxgep && fnxgep != nxgep && 6972 (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) { 6973 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6974 "==> nxge_niu_peu_reset: checking $%p " 6975 "(function %d) timer ids", 6976 fnxgep, fnxgep->function_num)); 6977 for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) { 6978 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6979 "==> nxge_niu_peu_reset: waiting")); 6980 NXGE_DELAY(NXGE_WAIT_QUITE_TIME); 6981 if (!fnxgep->nxge_timerid && 6982 !fnxgep->nxge_link_poll_timerid) { 6983 break; 6984 } 6985 } 6986 NXGE_DELAY(NXGE_WAIT_QUITE_TIME); 6987 if (fnxgep->nxge_timerid || 6988 fnxgep->nxge_link_poll_timerid) { 6989 MUTEX_EXIT(&hw_p->nxge_cfg_lock); 6990 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6991 "<== nxge_niu_peu_reset: cannot reset " 6992 "hardware (devices are still in use)")); 6993 return; 6994 } 6995 } 6996 } 6997 6998 if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) { 6999 hw_p->flags |= COMMON_RESET_NIU_PCI; 7000 rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh, 7001 NXGE_PCI_PORT_LOGIC_OFFSET); 7002 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 7003 "nxge_niu_peu_reset: read offset 0x%x (%d) " 7004 "(data 0x%x)", 7005 NXGE_PCI_PORT_LOGIC_OFFSET, 7006 NXGE_PCI_PORT_LOGIC_OFFSET, 7007 rvalue)); 7008 7009 rvalue |= NXGE_PCI_RESET_ALL; 7010 pci_config_put32(nxgep->dev_regs->nxge_pciregh, 7011 NXGE_PCI_PORT_LOGIC_OFFSET, rvalue); 7012 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 7013 "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x", 7014 rvalue)); 7015 7016 NXGE_DELAY(NXGE_PCI_RESET_WAIT); 7017 } 7018 7019 MUTEX_EXIT(&hw_p->nxge_cfg_lock); 7020 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset")); 7021 } 7022 7023 static void 7024 nxge_set_pci_replay_timeout(p_nxge_t nxgep) 7025 { 7026 p_dev_regs_t dev_regs; 7027 uint32_t value; 7028 7029 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout")); 7030 7031 if (!nxge_set_replay_timer) { 7032 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7033 "==> nxge_set_pci_replay_timeout: will not change " 7034 "the timeout")); 7035 return; 7036 } 7037 7038 dev_regs = nxgep->dev_regs; 7039 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7040 "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p", 7041 dev_regs, dev_regs->nxge_pciregh)); 7042 7043 if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) { 7044 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7045 "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or " 7046 "no PCI handle", 7047 dev_regs)); 7048 return; 7049 } 7050 value = (pci_config_get32(dev_regs->nxge_pciregh, 7051 PCI_REPLAY_TIMEOUT_CFG_OFFSET) | 7052 (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT)); 7053 7054 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7055 "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x " 7056 "(timeout value to set 0x%x at offset 0x%x) value 0x%x", 7057 pci_config_get32(dev_regs->nxge_pciregh, 7058 PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout, 7059 PCI_REPLAY_TIMEOUT_CFG_OFFSET, value)); 7060 7061 pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET, 7062 value); 7063 7064 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7065 "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x", 7066 pci_config_get32(dev_regs->nxge_pciregh, 7067 PCI_REPLAY_TIMEOUT_CFG_OFFSET))); 7068 7069 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout")); 7070 } 7071 7072 /* 7073 * quiesce(9E) entry point. 7074 * 7075 * This function is called when the system is single-threaded at high 7076 * PIL with preemption disabled. Therefore, this function must not be 7077 * blocked. 7078 * 7079 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 7080 * DDI_FAILURE indicates an error condition and should almost never happen. 7081 */ 7082 static int 7083 nxge_quiesce(dev_info_t *dip) 7084 { 7085 int instance = ddi_get_instance(dip); 7086 p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 7087 7088 if (nxgep == NULL) 7089 return (DDI_FAILURE); 7090 7091 /* Turn off debugging */ 7092 nxge_debug_level = NO_DEBUG; 7093 nxgep->nxge_debug_level = NO_DEBUG; 7094 npi_debug_level = NO_DEBUG; 7095 7096 /* 7097 * Stop link monitor only when linkchkmod is interrupt based 7098 */ 7099 if (nxgep->mac.linkchkmode == LINKCHK_INTR) { 7100 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 7101 } 7102 7103 (void) nxge_intr_hw_disable(nxgep); 7104 7105 /* 7106 * Reset the receive MAC side. 7107 */ 7108 (void) nxge_rx_mac_disable(nxgep); 7109 7110 /* Disable and soft reset the IPP */ 7111 if (!isLDOMguest(nxgep)) 7112 (void) nxge_ipp_disable(nxgep); 7113 7114 /* 7115 * Reset the transmit/receive DMA side. 7116 */ 7117 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 7118 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 7119 7120 /* 7121 * Reset the transmit MAC side. 7122 */ 7123 (void) nxge_tx_mac_disable(nxgep); 7124 7125 return (DDI_SUCCESS); 7126 } 7127