/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include	<sys/nxge/nxge_impl.h>
#include	<sys/nxge/nxge_hio.h>
#include	<sys/nxge/nxge_rxdma.h>
#include	<sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
/*
 * PSARC/2007/453 MSI-X interrupt limit override
 */
uint32_t nxge_msi_enable = 2;

/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed. If this flag is enabled, the
 * driver will reset the hardware when devices are being detached.
 */
uint32_t nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware checksum bugs that affect
 * packet transmission and receive:
 *
 * Usage of nxge_cksum_offload:
 *
 *  (1) nxge_cksum_offload = 0 (default):
 *	- transmits packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: driver will compute the software checksum
 *	       based on the partial checksum computed
 *	       by the IP layer.
 *	- receives packets:
 *	  TCP: marks packet checksum flags based on hardware result.
 *	  UDP: will not mark checksum flags.
 *
 *  (2) nxge_cksum_offload = 1:
 *	- transmits packets:
 *	  TCP/UDP: uses the hardware checksum feature.
 *	- receives packets:
 *	  TCP/UDP: marks packet checksum flags based on hardware result.
 *
 *  (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksums for both TCP and UDP will be computed
 *	  by the stack.
 *	- Software LSO is not allowed in this case.
 *
 *  (4) nxge_cksum_offload > 2:
 *	- Will be treated as if it were set to 2
 *	  (stack will compute the checksum).
 *
 *  (5) If the hardware bug is fixed, this workaround
 *	needs to be updated accordingly to reflect
 *	the new hardware revision.
 */
uint32_t nxge_cksum_offload = 0;
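/*
 * Example (a sketch, not part of the original source): like the other
 * tunables below, nxge_cksum_offload can be set in /etc/system using
 * the standard "set driver:variable" syntax, e.g. to offload both TCP
 * and UDP checksums to the hardware:
 *
 *	set nxge:nxge_cksum_offload = 1
 *
 * A reboot is required for /etc/system settings to take effect.
 */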
/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint16_t nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;
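/*
 * Example (a sketch, not part of the original source): any of the
 * tunables above can be overridden at boot time from /etc/system, or
 * patched in a running kernel with mdb -kw. For instance, to override
 * the receive block ring size:
 *
 *	set nxge:nxge_rbr_size = 0x1000
 *
 * The value shown is illustrative only; the driver validates the
 * ranges when the rings are configured.
 */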
/* MAX LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;


/*
 * Add tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_bcopy_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if defined(__sparc)
uint32_t nxge_use_kmem_alloc = 1;
#elif defined(__i386)
uint32_t nxge_use_kmem_alloc = 0;
#else
uint32_t nxge_use_kmem_alloc = 1;
#endif

rtrace_t npi_rtracebuf;

/*
 * The hardware sometimes fails to allow enough time for the link partner
 * to send an acknowledgement for packets that the hardware sent to it. The
 * hardware resends the packets earlier than it should in those instances.
 * This behavior caused some switches to acknowledge the wrong packets
 * and triggered a fatal error.
 * This software workaround sets the replay timer to a value
 * suggested by the hardware team.
 *
 * PCI config space replay timer register:
 * The replay timeout value below is 0xc
 * for the field at bits 18:14.
 */
#define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
#define	PCI_REPLAY_TIMEOUT_SHIFT	14

uint32_t nxge_set_replay_timer = 1;
uint32_t nxge_replay_timeout = 0xc;
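/*
 * A minimal sketch (an illustration, not the driver's actual routine,
 * which is nxge_set_pci_replay_timeout() further down in this file) of
 * how the 5-bit replay timeout field at bits 18:14 would be rewritten,
 * given a config space access handle "cfg_handle":
 *
 *	uint32_t val;
 *
 *	val = pci_config_get32(cfg_handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	val &= ~(0x1fU << PCI_REPLAY_TIMEOUT_SHIFT);
 *	val |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(cfg_handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, val);
 */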
/*
 * The transmit serialization sometimes causes
 * longer sleeps before calling the driver transmit
 * function than it should.
 * The performance group suggests that a time wait tunable
 * can be used to set the maximum wait time when needed;
 * the default is set to 1 tick.
 */
uint32_t nxge_tx_serial_maxsleep = 1;

#if	defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
/*
 * The following is the default API supported:
 * major 1 and minor 1.
 *
 * Please update the MAX_NIU_MAJORS,
 * MAX_NIU_MINORS, and minor number supported
 * when newer Hypervisor API interfaces
 * are added. Also, please update nxge_hsvc_register()
 * if needed.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);
static int nxge_quiesce(dev_info_t *);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_remove(void *arg, int slot);
extern void nxge_grp_cleanup(p_nxge_t nxge);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef	NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *,
	size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *,
	size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
	char *, caddr_t);
#if defined(sun4v)
extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep);
extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm);
#endif

/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	int slot, int rdctbl, boolean_t usetbl);
void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
	boolean_t factory);

static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *, uint_t *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
    const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
    void *, uint_t *);
static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);
static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
    mac_ring_info_t *, mac_ring_handle_t);
static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);
static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);

static void nxge_niu_peu_reset(p_nxge_t nxgep);
static void nxge_set_pci_replay_timeout(nxge_t *);

mac_priv_prop_t nxge_priv_props[] = {
	{"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW},
	{"_function_number", MAC_PROP_PERM_READ},
	{"_fw_version", MAC_PROP_PERM_READ},
	{"_port_mode", MAC_PROP_PERM_READ},
	{"_hot_swap_phy", MAC_PROP_PERM_READ},
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
	{"_soft_lso_enable", MAC_PROP_PERM_RW}
};

#define	NXGE_MAX_PRIV_PROPS	\
	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ 256

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	NULL,
	NULL,
	nxge_m_ioctl,
	nxge_m_getcapab,
	NULL,
	NULL,
	nxge_m_setprop,
	nxge_m_getprop
};

void
nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);
/*
 * For applications that care about latency, PAE and the customers
 * requested that the driver provide tunables that allow the user to
 * raise the number of interrupts in order to spread the interrupts
 * among multiple channels. The DDI framework limits the maximum
 * number of MSI-X resources to allocate to 8 (ddi_msix_alloc_limit).
 * If more than 8 are requested, ddi_msix_alloc_limit must be raised
 * accordingly. The default number of MSI interrupts is 8 for a 10G
 * link and 2 for a 1G link.
 */
#define	NXGE_MSIX_MAX_ALLOWED	32
uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
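/*
 * Example (a sketch, not part of the original source): to request 16
 * MSI-X interrupts on a 10G port, the DDI allocation limit and the
 * driver tunable would be raised together in /etc/system:
 *
 *	set ddi_msix_alloc_limit = 16
 *	set nxge:nxge_msix_10g_intrs = 16
 *
 * NXGE_MSIX_MAX_ALLOWED (32) is the ceiling the driver itself is
 * willing to request.
 */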
/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;
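/*
 * Note (an addition for clarity, not part of the original source):
 * nxge_debug_level is a bit mask checked by NXGE_DEBUG_MSG(); the
 * individual level bits (DDI_CTL, MEM_CTL, and so on) are defined in
 * the driver headers. Like the other globals above, it can be set from
 * /etc/system with a line of the form
 *
 *	set nxge:nxge_debug_level = <mask>
 *
 * where <mask> is an OR of the desired level bits.
 */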
/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;
void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;
nxge_os_mutex_t nxgedebuglock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t,
	ddi_device_acc_attr_t *,
	ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * dma chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes [] = {0x2000};
#else
size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
		0x10000, 0x20000, 0x40000, 0x80000,
		0x100000, 0x200000, 0x400000, 0x800000,
		0x1000000, 0x2000000, 0x4000000};
#endif

/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);

static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t	nxgep = NULL;
	int		instance;
	int		status = DDI_SUCCESS;
	uint8_t		portn;
	nxge_mmac_t	*mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}


	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_rx_dma_attr);

	/* Create & initialize the per-Neptune data structure */
	/* (even if we're a guest). */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	/*
	 * Software workaround: set the replay timer.
	 */
	if (nxgep->niu_type != N2_NIU) {
		nxge_set_pci_replay_timeout(nxgep);
	}

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail4;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
		 * internally, and the remaining 2 ports use BMAC (1G "Big"
		 * MAC). The two types of MACs have different characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);

		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * The address_hi, that is, the first int in the "reg"
		 * property, contains the config handle, but bits 28-31,
		 * which are OBP-specific info, need to be removed.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
		ddi_prop_free(regp);
	}

	/*
	 * Set the defaults for the MTU size.
	 */
	nxge_hw_id_init(nxgep);

	if (isLDOMguest(nxgep)) {
		uchar_t *prop_val;
		uint_t prop_len;
		uint32_t max_frame_size;

		extern void nxge_get_logical_props(p_nxge_t);

		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
		nxgep->mac.portmode = PORT_LOGICAL;
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
		    "phy-type", "virtual transceiver");

		nxgep->nports = 1;
		nxgep->board_ver = 0;	/* XXX What? */

		/*
		 * local-mac-address property gives us info on which
		 * specific MAC address the Hybrid resource is associated
		 * with.
		 */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "local-mac-address", &prop_val,
		    &prop_len) != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		if (prop_len != ETHERADDRL) {
			ddi_prop_free(prop_val);
			goto nxge_attach_fail5;
		}
		ether_copy(prop_val, nxgep->hio_mac_addr);
		ddi_prop_free(prop_val);
		nxge_get_logical_props(nxgep);

		/*
		 * Enable Jumbo property based on the "max-frame-size"
		 * property value.
		 */
		max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
		    nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "max-frame-size", NXGE_MTU_DEFAULT_MAX);
		if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
		    (max_frame_size <= TX_JUMBO_MTU)) {
			nxgep->mac.is_jumbo = B_TRUE;
			nxgep->mac.maxframesize = (uint16_t)max_frame_size;
			nxgep->mac.default_mtu = nxgep->mac.maxframesize -
			    NXGE_EHEADER_VLAN_CRC;
		}
	} else {
		status = nxge_xcvr_find(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
			    " Couldn't determine card type"
			    " .... exit "));
			goto nxge_attach_fail5;
		}

		status = nxge_get_config_properties(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "get_hw create failed"));
			goto nxge_attach_fail;
		}
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}


	if (!isLDOMguest(nxgep))
		nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		/* Find our VR & channel sets. */
		status = nxge_hio_vr_add(nxgep);
		if (status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_hio_vr_add failed"));
			(void) hsvc_unregister(&nxgep->niu_hsvc);
			nxgep->niu_hsvc_available = B_FALSE;
			goto nxge_attach_fail;
		}
		goto nxge_attach_exit;
	}
#endif

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}

	/* If a guest, register with vio_net instead. */
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	/* nxge_link_monitor calls xcvr.check_link recursively */
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		status = DDI_SUCCESS;
	int		instance;
	p_nxge_t	nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	/*
	 * If this flag is set, it will affect the Neptune
	 * only.
	 */
	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
		nxge_niu_peu_reset(nxgep);
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
int
nxge_hsvc_register(nxge_t *nxgep)
{
	nxge_status_t status;
	int i, j;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hsvc_register"));
	if (nxgep->niu_type != N2_NIU) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hsvc_register"));
		return (DDI_SUCCESS);
	}

	/*
	 * Currently, the NIU Hypervisor API supports two major versions:
	 * 1 and 2.
	 * If the Hypervisor introduces a higher major or minor version,
	 * please update NIU_MAJOR_HI and NIU_MINOR_HI accordingly.
	 */
	nxgep->niu_hsvc_available = B_FALSE;
	bcopy(&niu_hsvc, &nxgep->niu_hsvc,
	    sizeof (hsvc_info_t));

	for (i = NIU_MAJOR_HI; i > 0; i--) {
		nxgep->niu_hsvc.hsvc_major = i;
		for (j = NIU_MINOR_HI; j >= 0; j--) {
			nxgep->niu_hsvc.hsvc_minor = j;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_hsvc_register: %s: negotiating "
			    "hypervisor services revision %d "
			    "group: 0x%lx major: 0x%lx "
			    "minor: 0x%lx",
			    nxgep->niu_hsvc.hsvc_modname,
			    nxgep->niu_hsvc.hsvc_rev,
			    nxgep->niu_hsvc.hsvc_group,
			    nxgep->niu_hsvc.hsvc_major,
			    nxgep->niu_hsvc.hsvc_minor,
			    nxgep->niu_min_ver));

			if ((status = hsvc_register(&nxgep->niu_hsvc,
			    &nxgep->niu_min_ver)) == 0) {
				/* Use the supported minor */
				nxgep->niu_hsvc.hsvc_minor = nxgep->niu_min_ver;
				NXGE_DEBUG_MSG((nxgep, DDI_CTL,
				    "nxge_hsvc_register: %s: negotiated "
				    "hypervisor services revision %d "
				    "group: 0x%lx major: 0x%lx "
				    "minor: 0x%lx (niu_min_ver 0x%lx)",
				    nxgep->niu_hsvc.hsvc_modname,
				    nxgep->niu_hsvc.hsvc_rev,
				    nxgep->niu_hsvc.hsvc_group,
				    nxgep->niu_hsvc.hsvc_major,
				    nxgep->niu_hsvc.hsvc_minor,
				    nxgep->niu_min_ver));

				nxgep->niu_hsvc_available = B_TRUE;
				NXGE_DEBUG_MSG((nxgep, DDI_CTL,
				    "<== nxge_hsvc_register: "
				    "NIU Hypervisor service enabled"));
				return (DDI_SUCCESS);
			}

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_hsvc_register: %s: negotiation failed - "
			    "trying a lower major number: "
			    "hypervisor services revision %d "
			    "group: 0x%lx major: 0x%lx minor: 0x%lx "
			    "errno: %d",
			    nxgep->niu_hsvc.hsvc_modname,
			    nxgep->niu_hsvc.hsvc_rev,
			    nxgep->niu_hsvc.hsvc_group,
			    nxgep->niu_hsvc.hsvc_major,
			    nxgep->niu_hsvc.hsvc_minor, status));
		}
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "nxge_hsvc_register: %s: cannot negotiate "
	    "hypervisor services revision %d group: 0x%lx "
	    "major: 0x%lx minor: 0x%lx errno: %d",
	    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
	    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
	    niu_hsvc.hsvc_minor, status));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_hsvc_register: Register to NIU Hypervisor failed"));

	return (DDI_FAILURE);
}
#endif

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;
	char		buf[MAXPATHLEN + 1];
	char		*devname;
#ifdef	NXGE_DEBUG
	char		*sysname;
#endif
	off_t		regsize;
	nxge_status_t	status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t		pci_offset;
	uint16_t	pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	/*
	 * The driver is running on an N2-NIU system if devname is something
	 * like "/niu@80/network@0".
	 */
	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/*
		 * Get function number:
		 *  - N2/NIU: "/niu@80/network@0" and "/niu@80/network@1"
		 */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int		*prop_val;
		uint_t		prop_len;
		uint8_t		func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				return (NXGE_OK);
			}
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMP IMP
		 * workaround for a bit-swapping bug in HW
		 * which ends up with no-snoop = yes,
		 * resulting in DMA not being synched properly
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = pci_config_get16(dev_regs->nxge_pciregh,
		    pci_offset);
		pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));

	if (isLDOMguest(nxgep)) {
		nxge_guest_regs_map_free(nxgep);
		return;
	}

	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->interrupt_cookie = 0;
	} else {
		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
		    &nxgep->interrupt_cookie);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_setup_mutexes: failed 0x%x",
			    ddi_status));
			goto nxge_setup_mutexes_exit;
		}
	}

	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);

	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context
	 * because an FFLP operation can take a very long time to
	 * complete and hence is not suitable to invoke from interrupt
	 * handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 * enable the port, configure the dma channel bitmap,
		 * configure the max burst size).
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Enable the interrupts for DDI.
	 */
	nxge_intrs_enable(nxgep);

	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}


timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}
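/*
 * Usage note (an illustration, not part of the original source): the
 * pair above wraps timeout(9F)/untimeout(9F), converting milliseconds
 * to ticks and suppressing new timeouts while the instance is
 * suspended. A caller with a hypothetical callback would do:
 *
 *	nxgep->nxge_timerid = nxge_start_timer(nxgep, my_check_func, 1000);
 *	...
 *	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
 */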
void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Reset the receive MAC side.
		 */
		(void) nxge_rx_mac_disable(nxgep);

		/*
		 * Drain the IPP.
		 */
		(void) nxge_ipp_drain(nxgep);
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);


	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	/*
	 * Start the timer if the reset flag is not set.
	 * If the reset flag is set, the link monitor
	 * will not be started in order to stop further bus
	 * activity coming from this interface.
	 * The driver will start the monitor function
	 * if the interface is initialized again later.
	 */
	if (!nxge_peu_reset_enable) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t	reg;
	uint64_t	regdata;
	int		i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t	reg;
	uint64_t	buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
	reg = buf[0];

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) {
		/* In case a developer has changed nxge_debug_level. */
		if (nxgep->nxge_debug_level != nxge_debug_level)
			nxgep->nxge_debug_level = nxge_debug_level;
	}

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;
	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);

	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
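/*
 * Usage note (an illustration, not part of the original source):
 * nxge_dump_packet() formats up to MAX_DUMP_SZ bytes as colon-separated
 * hex into a static buffer, so it is not reentrant and the result must
 * be consumed before the next call. A typical debug invocation on a
 * received mblk might look like:
 *
 *	NXGE_DEBUG_MSG((nxgep, RX_CTL, "pkt: %s",
 *	    nxge_dump_packet((char *)mp->b_rptr,
 *	    (int)(mp->b_wptr - mp->b_rptr))));
 */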
#ifdef NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
    ddi_acc_handle_t cfg_handle;
    p_pci_cfg_t cfg_ptr;
    ddi_acc_handle_t dev_handle;
    char *dev_ptr;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

    dev_handle = nxgep->dev_regs->nxge_regh;
    dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

    if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
        cfg_handle = nxgep->dev_regs->nxge_pciregh;
        cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
            &cfg_ptr->vendorid));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "\tvendorid 0x%x devid 0x%x",
            NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
            NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "PCI BAR: base 0x%x base14 0x%x base18 0x%x "
            "base1c 0x%x",
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
            "base28 0x%x base2c 0x%x\n",
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "\nNeptune PCI BAR: base30 0x%x\n",
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "first 0x%llx second 0x%llx third 0x%llx "
            "last 0x%llx ",
            NXGE_PIO_READ64(dev_handle,
            (uint64_t *)(dev_ptr + 0), 0),
            NXGE_PIO_READ64(dev_handle,
            (uint64_t *)(dev_ptr + 8), 0),
            NXGE_PIO_READ64(dev_handle,
            (uint64_t *)(dev_ptr + 16), 0),
            NXGE_PIO_READ64(dev_handle,
            (uint64_t *)(dev_ptr + 24), 0)));
    }
}
#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

    nxge_intrs_disable(nxgep);
    nxge_destroy_dev(nxgep);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

    nxgep->suspended = DDI_RESUME;
    (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
    (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
    (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
    (void) nxge_rx_mac_enable(nxgep);
    (void) nxge_tx_mac_enable(nxgep);
    nxge_intrs_enable(nxgep);
    nxgep->suspended = 0;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "<== nxge_resume status = 0x%x", status));
    return (status);
}

static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
        nxgep->mac.portnum));

    status = nxge_link_init(nxgep);

    if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "port%d Bad register acc handle", nxgep->mac.portnum));
        status = NXGE_ERROR;
    }

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " nxge_setup_dev status "
            "(xcvr init 0x%08x)", status));
        goto nxge_setup_dev_exit;
    }

nxge_setup_dev_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "<== nxge_setup_dev port %d status = 0x%08x",
        nxgep->mac.portnum, status));

    return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

    (void) nxge_hw_stop(nxgep);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}
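/*
 * Illustrative examples of the page-size clamping done by
 * nxge_setup_system_dma_pages() below (the numbers are hypothetical):
 *
 *	ddi_ptob(1) = 8K, dvma_pagesize() = 4K   -> sys_page_sz = 4K
 *	ddi_ptob(1) = 64K = dvma_pagesize()      -> clamped to 16K (0x4000)
 *	dvma_pagesize() = 0 (no IOMMU info)      -> sys_page_sz unchanged
 *
 * The switch statement that follows then derives the receive block size
 * and RBR_BKSIZE code from the final sys_page_sz.
 */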
static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
    int ddi_status = DDI_SUCCESS;
    uint_t count;
    ddi_dma_cookie_t cookie;
    uint_t iommu_pagesize;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
    nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
    if (nxgep->niu_type != N2_NIU) {
        iommu_pagesize = dvma_pagesize(nxgep->dip);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
            " default_block_size %d iommu_pagesize %d",
            nxgep->sys_page_sz,
            ddi_ptob(nxgep->dip, (ulong_t)1),
            nxgep->rx_default_block_size,
            iommu_pagesize));

        if (iommu_pagesize != 0) {
            if (nxgep->sys_page_sz == iommu_pagesize) {
                if (iommu_pagesize > 0x4000)
                    nxgep->sys_page_sz = 0x4000;
            } else {
                if (nxgep->sys_page_sz > iommu_pagesize)
                    nxgep->sys_page_sz = iommu_pagesize;
            }
        }
    }
    nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
        "default_block_size %d page mask %d",
        nxgep->sys_page_sz,
        ddi_ptob(nxgep->dip, (ulong_t)1),
        nxgep->rx_default_block_size,
        nxgep->sys_page_mask));

    switch (nxgep->sys_page_sz) {
    default:
        nxgep->sys_page_sz = 0x1000;
        nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
        nxgep->rx_default_block_size = 0x1000;
        nxgep->rx_bksize_code = RBR_BKSIZE_4K;
        break;
    case 0x1000:
        nxgep->rx_default_block_size = 0x1000;
        nxgep->rx_bksize_code = RBR_BKSIZE_4K;
        break;
    case 0x2000:
        nxgep->rx_default_block_size = 0x2000;
        nxgep->rx_bksize_code = RBR_BKSIZE_8K;
        break;
    case 0x4000:
        nxgep->rx_default_block_size = 0x4000;
        nxgep->rx_bksize_code = RBR_BKSIZE_16K;
        break;
    case 0x8000:
        nxgep->rx_default_block_size = 0x8000;
        nxgep->rx_bksize_code = RBR_BKSIZE_32K;
        break;
    }

#ifndef USE_RX_BIG_BUF
    nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
    nxgep->rx_default_block_size = 0x2000;
    nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
    /*
     * Get the system DMA burst size by binding a spare handle
     * and querying it.
     */
    ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
        DDI_DMA_DONTWAIT, 0,
        &nxgep->dmasparehandle);
    if (ddi_status != DDI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "ddi_dma_alloc_handle: failed "
            " status 0x%x", ddi_status));
        goto nxge_setup_system_dma_pages_exit;
    }

    ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
        (caddr_t)nxgep->dmasparehandle,
        sizeof (nxgep->dmasparehandle),
        DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
        DDI_DMA_DONTWAIT, 0,
        &cookie, &count);
    if (ddi_status != DDI_DMA_MAPPED) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "Binding spare handle to find system"
            " burstsize failed."));
        ddi_status = DDI_FAILURE;
        goto nxge_setup_system_dma_pages_fail1;
    }

    nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
    (void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

nxge_setup_system_dma_pages_fail1:
    ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_setup_system_dma_pages_exit:
    if (ddi_status != DDI_SUCCESS)
        status |= (NXGE_ERROR | NXGE_DDI_FAILED);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "<== nxge_setup_system_dma_pages status = 0x%08x", status));
    return (status);
}

static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));

    status = nxge_alloc_rx_mem_pool(nxgep);
    if (status != NXGE_OK) {
        return (NXGE_ERROR);
    }

    status = nxge_alloc_tx_mem_pool(nxgep);
    if (status != NXGE_OK) {
        nxge_free_rx_mem_pool(nxgep);
        return (NXGE_ERROR);
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
    return (NXGE_OK);
}

static void
nxge_free_mem_pool(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));

    nxge_free_rx_mem_pool(nxgep);
    nxge_free_tx_mem_pool(nxgep);

    NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}
nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
{
    uint32_t rdc_max;
    p_nxge_dma_pt_cfg_t p_all_cfgp;
    p_nxge_hw_pt_cfg_t p_cfgp;
    p_nxge_dma_pool_t dma_poolp;
    p_nxge_dma_common_t *dma_buf_p;
    p_nxge_dma_pool_t dma_cntl_poolp;
    p_nxge_dma_common_t *dma_cntl_p;
    uint32_t *num_chunks;	/* per dma */
    nxge_status_t status = NXGE_OK;

    uint32_t nxge_port_rbr_size;
    uint32_t nxge_port_rbr_spare_size;
    uint32_t nxge_port_rcr_size;
    uint32_t rx_cntl_alloc_size;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));

    p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
    p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
    rdc_max = NXGE_MAX_RDCS;

    /*
     * Allocate memory for the common DMA data structures.
     */
    dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
        KM_SLEEP);
    dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
        sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);

    dma_cntl_poolp = (p_nxge_dma_pool_t)
        KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
    dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
        sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);

    num_chunks = (uint32_t *)KMEM_ZALLOC(
        sizeof (uint32_t) * rdc_max, KM_SLEEP);

    /*
     * Assume that each DMA channel will be configured with
     * the default block size.
     * RBR block counts must be a multiple of the batch count (16),
     * so round them up if necessary.
     */
    nxge_port_rbr_size = p_all_cfgp->rbr_size;
    nxge_port_rcr_size = p_all_cfgp->rcr_size;

    if (!nxge_port_rbr_size) {
        nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
    }
    if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
        nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
            (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
    }

    p_all_cfgp->rbr_size = nxge_port_rbr_size;
    nxge_port_rbr_spare_size = nxge_rbr_spare_size;

    if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
        nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
            (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
    }
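    /*
     * Worked example of the round-up above (hypothetical numbers,
     * assuming NXGE_RXDMA_POST_BATCH == 16): a requested RBR size of
     * 1000 is not a multiple of 16 (1000 % 16 == 8), so it becomes
     * 16 * (1000 / 16 + 1) == 16 * 63 == 1008.  A size that is already
     * a multiple of 16 is left untouched.
     */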
    if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
        NXGE_DEBUG_MSG((nxgep, MEM_CTL,
            "nxge_alloc_rx_mem_pool: RBR size too high %d, "
            "set to default %d",
            nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
        nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
    }
    if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
        NXGE_DEBUG_MSG((nxgep, MEM_CTL,
            "nxge_alloc_rx_mem_pool: RCR size too high %d, "
            "set to default %d",
            nxge_port_rcr_size, RCR_DEFAULT_MAX));
        nxge_port_rcr_size = RCR_DEFAULT_MAX;
    }

    /*
     * N2/NIU has limitations on the descriptor sizes: contiguous
     * memory allocation on data buffers is limited to 4M
     * (contig_mem_alloc), and control buffers must be little endian
     * and allocated with the ddi/dki mem alloc functions.
     */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
    if (nxgep->niu_type == N2_NIU) {
        nxge_port_rbr_spare_size = 0;
        if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
            (!ISP2(nxge_port_rbr_size))) {
            nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
        }
        if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
            (!ISP2(nxge_port_rcr_size))) {
            nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
        }
    }
#endif

    /*
     * Addresses of the receive block ring, the receive completion ring
     * and the mailbox must all be cache-aligned (64 bytes).
     */
    rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
    rx_cntl_alloc_size *= (sizeof (rx_desc_t));
    rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
    rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
        "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
        "nxge_port_rcr_size = %d "
        "rx_cntl_alloc_size = %d",
        nxge_port_rbr_size, nxge_port_rbr_spare_size,
        nxge_port_rcr_size,
        rx_cntl_alloc_size));
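    /*
     * Sizing sketch for the single control-area allocation above
     * (illustrative only; the descriptor sizes are assumptions, not
     * guarantees): assuming sizeof (rx_desc_t) == 4 and
     * sizeof (rcr_entry_t) == 8, an RBR of 1008 + 0 spare entries and
     * an RCR of 1024 entries need 1008 * 4 + 1024 * 8 +
     * sizeof (rxdma_mailbox_t) bytes, roughly 12 KB.  One allocation
     * serves all three structures, which is why the 64-byte alignment
     * requirement is stated for each of them.
     */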
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
    if (nxgep->niu_type == N2_NIU) {
        uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size *
            (nxge_port_rbr_size + nxge_port_rbr_spare_size));

        if (!ISP2(rx_buf_alloc_size)) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_alloc_rx_mem_pool: "
                " must be power of 2"));
            status |= (NXGE_ERROR | NXGE_DDI_FAILED);
            goto nxge_alloc_rx_mem_pool_exit;
        }

        if (rx_buf_alloc_size > (1 << 22)) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_alloc_rx_mem_pool: "
                " limit size to 4M"));
            status |= (NXGE_ERROR | NXGE_DDI_FAILED);
            goto nxge_alloc_rx_mem_pool_exit;
        }

        if (rx_cntl_alloc_size < 0x2000) {
            rx_cntl_alloc_size = 0x2000;
        }
    }
#endif
    nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
    nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
    nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size;
    nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size;

    dma_poolp->ndmas = p_cfgp->max_rdcs;
    dma_poolp->num_chunks = num_chunks;
    dma_poolp->buf_allocated = B_TRUE;
    nxgep->rx_buf_pool_p = dma_poolp;
    dma_poolp->dma_buf_pool_p = dma_buf_p;

    dma_cntl_poolp->ndmas = p_cfgp->max_rdcs;
    dma_cntl_poolp->buf_allocated = B_TRUE;
    nxgep->rx_cntl_pool_p = dma_cntl_poolp;
    dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;

    /* Allocate the receive rings, too. */
    nxgep->rx_rbr_rings =
        KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
    nxgep->rx_rbr_rings->rbr_rings =
        KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP);
    nxgep->rx_rcr_rings =
        KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
    nxgep->rx_rcr_rings->rcr_rings =
        KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP);
    nxgep->rx_mbox_areas_p =
        KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
    nxgep->rx_mbox_areas_p->rxmbox_areas =
        KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP);

    nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas =
        p_cfgp->max_rdcs;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "<== nxge_alloc_rx_mem_pool:status 0x%08x", status));

nxge_alloc_rx_mem_pool_exit:
    return (status);
}

/*
 * nxge_alloc_rxb
 *
 *	Allocate buffers for an RDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map into our kernel space.
 *
 * Notes:
 *
 * NPI function calls:
 *
 * NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 *
 * Taking apart:
 *
 * Open questions:
 *
 */
nxge_status_t
nxge_alloc_rxb(
	p_nxge_t nxgep,
	int channel)
{
    size_t rx_buf_alloc_size;
    nxge_status_t status = NXGE_OK;

    nxge_dma_common_t **data;
    nxge_dma_common_t **control;
    uint32_t *num_chunks;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb"));

    /*
     * Allocate memory for the receive buffers and descriptor rings.
     * Replace these allocation functions with the interface functions
     * provided by the partition manager if/when they are available.
     */

    /*
     * Allocate memory for the receive buffer blocks.
     */
    rx_buf_alloc_size = (nxgep->rx_default_block_size *
        (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));

    data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
    num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];

    if ((status = nxge_alloc_rx_buf_dma(
        nxgep, channel, data, rx_buf_alloc_size,
        nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
        return (status);
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): "
        "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));

    /*
     * Allocate memory for descriptor rings and mailbox.
     */
    control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];

    if ((status = nxge_alloc_rx_cntl_dma(
        nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size))
        != NXGE_OK) {
        nxge_free_rx_cntl_dma(nxgep, *control);
        (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE;
        nxge_free_rx_buf_dma(nxgep, *data, *num_chunks);
        return (status);
    }

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "<== nxge_alloc_rxb:status 0x%08x", status));

    return (status);
}
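/*
 * Buffer-pool sizing example for nxge_alloc_rxb() (hypothetical numbers):
 * with the default 4 KB receive block size, an RBR of 1024 entries and no
 * spare entries, rx_buf_alloc_size is 4096 * 1024 = 4 MB, which is exactly
 * the contiguous-allocation ceiling enforced for N2/NIU in
 * nxge_alloc_rx_mem_pool() above.  Larger totals on Neptune are handled by
 * splitting the request into chunks; see nxge_alloc_rx_buf_dma() below.
 */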
void
nxge_free_rxb(
	p_nxge_t nxgep,
	int channel)
{
    nxge_dma_common_t *data;
    nxge_dma_common_t *control;
    uint32_t num_chunks;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb"));

    data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
    num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
    nxge_free_rx_buf_dma(nxgep, data, num_chunks);

    nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0;
    nxgep->rx_buf_pool_p->num_chunks[channel] = 0;

    control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
    nxge_free_rx_cntl_dma(nxgep, control);

    nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0;

    KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
    KMEM_FREE(control, sizeof (nxge_dma_common_t));

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb"));
}

static void
nxge_free_rx_mem_pool(p_nxge_t nxgep)
{
    int rdc_max = NXGE_MAX_RDCS;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));

    if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "<== nxge_free_rx_mem_pool "
            "(null rx buf pool or buf not allocated)"));
        return;
    }
    if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "<== nxge_free_rx_mem_pool "
            "(null rx cntl buf pool or cntl buf not allocated)"));
        return;
    }

    KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p,
        sizeof (p_nxge_dma_common_t) * rdc_max);
    KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t));

    KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks,
        sizeof (uint32_t) * rdc_max);
    KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p,
        sizeof (p_nxge_dma_common_t) * rdc_max);
    KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t));

    nxgep->rx_buf_pool_p = 0;
    nxgep->rx_cntl_pool_p = 0;

    KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings,
        sizeof (p_rx_rbr_ring_t) * rdc_max);
    KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t));
    KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings,
        sizeof (p_rx_rcr_ring_t) * rdc_max);
    KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t));
    KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas,
        sizeof (p_rx_mbox_t) * rdc_max);
    KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t));

    nxgep->rx_rbr_rings = 0;
    nxgep->rx_rcr_rings = 0;
    nxgep->rx_mbox_areas_p = 0;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
}

static nxge_status_t
nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap,
    size_t alloc_size, size_t block_size, uint32_t *num_chunks)
{
    p_nxge_dma_common_t rx_dmap;
    nxge_status_t status = NXGE_OK;
    size_t total_alloc_size;
    size_t allocated = 0;
    int i, size_index, array_size;
    boolean_t use_kmem_alloc = B_FALSE;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));

    rx_dmap = (p_nxge_dma_common_t)
        KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
        KM_SLEEP);

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
        dma_channel, alloc_size, block_size, dmap));

    total_alloc_size = alloc_size;

#if defined(RX_USE_RECLAIM_POST)
    total_alloc_size = alloc_size + alloc_size/4;
#endif

    i = 0;
    size_index = 0;
    array_size = sizeof (alloc_sizes)/sizeof (size_t);
    while ((size_index < array_size) &&
        (alloc_sizes[size_index] < alloc_size))
        size_index++;
    if (size_index >= array_size) {
        size_index = array_size - 1;
    }

    /* For Neptune, use kmem_alloc if the kmem flag is set. */
    if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) {
        use_kmem_alloc = B_TRUE;
#if defined(__i386) || defined(__amd64)
        size_index = 0;
#endif
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "==> nxge_alloc_rx_buf_dma: "
            "Neptune use kmem_alloc() - size_index %d",
            size_index));
    }
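    /*
     * Chunking strategy, sketched with hypothetical numbers: the loop
     * below first tries the smallest entry of alloc_sizes[] that can
     * hold the whole request in one chunk.  If that allocation fails,
     * size_index is decremented and the request is satisfied with
     * several smaller chunks instead, up to NXGE_DMA_BLOCK of them.
     * For example, if 4 MB is requested and a single 4 MB allocation
     * fails, the driver would fall back to four 1 MB chunks (assuming
     * 1 MB is the next smaller entry in alloc_sizes[]).
     */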
    while ((allocated < total_alloc_size) &&
        (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
        rx_dmap[i].dma_chunk_index = i;
        rx_dmap[i].block_size = block_size;
        rx_dmap[i].alength = alloc_sizes[size_index];
        rx_dmap[i].orig_alength = rx_dmap[i].alength;
        rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
        rx_dmap[i].dma_channel = dma_channel;
        rx_dmap[i].contig_alloc_type = B_FALSE;
        rx_dmap[i].kmem_alloc_type = B_FALSE;
        rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC;

        /*
         * N2/NIU: data buffers must be contiguous as the driver
         * needs to call the Hypervisor API to set up
         * logical pages.
         */
        if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
            rx_dmap[i].contig_alloc_type = B_TRUE;
            rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC;
        } else if (use_kmem_alloc) {
            /* For Neptune, use kmem_alloc */
            NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
                "==> nxge_alloc_rx_buf_dma: "
                "Neptune use kmem_alloc()"));
            rx_dmap[i].kmem_alloc_type = B_TRUE;
            rx_dmap[i].buf_alloc_type = KMEM_ALLOC;
        }

        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
            "i %d nblocks %d alength %d",
            dma_channel, i, &rx_dmap[i], block_size,
            i, rx_dmap[i].nblocks,
            rx_dmap[i].alength));
        status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
            &nxge_rx_dma_attr,
            rx_dmap[i].alength,
            &nxge_dev_buf_dma_acc_attr,
            DDI_DMA_READ | DDI_DMA_STREAMING,
            (p_nxge_dma_common_t)(&rx_dmap[i]));
        if (status != NXGE_OK) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "nxge_alloc_rx_buf_dma: Alloc Failed: "
                "dma %d size_index %d size requested %d",
                dma_channel,
                size_index,
                rx_dmap[i].alength));
            size_index--;
        } else {
            rx_dmap[i].buf_alloc_state = BUF_ALLOCATED;
            NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
                " nxge_alloc_rx_buf_dma DONE alloc mem: "
                "dma %d dma_buf_p $%p kaddrp $%p alength %d "
                "buf_alloc_state %d alloc_type %d",
                dma_channel,
                &rx_dmap[i],
                rx_dmap[i].kaddrp,
                rx_dmap[i].alength,
                rx_dmap[i].buf_alloc_state,
                rx_dmap[i].buf_alloc_type));
            NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
                " alloc_rx_buf_dma allocated rdc %d "
                "chunk %d size %x dvma %x bufp %llx kaddrp $%p",
                dma_channel, i, rx_dmap[i].alength,
                rx_dmap[i].ioaddr_pp, &rx_dmap[i],
                rx_dmap[i].kaddrp));
            i++;
            allocated += alloc_sizes[size_index];
        }
    }

    if (allocated < total_alloc_size) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
            "allocated 0x%x requested 0x%x",
            dma_channel,
            allocated, total_alloc_size));
        status = NXGE_ERROR;
        goto nxge_alloc_rx_mem_fail1;
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
        "allocated 0x%x requested 0x%x",
        dma_channel,
        allocated, total_alloc_size));

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        " alloc_rx_buf_dma rdc %d allocated %d chunks",
        dma_channel, i));
    *num_chunks = i;
    *dmap = rx_dmap;

    goto nxge_alloc_rx_mem_exit;

nxge_alloc_rx_mem_fail1:
    KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);

nxge_alloc_rx_mem_exit:
    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "<== nxge_alloc_rx_buf_dma status 0x%08x", status));

    return (status);
}
/*ARGSUSED*/
static void
nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
    uint32_t num_chunks)
{
    int i;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));

    if (dmap == 0)
        return;

    for (i = 0; i < num_chunks; i++) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
            i, dmap));
        nxge_dma_free_rx_data_buf(dmap++);
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma"));
}

/*ARGSUSED*/
static nxge_status_t
nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap, size_t size)
{
    p_nxge_dma_common_t rx_dmap;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));

    rx_dmap = (p_nxge_dma_common_t)
        KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);

    rx_dmap->contig_alloc_type = B_FALSE;
    rx_dmap->kmem_alloc_type = B_FALSE;

    status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
        &nxge_desc_dma_attr,
        size,
        &nxge_dev_desc_dma_acc_attr,
        DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
        rx_dmap);
    if (status != NXGE_OK) {
        goto nxge_alloc_rx_cntl_dma_fail1;
    }

    *dmap = rx_dmap;
    goto nxge_alloc_rx_cntl_dma_exit;

nxge_alloc_rx_cntl_dma_fail1:
    KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));

nxge_alloc_rx_cntl_dma_exit:
    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));

    return (status);
}

/*ARGSUSED*/
static void
nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
{
    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));

    if (dmap == 0)
        return;

    nxge_dma_mem_free(dmap);

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
}

typedef struct {
    size_t tx_size;
    size_t cr_size;
    size_t threshhold;
} nxge_tdc_sizes_t;

static
nxge_status_t
nxge_tdc_sizes(
	nxge_t *nxgep,
	nxge_tdc_sizes_t *sizes)
{
    uint32_t threshhold;	/* The bcopy() threshhold */
    size_t tx_size;		/* Transmit buffer size */
    size_t cr_size;		/* Completion ring size */

    /*
     * Assume that each DMA channel will be configured with the
     * default transmit buffer size for copying transmit data.
     * (If a packet is bigger than this, it will not be copied.)
     */
    if (nxgep->niu_type == N2_NIU) {
        threshhold = TX_BCOPY_SIZE;
    } else {
        threshhold = nxge_bcopy_thresh;
    }
    tx_size = nxge_tx_ring_size * threshhold;

    cr_size = nxge_tx_ring_size * sizeof (tx_desc_t);
    cr_size += sizeof (txdma_mailbox_t);

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
    if (nxgep->niu_type == N2_NIU) {
        if (!ISP2(tx_size)) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_tdc_sizes: Tx size"
                " must be power of 2"));
            return (NXGE_ERROR);
        }

        if (tx_size > (1 << 22)) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_tdc_sizes: Tx size"
                " limited to 4M"));
            return (NXGE_ERROR);
        }

        if (cr_size < 0x2000)
            cr_size = 0x2000;
    }
#endif

    sizes->threshhold = threshhold;
    sizes->tx_size = tx_size;
    sizes->cr_size = cr_size;

    return (NXGE_OK);
}
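/*
 * Worked example for nxge_tdc_sizes() (hypothetical numbers): with a
 * transmit ring of 1024 descriptors and a bcopy threshold of 2048 bytes,
 * tx_size is 1024 * 2048 = 2 MB of pre-mapped copy buffers, and cr_size
 * is 1024 * sizeof (tx_desc_t) plus the mailbox.  Both satisfy the
 * N2/NIU constraints checked above: 2 MB is a power of two and is below
 * the 4 MB contiguous-allocation ceiling.
 */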
/*
 * nxge_alloc_txb
 *
 *	Allocate buffers for a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map into our kernel space.
 *
 * Notes:
 *
 * NPI function calls:
 *
 * NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 *
 * Taking apart:
 *
 * Open questions:
 *
 */
nxge_status_t
nxge_alloc_txb(
	p_nxge_t nxgep,
	int channel)
{
    nxge_dma_common_t **dma_buf_p;
    nxge_dma_common_t **dma_cntl_p;
    uint32_t *num_chunks;
    nxge_status_t status = NXGE_OK;

    nxge_tdc_sizes_t sizes;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb"));

    if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK)
        return (NXGE_ERROR);

    /*
     * Allocate memory for transmit buffers and descriptor rings.
     * Replace these allocation functions with the interface functions
     * provided by the partition manager Real Soon Now.
     */
    dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
    num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel];

    dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];

    /*
     * Allocate memory for the transmit buffer pool.
     */
    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "sizes: tx: %ld, cr:%ld, th:%ld",
        sizes.tx_size, sizes.cr_size, sizes.threshhold));

    *num_chunks = 0;
    status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p,
        sizes.tx_size, sizes.threshhold, num_chunks);
    if (status != NXGE_OK) {
        cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!");
        return (status);
    }

    /*
     * Allocate memory for descriptor rings and mailbox.
     */
    status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p,
        sizes.cr_size);
    if (status != NXGE_OK) {
        nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks);
        cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!");
        return (status);
    }

    return (NXGE_OK);
}
void
nxge_free_txb(
	p_nxge_t nxgep,
	int channel)
{
    nxge_dma_common_t *data;
    nxge_dma_common_t *control;
    uint32_t num_chunks;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb"));

    data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
    num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
    nxge_free_tx_buf_dma(nxgep, data, num_chunks);

    nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0;
    nxgep->tx_buf_pool_p->num_chunks[channel] = 0;

    control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
    nxge_free_tx_cntl_dma(nxgep, control);

    nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0;

    KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
    KMEM_FREE(control, sizeof (nxge_dma_common_t));

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb"));
}

/*
 * nxge_alloc_tx_mem_pool
 *
 *	This function allocates all of the per-port TDC control data
 *	structures.  The per-channel (TDC) data structures are allocated
 *	when needed.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
{
    nxge_hw_pt_cfg_t *p_cfgp;
    nxge_dma_pool_t *dma_poolp;
    nxge_dma_common_t **dma_buf_p;
    nxge_dma_pool_t *dma_cntl_poolp;
    nxge_dma_common_t **dma_cntl_p;
    uint32_t *num_chunks;	/* per dma */
    int tdc_max;

    NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));

    p_cfgp = &nxgep->pt_config.hw_config;
    tdc_max = NXGE_MAX_TDCS;

    /*
     * Allocate memory for each transmit DMA channel.
     */
    dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
        KM_SLEEP);
    dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
        sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);

    dma_cntl_poolp = (p_nxge_dma_pool_t)
        KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
    dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
        sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);

    if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
        NXGE_DEBUG_MSG((nxgep, MEM_CTL,
            "nxge_alloc_tx_mem_pool: TDC ring size too high %d, "
            "set to default %d",
            nxge_tx_ring_size, TDC_DEFAULT_MAX));
        nxge_tx_ring_size = TDC_DEFAULT_MAX;
    }

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
    /*
     * N2/NIU has limitations on the descriptor sizes: contiguous
     * memory allocation on data buffers is limited to 4M
     * (contig_mem_alloc), and control buffers must be little endian
     * and allocated with the ddi/dki mem alloc functions.  The
     * transmit ring is limited to 8K (including the mailbox).
     */
    if (nxgep->niu_type == N2_NIU) {
        if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
            (!ISP2(nxge_tx_ring_size))) {
            nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
        }
    }
#endif

    nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;

    num_chunks = (uint32_t *)KMEM_ZALLOC(
        sizeof (uint32_t) * tdc_max, KM_SLEEP);

    dma_poolp->ndmas = p_cfgp->tdc.owned;
    dma_poolp->num_chunks = num_chunks;
    dma_poolp->dma_buf_pool_p = dma_buf_p;
    nxgep->tx_buf_pool_p = dma_poolp;

    dma_poolp->buf_allocated = B_TRUE;

    dma_cntl_poolp->ndmas = p_cfgp->tdc.owned;
    dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
    nxgep->tx_cntl_pool_p = dma_cntl_poolp;

    dma_cntl_poolp->buf_allocated = B_TRUE;

    nxgep->tx_rings =
        KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
    nxgep->tx_rings->rings =
        KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP);
    nxgep->tx_mbox_areas_p =
        KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
    nxgep->tx_mbox_areas_p->txmbox_areas_p =
        KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP);

    nxgep->tx_rings->ndmas = p_cfgp->tdc.owned;

    NXGE_DEBUG_MSG((nxgep, MEM_CTL,
        "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d",
        tdc_max, dma_poolp->ndmas));

    return (NXGE_OK);
}
nxge_status_t
nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap, size_t alloc_size,
    size_t block_size, uint32_t *num_chunks)
{
    p_nxge_dma_common_t tx_dmap;
    nxge_status_t status = NXGE_OK;
    size_t total_alloc_size;
    size_t allocated = 0;
    int i, size_index, array_size;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));

    tx_dmap = (p_nxge_dma_common_t)
        KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
        KM_SLEEP);

    total_alloc_size = alloc_size;
    i = 0;
    size_index = 0;
    array_size = sizeof (alloc_sizes) / sizeof (size_t);
    while ((size_index < array_size) &&
        (alloc_sizes[size_index] < alloc_size))
        size_index++;
    if (size_index >= array_size) {
        size_index = array_size - 1;
    }

    while ((allocated < total_alloc_size) &&
        (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {

        tx_dmap[i].dma_chunk_index = i;
        tx_dmap[i].block_size = block_size;
        tx_dmap[i].alength = alloc_sizes[size_index];
        tx_dmap[i].orig_alength = tx_dmap[i].alength;
        tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
        tx_dmap[i].dma_channel = dma_channel;
        tx_dmap[i].contig_alloc_type = B_FALSE;
        tx_dmap[i].kmem_alloc_type = B_FALSE;

        /*
         * N2/NIU: data buffers must be contiguous as the driver
         * needs to call the Hypervisor API to set up
         * logical pages.
         */
        if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
            tx_dmap[i].contig_alloc_type = B_TRUE;
        }

        status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
            &nxge_tx_dma_attr,
            tx_dmap[i].alength,
            &nxge_dev_buf_dma_acc_attr,
            DDI_DMA_WRITE | DDI_DMA_STREAMING,
            (p_nxge_dma_common_t)(&tx_dmap[i]));
        if (status != NXGE_OK) {
            size_index--;
        } else {
            i++;
            allocated += alloc_sizes[size_index];
        }
    }

    if (allocated < total_alloc_size) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_alloc_tx_buf_dma: not enough channel %d: "
            "allocated 0x%x requested 0x%x",
            dma_channel,
            allocated, total_alloc_size));
        status = NXGE_ERROR;
        goto nxge_alloc_tx_mem_fail1;
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
        "allocated 0x%x requested 0x%x",
        dma_channel,
        allocated, total_alloc_size));

    *num_chunks = i;
    *dmap = tx_dmap;
    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
        *dmap, i));
    goto nxge_alloc_tx_mem_exit;

nxge_alloc_tx_mem_fail1:
    KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);

nxge_alloc_tx_mem_exit:
    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "<== nxge_alloc_tx_buf_dma status 0x%08x", status));

    return (status);
}
/*ARGSUSED*/
static void
nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
    uint32_t num_chunks)
{
    int i;

    NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));

    if (dmap == 0)
        return;

    for (i = 0; i < num_chunks; i++) {
        nxge_dma_mem_free(dmap++);
    }

    NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
}

/*ARGSUSED*/
nxge_status_t
nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap, size_t size)
{
    p_nxge_dma_common_t tx_dmap;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
    tx_dmap = (p_nxge_dma_common_t)
        KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);

    tx_dmap->contig_alloc_type = B_FALSE;
    tx_dmap->kmem_alloc_type = B_FALSE;

    status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
        &nxge_desc_dma_attr,
        size,
        &nxge_dev_desc_dma_acc_attr,
        DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
        tx_dmap);
    if (status != NXGE_OK) {
        goto nxge_alloc_tx_cntl_dma_fail1;
    }

    *dmap = tx_dmap;
    goto nxge_alloc_tx_cntl_dma_exit;

nxge_alloc_tx_cntl_dma_fail1:
    KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));

nxge_alloc_tx_cntl_dma_exit:
    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "<== nxge_alloc_tx_cntl_dma status 0x%08x", status));

    return (status);
}

/*ARGSUSED*/
static void
nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
{
    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));

    if (dmap == 0)
        return;

    nxge_dma_mem_free(dmap);

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
}

/*
 * nxge_free_tx_mem_pool
 *
 *	This function frees all of the per-port TDC control data structures.
 *	The per-channel (TDC) data structures are freed when the channel
 *	is stopped.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
static void
nxge_free_tx_mem_pool(p_nxge_t nxgep)
{
    int tdc_max = NXGE_MAX_TDCS;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool"));

    if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "<== nxge_free_tx_mem_pool "
            "(null tx buf pool or buf not allocated)"));
        return;
    }
    if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "<== nxge_free_tx_mem_pool "
            "(null tx cntl buf pool or cntl buf not allocated)"));
        return;
    }

    /* 1. Free the mailboxes. */
    KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p,
        sizeof (p_tx_mbox_t) * tdc_max);
    KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t));

    nxgep->tx_mbox_areas_p = 0;

    /* 2. Free the transmit ring arrays. */
    KMEM_FREE(nxgep->tx_rings->rings,
        sizeof (p_tx_ring_t) * tdc_max);
    KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t));

    nxgep->tx_rings = 0;

    /* 3. Free the completion ring data structures. */
    KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p,
        sizeof (p_nxge_dma_common_t) * tdc_max);
    KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t));

    nxgep->tx_cntl_pool_p = 0;

    /* 4. Free the data ring data structures. */
    KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks,
        sizeof (uint32_t) * tdc_max);
    KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p,
        sizeof (p_nxge_dma_common_t) * tdc_max);
    KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t));

    nxgep->tx_buf_pool_p = 0;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool"));
}
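/*
 * Summary of the allocation strategies dispatched by nxge_dma_mem_alloc()
 * below (descriptive only; the code itself is authoritative):
 *
 *	contig_alloc_type  kmem_alloc_type	backing allocator
 *	B_FALSE            B_FALSE		ddi_dma_mem_alloc()
 *	B_FALSE            B_TRUE		kmem_alloc()
 *	B_TRUE             (ignored)		contig_mem_alloc(),
 *						N2/NIU (sun4v) only
 *
 * In every case the buffer is then bound to a DMA handle and must map to
 * exactly one DMA cookie; multi-cookie bindings are unwound and treated
 * as failures.
 */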
/*ARGSUSED*/
static nxge_status_t
nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
    struct ddi_dma_attr *dma_attrp,
    size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
    p_nxge_dma_common_t dma_p)
{
    caddr_t kaddrp;
    int ddi_status = DDI_SUCCESS;
    boolean_t contig_alloc_type;
    boolean_t kmem_alloc_type;

    contig_alloc_type = dma_p->contig_alloc_type;

    if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
        /*
         * contig_alloc_type for contiguous memory is allowed
         * only for N2/NIU.
         */
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_dma_mem_alloc: alloc type not allowed (%d)",
            dma_p->contig_alloc_type));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    dma_p->dma_handle = NULL;
    dma_p->acc_handle = NULL;
    dma_p->kaddrp = dma_p->last_kaddrp = NULL;
    dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
    ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
        DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
    if (ddi_status != DDI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    kmem_alloc_type = dma_p->kmem_alloc_type;

    switch (contig_alloc_type) {
    case B_FALSE:
        switch (kmem_alloc_type) {
        case B_FALSE:
            ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle,
                length,
                acc_attr_p,
                xfer_flags,
                DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
                &dma_p->acc_handle);
            if (ddi_status != DDI_SUCCESS) {
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "nxge_dma_mem_alloc: "
                    "ddi_dma_mem_alloc failed"));
                ddi_dma_free_handle(&dma_p->dma_handle);
                dma_p->dma_handle = NULL;
                return (NXGE_ERROR | NXGE_DDI_FAILED);
            }
            if (dma_p->alength < length) {
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
                    "< length."));
                ddi_dma_mem_free(&dma_p->acc_handle);
                ddi_dma_free_handle(&dma_p->dma_handle);
                dma_p->acc_handle = NULL;
                dma_p->dma_handle = NULL;
                return (NXGE_ERROR);
            }

            ddi_status = ddi_dma_addr_bind_handle(
                dma_p->dma_handle, NULL,
                kaddrp, dma_p->alength, xfer_flags,
                DDI_DMA_DONTWAIT,
                0, &dma_p->dma_cookie, &dma_p->ncookies);
            if (ddi_status != DDI_DMA_MAPPED) {
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "nxge_dma_mem_alloc: ddi_dma_addr_bind "
                    "failed "
                    "(status 0x%x ncookies %d.)", ddi_status,
                    dma_p->ncookies));
                if (dma_p->acc_handle) {
                    ddi_dma_mem_free(&dma_p->acc_handle);
                    dma_p->acc_handle = NULL;
                }
                ddi_dma_free_handle(&dma_p->dma_handle);
                dma_p->dma_handle = NULL;
                return (NXGE_ERROR | NXGE_DDI_FAILED);
            }

            if (dma_p->ncookies != 1) {
                NXGE_DEBUG_MSG((nxgep, DMA_CTL,
                    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
                    "> 1 cookie "
                    "(status 0x%x ncookies %d.)", ddi_status,
                    dma_p->ncookies));
                (void) ddi_dma_unbind_handle(dma_p->dma_handle);
                if (dma_p->acc_handle) {
                    ddi_dma_mem_free(&dma_p->acc_handle);
                    dma_p->acc_handle = NULL;
                }
                ddi_dma_free_handle(&dma_p->dma_handle);
                dma_p->dma_handle = NULL;
                dma_p->acc_handle = NULL;
                return (NXGE_ERROR);
            }
            break;

        case B_TRUE:
            kaddrp = KMEM_ALLOC(length, KM_NOSLEEP);
            if (kaddrp == NULL) {
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "nxge_dma_mem_alloc: "
                    "kmem alloc failed"));
                ddi_dma_free_handle(&dma_p->dma_handle);
                dma_p->dma_handle = NULL;
                return (NXGE_ERROR);
            }

            dma_p->alength = length;
            ddi_status = ddi_dma_addr_bind_handle(
                dma_p->dma_handle,
                NULL, kaddrp, dma_p->alength, xfer_flags,
                DDI_DMA_DONTWAIT, 0,
                &dma_p->dma_cookie, &dma_p->ncookies);
            if (ddi_status != DDI_DMA_MAPPED) {
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "nxge_dma_mem_alloc:ddi_dma_addr_bind: "
                    "(kmem_alloc) failed kaddrp $%p length %d "
                    "(status 0x%x (%d) ncookies %d.)",
                    kaddrp, length,
                    ddi_status, ddi_status, dma_p->ncookies));
                KMEM_FREE(kaddrp, length);
                dma_p->acc_handle = NULL;
                ddi_dma_free_handle(&dma_p->dma_handle);
                dma_p->dma_handle = NULL;
                dma_p->kaddrp = NULL;
                return (NXGE_ERROR | NXGE_DDI_FAILED);
            }

            if (dma_p->ncookies != 1) {
                NXGE_DEBUG_MSG((nxgep, DMA_CTL,
                    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
                    "(kmem_alloc) > 1 cookie "
                    "(status 0x%x ncookies %d.)", ddi_status,
                    dma_p->ncookies));
                (void) ddi_dma_unbind_handle(dma_p->dma_handle);
                KMEM_FREE(kaddrp, length);
                ddi_dma_free_handle(&dma_p->dma_handle);
                dma_p->dma_handle = NULL;
                dma_p->acc_handle = NULL;
                dma_p->kaddrp = NULL;
                return (NXGE_ERROR);
            }

            dma_p->kaddrp = kaddrp;

            NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
                "nxge_dma_mem_alloc: kmem_alloc dmap $%p "
                "kaddr $%p alength %d",
                dma_p,
                kaddrp,
                dma_p->alength));
            break;
        }
        break;

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
    case B_TRUE:
        kaddrp = (caddr_t)contig_mem_alloc(length);
        if (kaddrp == NULL) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "nxge_dma_mem_alloc:contig_mem_alloc failed."));
            ddi_dma_free_handle(&dma_p->dma_handle);
            return (NXGE_ERROR | NXGE_DDI_FAILED);
        }

        dma_p->alength = length;
        ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
            kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
            &dma_p->dma_cookie, &dma_p->ncookies);
        if (ddi_status != DDI_DMA_MAPPED) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
                "(status 0x%x ncookies %d.)", ddi_status,
                dma_p->ncookies));

            NXGE_DEBUG_MSG((nxgep, DMA_CTL,
                "==> nxge_dma_mem_alloc: (not mapped)"
                "length %lu (0x%x) "
                "free contig kaddrp $%p "
                "va_to_pa $%p",
                length, length,
                kaddrp,
                va_to_pa(kaddrp)));

            contig_mem_free((void *)kaddrp, length);
            ddi_dma_free_handle(&dma_p->dma_handle);

            dma_p->dma_handle = NULL;
            dma_p->acc_handle = NULL;
            dma_p->alength = 0;
            dma_p->kaddrp = NULL;

            return (NXGE_ERROR | NXGE_DDI_FAILED);
        }

        if (dma_p->ncookies != 1 ||
            (dma_p->dma_cookie.dmac_laddress == 0)) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
                "cookie or "
                "dmac_laddress is NULL $%p size %d "
                " (status 0x%x ncookies %d.)",
                dma_p->dma_cookie.dmac_laddress,
                dma_p->dma_cookie.dmac_size,
                ddi_status,
                dma_p->ncookies));

            contig_mem_free((void *)kaddrp, length);
            (void) ddi_dma_unbind_handle(dma_p->dma_handle);
            ddi_dma_free_handle(&dma_p->dma_handle);

            dma_p->alength = 0;
            dma_p->dma_handle = NULL;
            dma_p->acc_handle = NULL;
            dma_p->kaddrp = NULL;

            return (NXGE_ERROR | NXGE_DDI_FAILED);
        }
        break;

#else
    case B_TRUE:
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
#endif
    }

    dma_p->kaddrp = kaddrp;
    dma_p->last_kaddrp = (unsigned char *)kaddrp +
        dma_p->alength - RXBUF_64B_ALIGNED;
#if defined(__i386)
    dma_p->ioaddr_pp =
        (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
#else
    dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
#endif
    dma_p->last_ioaddr_pp =
#if defined(__i386)
        (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
#else
        (unsigned char *)dma_p->dma_cookie.dmac_laddress +
#endif
        dma_p->alength - RXBUF_64B_ALIGNED;

    NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
    dma_p->orig_ioaddr_pp =
        (unsigned char *)dma_p->dma_cookie.dmac_laddress;
    dma_p->orig_alength = length;
    dma_p->orig_kaddrp = kaddrp;
    dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
#endif

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
        "dma buffer allocated: dma_p $%p "
        "return dmac_laddress from cookie $%p cookie dmac_size %d "
        "dma_p->ioaddr_p $%p "
        "dma_p->orig_ioaddr_p $%p "
        "orig_vatopa $%p "
        "alength %d (0x%x) "
        "kaddrp $%p "
        "length %d (0x%x)",
        dma_p,
        dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
        dma_p->ioaddr_pp,
        dma_p->orig_ioaddr_pp,
        dma_p->orig_vatopa,
        dma_p->alength, dma_p->alength,
        kaddrp,
        length, length));

    return (NXGE_OK);
}

static void
nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
{
    if (dma_p->dma_handle != NULL) {
        if (dma_p->ncookies) {
            (void) ddi_dma_unbind_handle(dma_p->dma_handle);
            dma_p->ncookies = 0;
        }
        ddi_dma_free_handle(&dma_p->dma_handle);
        dma_p->dma_handle = NULL;
    }

    if (dma_p->acc_handle != NULL) {
        ddi_dma_mem_free(&dma_p->acc_handle);
        dma_p->acc_handle = NULL;
        NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
    }

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
    if (dma_p->contig_alloc_type &&
        dma_p->orig_kaddrp && dma_p->orig_alength) {
        NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
            "kaddrp $%p (orig_kaddrp $%p) "
            "mem type %d "
            "orig_alength %d "
            "alength 0x%x (%d)",
            dma_p->kaddrp,
            dma_p->orig_kaddrp,
            dma_p->contig_alloc_type,
            dma_p->orig_alength,
            dma_p->alength, dma_p->alength));

        contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
        dma_p->orig_alength = 0;
        dma_p->orig_kaddrp = NULL;
        dma_p->contig_alloc_type = B_FALSE;
    }
#endif
    dma_p->kaddrp = NULL;
    dma_p->alength = 0;
}
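/*
 * nxge_dma_free_rx_data_buf() below differs from nxge_dma_mem_free() in
 * one important way: receive data buffers may still be loaned out to the
 * stack, so the backing memory is released only once the chunk has been
 * marked BUF_ALLOCATED_WAIT_FREE (as done, for example, in the error
 * path of nxge_alloc_rxb()).  If that bit is not set, the function
 * unbinds the DMA resources but deliberately leaves the buffer memory
 * alone ("outstanding data buffers").
 */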
static void
nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
{
    uint64_t kaddr;
    uint32_t buf_size;

    NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));

    if (dma_p->dma_handle != NULL) {
        if (dma_p->ncookies) {
            (void) ddi_dma_unbind_handle(dma_p->dma_handle);
            dma_p->ncookies = 0;
        }
        ddi_dma_free_handle(&dma_p->dma_handle);
        dma_p->dma_handle = NULL;
    }

    if (dma_p->acc_handle != NULL) {
        ddi_dma_mem_free(&dma_p->acc_handle);
        dma_p->acc_handle = NULL;
        NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
    }

    NXGE_DEBUG_MSG((NULL, DMA_CTL,
        "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
        dma_p,
        dma_p->buf_alloc_state));

    if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
        NXGE_DEBUG_MSG((NULL, DMA_CTL,
            "<== nxge_dma_free_rx_data_buf: "
            "outstanding data buffers"));
        return;
    }

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
    if (dma_p->contig_alloc_type &&
        dma_p->orig_kaddrp && dma_p->orig_alength) {
        NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
            "kaddrp $%p (orig_kaddrp $%p) "
            "mem type %d "
            "orig_alength %d "
            "alength 0x%x (%d)",
            dma_p->kaddrp,
            dma_p->orig_kaddrp,
            dma_p->contig_alloc_type,
            dma_p->orig_alength,
            dma_p->alength, dma_p->alength));

        kaddr = (uint64_t)dma_p->orig_kaddrp;
        buf_size = dma_p->orig_alength;
        nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
        dma_p->orig_alength = 0;
        dma_p->orig_kaddrp = NULL;
        dma_p->contig_alloc_type = B_FALSE;
        dma_p->kaddrp = NULL;
        dma_p->alength = 0;
        return;
    }
#endif

    if (dma_p->kmem_alloc_type) {
        NXGE_DEBUG_MSG((NULL, DMA_CTL,
            "nxge_dma_free_rx_data_buf: free kmem "
            "kaddrp $%p (orig_kaddrp $%p) "
            "alloc type %d "
            "orig_alength %d "
            "alength 0x%x (%d)",
            dma_p->kaddrp,
            dma_p->orig_kaddrp,
            dma_p->kmem_alloc_type,
            dma_p->orig_alength,
            dma_p->alength, dma_p->alength));
#if defined(__i386)
        kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
#else
        kaddr = (uint64_t)dma_p->kaddrp;
#endif
        buf_size = dma_p->orig_alength;
        NXGE_DEBUG_MSG((NULL, DMA_CTL,
            "nxge_dma_free_rx_data_buf: free dmap $%p "
            "kaddr $%p buf_size %d",
            dma_p,
            kaddr, buf_size));
        nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
        dma_p->alength = 0;
        dma_p->orig_alength = 0;
        dma_p->kaddrp = NULL;
        dma_p->kmem_alloc_type = B_FALSE;
    }

    NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
}
/*
 * nxge_m_start() -- start transmitting and receiving.
 *
 * This function is called by the MAC layer when the first
 * stream is opened, to prepare the hardware for sending
 * and receiving packets.
 */
static int
nxge_m_start(void *arg)
{
    p_nxge_t nxgep = (p_nxge_t)arg;

    NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));

    /*
     * Are we already started?
     */
    if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
        return (0);
    }

    if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) {
        (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
    }

    /*
     * Make sure the RX MAC is disabled while we initialize.
     */
    if (!isLDOMguest(nxgep)) {
        (void) nxge_rx_mac_disable(nxgep);
    }

    /*
     * Grab the global lock.
     */
    MUTEX_ENTER(nxgep->genlock);

    /*
     * Initialize the driver and hardware.
     */
    if (nxge_init(nxgep) != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "<== nxge_m_start: initialization failed"));
        MUTEX_EXIT(nxgep->genlock);
        return (EIO);
    }

    /*
     * Start the timer that checks for system errors and tx hangs.
     */
    if (!isLDOMguest(nxgep))
        nxgep->nxge_timerid = nxge_start_timer(nxgep,
            nxge_check_hw_state, NXGE_CHECK_TIMER);
#if defined(sun4v)
    else
        nxge_hio_start_timer(nxgep);
#endif

    nxgep->link_notify = B_TRUE;
    nxgep->nxge_mac_state = NXGE_MAC_STARTED;

    /*
     * Let the global lock go, since we are initialized.
     */
    MUTEX_EXIT(nxgep->genlock);

    /*
     * Let the MAC start receiving packets, now that
     * we are initialized.
     */
    if (!isLDOMguest(nxgep)) {
        if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "<== nxge_m_start: enable of RX mac failed"));
            return (EIO);
        }

        /*
         * Enable hardware interrupts.
         */
        nxge_intr_hw_enable(nxgep);
    }
#if defined(sun4v)
    else {
        /*
         * In a guest domain, enable the RDCs and their interrupts
         * as the last step.
         */
        if (nxge_hio_rdc_enable(nxgep) != NXGE_OK) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "<== nxge_m_start: enable of RDCs failed"));
            return (EIO);
        }

        if (nxge_hio_rdc_intr_arm(nxgep, B_TRUE) != NXGE_OK) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "<== nxge_m_start: intrs enable for RDCs failed"));
            return (EIO);
        }
    }
#endif
    NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
    return (0);
}

static boolean_t
nxge_check_groups_stopped(p_nxge_t nxgep)
{
    int i;

    for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
        if (nxgep->rx_hio_groups[i].started)
            return (B_FALSE);
    }

    return (B_TRUE);
}
3925 */ 3926 MUTEX_EXIT(nxgep->genlock); 3927 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3928 } 3929 3930 static int 3931 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3932 { 3933 p_nxge_t nxgep = (p_nxge_t)arg; 3934 struct ether_addr addrp; 3935 3936 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3937 "==> nxge_m_multicst: add %d", add)); 3938 3939 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3940 if (add) { 3941 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3942 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3943 "<== nxge_m_multicst: add multicast failed")); 3944 return (EINVAL); 3945 } 3946 } else { 3947 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3948 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3949 "<== nxge_m_multicst: del multicast failed")); 3950 return (EINVAL); 3951 } 3952 } 3953 3954 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3955 3956 return (0); 3957 } 3958 3959 static int 3960 nxge_m_promisc(void *arg, boolean_t on) 3961 { 3962 p_nxge_t nxgep = (p_nxge_t)arg; 3963 3964 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3965 "==> nxge_m_promisc: on %d", on)); 3966 3967 if (nxge_set_promisc(nxgep, on)) { 3968 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3969 "<== nxge_m_promisc: set promisc failed")); 3970 return (EINVAL); 3971 } 3972 3973 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3974 "<== nxge_m_promisc: on %d", on)); 3975 3976 return (0); 3977 } 3978 3979 static void 3980 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3981 { 3982 p_nxge_t nxgep = (p_nxge_t)arg; 3983 struct iocblk *iocp; 3984 boolean_t need_privilege; 3985 int err; 3986 int cmd; 3987 3988 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3989 3990 iocp = (struct iocblk *)mp->b_rptr; 3991 iocp->ioc_error = 0; 3992 need_privilege = B_TRUE; 3993 cmd = iocp->ioc_cmd; 3994 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3995 switch (cmd) { 3996 default: 3997 miocnak(wq, mp, 0, EINVAL); 3998 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3999 return; 4000 4001 case LB_GET_INFO_SIZE: 4002 case LB_GET_INFO: 4003 case LB_GET_MODE: 4004 need_privilege = B_FALSE; 4005 break; 4006 case LB_SET_MODE: 4007 break; 4008 4009 4010 case NXGE_GET_MII: 4011 case NXGE_PUT_MII: 4012 case NXGE_GET64: 4013 case NXGE_PUT64: 4014 case NXGE_GET_TX_RING_SZ: 4015 case NXGE_GET_TX_DESC: 4016 case NXGE_TX_SIDE_RESET: 4017 case NXGE_RX_SIDE_RESET: 4018 case NXGE_GLOBAL_RESET: 4019 case NXGE_RESET_MAC: 4020 case NXGE_TX_REGS_DUMP: 4021 case NXGE_RX_REGS_DUMP: 4022 case NXGE_INT_REGS_DUMP: 4023 case NXGE_VIR_INT_REGS_DUMP: 4024 case NXGE_PUT_TCAM: 4025 case NXGE_GET_TCAM: 4026 case NXGE_RTRACE: 4027 case NXGE_RDUMP: 4028 case NXGE_RX_CLASS: 4029 case NXGE_RX_HASH: 4030 4031 need_privilege = B_FALSE; 4032 break; 4033 case NXGE_INJECT_ERR: 4034 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 4035 nxge_err_inject(nxgep, wq, mp); 4036 break; 4037 } 4038 4039 if (need_privilege) { 4040 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 4041 if (err != 0) { 4042 miocnak(wq, mp, 0, err); 4043 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4044 "<== nxge_m_ioctl: no priv")); 4045 return; 4046 } 4047 } 4048 4049 switch (cmd) { 4050 4051 case LB_GET_MODE: 4052 case LB_SET_MODE: 4053 case LB_GET_INFO_SIZE: 4054 case LB_GET_INFO: 4055 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 4056 break; 4057 4058 case NXGE_GET_MII: 4059 case NXGE_PUT_MII: 4060 case NXGE_PUT_TCAM: 4061 case NXGE_GET_TCAM: 4062 case NXGE_GET64: 4063 case NXGE_PUT64: 4064 case NXGE_GET_TX_RING_SZ: 4065 case NXGE_GET_TX_DESC: 4066 case NXGE_TX_SIDE_RESET: 4067 case NXGE_RX_SIDE_RESET: 
4068 case NXGE_GLOBAL_RESET: 4069 case NXGE_RESET_MAC: 4070 case NXGE_TX_REGS_DUMP: 4071 case NXGE_RX_REGS_DUMP: 4072 case NXGE_INT_REGS_DUMP: 4073 case NXGE_VIR_INT_REGS_DUMP: 4074 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4075 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 4076 nxge_hw_ioctl(nxgep, wq, mp, iocp); 4077 break; 4078 case NXGE_RX_CLASS: 4079 if (nxge_rxclass_ioctl(nxgep, wq, mp->b_cont) < 0) 4080 miocnak(wq, mp, 0, EINVAL); 4081 else 4082 miocack(wq, mp, sizeof (rx_class_cfg_t), 0); 4083 break; 4084 case NXGE_RX_HASH: 4085 4086 if (nxge_rxhash_ioctl(nxgep, wq, mp->b_cont) < 0) 4087 miocnak(wq, mp, 0, EINVAL); 4088 else 4089 miocack(wq, mp, sizeof (cfg_cmd_t), 0); 4090 break; 4091 } 4092 4093 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 4094 } 4095 4096 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 4097 4098 void 4099 nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory) 4100 { 4101 p_nxge_mmac_stats_t mmac_stats; 4102 int i; 4103 nxge_mmac_t *mmac_info; 4104 4105 mmac_info = &nxgep->nxge_mmac_info; 4106 4107 mmac_stats = &nxgep->statsp->mmac_stats; 4108 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 4109 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 4110 4111 for (i = 0; i < ETHERADDRL; i++) { 4112 if (factory) { 4113 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4114 = mmac_info->factory_mac_pool[slot][ 4115 (ETHERADDRL-1) - i]; 4116 } else { 4117 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4118 = mmac_info->mac_pool[slot].addr[ 4119 (ETHERADDRL - 1) - i]; 4120 } 4121 } 4122 } 4123 4124 /* 4125 * nxge_altmac_set() -- Set an alternate MAC address 4126 */ 4127 static int 4128 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot, 4129 int rdctbl, boolean_t usetbl) 4130 { 4131 uint8_t addrn; 4132 uint8_t portn; 4133 npi_mac_addr_t altmac; 4134 hostinfo_t mac_rdc; 4135 p_nxge_class_pt_cfg_t clscfgp; 4136 4137 4138 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 4139 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 4140 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 4141 4142 portn = nxgep->mac.portnum; 4143 addrn = (uint8_t)slot - 1; 4144 4145 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, 4146 nxgep->function_num, addrn, &altmac) != NPI_SUCCESS) 4147 return (EIO); 4148 4149 /* 4150 * Set the rdc table number for the host info entry 4151 * for this mac address slot. 4152 */ 4153 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 4154 mac_rdc.value = 0; 4155 if (usetbl) 4156 mac_rdc.bits.w0.rdc_tbl_num = rdctbl; 4157 else 4158 mac_rdc.bits.w0.rdc_tbl_num = 4159 clscfgp->mac_host_info[addrn].rdctbl; 4160 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 4161 4162 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 4163 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 4164 return (EIO); 4165 } 4166 4167 /* 4168 * Enable comparison with the alternate MAC address. 4169 * While the first alternate addr is enabled by bit 1 of register 4170 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 4171 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 4172 * accordingly before calling npi_mac_altaddr_enable.
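 *
 * A worked example (illustrative only): on an XMAC port
 * (XMAC_PORT_0/XMAC_PORT_1), slot 1 is enabled through addrn 0,
 * while on a BMAC port the same slot 1 is enabled through addrn 1;
 * the conversion below is equivalent to
 *
 *	addrn = (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) ?
 *	    (uint8_t)(slot - 1) : (uint8_t)slot;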
4173 */ 4174 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4175 addrn = (uint8_t)slot - 1; 4176 else 4177 addrn = (uint8_t)slot; 4178 4179 if (npi_mac_altaddr_enable(nxgep->npi_handle, 4180 nxgep->function_num, addrn) != NPI_SUCCESS) { 4181 return (EIO); 4182 } 4183 4184 return (0); 4185 } 4186 4187 /* 4188 * nxge_m_mmac_add_g() -- find an unused address slot, set the address 4189 * value to the one specified, and enable the port to start filtering on 4190 * the new MAC address. Returns 0 on success. 4191 */ 4192 int 4193 nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl, 4194 boolean_t usetbl) 4195 { 4196 p_nxge_t nxgep = arg; 4197 int slot; 4198 nxge_mmac_t *mmac_info; 4199 int err; 4200 nxge_status_t status; 4201 4202 mutex_enter(nxgep->genlock); 4203 4204 /* 4205 * Make sure that nxge is initialized if _start() has 4206 * not been called. 4207 */ 4208 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4209 status = nxge_init(nxgep); 4210 if (status != NXGE_OK) { 4211 mutex_exit(nxgep->genlock); 4212 return (ENXIO); 4213 } 4214 } 4215 4216 mmac_info = &nxgep->nxge_mmac_info; 4217 if (mmac_info->naddrfree == 0) { 4218 mutex_exit(nxgep->genlock); 4219 return (ENOSPC); 4220 } 4221 4222 /* 4223 * Search for the first available slot. Because naddrfree 4224 * is not zero, we are guaranteed to find one. 4225 * Each of the first two ports of Neptune has 16 alternate 4226 * MAC slots but only the first 7 (of 15) slots have assigned factory 4227 * MAC addresses. We first search among the slots without bundled 4228 * factory MACs. If we fail to find one in that range, then we 4229 * search the slots with bundled factory MACs. A factory MAC 4230 * will be wasted while the slot is used with a user MAC address. 4231 * But the slot could be used by a factory MAC again after calling 4232 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 4233 */ 4234 for (slot = 0; slot <= mmac_info->num_mmac; slot++) { 4235 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4236 break; 4237 } 4238 4239 ASSERT(slot <= mmac_info->num_mmac); 4240 4241 if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl, 4242 usetbl)) != 0) { 4243 mutex_exit(nxgep->genlock); 4244 return (err); 4245 } 4246 4247 bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 4248 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 4249 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4250 mmac_info->naddrfree--; 4251 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4252 4253 mutex_exit(nxgep->genlock); 4254 return (0); 4255 } 4256 4257 /* 4258 * Remove the specified mac address and update the HW not to filter 4259 * the mac address anymore. 4260 */ 4261 int 4262 nxge_m_mmac_remove(void *arg, int slot) 4263 { 4264 p_nxge_t nxgep = arg; 4265 nxge_mmac_t *mmac_info; 4266 uint8_t addrn; 4267 uint8_t portn; 4268 int err = 0; 4269 nxge_status_t status; 4270 4271 mutex_enter(nxgep->genlock); 4272 4273 /* 4274 * Make sure that nxge is initialized if _start() has 4275 * not been called.
4276 */ 4277 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4278 status = nxge_init(nxgep); 4279 if (status != NXGE_OK) { 4280 mutex_exit(nxgep->genlock); 4281 return (ENXIO); 4282 } 4283 } 4284 4285 mmac_info = &nxgep->nxge_mmac_info; 4286 if (slot < 1 || slot > mmac_info->num_mmac) { 4287 mutex_exit(nxgep->genlock); 4288 return (EINVAL); 4289 } 4290 4291 portn = nxgep->mac.portnum; 4292 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4293 addrn = (uint8_t)slot - 1; 4294 else 4295 addrn = (uint8_t)slot; 4296 4297 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4298 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 4299 == NPI_SUCCESS) { 4300 mmac_info->naddrfree++; 4301 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 4302 /* 4303 * Regardless of whether the MAC we just stopped filtering 4304 * is a user addr or a factory addr, we must set 4305 * the MMAC_VENDOR_ADDR flag if this slot has an 4306 * associated factory MAC to indicate that a factory 4307 * MAC is available. 4308 */ 4309 if (slot <= mmac_info->num_factory_mmac) { 4310 mmac_info->mac_pool[slot].flags 4311 |= MMAC_VENDOR_ADDR; 4312 } 4313 /* 4314 * Clear mac_pool[slot].addr so that kstat shows 0 4315 * alternate MAC address if the slot is not used. 4316 * (But nxge_m_mmac_get returns the factory MAC even 4317 * when the slot is not used!) 4318 */ 4319 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 4320 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4321 } else { 4322 err = EIO; 4323 } 4324 } else { 4325 err = EINVAL; 4326 } 4327 4328 mutex_exit(nxgep->genlock); 4329 return (err); 4330 } 4331 4332 /* 4333 * The callback to query all the factory addresses. naddr must be the same as 4334 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and 4335 * mcm_addr is the space allocated to keep all the addresses, whose size is 4336 * naddr * MAXMACADDRLEN.
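 *
 * Layout sketch (illustrative only): factory address i is copied into
 * bytes [i * MAXMACADDRLEN, i * MAXMACADDRLEN + ETHERADDRL) of that
 * space; the remainder of each MAXMACADDRLEN-sized entry is unused.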
4337 */ 4338 static void 4339 nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr) 4340 { 4341 nxge_t *nxgep = arg; 4342 nxge_mmac_t *mmac_info; 4343 int i; 4344 4345 mutex_enter(nxgep->genlock); 4346 4347 mmac_info = &nxgep->nxge_mmac_info; 4348 ASSERT(naddr == mmac_info->num_factory_mmac); 4349 4350 for (i = 0; i < naddr; i++) { 4351 bcopy(mmac_info->factory_mac_pool[i + 1], 4352 addr + i * MAXMACADDRLEN, ETHERADDRL); 4353 } 4354 4355 mutex_exit(nxgep->genlock); 4356 } 4357 4358 4359 static boolean_t 4360 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4361 { 4362 nxge_t *nxgep = arg; 4363 uint32_t *txflags = cap_data; 4364 4365 switch (cap) { 4366 case MAC_CAPAB_HCKSUM: 4367 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4368 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload)); 4369 if (nxge_cksum_offload <= 1) { 4370 *txflags = HCKSUM_INET_PARTIAL; 4371 } 4372 break; 4373 4374 case MAC_CAPAB_MULTIFACTADDR: { 4375 mac_capab_multifactaddr_t *mfacp = cap_data; 4376 4377 if (!isLDOMguest(nxgep)) { 4378 mutex_enter(nxgep->genlock); 4379 mfacp->mcm_naddr = 4380 nxgep->nxge_mmac_info.num_factory_mmac; 4381 mfacp->mcm_getaddr = nxge_m_getfactaddr; 4382 mutex_exit(nxgep->genlock); 4383 } 4384 break; 4385 } 4386 4387 case MAC_CAPAB_LSO: { 4388 mac_capab_lso_t *cap_lso = cap_data; 4389 4390 if (nxgep->soft_lso_enable) { 4391 if (nxge_cksum_offload <= 1) { 4392 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4393 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 4394 nxge_lso_max = NXGE_LSO_MAXLEN; 4395 } 4396 cap_lso->lso_basic_tcp_ipv4.lso_max = 4397 nxge_lso_max; 4398 } 4399 break; 4400 } else { 4401 return (B_FALSE); 4402 } 4403 } 4404 4405 case MAC_CAPAB_RINGS: { 4406 mac_capab_rings_t *cap_rings = cap_data; 4407 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 4408 4409 mutex_enter(nxgep->genlock); 4410 if (cap_rings->mr_type == MAC_RING_TYPE_RX) { 4411 if (isLDOMguest(nxgep)) { 4412 cap_rings->mr_group_type = 4413 MAC_GROUP_TYPE_STATIC; 4414 cap_rings->mr_rnum = 4415 NXGE_HIO_SHARE_MAX_CHANNELS; 4416 cap_rings->mr_rget = nxge_fill_ring; 4417 cap_rings->mr_gnum = 1; 4418 cap_rings->mr_gget = nxge_hio_group_get; 4419 cap_rings->mr_gaddring = NULL; 4420 cap_rings->mr_gremring = NULL; 4421 } else { 4422 /* 4423 * Service Domain. 4424 */ 4425 cap_rings->mr_group_type = 4426 MAC_GROUP_TYPE_DYNAMIC; 4427 cap_rings->mr_rnum = p_cfgp->max_rdcs; 4428 cap_rings->mr_rget = nxge_fill_ring; 4429 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids; 4430 cap_rings->mr_gget = nxge_hio_group_get; 4431 cap_rings->mr_gaddring = nxge_group_add_ring; 4432 cap_rings->mr_gremring = nxge_group_rem_ring; 4433 } 4434 4435 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4436 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]", 4437 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids)); 4438 } else { 4439 /* 4440 * TX Rings. 4441 */ 4442 if (isLDOMguest(nxgep)) { 4443 cap_rings->mr_group_type = 4444 MAC_GROUP_TYPE_STATIC; 4445 cap_rings->mr_rnum = 4446 NXGE_HIO_SHARE_MAX_CHANNELS; 4447 cap_rings->mr_rget = nxge_fill_ring; 4448 cap_rings->mr_gnum = 0; 4449 cap_rings->mr_gget = NULL; 4450 cap_rings->mr_gaddring = NULL; 4451 cap_rings->mr_gremring = NULL; 4452 } else { 4453 /* 4454 * Service Domain. 4455 */ 4456 cap_rings->mr_group_type = 4457 MAC_GROUP_TYPE_DYNAMIC; 4458 cap_rings->mr_rnum = p_cfgp->tdc.count; 4459 cap_rings->mr_rget = nxge_fill_ring; 4460 4461 /* 4462 * Share capable. 
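 *
 * Worked example (illustrative; assumes NXGE_MAX_TDC_GROUPS is 8):
 * on a 4-port Neptune each port would report 8 / 4 - 1 = 1 shareable
 * TX group, and a 2-port configuration 8 / 2 - 1 = 3.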
4463 * 4464 * Do not report the default group: hence -1 4465 */ 4466 cap_rings->mr_gnum = 4467 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1; 4468 cap_rings->mr_gget = nxge_hio_group_get; 4469 cap_rings->mr_gaddring = nxge_group_add_ring; 4470 cap_rings->mr_gremring = nxge_group_rem_ring; 4471 } 4472 4473 NXGE_DEBUG_MSG((nxgep, TX_CTL, 4474 "==> nxge_m_getcapab: tx rings # of rings %d", 4475 p_cfgp->tdc.count)); 4476 } 4477 mutex_exit(nxgep->genlock); 4478 break; 4479 } 4480 4481 #if defined(sun4v) 4482 case MAC_CAPAB_SHARES: { 4483 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4484 4485 /* 4486 * Only the service domain driver responds to 4487 * this capability request. 4488 */ 4489 mutex_enter(nxgep->genlock); 4490 if (isLDOMservice(nxgep)) { 4491 mshares->ms_snum = 3; 4492 mshares->ms_handle = (void *)nxgep; 4493 mshares->ms_salloc = nxge_hio_share_alloc; 4494 mshares->ms_sfree = nxge_hio_share_free; 4495 mshares->ms_sadd = nxge_hio_share_add_group; 4496 mshares->ms_sremove = nxge_hio_share_rem_group; 4497 mshares->ms_squery = nxge_hio_share_query; 4498 mshares->ms_sbind = nxge_hio_share_bind; 4499 mshares->ms_sunbind = nxge_hio_share_unbind; 4500 mutex_exit(nxgep->genlock); 4501 } else { 4502 mutex_exit(nxgep->genlock); 4503 return (B_FALSE); 4504 } 4505 break; 4506 } 4507 #endif 4508 default: 4509 return (B_FALSE); 4510 } 4511 return (B_TRUE); 4512 } 4513 4514 static boolean_t 4515 nxge_param_locked(mac_prop_id_t pr_num) 4516 { 4517 /* 4518 * All adv_* parameters are locked (read-only) while 4519 * the device is in any sort of loopback mode ... 4520 */ 4521 switch (pr_num) { 4522 case MAC_PROP_ADV_1000FDX_CAP: 4523 case MAC_PROP_EN_1000FDX_CAP: 4524 case MAC_PROP_ADV_1000HDX_CAP: 4525 case MAC_PROP_EN_1000HDX_CAP: 4526 case MAC_PROP_ADV_100FDX_CAP: 4527 case MAC_PROP_EN_100FDX_CAP: 4528 case MAC_PROP_ADV_100HDX_CAP: 4529 case MAC_PROP_EN_100HDX_CAP: 4530 case MAC_PROP_ADV_10FDX_CAP: 4531 case MAC_PROP_EN_10FDX_CAP: 4532 case MAC_PROP_ADV_10HDX_CAP: 4533 case MAC_PROP_EN_10HDX_CAP: 4534 case MAC_PROP_AUTONEG: 4535 case MAC_PROP_FLOWCTRL: 4536 return (B_TRUE); 4537 } 4538 return (B_FALSE); 4539 } 4540 4541 /* 4542 * callback functions for set/get of properties 4543 */ 4544 static int 4545 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4546 uint_t pr_valsize, const void *pr_val) 4547 { 4548 nxge_t *nxgep = barg; 4549 p_nxge_param_t param_arr; 4550 p_nxge_stats_t statsp; 4551 int err = 0; 4552 uint8_t val; 4553 uint32_t cur_mtu, new_mtu, old_framesize; 4554 link_flowctrl_t fl; 4555 4556 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4557 param_arr = nxgep->param_arr; 4558 statsp = nxgep->statsp; 4559 mutex_enter(nxgep->genlock); 4560 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4561 nxge_param_locked(pr_num)) { 4562 /* 4563 * All adv_* parameters are locked (read-only) 4564 * while the device is in any sort of loopback mode. 
4565 */ 4566 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4567 "==> nxge_m_setprop: loopback mode: read only")); 4568 mutex_exit(nxgep->genlock); 4569 return (EBUSY); 4570 } 4571 4572 val = *(uint8_t *)pr_val; 4573 switch (pr_num) { 4574 case MAC_PROP_EN_1000FDX_CAP: 4575 nxgep->param_en_1000fdx = val; 4576 param_arr[param_anar_1000fdx].value = val; 4577 4578 goto reprogram; 4579 4580 case MAC_PROP_EN_100FDX_CAP: 4581 nxgep->param_en_100fdx = val; 4582 param_arr[param_anar_100fdx].value = val; 4583 4584 goto reprogram; 4585 4586 case MAC_PROP_EN_10FDX_CAP: 4587 nxgep->param_en_10fdx = val; 4588 param_arr[param_anar_10fdx].value = val; 4589 4590 goto reprogram; 4591 4592 case MAC_PROP_EN_1000HDX_CAP: 4593 case MAC_PROP_EN_100HDX_CAP: 4594 case MAC_PROP_EN_10HDX_CAP: 4595 case MAC_PROP_ADV_1000FDX_CAP: 4596 case MAC_PROP_ADV_1000HDX_CAP: 4597 case MAC_PROP_ADV_100FDX_CAP: 4598 case MAC_PROP_ADV_100HDX_CAP: 4599 case MAC_PROP_ADV_10FDX_CAP: 4600 case MAC_PROP_ADV_10HDX_CAP: 4601 case MAC_PROP_STATUS: 4602 case MAC_PROP_SPEED: 4603 case MAC_PROP_DUPLEX: 4604 err = EINVAL; /* cannot set read-only properties */ 4605 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4606 "==> nxge_m_setprop: read only property %d", 4607 pr_num)); 4608 break; 4609 4610 case MAC_PROP_AUTONEG: 4611 param_arr[param_autoneg].value = val; 4612 4613 goto reprogram; 4614 4615 case MAC_PROP_MTU: 4616 cur_mtu = nxgep->mac.default_mtu; 4617 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4618 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4619 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4620 new_mtu, nxgep->mac.is_jumbo)); 4621 4622 if (new_mtu == cur_mtu) { 4623 err = 0; 4624 break; 4625 } 4626 4627 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4628 err = EBUSY; 4629 break; 4630 } 4631 4632 if ((new_mtu < NXGE_DEFAULT_MTU) || 4633 (new_mtu > NXGE_MAXIMUM_MTU)) { 4634 err = EINVAL; 4635 break; 4636 } 4637 4638 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4639 nxgep->mac.maxframesize = (uint16_t) 4640 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4641 if (nxge_mac_set_framesize(nxgep)) { 4642 nxgep->mac.maxframesize = 4643 (uint16_t)old_framesize; 4644 err = EINVAL; 4645 break; 4646 } 4647 4648 err = mac_maxsdu_update(nxgep->mach, new_mtu); 4649 if (err) { 4650 nxgep->mac.maxframesize = 4651 (uint16_t)old_framesize; 4652 err = EINVAL; 4653 break; 4654 } 4655 4656 nxgep->mac.default_mtu = new_mtu; 4657 if (new_mtu > NXGE_DEFAULT_MTU) 4658 nxgep->mac.is_jumbo = B_TRUE; 4659 else 4660 nxgep->mac.is_jumbo = B_FALSE; 4661 4662 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4663 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4664 new_mtu, nxgep->mac.maxframesize)); 4665 break; 4666 4667 case MAC_PROP_FLOWCTRL: 4668 bcopy(pr_val, &fl, sizeof (fl)); 4669 switch (fl) { 4670 default: 4671 err = EINVAL; 4672 break; 4673 4674 case LINK_FLOWCTRL_NONE: 4675 param_arr[param_anar_pause].value = 0; 4676 break; 4677 4678 case LINK_FLOWCTRL_RX: 4679 param_arr[param_anar_pause].value = 1; 4680 break; 4681 4682 case LINK_FLOWCTRL_TX: 4683 case LINK_FLOWCTRL_BI: 4684 err = EINVAL; 4685 break; 4686 } 4687 4688 reprogram: 4689 if (err == 0) { 4690 if (!nxge_param_link_update(nxgep)) { 4691 err = EINVAL; 4692 } 4693 } 4694 break; 4695 case MAC_PROP_PRIVATE: 4696 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4697 "==> nxge_m_setprop: private property")); 4698 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 4699 pr_val); 4700 break; 4701 4702 default: 4703 err = ENOTSUP; 4704 break; 4705 } 4706 4707 mutex_exit(nxgep->genlock); 4708 4709 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4710 "<== nxge_m_setprop (return %d)", err)); 4711 
return (err); 4712 } 4713 4714 static int 4715 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4716 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 4717 { 4718 nxge_t *nxgep = barg; 4719 p_nxge_param_t param_arr = nxgep->param_arr; 4720 p_nxge_stats_t statsp = nxgep->statsp; 4721 int err = 0; 4722 link_flowctrl_t fl; 4723 uint64_t tmp = 0; 4724 link_state_t ls; 4725 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4726 4727 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4728 "==> nxge_m_getprop: pr_num %d", pr_num)); 4729 4730 if (pr_valsize == 0) 4731 return (EINVAL); 4732 4733 *perm = MAC_PROP_PERM_RW; 4734 4735 if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) { 4736 err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4737 return (err); 4738 } 4739 4740 bzero(pr_val, pr_valsize); 4741 switch (pr_num) { 4742 case MAC_PROP_DUPLEX: 4743 *perm = MAC_PROP_PERM_READ; 4744 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4745 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4746 "==> nxge_m_getprop: duplex mode %d", 4747 *(uint8_t *)pr_val)); 4748 break; 4749 4750 case MAC_PROP_SPEED: 4751 if (pr_valsize < sizeof (uint64_t)) 4752 return (EINVAL); 4753 *perm = MAC_PROP_PERM_READ; 4754 tmp = statsp->mac_stats.link_speed * 1000000ull; 4755 bcopy(&tmp, pr_val, sizeof (tmp)); 4756 break; 4757 4758 case MAC_PROP_STATUS: 4759 if (pr_valsize < sizeof (link_state_t)) 4760 return (EINVAL); 4761 *perm = MAC_PROP_PERM_READ; 4762 if (!statsp->mac_stats.link_up) 4763 ls = LINK_STATE_DOWN; 4764 else 4765 ls = LINK_STATE_UP; 4766 bcopy(&ls, pr_val, sizeof (ls)); 4767 break; 4768 4769 case MAC_PROP_AUTONEG: 4770 *(uint8_t *)pr_val = 4771 param_arr[param_autoneg].value; 4772 break; 4773 4774 case MAC_PROP_FLOWCTRL: 4775 if (pr_valsize < sizeof (link_flowctrl_t)) 4776 return (EINVAL); 4777 4778 fl = LINK_FLOWCTRL_NONE; 4779 if (param_arr[param_anar_pause].value) { 4780 fl = LINK_FLOWCTRL_RX; 4781 } 4782 bcopy(&fl, pr_val, sizeof (fl)); 4783 break; 4784 4785 case MAC_PROP_ADV_1000FDX_CAP: 4786 *perm = MAC_PROP_PERM_READ; 4787 *(uint8_t *)pr_val = 4788 param_arr[param_anar_1000fdx].value; 4789 break; 4790 4791 case MAC_PROP_EN_1000FDX_CAP: 4792 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4793 break; 4794 4795 case MAC_PROP_ADV_100FDX_CAP: 4796 *perm = MAC_PROP_PERM_READ; 4797 *(uint8_t *)pr_val = 4798 param_arr[param_anar_100fdx].value; 4799 break; 4800 4801 case MAC_PROP_EN_100FDX_CAP: 4802 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4803 break; 4804 4805 case MAC_PROP_ADV_10FDX_CAP: 4806 *perm = MAC_PROP_PERM_READ; 4807 *(uint8_t *)pr_val = 4808 param_arr[param_anar_10fdx].value; 4809 break; 4810 4811 case MAC_PROP_EN_10FDX_CAP: 4812 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4813 break; 4814 4815 case MAC_PROP_EN_1000HDX_CAP: 4816 case MAC_PROP_EN_100HDX_CAP: 4817 case MAC_PROP_EN_10HDX_CAP: 4818 case MAC_PROP_ADV_1000HDX_CAP: 4819 case MAC_PROP_ADV_100HDX_CAP: 4820 case MAC_PROP_ADV_10HDX_CAP: 4821 err = ENOTSUP; 4822 break; 4823 4824 case MAC_PROP_PRIVATE: 4825 err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4826 pr_valsize, pr_val, perm); 4827 break; 4828 4829 case MAC_PROP_MTU: { 4830 mac_propval_range_t range; 4831 4832 if (!(pr_flags & MAC_PROP_POSSIBLE)) 4833 return (ENOTSUP); 4834 if (pr_valsize < sizeof (mac_propval_range_t)) 4835 return (EINVAL); 4836 range.mpr_count = 1; 4837 range.mpr_type = MAC_PROPVAL_UINT32; 4838 range.range_uint32[0].mpur_min = 4839 range.range_uint32[0].mpur_max = NXGE_DEFAULT_MTU; 4840 range.range_uint32[0].mpur_max = NXGE_MAXIMUM_MTU; 4841 
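		/*
		 * Usage sketch (illustrative, not driver code): this range
		 * is what appears as the POSSIBLE values of the mtu link
		 * property, e.g. for
		 *
		 *	# dladm set-linkprop -p mtu=9000 nxge0
		 *
		 * and nxge_m_setprop() enforces the same
		 * [NXGE_DEFAULT_MTU, NXGE_MAXIMUM_MTU] bounds on the set.
		 */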
bcopy(&range, pr_val, sizeof (range)); 4842 break; 4843 } 4844 default: 4845 err = EINVAL; 4846 break; 4847 } 4848 4849 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4850 4851 return (err); 4852 } 4853 4854 /* ARGSUSED */ 4855 static int 4856 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4857 const void *pr_val) 4858 { 4859 p_nxge_param_t param_arr = nxgep->param_arr; 4860 int err = 0; 4861 long result; 4862 4863 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4864 "==> nxge_set_priv_prop: name %s", pr_name)); 4865 4866 /* Blanking */ 4867 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4868 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4869 (char *)pr_val, 4870 (caddr_t)&param_arr[param_rxdma_intr_time]); 4871 if (err) { 4872 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4873 "<== nxge_set_priv_prop: " 4874 "unable to set (%s)", pr_name)); 4875 err = EINVAL; 4876 } else { 4877 err = 0; 4878 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4879 "<== nxge_set_priv_prop: " 4880 "set (%s)", pr_name)); 4881 } 4882 4883 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4884 "<== nxge_set_priv_prop: name %s (value %s)", 4885 pr_name, (char *)pr_val)); 4886 4887 return (err); 4888 } 4889 4890 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4891 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4892 (char *)pr_val, 4893 (caddr_t)&param_arr[param_rxdma_intr_pkts]); 4894 if (err) { 4895 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4896 "<== nxge_set_priv_prop: " 4897 "unable to set (%s)", pr_name)); 4898 err = EINVAL; 4899 } else { 4900 err = 0; 4901 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4902 "<== nxge_set_priv_prop: " 4903 "set (%s)", pr_name)); 4904 } 4905 4906 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4907 "<== nxge_set_priv_prop: name %s (value %s)", 4908 pr_name, (char *)pr_val)); 4909 4910 return (err); 4911 } 4912 4913 /* Classification */ 4914 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4915 if (pr_val == NULL) { 4916 err = EINVAL; 4917 return (err); 4918 } 4919 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4920 4921 err = nxge_param_set_ip_opt(nxgep, NULL, 4922 NULL, (char *)pr_val, 4923 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]); 4924 4925 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4926 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4927 pr_name, result)); 4928 4929 return (err); 4930 } 4931 4932 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 4933 if (pr_val == NULL) { 4934 err = EINVAL; 4935 return (err); 4936 } 4937 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4938 4939 err = nxge_param_set_ip_opt(nxgep, NULL, 4940 NULL, (char *)pr_val, 4941 (caddr_t)&param_arr[param_class_opt_ipv4_udp]); 4942 4943 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4944 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4945 pr_name, result)); 4946 4947 return (err); 4948 } 4949 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 4950 if (pr_val == NULL) { 4951 err = EINVAL; 4952 return (err); 4953 } 4954 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4955 4956 err = nxge_param_set_ip_opt(nxgep, NULL, 4957 NULL, (char *)pr_val, 4958 (caddr_t)&param_arr[param_class_opt_ipv4_ah]); 4959 4960 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4961 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4962 pr_name, result)); 4963 4964 return (err); 4965 } 4966 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 4967 if (pr_val == NULL) { 4968 err = EINVAL; 4969 return (err); 4970 } 4971 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4972 4973 err = nxge_param_set_ip_opt(nxgep, NULL, 4974 NULL, (char *)pr_val, 4975 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]); 4976 4977 NXGE_DEBUG_MSG((nxgep,
NXGE_CTL, 4978 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4979 pr_name, result)); 4980 4981 return (err); 4982 } 4983 4984 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 4985 if (pr_val == NULL) { 4986 err = EINVAL; 4987 return (err); 4988 } 4989 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4990 4991 err = nxge_param_set_ip_opt(nxgep, NULL, 4992 NULL, (char *)pr_val, 4993 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]); 4994 4995 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4996 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4997 pr_name, result)); 4998 4999 return (err); 5000 } 5001 5002 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5003 if (pr_val == NULL) { 5004 err = EINVAL; 5005 return (err); 5006 } 5007 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5008 5009 err = nxge_param_set_ip_opt(nxgep, NULL, 5010 NULL, (char *)pr_val, 5011 (caddr_t)&param_arr[param_class_opt_ipv6_udp]); 5012 5013 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5014 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5015 pr_name, result)); 5016 5017 return (err); 5018 } 5019 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5020 if (pr_val == NULL) { 5021 err = EINVAL; 5022 return (err); 5023 } 5024 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5025 5026 err = nxge_param_set_ip_opt(nxgep, NULL, 5027 NULL, (char *)pr_val, 5028 (caddr_t)&param_arr[param_class_opt_ipv6_ah]); 5029 5030 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5031 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5032 pr_name, result)); 5033 5034 return (err); 5035 } 5036 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5037 if (pr_val == NULL) { 5038 err = EINVAL; 5039 return (err); 5040 } 5041 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5042 5043 err = nxge_param_set_ip_opt(nxgep, NULL, 5044 NULL, (char *)pr_val, 5045 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]); 5046 5047 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5048 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5049 pr_name, result)); 5050 5051 return (err); 5052 } 5053 5054 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5055 if (pr_val == NULL) { 5056 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5057 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 5058 err = EINVAL; 5059 return (err); 5060 } 5061 5062 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5063 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5064 "<== nxge_set_priv_prop: name %s " 5065 "(lso %d pr_val %s value %d)", 5066 pr_name, nxgep->soft_lso_enable, pr_val, result)); 5067 5068 if (result > 1 || result < 0) { 5069 err = EINVAL; 5070 } else { 5071 if (nxgep->soft_lso_enable == (uint32_t)result) { 5072 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5073 "no change (%d %d)", 5074 nxgep->soft_lso_enable, result)); 5075 return (0); 5076 } 5077 nxgep->soft_lso_enable = (int)result; 5078 } 5079 5080 5081 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5082 "<== nxge_set_priv_prop: name %s (value %d)", 5083 pr_name, result)); 5084 5085 return (err); 5086 } 5087 /* 5088 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the 5089 * following code to be executed.
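 *
 * A present-day equivalent (illustrative) reaches the same code
 * through the private link properties handled above, e.g.:
 *
 *	# dladm set-linkprop -p _adv_pause_cap=1 nxge0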
5090 */ 5091 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5092 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5093 (caddr_t)&param_arr[param_anar_10gfdx]); 5094 return (err); 5095 } 5096 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5097 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5098 (caddr_t)&param_arr[param_anar_pause]); 5099 return (err); 5100 } 5101 5102 return (EINVAL); 5103 } 5104 5105 static int 5106 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags, 5107 uint_t pr_valsize, void *pr_val, uint_t *perm) 5108 { 5109 p_nxge_param_t param_arr = nxgep->param_arr; 5110 char valstr[MAXNAMELEN]; 5111 int err = EINVAL; 5112 uint_t strsize; 5113 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 5114 5115 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5116 "==> nxge_get_priv_prop: property %s", pr_name)); 5117 5118 /* function number */ 5119 if (strcmp(pr_name, "_function_number") == 0) { 5120 if (is_default) 5121 return (ENOTSUP); 5122 *perm = MAC_PROP_PERM_READ; 5123 (void) snprintf(valstr, sizeof (valstr), "%d", 5124 nxgep->function_num); 5125 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5126 "==> nxge_get_priv_prop: name %s " 5127 "(value %d valstr %s)", 5128 pr_name, nxgep->function_num, valstr)); 5129 5130 err = 0; 5131 goto done; 5132 } 5133 5134 /* Neptune firmware version */ 5135 if (strcmp(pr_name, "_fw_version") == 0) { 5136 if (is_default) 5137 return (ENOTSUP); 5138 *perm = MAC_PROP_PERM_READ; 5139 (void) snprintf(valstr, sizeof (valstr), "%s", 5140 nxgep->vpd_info.ver); 5141 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5142 "==> nxge_get_priv_prop: name %s " 5143 "(value %s valstr %s)", 5144 pr_name, nxgep->vpd_info.ver, valstr)); 5145 5146 err = 0; 5147 goto done; 5148 } 5149 5150 /* port PHY mode */ 5151 if (strcmp(pr_name, "_port_mode") == 0) { 5152 if (is_default) 5153 return (ENOTSUP); 5154 *perm = MAC_PROP_PERM_READ; 5155 switch (nxgep->mac.portmode) { 5156 case PORT_1G_COPPER: 5157 (void) snprintf(valstr, sizeof (valstr), "1G copper %s", 5158 nxgep->hot_swappable_phy ? 5159 "[hot swappable]" : ""); 5160 break; 5161 case PORT_1G_FIBER: 5162 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s", 5163 nxgep->hot_swappable_phy ? 5164 "[hot swappable]" : ""); 5165 break; 5166 case PORT_10G_COPPER: 5167 (void) snprintf(valstr, sizeof (valstr), 5168 "10G copper %s", 5169 nxgep->hot_swappable_phy ? 5170 "[hot swappable]" : ""); 5171 break; 5172 case PORT_10G_FIBER: 5173 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s", 5174 nxgep->hot_swappable_phy ? 5175 "[hot swappable]" : ""); 5176 break; 5177 case PORT_10G_SERDES: 5178 (void) snprintf(valstr, sizeof (valstr), 5179 "10G serdes %s", nxgep->hot_swappable_phy ? 5180 "[hot swappable]" : ""); 5181 break; 5182 case PORT_1G_SERDES: 5183 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s", 5184 nxgep->hot_swappable_phy ? 5185 "[hot swappable]" : ""); 5186 break; 5187 case PORT_1G_TN1010: 5188 (void) snprintf(valstr, sizeof (valstr), 5189 "1G TN1010 copper %s", nxgep->hot_swappable_phy ? 5190 "[hot swappable]" : ""); 5191 break; 5192 case PORT_10G_TN1010: 5193 (void) snprintf(valstr, sizeof (valstr), 5194 "10G TN1010 copper %s", nxgep->hot_swappable_phy ? 5195 "[hot swappable]" : ""); 5196 break; 5197 case PORT_1G_RGMII_FIBER: 5198 (void) snprintf(valstr, sizeof (valstr), 5199 "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
5200 "[hot swappable]" : ""); 5201 break; 5202 case PORT_HSP_MODE: 5203 (void) snprintf(valstr, sizeof (valstr), 5204 "phy not present[hot swappable]"); 5205 break; 5206 default: 5207 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5208 nxgep->hot_swappable_phy ? 5209 "[hot swappable]" : ""); 5210 break; 5211 } 5212 5213 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5214 "==> nxge_get_priv_prop: name %s (value %s)", 5215 pr_name, valstr)); 5216 5217 err = 0; 5218 goto done; 5219 } 5220 5221 /* Hot swappable PHY */ 5222 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5223 if (is_default) 5224 return (ENOTSUP); 5225 *perm = MAC_PROP_PERM_READ; 5226 (void) snprintf(valstr, sizeof (valstr), "%s", 5227 nxgep->hot_swappable_phy ? 5228 "yes" : "no"); 5229 5230 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5231 "==> nxge_get_priv_prop: name %s " 5232 "(value %d valstr %s)", 5233 pr_name, nxgep->hot_swappable_phy, valstr)); 5234 5235 err = 0; 5236 goto done; 5237 } 5238 5239 5240 /* Receive Interrupt Blanking Parameters */ 5241 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5242 err = 0; 5243 if (is_default) { 5244 (void) snprintf(valstr, sizeof (valstr), 5245 "%d", RXDMA_RCR_TO_DEFAULT); 5246 goto done; 5247 } 5248 5249 (void) snprintf(valstr, sizeof (valstr), "%d", 5250 nxgep->intr_timeout); 5251 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5252 "==> nxge_get_priv_prop: name %s (value %d)", 5253 pr_name, 5254 (uint32_t)nxgep->intr_timeout)); 5255 goto done; 5256 } 5257 5258 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5259 err = 0; 5260 if (is_default) { 5261 (void) snprintf(valstr, sizeof (valstr), 5262 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5263 goto done; 5264 } 5265 (void) snprintf(valstr, sizeof (valstr), "%d", 5266 nxgep->intr_threshold); 5267 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5268 "==> nxge_get_priv_prop: name %s (value %d)", 5269 pr_name, (uint32_t)nxgep->intr_threshold)); 5270 5271 goto done; 5272 } 5273 5274 /* Classification and Load Distribution Configuration */ 5275 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5276 if (is_default) { 5277 (void) snprintf(valstr, sizeof (valstr), "%x", 5278 NXGE_CLASS_FLOW_GEN_SERVER); 5279 err = 0; 5280 goto done; 5281 } 5282 err = nxge_dld_get_ip_opt(nxgep, 5283 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]); 5284 5285 (void) snprintf(valstr, sizeof (valstr), "%x", 5286 (int)param_arr[param_class_opt_ipv4_tcp].value); 5287 5288 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5289 "==> nxge_get_priv_prop: %s", valstr)); 5290 goto done; 5291 } 5292 5293 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5294 if (is_default) { 5295 (void) snprintf(valstr, sizeof (valstr), "%x", 5296 NXGE_CLASS_FLOW_GEN_SERVER); 5297 err = 0; 5298 goto done; 5299 } 5300 err = nxge_dld_get_ip_opt(nxgep, 5301 (caddr_t)&param_arr[param_class_opt_ipv4_udp]); 5302 5303 (void) snprintf(valstr, sizeof (valstr), "%x", 5304 (int)param_arr[param_class_opt_ipv4_udp].value); 5305 5306 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5307 "==> nxge_get_priv_prop: %s", valstr)); 5308 goto done; 5309 } 5310 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5311 if (is_default) { 5312 (void) snprintf(valstr, sizeof (valstr), "%x", 5313 NXGE_CLASS_FLOW_GEN_SERVER); 5314 err = 0; 5315 goto done; 5316 } 5317 err = nxge_dld_get_ip_opt(nxgep, 5318 (caddr_t)&param_arr[param_class_opt_ipv4_ah]); 5319 5320 (void) snprintf(valstr, sizeof (valstr), "%x", 5321 (int)param_arr[param_class_opt_ipv4_ah].value); 5322 5323 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5324 "==> nxge_get_priv_prop: %s", valstr)); 5325 goto done; 5326 } 5327 5328 if (strcmp(pr_name, "_class_opt_ipv4_sctp")
== 0) { 5329 if (is_default) { 5330 (void) snprintf(valstr, sizeof (valstr), "%x", 5331 NXGE_CLASS_FLOW_GEN_SERVER); 5332 err = 0; 5333 goto done; 5334 } 5335 err = nxge_dld_get_ip_opt(nxgep, 5336 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]); 5337 5338 (void) snprintf(valstr, sizeof (valstr), "%x", 5339 (int)param_arr[param_class_opt_ipv4_sctp].value); 5340 5341 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5342 "==> nxge_get_priv_prop: %s", valstr)); 5343 goto done; 5344 } 5345 5346 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5347 if (is_default) { 5348 (void) snprintf(valstr, sizeof (valstr), "%x", 5349 NXGE_CLASS_FLOW_GEN_SERVER); 5350 err = 0; 5351 goto done; 5352 } 5353 err = nxge_dld_get_ip_opt(nxgep, 5354 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]); 5355 5356 (void) snprintf(valstr, sizeof (valstr), "%x", 5357 (int)param_arr[param_class_opt_ipv6_tcp].value); 5358 5359 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5360 "==> nxge_get_priv_prop: %s", valstr)); 5361 goto done; 5362 } 5363 5364 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5365 if (is_default) { 5366 (void) snprintf(valstr, sizeof (valstr), "%x", 5367 NXGE_CLASS_FLOW_GEN_SERVER); 5368 err = 0; 5369 goto done; 5370 } 5371 err = nxge_dld_get_ip_opt(nxgep, 5372 (caddr_t)&param_arr[param_class_opt_ipv6_udp]); 5373 5374 (void) snprintf(valstr, sizeof (valstr), "%x", 5375 (int)param_arr[param_class_opt_ipv6_udp].value); 5376 5377 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5378 "==> nxge_get_priv_prop: %s", valstr)); 5379 goto done; 5380 } 5381 5382 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5383 if (is_default) { 5384 (void) snprintf(valstr, sizeof (valstr), "%x", 5385 NXGE_CLASS_FLOW_GEN_SERVER); 5386 err = 0; 5387 goto done; 5388 } 5389 err = nxge_dld_get_ip_opt(nxgep, 5390 (caddr_t)&param_arr[param_class_opt_ipv6_ah]); 5391 5392 (void) snprintf(valstr, sizeof (valstr), "%x", 5393 (int)param_arr[param_class_opt_ipv6_ah].value); 5394 5395 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5396 "==> nxge_get_priv_prop: %s", valstr)); 5397 goto done; 5398 } 5399 5400 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5401 if (is_default) { 5402 (void) snprintf(valstr, sizeof (valstr), "%x", 5403 NXGE_CLASS_FLOW_GEN_SERVER); 5404 err = 0; 5405 goto done; 5406 } 5407 err = nxge_dld_get_ip_opt(nxgep, 5408 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]); 5409 5410 (void) snprintf(valstr, sizeof (valstr), "%x", 5411 (int)param_arr[param_class_opt_ipv6_sctp].value); 5412 5413 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5414 "==> nxge_get_priv_prop: %s", valstr)); 5415 goto done; 5416 } 5417 5418 /* Software LSO */ 5419 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5420 if (is_default) { 5421 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5422 err = 0; 5423 goto done; 5424 } 5425 (void) snprintf(valstr, sizeof (valstr), 5426 "%d", nxgep->soft_lso_enable); 5427 err = 0; 5428 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5429 "==> nxge_get_priv_prop: name %s (value %d)", 5430 pr_name, nxgep->soft_lso_enable)); 5431 5432 goto done; 5433 } 5434 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5435 err = 0; 5436 if (is_default || 5437 nxgep->param_arr[param_anar_10gfdx].value != 0) { 5438 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5439 goto done; 5440 } else { 5441 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5442 goto done; 5443 } 5444 } 5445 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5446 err = 0; 5447 if (is_default || 5448 nxgep->param_arr[param_anar_pause].value != 0) { 5449 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5450 goto done; 5451 } else { 5452 (void) snprintf(valstr,
sizeof (valstr), "%d", 0); 5453 goto done; 5454 } 5455 } 5456 5457 done: 5458 if (err == 0) { 5459 strsize = (uint_t)strlen(valstr); 5460 if (pr_valsize < strsize) { 5461 err = ENOBUFS; 5462 } else { 5463 (void) strlcpy(pr_val, valstr, pr_valsize); 5464 } 5465 } 5466 5467 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5468 "<== nxge_get_priv_prop: return %d", err)); 5469 return (err); 5470 } 5471 5472 /* 5473 * Module loading and removing entry points. 5474 */ 5475 5476 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach, 5477 nodev, NULL, D_MP, NULL, nxge_quiesce); 5478 5479 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5480 5481 /* 5482 * Module linkage information for the kernel. 5483 */ 5484 static struct modldrv nxge_modldrv = { 5485 &mod_driverops, 5486 NXGE_DESC_VER, 5487 &nxge_dev_ops 5488 }; 5489 5490 static struct modlinkage modlinkage = { 5491 MODREV_1, (void *) &nxge_modldrv, NULL 5492 }; 5493 5494 int 5495 _init(void) 5496 { 5497 int status; 5498 5499 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 5500 5501 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5502 5503 mac_init_ops(&nxge_dev_ops, "nxge"); 5504 5505 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5506 if (status != 0) { 5507 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5508 "failed to init device soft state")); 5509 goto _init_exit; 5510 } 5511 5512 status = mod_install(&modlinkage); 5513 if (status != 0) { 5514 ddi_soft_state_fini(&nxge_list); 5515 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5516 goto _init_exit; 5517 } 5518 5519 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5520 5521 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5522 return (status); 5523 5524 _init_exit: 5525 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5526 MUTEX_DESTROY(&nxgedebuglock); 5527 return (status); 5528 } 5529 5530 int 5531 _fini(void) 5532 { 5533 int status; 5534 5535 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5536 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5537 5538 if (nxge_mblks_pending) 5539 return (EBUSY); 5540 5541 status = mod_remove(&modlinkage); 5542 if (status != DDI_SUCCESS) { 5543 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5544 "Module removal failed 0x%08x", 5545 status)); 5546 goto _fini_exit; 5547 } 5548 5549 mac_fini_ops(&nxge_dev_ops); 5550 5551 ddi_soft_state_fini(&nxge_list); 5552 5553 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5554 5555 MUTEX_DESTROY(&nxge_common_lock); 5556 MUTEX_DESTROY(&nxgedebuglock); 5557 return (status); 5558 5559 _fini_exit: 5560 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5561 return (status); 5562 } 5563 5564 int 5565 _info(struct modinfo *modinfop) 5566 { 5567 int status; 5568 5569 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5570 status = mod_info(&modlinkage, modinfop); 5571 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5572 5573 return (status); 5574 } 5575 5576 /*ARGSUSED*/ 5577 static int 5578 nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5579 { 5580 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5581 p_nxge_t nxgep = rhp->nxgep; 5582 uint32_t channel; 5583 p_tx_ring_t ring; 5584 5585 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5586 ring = nxgep->tx_rings->rings[channel]; 5587 5588 MUTEX_ENTER(&ring->lock); 5589 ring->tx_ring_handle = rhp->ring_handle; 5590 MUTEX_EXIT(&ring->lock); 5591 5592 return (0); 5593 } 5594 5595 static void 5596 
nxge_tx_ring_stop(mac_ring_driver_t rdriver) 5597 { 5598 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5599 p_nxge_t nxgep = rhp->nxgep; 5600 uint32_t channel; 5601 p_tx_ring_t ring; 5602 5603 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5604 ring = nxgep->tx_rings->rings[channel]; 5605 5606 MUTEX_ENTER(&ring->lock); 5607 ring->tx_ring_handle = (mac_ring_handle_t)NULL; 5608 MUTEX_EXIT(&ring->lock); 5609 } 5610 5611 static int 5612 nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5613 { 5614 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5615 p_nxge_t nxgep = rhp->nxgep; 5616 uint32_t channel; 5617 p_rx_rcr_ring_t ring; 5618 int i; 5619 5620 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index; 5621 ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 5622 5623 MUTEX_ENTER(&ring->lock); 5624 5625 if (nxgep->rx_channel_started[channel] == B_TRUE) { 5626 MUTEX_EXIT(&ring->lock); 5627 return (0); 5628 } 5629 5630 /* set rcr_ring */ 5631 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) { 5632 if ((nxgep->ldgvp->ldvp[i].is_rxdma == 1) && 5633 (nxgep->ldgvp->ldvp[i].channel == channel)) { 5634 ring->ldvp = &nxgep->ldgvp->ldvp[i]; 5635 ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp; 5636 } 5637 } 5638 5639 nxgep->rx_channel_started[channel] = B_TRUE; 5640 ring->rcr_mac_handle = rhp->ring_handle; 5641 ring->rcr_gen_num = mr_gen_num; 5642 MUTEX_EXIT(&ring->lock); 5643 5644 return (0); 5645 } 5646 5647 static void 5648 nxge_rx_ring_stop(mac_ring_driver_t rdriver) 5649 { 5650 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5651 p_nxge_t nxgep = rhp->nxgep; 5652 uint32_t channel; 5653 p_rx_rcr_ring_t ring; 5654 5655 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index; 5656 ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 5657 5658 MUTEX_ENTER(&ring->lock); 5659 nxgep->rx_channel_started[channel] = B_FALSE; 5660 ring->rcr_mac_handle = NULL; 5661 MUTEX_EXIT(&ring->lock); 5662 } 5663 5664 /* 5665 * Callback function for the MAC layer to register all rings. 5666 */ 5667 static void 5668 nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index, 5669 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh) 5670 { 5671 p_nxge_t nxgep = (p_nxge_t)arg; 5672 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 5673 5674 NXGE_DEBUG_MSG((nxgep, TX_CTL, 5675 "==> nxge_fill_ring 0x%x index %d", rtype, index)); 5676 5677 switch (rtype) { 5678 case MAC_RING_TYPE_TX: { 5679 p_nxge_ring_handle_t rhandlep; 5680 5681 NXGE_DEBUG_MSG((nxgep, TX_CTL, 5682 "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d", 5683 rtype, index, p_cfgp->tdc.count)); 5684 5685 ASSERT((index >= 0) && (index < p_cfgp->tdc.count)); 5686 rhandlep = &nxgep->tx_ring_handles[index]; 5687 rhandlep->nxgep = nxgep; 5688 rhandlep->index = index; 5689 rhandlep->ring_handle = rh; 5690 5691 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5692 infop->mri_start = nxge_tx_ring_start; 5693 infop->mri_stop = nxge_tx_ring_stop; 5694 infop->mri_tx = nxge_tx_ring_send; 5695 5696 break; 5697 } 5698 case MAC_RING_TYPE_RX: { 5699 p_nxge_ring_handle_t rhandlep; 5700 int nxge_rindex; 5701 mac_intr_t nxge_mac_intr; 5702 5703 NXGE_DEBUG_MSG((nxgep, RX_CTL, 5704 "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d", 5705 rtype, index, p_cfgp->max_rdcs)); 5706 5707 /* 5708 * 'index' is the ring index within the group. 5709 * Find the ring index in the nxge instance.
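 *
 * Worked example (illustrative): if this group owns the four rings
 * {4, 5, 6, 7} of the instance, then (rg_index, index = 2) resolves
 * to nxge_rindex 6, which selects the ring handle below.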
5710 */ 5711 nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index); 5712 5713 ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs)); 5714 rhandlep = &nxgep->rx_ring_handles[nxge_rindex]; 5715 rhandlep->nxgep = nxgep; 5716 rhandlep->index = nxge_rindex; 5717 rhandlep->ring_handle = rh; 5718 5719 /* 5720 * Entrypoint to enable interrupt (disable poll) and 5721 * disable interrupt (enable poll). 5722 */ 5723 nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep; 5724 nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll; 5725 nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll; 5726 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5727 infop->mri_start = nxge_rx_ring_start; 5728 infop->mri_stop = nxge_rx_ring_stop; 5729 infop->mri_intr = nxge_mac_intr; /* ??? */ 5730 infop->mri_poll = nxge_rx_poll; 5731 5732 break; 5733 } 5734 default: 5735 break; 5736 } 5737 5738 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x", 5739 rtype)); 5740 } 5741 5742 static void 5743 nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5744 mac_ring_type_t type) 5745 { 5746 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5747 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5748 nxge_t *nxge; 5749 nxge_grp_t *grp; 5750 nxge_rdc_grp_t *rdc_grp; 5751 uint16_t channel; /* device-wise ring id */ 5752 int dev_gindex; 5753 int rv; 5754 5755 nxge = rgroup->nxgep; 5756 5757 switch (type) { 5758 case MAC_RING_TYPE_TX: 5759 /* 5760 * nxge_grp_dc_add takes a channel number which is a 5761 * "device" ring ID. 5762 */ 5763 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5764 5765 /* 5766 * Remove the ring from the default group 5767 */ 5768 if (rgroup->gindex != 0) { 5769 (void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5770 } 5771 5772 /* 5773 * nxge->tx_set.group[] is an array of groups indexed by 5774 * a "port" group ID. 5775 */ 5776 grp = nxge->tx_set.group[rgroup->gindex]; 5777 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5778 if (rv != 0) { 5779 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5780 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5781 } 5782 break; 5783 5784 case MAC_RING_TYPE_RX: 5785 /* 5786 * nxge->rx_set.group[] is an array of groups indexed by 5787 * a "port" group ID. 5788 */ 5789 grp = nxge->rx_set.group[rgroup->gindex]; 5790 5791 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5792 rgroup->gindex; 5793 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5794 5795 /* 5796 * nxge_grp_dc_add takes a channel number which is a 5797 * "device" ring ID.
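 *
 * For example (illustrative): with hw_config.start_rdc equal to 8,
 * ring handle index 2 yields device channel 10, and bit 10 is the
 * one set in the group's RDC map below.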
5798 */ 5799 channel = nxge->pt_config.hw_config.start_rdc + rhandle->index; 5800 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel); 5801 if (rv != 0) { 5802 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5803 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5804 } 5805 5806 rdc_grp->map |= (1 << channel); 5807 rdc_grp->max_rdcs++; 5808 5809 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl); 5810 break; 5811 } 5812 } 5813 5814 static void 5815 nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5816 mac_ring_type_t type) 5817 { 5818 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5819 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5820 nxge_t *nxge; 5821 uint16_t channel; /* device-wise ring id */ 5822 nxge_rdc_grp_t *rdc_grp; 5823 int dev_gindex; 5824 5825 nxge = rgroup->nxgep; 5826 5827 switch (type) { 5828 case MAC_RING_TYPE_TX: 5829 dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid + 5830 rgroup->gindex; 5831 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5832 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5833 5834 /* 5835 * Add the ring back to the default group 5836 */ 5837 if (rgroup->gindex != 0) { 5838 nxge_grp_t *grp; 5839 grp = nxge->tx_set.group[0]; 5840 (void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5841 } 5842 break; 5843 5844 case MAC_RING_TYPE_RX: 5845 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5846 rgroup->gindex; 5847 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5848 channel = rdc_grp->start_rdc + rhandle->index; 5849 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 5850 5851 rdc_grp->map &= ~(1 << channel); 5852 rdc_grp->max_rdcs--; 5853 5854 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl); 5855 break; 5856 } 5857 } 5858 5859 5860 /*ARGSUSED*/ 5861 static nxge_status_t 5862 nxge_add_intrs(p_nxge_t nxgep) 5863 { 5864 5865 int intr_types; 5866 int type = 0; 5867 int ddi_status = DDI_SUCCESS; 5868 nxge_status_t status = NXGE_OK; 5869 5870 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 5871 5872 nxgep->nxge_intr_type.intr_registered = B_FALSE; 5873 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 5874 nxgep->nxge_intr_type.msi_intx_cnt = 0; 5875 nxgep->nxge_intr_type.intr_added = 0; 5876 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 5877 nxgep->nxge_intr_type.intr_type = 0; 5878 5879 if (nxgep->niu_type == N2_NIU) { 5880 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5881 } else if (nxge_msi_enable) { 5882 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5883 } 5884 5885 /* Get the supported interrupt types */ 5886 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 5887 != DDI_SUCCESS) { 5888 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 5889 "ddi_intr_get_supported_types failed: status 0x%08x", 5890 ddi_status)); 5891 return (NXGE_ERROR | NXGE_DDI_FAILED); 5892 } 5893 nxgep->nxge_intr_type.intr_types = intr_types; 5894 5895 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5896 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5897 5898 /* 5899 * Solaris MSIX is not supported yet. use MSI for now. 
5900 * nxge_msi_enable (1): 5901 * 1 - MSI 2 - MSI-X others - FIXED 5902 */ 5903 switch (nxge_msi_enable) { 5904 default: 5905 type = DDI_INTR_TYPE_FIXED; 5906 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5907 "use fixed (intx emulation) type %08x", 5908 type)); 5909 break; 5910 5911 case 2: 5912 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5913 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5914 if (intr_types & DDI_INTR_TYPE_MSIX) { 5915 type = DDI_INTR_TYPE_MSIX; 5916 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5917 "ddi_intr_get_supported_types: MSIX 0x%08x", 5918 type)); 5919 } else if (intr_types & DDI_INTR_TYPE_MSI) { 5920 type = DDI_INTR_TYPE_MSI; 5921 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5922 "ddi_intr_get_supported_types: MSI 0x%08x", 5923 type)); 5924 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5925 type = DDI_INTR_TYPE_FIXED; 5926 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5927 "ddi_intr_get_supported_types: FIXED 0x%08x", 5928 type)); 5929 } 5930 break; 5931 5932 case 1: 5933 if (intr_types & DDI_INTR_TYPE_MSI) { 5934 type = DDI_INTR_TYPE_MSI; 5935 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5936 "ddi_intr_get_supported_types: MSI 0x%08x", 5937 type)); 5938 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 5939 type = DDI_INTR_TYPE_MSIX; 5940 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5941 "ddi_intr_get_supported_types: MSIX 0x%08x", 5942 type)); 5943 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5944 type = DDI_INTR_TYPE_FIXED; 5945 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5946 "ddi_intr_get_supported_types: FIXED 0x%08x", 5947 type)); 5948 } 5949 } 5950 5951 nxgep->nxge_intr_type.intr_type = type; 5952 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 5953 type == DDI_INTR_TYPE_FIXED) && 5954 nxgep->nxge_intr_type.niu_msi_enable) { 5955 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 5956 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5957 " nxge_add_intrs: " 5958 " nxge_add_intrs_adv failed: status 0x%08x", 5959 status)); 5960 return (status); 5961 } else { 5962 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5963 "interrupts registered : type %d", type)); 5964 nxgep->nxge_intr_type.intr_registered = B_TRUE; 5965 5966 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5967 "\nAdded advanced nxge add_intr_adv " 5968 "intr type 0x%x\n", type)); 5969 5970 return (status); 5971 } 5972 } 5973 5974 if (!nxgep->nxge_intr_type.intr_registered) { 5975 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 5976 "failed to register interrupts")); 5977 return (NXGE_ERROR | NXGE_DDI_FAILED); 5978 } 5979 5980 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 5981 return (status); 5982 } 5983 5984 static nxge_status_t 5985 nxge_add_intrs_adv(p_nxge_t nxgep) 5986 { 5987 int intr_type; 5988 p_nxge_intr_t intrp; 5989 5990 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 5991 5992 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5993 intr_type = intrp->intr_type; 5994 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 5995 intr_type)); 5996 5997 switch (intr_type) { 5998 case DDI_INTR_TYPE_MSI: /* 0x2 */ 5999 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 6000 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 6001 6002 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 6003 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 6004 6005 default: 6006 return (NXGE_ERROR); 6007 } 6008 } 6009 6010 6011 /*ARGSUSED*/ 6012 static nxge_status_t 6013
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail, nrequest;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = nxge_create_msi_property(nxgep);
		if (nrequest < navail) {
			navail = nrequest;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI vector count must be a power of 2 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}
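	/*
	 * The bit tests above round navail down to the largest power of
	 * two for any value below 32.  A hedged, equivalent one-liner
	 * using ddi_fls(9F), shown for illustration only (assumes
	 * navail > 0):
	 *
	 *	navail = 1 << (ddi_fls(navail) - 1);
	 */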
	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "nldvs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated interrupts */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual,
	    intrp->msi_intx_cnt,
	    intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
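	/*
	 * Note on the error paths above: once ddi_intr_alloc() has
	 * succeeded, failures unwind in strict reverse order: remove
	 * any handlers already added, free every allocated vector, free
	 * the handle table, and (after nxge_ldgv_init()) undo the
	 * logical device group setup.  Skipping a step would leak DDI
	 * interrupt state across a failed attach.
	 */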
	(void) nxge_intr_ldgv_init(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
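	/*
	 * Each logical device group (LDG) is wired to one interrupt.
	 * A group with a single logical device (nldvs == 1) uses that
	 * device's own handler; a group sharing several logical devices
	 * uses the group-level sys_intr_handler, which fans the
	 * interrupt out to its member devices.
	 */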
"1-1 int handler(%d) ldg %d ldv %d " 6310 "arg1 $%p arg2 $%p\n", 6311 x, ldgp->ldg, ldgp->ldvp->ldv, 6312 arg1, arg2)); 6313 } else if (ldgp->nldvs > 1) { 6314 inthandler = (uint_t *)ldgp->sys_intr_handler; 6315 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6316 "nxge_add_intrs_adv_type_fix: " 6317 "shared ldv %d int handler(%d) ldv %d ldg %d" 6318 "arg1 0x%016llx arg2 0x%016llx\n", 6319 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6320 arg1, arg2)); 6321 } 6322 6323 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6324 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6325 != DDI_SUCCESS) { 6326 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6327 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6328 "status 0x%x", x, ddi_status)); 6329 for (y = 0; y < intrp->intr_added; y++) { 6330 (void) ddi_intr_remove_handler( 6331 intrp->htable[y]); 6332 } 6333 for (y = 0; y < nactual; y++) { 6334 (void) ddi_intr_free(intrp->htable[y]); 6335 } 6336 /* Free already allocated intr */ 6337 kmem_free(intrp->htable, intrp->intr_size); 6338 6339 (void) nxge_ldgv_uninit(nxgep); 6340 6341 return (NXGE_ERROR | NXGE_DDI_FAILED); 6342 } 6343 intrp->intr_added++; 6344 } 6345 6346 intrp->msi_intx_cnt = nactual; 6347 6348 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6349 6350 status = nxge_intr_ldgv_init(nxgep); 6351 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6352 6353 return (status); 6354 } 6355 6356 static void 6357 nxge_remove_intrs(p_nxge_t nxgep) 6358 { 6359 int i, inum; 6360 p_nxge_intr_t intrp; 6361 6362 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6363 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6364 if (!intrp->intr_registered) { 6365 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6366 "<== nxge_remove_intrs: interrupts not registered")); 6367 return; 6368 } 6369 6370 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 6371 6372 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6373 (void) ddi_intr_block_disable(intrp->htable, 6374 intrp->intr_added); 6375 } else { 6376 for (i = 0; i < intrp->intr_added; i++) { 6377 (void) ddi_intr_disable(intrp->htable[i]); 6378 } 6379 } 6380 6381 for (inum = 0; inum < intrp->intr_added; inum++) { 6382 if (intrp->htable[inum]) { 6383 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6384 } 6385 } 6386 6387 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6388 if (intrp->htable[inum]) { 6389 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6390 "nxge_remove_intrs: ddi_intr_free inum %d " 6391 "msi_intx_cnt %d intr_added %d", 6392 inum, 6393 intrp->msi_intx_cnt, 6394 intrp->intr_added)); 6395 6396 (void) ddi_intr_free(intrp->htable[inum]); 6397 } 6398 } 6399 6400 kmem_free(intrp->htable, intrp->intr_size); 6401 intrp->intr_registered = B_FALSE; 6402 intrp->intr_enabled = B_FALSE; 6403 intrp->msi_intx_cnt = 0; 6404 intrp->intr_added = 0; 6405 6406 (void) nxge_ldgv_uninit(nxgep); 6407 6408 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6409 "#msix-request"); 6410 6411 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6412 } 6413 6414 /*ARGSUSED*/ 6415 static void 6416 nxge_intrs_enable(p_nxge_t nxgep) 6417 { 6418 p_nxge_intr_t intrp; 6419 int i; 6420 int status; 6421 6422 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6423 6424 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6425 6426 if (!intrp->intr_registered) { 6427 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6428 "interrupts are not registered")); 6429 return; 6430 } 6431 6432 if (intrp->intr_enabled) { 6433 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6434 "<== 
/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;
	int		status;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t	*macp;
	int		status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	if (!isLDOMguest(nxgep)) {
		macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	} else {
		macp->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
		macp->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
		/*
		 * Fill the whole buffer; the original code passed
		 * sizeof (MAXMACADDRLEN), i.e. the size of an integer
		 * constant, not the buffer length.
		 */
		(void) memset(macp->m_src_addr, 0xff, MAXMACADDRLEN);
	}
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
	    NXGE_EHEADER_VLAN_CRC;
	macp->m_max_sdu = nxgep->mac.default_mtu;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = nxge_priv_props;
	macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS;
	if (isLDOMguest(nxgep)) {
		macp->m_v12n = MAC_VIRT_LEVEL1 | MAC_VIRT_SERIALIZE;
	} else {
		macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1 |
		    MAC_VIRT_SERIALIZE;
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_mac_register: instance %d "
	    "max_sdu %d margin %d maxframe %d (header %d)",
	    nxgep->instance,
	    macp->m_max_sdu, macp->m_margin,
	    nxgep->mac.maxframesize,
	    NXGE_EHEADER_VLAN_CRC));

	status = mac_register(macp, &nxgep->mach);
	if (isLDOMguest(nxgep)) {
		KMEM_FREE(macp->m_src_addr, MAXMACADDRLEN);
		KMEM_FREE(macp->m_dst_addr, MAXMACADDRLEN);
	}
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}
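/*
 * A worked example of the SDU arithmetic in nxge_mac_register() above,
 * assuming a standard 1522-byte maxframesize and assuming that
 * NXGE_EHEADER_VLAN_CRC covers the 14-byte Ethernet header, 4-byte
 * VLAN tag, and 4-byte CRC (22 bytes total):
 *
 *	m_max_sdu = 1522 - 22 = 1500
 *
 * which is the classic Ethernet payload MTU.  Jumbo-frame
 * configurations simply raise maxframesize, and the same arithmetic
 * yields the larger SDU.
 */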
void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t		size;
	mblk_t		*nmp;
	uint8_t		blk_id;
	uint8_t		chan;
	uint32_t	err_id;
	err_inject_t	*eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;
	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
	switch (blk_id) {
	case MAC_BLK_ID:
		break;
	case TXMAC_BLK_ID:
		break;
	case RXMAC_BLK_ID:
		break;
	case MIF_BLK_ID:
		break;
	case IPP_BLK_ID:
		nxge_ipp_inject_err(nxgep, err_id);
		break;
	case TXC_BLK_ID:
		nxge_txc_inject_err(nxgep, err_id);
		break;
	case TXDMA_BLK_ID:
		nxge_txdma_inject_err(nxgep, err_id, chan);
		break;
	case RXDMA_BLK_ID:
		nxge_rxdma_inject_err(nxgep, err_id, chan);
		break;
	case ZCP_BLK_ID:
		nxge_zcp_inject_err(nxgep, err_id);
		break;
	case ESPC_BLK_ID:
		break;
	case FFLP_BLK_ID:
		break;
	case PHY_BLK_ID:
		break;
	case ETHER_SERDES_BLK_ID:
		break;
	case PCIE_SERDES_BLK_ID:
		break;
	case VIR_BLK_ID:
		break;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

	miocack(wq, mp, (int)size, 0);
}
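/*
 * nxge_err_inject() is driven by a debug ioctl whose M_DATA
 * continuation block carries an err_inject_t.  A hedged sketch of how
 * a test utility might fill one in (the field names come from the
 * structure reads above; the error code value is hypothetical):
 *
 *	err_inject_t eip;
 *
 *	eip.blk_id = RXDMA_BLK_ID;	block to target
 *	eip.err_id = 0x1;		block-specific error id (example)
 *	eip.chan = 0;			DMA channel, where relevant
 */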
static int
nxge_init_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	ASSERT(nxgep != NULL);

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

	p_dip = nxgep->p_dip;
	MUTEX_ENTER(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_dev:func # %d",
	    nxgep->function_num));
	/*
	 * Loop through the existing per-Neptune hardware list.
	 */
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "hw_p $%p parent dip $%p",
		    nxgep->function_num,
		    hw_p,
		    p_dip));
		if (hw_p->parent_devp == p_dip) {
			nxgep->nxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->nxge_p[nxgep->function_num] = nxgep;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_init_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {

		char **prop_val;
		uint_t prop_len;
		int i;

		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num,
		    p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;
		if (nxgep->niu_type == N2_NIU) {
			hw_p->niu_type = N2_NIU;
			hw_p->platform_type = P_NEPTUNE_NIU;
			hw_p->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
		} else {
			hw_p->niu_type = NIU_TYPE_NONE;
			hw_p->platform_type = P_NEPTUNE_NONE;
			hw_p->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
		}

		hw_p->tcam = KMEM_ZALLOC(sizeof (tcam_flow_spec_t) *
		    hw_p->tcam_size, KM_SLEEP);

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;

		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
			for (i = 0; i < prop_len; i++) {
				if ((strcmp((caddr_t)prop_val[i],
				    NXGE_ROCK_COMPATIBLE) == 0)) {
					hw_p->platform_type = P_NEPTUNE_ROCK;
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "ROCK hw_p->platform_type %d",
					    hw_p->platform_type));
					break;
				}
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "nxge_init_common_dev: read compatible"
				    " property[%d] val[%s]",
				    i, (caddr_t)prop_val[i]));
			}
			/*
			 * Free the property only when the lookup above
			 * succeeded; prop_val is uninitialized otherwise.
			 */
			ddi_prop_free(prop_val);
		}

		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
	}

	MUTEX_EXIT(&nxge_common_lock);

	nxgep->platform_type = hw_p->platform_type;
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d",
	    nxgep->platform_type));
	if (nxgep->niu_type != N2_NIU) {
		nxgep->niu_type = hw_p->niu_type;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}
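/*
 * All functions (ports) of one Neptune device share a single
 * nxge_hw_list_t entry, keyed by the parent dev_info_t.  The entry
 * counts its users in ndevs and records each instance in
 * nxge_p[function_num]; nxge_uninit_common_dev() below drops a
 * reference and frees the entry only when the last function detaches.
 */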
static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p, h_hw_p;
	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	dev_info_t		*p_dip;

	ASSERT(nxgep != NULL);

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));

			/*
			 * Release the RDC table, a shared resource
			 * of the nxge hardware.  The RDC table was
			 * assigned to this instance of nxge in
			 * nxge_use_cfg_dma_config().
			 */
			if (!isLDOMguest(nxgep)) {
				p_dma_cfgp =
				    (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
				p_cfgp =
				    (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
				(void) nxge_fzc_rdc_tbl_unbind(nxgep,
				    p_cfgp->def_mac_rxdma_grpid);

				/* Cleanup any outstanding groups. */
				nxge_grp_cleanup(nxgep);
			}

			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				KMEM_FREE(hw_p->tcam,
				    sizeof (tcam_flow_spec_t) *
				    hw_p->tcam_size);
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num,
				    hw_p,
				    p_dip,
				    hw_p->ndevs));

				nxge_hio_uninit(nxgep);

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				nxgep->nxge_hw_p = NULL;
				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}
/*
 * Determine the number of ports from the niu_type or the platform type.
 * Returns the number of ports, or zero on failure.
 */
int
nxge_get_nports(p_nxge_t nxgep)
{
	int	nports = 0;

	switch (nxgep->niu_type) {
	case N2_NIU:
	case NEPTUNE_2_10GF:
		nports = 2;
		break;
	case NEPTUNE_4_1GC:
	case NEPTUNE_2_10GF_2_1GC:
	case NEPTUNE_1_10GF_3_1GC:
	case NEPTUNE_1_1GC_1_10GF_2_1GC:
	case NEPTUNE_2_10GF_2_1GRF:
		nports = 4;
		break;
	default:
		switch (nxgep->platform_type) {
		case P_NEPTUNE_NIU:
		case P_NEPTUNE_ATLAS_2PORT:
			nports = 2;
			break;
		case P_NEPTUNE_ATLAS_4PORT:
		case P_NEPTUNE_MARAMBA_P0:
		case P_NEPTUNE_MARAMBA_P1:
		case P_NEPTUNE_ROCK:
		case P_NEPTUNE_ALONSO:
			nports = 4;
			break;
		default:
			break;
		}
		break;
	}

	return (nports);
}
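/*
 * For illustration: an N2 NIU or a dual-10G Neptune reports 2 ports,
 * while the 4-port Atlas, Maramba, Alonso, and Rock variants report 4.
 * A zero return means that neither the niu_type nor the platform_type
 * was recognized, and callers should treat that as a configuration
 * failure.
 */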
/*
 * The following two functions are to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
	int	nmsi;
	extern	int ncpus;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));

	switch (nxgep->mac.portmode) {
	case PORT_10G_COPPER:
	case PORT_10G_FIBER:
	case PORT_10G_TN1010:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		/*
		 * The maximum number of MSI-X vectors requested is 8.
		 * If the number of CPUs is less than 8, request one
		 * MSI-X vector per CPU (the default).
		 */
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (10G): "
		    "nxge_msix_10g_intrs %d",
		    nxge_msix_10g_intrs));
		if ((nxge_msix_10g_intrs == 0) ||
		    (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_10G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): reset to 8"));
		} else {
			nmsi = nxge_msix_10g_intrs;
		}

		/*
		 * If the number of interrupts requested is 8 (the
		 * default), the number of CPUs is still checked and
		 * used as an upper bound.
		 */
		if ((nmsi == NXGE_MSIX_REQUEST_10G) &&
		    (ncpus < nmsi)) {
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): "
			    "reset to the number of CPUs"));
			nmsi = ncpus;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;

	default:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d",
		    nxge_msix_1g_intrs));
		if ((nxge_msix_1g_intrs == 0) ||
		    (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_1G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (1G): reset to 2"));
		} else {
			nmsi = nxge_msix_1g_intrs;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
	return (nmsi);
}

/* ARGSUSED */
static int
nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
    void *pr_val)
{
	int		err = 0;
	link_flowctrl_t	fl;

	switch (pr_num) {
	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (link_flowctrl_t))
			return (EINVAL);
		fl = LINK_FLOWCTRL_RX;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	default:
		err = ENOTSUP;
		break;
	}
	return (err);
}


/*
 * The following is a software workaround for the Neptune hardware's
 * interrupt bugs: the Neptune hardware may generate spurious interrupts
 * after an interrupt handler has been removed.
 */
#define	NXGE_PCI_PORT_LOGIC_OFFSET	0x98
#define	NXGE_PIM_RESET			(1ULL << 29)
#define	NXGE_GLU_RESET			(1ULL << 30)
#define	NXGE_NIU_RESET			(1ULL << 31)
#define	NXGE_PCI_RESET_ALL		(NXGE_PIM_RESET |	\
					NXGE_GLU_RESET |	\
					NXGE_NIU_RESET)

#define	NXGE_WAIT_QUITE_TIME		200000
#define	NXGE_WAIT_QUITE_RETRY		40
#define	NXGE_PCI_RESET_WAIT		1000000 /* one second */

static void
nxge_niu_peu_reset(p_nxge_t nxgep)
{
	uint32_t	rvalue;
	p_nxge_hw_list_t hw_p;
	p_nxge_t	fnxgep;
	int		i, j;

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: NULL hardware pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
	    hw_p->flags, nxgep->nxge_link_poll_timerid,
	    nxgep->nxge_timerid));

	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
	/*
	 * Make sure the other instances on the same hardware have
	 * stopped sending PIOs and are in a quiescent state.
	 */
	for (i = 0; i < NXGE_MAX_PORTS; i++) {
		fnxgep = hw_p->nxge_p[i];
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: checking entry %d "
		    "nxgep $%p", i, fnxgep));
#ifdef NXGE_DEBUG
		if (fnxgep) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: entry %d (function %d) "
			    "link timer id %d hw timer id %d",
			    i, fnxgep->function_num,
			    fnxgep->nxge_link_poll_timerid,
			    fnxgep->nxge_timerid));
		}
#endif
		if (fnxgep && fnxgep != nxgep &&
		    (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: checking $%p "
			    "(function %d) timer ids",
			    fnxgep, fnxgep->function_num));
			for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
				NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
				    "==> nxge_niu_peu_reset: waiting"));
				NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
				if (!fnxgep->nxge_timerid &&
				    !fnxgep->nxge_link_poll_timerid) {
					break;
				}
			}
			NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
			if (fnxgep->nxge_timerid ||
			    fnxgep->nxge_link_poll_timerid) {
				MUTEX_EXIT(&hw_p->nxge_cfg_lock);
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "<== nxge_niu_peu_reset: cannot reset "
				    "hardware (devices are still in use)"));
				return;
			}
		}
	}

	if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
		hw_p->flags |= COMMON_RESET_NIU_PCI;
		rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: read offset 0x%x (%d) "
		    "(data 0x%x)",
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    rvalue));

		rvalue |= NXGE_PCI_RESET_ALL;
		pci_config_put32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
		    rvalue));

		NXGE_DELAY(NXGE_PCI_RESET_WAIT);
	}

	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
}

static void
nxge_set_pci_replay_timeout(p_nxge_t nxgep)
{
	p_dev_regs_t	dev_regs;
	uint32_t	value;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));

	if (!nxge_set_replay_timer) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: will not change "
		    "the timeout"));
		return;
	}

	dev_regs = nxgep->dev_regs;

	/*
	 * Check for a valid PCI config handle before it is
	 * dereferenced in the debug message below.
	 */
	if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
		    "no PCI handle",
		    dev_regs));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
	    dev_regs, dev_regs->nxge_pciregh));

	value = (pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
	    (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value before set "
	    "0x%x (timeout value to set 0x%x at offset 0x%x) value 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));

	pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
	    value);
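	/*
	 * Note that the update above ORs the new timeout into the
	 * field at PCI_REPLAY_TIMEOUT_SHIFT without first clearing it.
	 * A hedged sketch of a masked read-modify-write, should a
	 * future revision need one (0x1f assumes a 5-bit field, i.e.
	 * bits 18:14):
	 *
	 *	value &= ~(0x1f << PCI_REPLAY_TIMEOUT_SHIFT);
	 *	value |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
	 */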
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET)));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled.  Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
nxge_quiesce(dev_info_t *dip)
{
	int		instance = ddi_get_instance(dip);
	p_nxge_t	nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list,
	    instance);

	if (nxgep == NULL)
		return (DDI_FAILURE);

	/* Turn off debugging */
	nxge_debug_level = NO_DEBUG;
	nxgep->nxge_debug_level = NO_DEBUG;
	npi_debug_level = NO_DEBUG;

	/*
	 * Stop the link monitor only when the link check mode is
	 * interrupt based.
	 */
	if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	}

	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	return (DDI_SUCCESS);
}