/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_hio.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */

/*
 * PSARC/2007/453 MSI-X interrupt limit override
 */
uint32_t nxge_msi_enable = 2;

/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed. If this flag is enabled, the
 * driver will reset the hardware when devices are being detached.
 */
uint32_t nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware checksum bugs that affect
 * packet transmission and receive:
 *
 * Usage of nxge_cksum_offload:
 *
 *  (1) nxge_cksum_offload = 0 (default):
 *	- transmitted packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: driver will compute the software checksum
 *	       based on the partial checksum computed
 *	       by the IP layer.
 *	- received packets:
 *	  TCP: marks packet checksum flags based on hardware result.
 *	  UDP: will not mark checksum flags.
 *
 *  (2) nxge_cksum_offload = 1:
 *	- transmitted packets:
 *	  TCP/UDP: uses the hardware checksum feature.
 *	- received packets:
 *	  TCP/UDP: marks packet checksum flags based on hardware result.
 *
 *  (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksums for both TCP and UDP will be computed
 *	  by the stack.
 *	- Software LSO is not allowed in this case.
 *
 *  (4) nxge_cksum_offload > 2:
 *	- Will be treated as if it were set to 2
 *	  (stack will compute the checksum).
 *
 *  (5) If the hardware bug is fixed, this workaround
 *	needs to be updated accordingly to reflect
 *	the new hardware revision.
 */
uint32_t nxge_cksum_offload = 0;
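/*
 * Illustrative only: like the other globals in this file, the checksum
 * workaround can be tuned from /etc/system with the standard
 * "set module:variable = value" syntax. The value below is an example
 * (force software checksumming), not a recommendation:
 *
 *	set nxge:nxge_cksum_offload = 2
 */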
/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint16_t nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/* Maximum LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;


/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_buf_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if defined(__sparc)
uint32_t nxge_use_kmem_alloc = 1;
#elif defined(__i386)
uint32_t nxge_use_kmem_alloc = 0;
#else
uint32_t nxge_use_kmem_alloc = 1;
#endif

rtrace_t npi_rtracebuf;

/*
 * The hardware sometimes fails to allow enough time for the link
 * partner to send an acknowledgement for packets that the hardware
 * sent to it. In those instances the hardware resends the packets
 * earlier than it should. This behavior caused some switches to
 * acknowledge the wrong packets and triggered a fatal error.
 * This software workaround sets the replay timer to a value
 * suggested by the hardware team.
 *
 * PCI config space replay timer register:
 * the replay timeout value below (0xc) is written to
 * bits 14:18 of the register at offset 0xb8.
 */
#define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
#define	PCI_REPLAY_TIMEOUT_SHIFT	14

uint32_t nxge_set_replay_timer = 1;
uint32_t nxge_replay_timeout = 0xc;
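/*
 * Minimal sketch of the update the workaround performs; the real logic
 * lives in nxge_set_pci_replay_timeout(). It assumes the timeout field
 * is the 5-bit field at bits 14:18 of the 32-bit config word at offset
 * 0xb8 ("handle" stands for the instance's PCI config access handle):
 *
 *	uint32_t val;
 *
 *	val = pci_config_get32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	val &= ~(0x1f << PCI_REPLAY_TIMEOUT_SHIFT);
 *	val |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, val);
 */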
/*
 * The transmit serialization sometimes causes the driver to sleep
 * longer than it should before calling the transmit function. The
 * performance group suggested a tunable that caps the maximum wait
 * time when needed; the default is 1 tick.
 */
uint32_t nxge_tx_serial_maxsleep = 1;

#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
/*
 * The following is the default API supported:
 * major 1 and minor 1.
 *
 * Please update MAX_NIU_MAJORS, MAX_NIU_MINORS, and the minor number
 * supported when newer Hypervisor API interfaces are added. Also,
 * please update nxge_hsvc_register() if needed.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);
static int nxge_quiesce(dev_info_t *);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_remove(void *arg, int slot);
extern void nxge_grp_cleanup(p_nxge_t nxge);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef	NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
    char *, caddr_t);
#if defined(sun4v)
extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep);
extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm);
#endif
/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	int slot, int rdctbl, boolean_t usetbl);
void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
	boolean_t factory);

static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *, uint_t *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
    const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
    void *, uint_t *);
static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);
static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
    mac_ring_info_t *, mac_ring_handle_t);
static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);
static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);

static void nxge_niu_peu_reset(p_nxge_t nxgep);
static void nxge_set_pci_replay_timeout(nxge_t *);

mac_priv_prop_t nxge_priv_props[] = {
	{"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW},
	{"_function_number", MAC_PROP_PERM_READ},
	{"_fw_version", MAC_PROP_PERM_READ},
	{"_port_mode", MAC_PROP_PERM_READ},
	{"_hot_swap_phy", MAC_PROP_PERM_READ},
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
	{"_soft_lso_enable", MAC_PROP_PERM_RW}
};

#define	NXGE_MAX_PRIV_PROPS	\
	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	NULL,
	NULL,
	nxge_m_ioctl,
	nxge_m_getcapab,
	NULL,
	NULL,
	nxge_m_setprop,
	nxge_m_getprop
};

void
nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);
/*
 * For applications that care about latency, PAE and customers
 * requested that the driver provide tunables that allow the user to
 * raise the number of interrupts so they can be spread among multiple
 * channels. The DDI framework limits the maximum number of MSI-X
 * resources to allocate to 8 (ddi_msix_alloc_limit). If more than 8
 * interrupts are requested, ddi_msix_alloc_limit must be raised
 * accordingly. The default number of MSI interrupts is 8 for a 10G
 * link and 2 for a 1G link.
 */
#define	NXGE_MSIX_MAX_ALLOWED	32
uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
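/*
 * Illustrative /etc/system settings (values are examples only): to
 * request 16 MSI-X interrupts on a 10G port, the DDI limit has to be
 * raised along with the driver tunable, as described above:
 *
 *	set ddi_msix_alloc_limit = 16
 *	set nxge:nxge_msix_10g_intrs = 16
 */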
/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;
void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;
nxge_os_mutex_t nxgedebuglock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t, ddi_device_acc_attr_t *,
	ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};
ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes [] = {0x2000};
#else
size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
		0x10000, 0x20000, 0x40000, 0x80000,
		0x100000, 0x200000, 0x400000, 0x800000,
		0x1000000, 0x2000000, 0x4000000};
#endif
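/*
 * Minimal sketch (not driver code) of the intent stated above: pick
 * the largest alloc_sizes[] entry that still fits the request, so
 * fewer chunks have to be tracked. "remaining" is a hypothetical
 * byte count still needed by the caller:
 *
 *	for (i = (sizeof (alloc_sizes) / sizeof (size_t)) - 1;
 *	    i >= 0; i--) {
 *		if (alloc_sizes[i] <= remaining)
 *			break;
 *	}
 */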
/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);

static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}


	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_rx_dma_attr);

	/*
	 * Create & initialize the per-Neptune data structure
	 * (even if we're a guest).
	 */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	/*
	 * Software workaround: set the replay timer.
	 */
	if (nxgep->niu_type != N2_NIU) {
		nxge_set_pci_replay_timeout(nxgep);
	}

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail4;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
		 * internally, and the remaining 2 ports use BMAC (1G "Big"
		 * MAC). The two types of MACs have different characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);

		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * The address_hi, that is, the first int in the "reg"
		 * property, contains the config handle; bits 28-31 are
		 * OBP-specific info and need to be removed.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
		ddi_prop_free(regp);
	}
739 */ 740 nxge_hw_id_init(nxgep); 741 742 if (isLDOMguest(nxgep)) { 743 uchar_t *prop_val; 744 uint_t prop_len; 745 uint32_t max_frame_size; 746 747 extern void nxge_get_logical_props(p_nxge_t); 748 749 nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR; 750 nxgep->mac.portmode = PORT_LOGICAL; 751 (void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip, 752 "phy-type", "virtual transceiver"); 753 754 nxgep->nports = 1; 755 nxgep->board_ver = 0; /* XXX What? */ 756 757 /* 758 * local-mac-address property gives us info on which 759 * specific MAC address the Hybrid resource is associated 760 * with. 761 */ 762 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0, 763 "local-mac-address", &prop_val, 764 &prop_len) != DDI_PROP_SUCCESS) { 765 goto nxge_attach_fail5; 766 } 767 if (prop_len != ETHERADDRL) { 768 ddi_prop_free(prop_val); 769 goto nxge_attach_fail5; 770 } 771 ether_copy(prop_val, nxgep->hio_mac_addr); 772 ddi_prop_free(prop_val); 773 nxge_get_logical_props(nxgep); 774 775 /* 776 * Enable Jumbo property based on the "max-frame-size" 777 * property value. 778 */ 779 max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY, 780 nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 781 "max-frame-size", NXGE_MTU_DEFAULT_MAX); 782 if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) && 783 (max_frame_size <= TX_JUMBO_MTU)) { 784 nxgep->mac.is_jumbo = B_TRUE; 785 nxgep->mac.maxframesize = (uint16_t)max_frame_size; 786 nxgep->mac.default_mtu = nxgep->mac.maxframesize - 787 NXGE_EHEADER_VLAN_CRC; 788 } 789 } else { 790 status = nxge_xcvr_find(nxgep); 791 792 if (status != NXGE_OK) { 793 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: " 794 " Couldn't determine card type" 795 " .... exit ")); 796 goto nxge_attach_fail5; 797 } 798 799 status = nxge_get_config_properties(nxgep); 800 801 if (status != NXGE_OK) { 802 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 803 "get_hw create failed")); 804 goto nxge_attach_fail; 805 } 806 } 807 808 /* 809 * Setup the Kstats for the driver. 810 */ 811 nxge_setup_kstats(nxgep); 812 813 if (!isLDOMguest(nxgep)) 814 nxge_setup_param(nxgep); 815 816 status = nxge_setup_system_dma_pages(nxgep); 817 if (status != NXGE_OK) { 818 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed")); 819 goto nxge_attach_fail; 820 } 821 822 823 if (!isLDOMguest(nxgep)) 824 nxge_hw_init_niu_common(nxgep); 825 826 status = nxge_setup_mutexes(nxgep); 827 if (status != NXGE_OK) { 828 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed")); 829 goto nxge_attach_fail; 830 } 831 832 #if defined(sun4v) 833 if (isLDOMguest(nxgep)) { 834 /* Find our VR & channel sets. */ 835 status = nxge_hio_vr_add(nxgep); 836 if (status != DDI_SUCCESS) { 837 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 838 "nxge_hio_vr_add failed")); 839 (void) hsvc_unregister(&nxgep->niu_hsvc); 840 nxgep->niu_hsvc_available = B_FALSE; 841 goto nxge_attach_fail; 842 } 843 goto nxge_attach_exit; 844 } 845 #endif 846 847 status = nxge_setup_dev(nxgep); 848 if (status != DDI_SUCCESS) { 849 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed")); 850 goto nxge_attach_fail; 851 } 852 853 status = nxge_add_intrs(nxgep); 854 if (status != DDI_SUCCESS) { 855 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed")); 856 goto nxge_attach_fail; 857 } 858 859 /* If a guest, register with vio_net instead. 
	/* If a guest, register with vio_net instead. */
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	/* nxge_link_monitor calls xcvr.check_link recursively */
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int status = DDI_SUCCESS;
	int instance;
	p_nxge_t nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}
	/*
	 * If this flag is set, it will affect the Neptune
	 * hardware only.
	 */
	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
		nxge_niu_peu_reset(nxgep);
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
int
nxge_hsvc_register(nxge_t *nxgep)
{
	nxge_status_t status;
	int i, j;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hsvc_register"));
	if (nxgep->niu_type != N2_NIU) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hsvc_register"));
		return (DDI_SUCCESS);
	}

	/*
	 * Currently, the NIU Hypervisor API supports two major versions:
	 * version 1 and version 2.
	 * If the Hypervisor introduces a higher major or minor version,
	 * please update NIU_MAJOR_HI and NIU_MINOR_HI accordingly.
	 */
	nxgep->niu_hsvc_available = B_FALSE;
	bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));

	for (i = NIU_MAJOR_HI; i > 0; i--) {
		nxgep->niu_hsvc.hsvc_major = i;
		for (j = NIU_MINOR_HI; j >= 0; j--) {
			nxgep->niu_hsvc.hsvc_minor = j;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_hsvc_register: %s: negotiating "
			    "hypervisor services revision %d "
			    "group: 0x%lx major: 0x%lx "
			    "minor: 0x%lx",
			    nxgep->niu_hsvc.hsvc_modname,
			    nxgep->niu_hsvc.hsvc_rev,
			    nxgep->niu_hsvc.hsvc_group,
			    nxgep->niu_hsvc.hsvc_major,
			    nxgep->niu_hsvc.hsvc_minor,
			    nxgep->niu_min_ver));

			if ((status = hsvc_register(&nxgep->niu_hsvc,
			    &nxgep->niu_min_ver)) == 0) {
				/* Use the supported minor */
				nxgep->niu_hsvc.hsvc_minor =
				    nxgep->niu_min_ver;
				NXGE_DEBUG_MSG((nxgep, DDI_CTL,
				    "nxge_hsvc_register: %s: negotiated "
				    "hypervisor services revision %d "
				    "group: 0x%lx major: 0x%lx "
				    "minor: 0x%lx (niu_min_ver 0x%lx)",
				    nxgep->niu_hsvc.hsvc_modname,
				    nxgep->niu_hsvc.hsvc_rev,
				    nxgep->niu_hsvc.hsvc_group,
				    nxgep->niu_hsvc.hsvc_major,
				    nxgep->niu_hsvc.hsvc_minor,
				    nxgep->niu_min_ver));

				nxgep->niu_hsvc_available = B_TRUE;
				NXGE_DEBUG_MSG((nxgep, DDI_CTL,
				    "<== nxge_hsvc_register: "
				    "NIU Hypervisor service enabled"));
				return (DDI_SUCCESS);
			}

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_hsvc_register: %s: negotiation failed - "
			    "trying a lower major number: "
			    "hypervisor services revision %d "
			    "group: 0x%lx major: 0x%lx minor: 0x%lx "
			    "errno: %d",
			    nxgep->niu_hsvc.hsvc_modname,
			    nxgep->niu_hsvc.hsvc_rev,
			    nxgep->niu_hsvc.hsvc_group,
			    nxgep->niu_hsvc.hsvc_major,
			    nxgep->niu_hsvc.hsvc_minor, status));
		}
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "nxge_hsvc_register: %s: cannot negotiate "
	    "hypervisor services revision %d group: 0x%lx "
	    "major: 0x%lx minor: 0x%lx errno: %d",
	    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
	    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
	    niu_hsvc.hsvc_minor, status));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_hsvc_register: Register to NIU Hypervisor failed"));

	return (DDI_FAILURE);
}
#endif
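/*
 * Worked example of the negotiation order above, with hypothetical
 * limits NIU_MAJOR_HI == 2 and NIU_MINOR_HI == 1: the nested loops
 * try the (major, minor) pairs (2,1), (2,0), (1,1), (1,0) in that
 * order, and the first hsvc_register() call that returns 0 wins;
 * hsvc_register() reports the minor the hypervisor actually supports
 * through niu_min_ver.
 */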
static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	p_dev_regs_t dev_regs;
	char buf[MAXPATHLEN + 1];
	char *devname;
#ifdef	NXGE_DEBUG
	char *sysname;
#endif
	off_t regsize;
	nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	/*
	 * The driver is running on an N2-NIU system if devname is
	 * something like "/niu@80/network@0".
	 */
	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/*
		 * Get the function number:
		 * - N2/NIU: "/niu@80/network@0" and "/niu@80/network@1"
		 */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int *prop_val;
		uint_t prop_len;
		uint8_t func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				return (NXGE_OK);
			}
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMP IMP
		 * Workaround for a bit-swapping bug in the hardware
		 * which ends up in no-snoop = yes,
		 * resulting in DMA not being synched properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = pci_config_get16(dev_regs->nxge_pciregh,
		    pci_offset);
		pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));

	if (isLDOMguest(nxgep)) {
		nxge_guest_regs_map_free(nxgep);
		return;
	}

	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->interrupt_cookie = 0;
	} else {
		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
		    &nxgep->interrupt_cookie);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_setup_mutexes: failed 0x%x",
			    ddi_status));
			goto nxge_setup_mutexes_exit;
		}
	}

	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);

	/*
	 * Initialize mutexes for this device.
	 */
1530 */ 1531 MUTEX_INIT(nxgep->genlock, NULL, 1532 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1533 MUTEX_INIT(&nxgep->ouraddr_lock, NULL, 1534 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1535 MUTEX_INIT(&nxgep->mif_lock, NULL, 1536 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1537 MUTEX_INIT(&nxgep->group_lock, NULL, 1538 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1539 RW_INIT(&nxgep->filter_lock, NULL, 1540 RW_DRIVER, (void *)nxgep->interrupt_cookie); 1541 1542 classify_ptr = &nxgep->classifier; 1543 /* 1544 * FFLP Mutexes are never used in interrupt context 1545 * as fflp operation can take very long time to 1546 * complete and hence not suitable to invoke from interrupt 1547 * handlers. 1548 */ 1549 MUTEX_INIT(&classify_ptr->tcam_lock, NULL, 1550 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1551 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1552 MUTEX_INIT(&classify_ptr->fcram_lock, NULL, 1553 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1554 for (partition = 0; partition < MAX_PARTITION; partition++) { 1555 MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL, 1556 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1557 } 1558 } 1559 1560 nxge_setup_mutexes_exit: 1561 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1562 "<== nxge_setup_mutexes status = %x", status)); 1563 1564 if (ddi_status != DDI_SUCCESS) 1565 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1566 1567 return (status); 1568 } 1569 1570 static void 1571 nxge_destroy_mutexes(p_nxge_t nxgep) 1572 { 1573 int partition; 1574 nxge_classify_t *classify_ptr; 1575 1576 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes")); 1577 RW_DESTROY(&nxgep->filter_lock); 1578 MUTEX_DESTROY(&nxgep->group_lock); 1579 MUTEX_DESTROY(&nxgep->mif_lock); 1580 MUTEX_DESTROY(&nxgep->ouraddr_lock); 1581 MUTEX_DESTROY(nxgep->genlock); 1582 1583 classify_ptr = &nxgep->classifier; 1584 MUTEX_DESTROY(&classify_ptr->tcam_lock); 1585 1586 /* Destroy all polling resources. */ 1587 MUTEX_DESTROY(&nxgep->poll_lock); 1588 cv_destroy(&nxgep->poll_cv); 1589 1590 /* free data structures, based on HW type */ 1591 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1592 MUTEX_DESTROY(&classify_ptr->fcram_lock); 1593 for (partition = 0; partition < MAX_PARTITION; partition++) { 1594 MUTEX_DESTROY(&classify_ptr->hash_lock[partition]); 1595 } 1596 } 1597 1598 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes")); 1599 } 1600 1601 nxge_status_t 1602 nxge_init(p_nxge_t nxgep) 1603 { 1604 nxge_status_t status = NXGE_OK; 1605 1606 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init")); 1607 1608 if (nxgep->drv_state & STATE_HW_INITIALIZED) { 1609 return (status); 1610 } 1611 1612 /* 1613 * Allocate system memory for the receive/transmit buffer blocks 1614 * and receive/transmit descriptor rings. 1615 */ 1616 status = nxge_alloc_mem_pool(nxgep); 1617 if (status != NXGE_OK) { 1618 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n")); 1619 goto nxge_init_fail1; 1620 } 1621 1622 if (!isLDOMguest(nxgep)) { 1623 /* 1624 * Initialize and enable the TXC registers. 1625 * (Globally enable the Tx controller, 1626 * enable the port, configure the dma channel bitmap, 1627 * configure the max burst size). 1628 */ 1629 status = nxge_txc_init(nxgep); 1630 if (status != NXGE_OK) { 1631 NXGE_ERROR_MSG((nxgep, 1632 NXGE_ERR_CTL, "init txc failed\n")); 1633 goto nxge_init_fail2; 1634 } 1635 } 1636 1637 /* 1638 * Initialize and enable TXDMA channels. 
1639 */ 1640 status = nxge_init_txdma_channels(nxgep); 1641 if (status != NXGE_OK) { 1642 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n")); 1643 goto nxge_init_fail3; 1644 } 1645 1646 /* 1647 * Initialize and enable RXDMA channels. 1648 */ 1649 status = nxge_init_rxdma_channels(nxgep); 1650 if (status != NXGE_OK) { 1651 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n")); 1652 goto nxge_init_fail4; 1653 } 1654 1655 /* 1656 * The guest domain is now done. 1657 */ 1658 if (isLDOMguest(nxgep)) { 1659 nxgep->drv_state |= STATE_HW_INITIALIZED; 1660 goto nxge_init_exit; 1661 } 1662 1663 /* 1664 * Initialize TCAM and FCRAM (Neptune). 1665 */ 1666 status = nxge_classify_init(nxgep); 1667 if (status != NXGE_OK) { 1668 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n")); 1669 goto nxge_init_fail5; 1670 } 1671 1672 /* 1673 * Initialize ZCP 1674 */ 1675 status = nxge_zcp_init(nxgep); 1676 if (status != NXGE_OK) { 1677 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n")); 1678 goto nxge_init_fail5; 1679 } 1680 1681 /* 1682 * Initialize IPP. 1683 */ 1684 status = nxge_ipp_init(nxgep); 1685 if (status != NXGE_OK) { 1686 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n")); 1687 goto nxge_init_fail5; 1688 } 1689 1690 /* 1691 * Initialize the MAC block. 1692 */ 1693 status = nxge_mac_init(nxgep); 1694 if (status != NXGE_OK) { 1695 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n")); 1696 goto nxge_init_fail5; 1697 } 1698 1699 /* 1700 * Enable the interrrupts for DDI. 1701 */ 1702 nxge_intrs_enable(nxgep); 1703 1704 nxgep->drv_state |= STATE_HW_INITIALIZED; 1705 1706 goto nxge_init_exit; 1707 1708 nxge_init_fail5: 1709 nxge_uninit_rxdma_channels(nxgep); 1710 nxge_init_fail4: 1711 nxge_uninit_txdma_channels(nxgep); 1712 nxge_init_fail3: 1713 if (!isLDOMguest(nxgep)) { 1714 (void) nxge_txc_uninit(nxgep); 1715 } 1716 nxge_init_fail2: 1717 nxge_free_mem_pool(nxgep); 1718 nxge_init_fail1: 1719 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1720 "<== nxge_init status (failed) = 0x%08x", status)); 1721 return (status); 1722 1723 nxge_init_exit: 1724 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x", 1725 status)); 1726 return (status); 1727 } 1728 1729 1730 timeout_id_t 1731 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec) 1732 { 1733 if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) { 1734 return (timeout(func, (caddr_t)nxgep, 1735 drv_usectohz(1000 * msec))); 1736 } 1737 return (NULL); 1738 } 1739 1740 /*ARGSUSED*/ 1741 void 1742 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid) 1743 { 1744 if (timerid) { 1745 (void) untimeout(timerid); 1746 } 1747 } 1748 1749 void 1750 nxge_uninit(p_nxge_t nxgep) 1751 { 1752 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit")); 1753 1754 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1755 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1756 "==> nxge_uninit: not initialized")); 1757 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1758 "<== nxge_uninit")); 1759 return; 1760 } 1761 1762 if (!isLDOMguest(nxgep)) { 1763 /* 1764 * Reset the receive MAC side. 1765 */ 1766 (void) nxge_rx_mac_disable(nxgep); 1767 1768 /* 1769 * Drain the IPP. 
1770 */ 1771 (void) nxge_ipp_drain(nxgep); 1772 } 1773 1774 /* stop timer */ 1775 if (nxgep->nxge_timerid) { 1776 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 1777 nxgep->nxge_timerid = 0; 1778 } 1779 1780 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1781 (void) nxge_intr_hw_disable(nxgep); 1782 1783 1784 /* Disable and soft reset the IPP */ 1785 if (!isLDOMguest(nxgep)) 1786 (void) nxge_ipp_disable(nxgep); 1787 1788 /* Free classification resources */ 1789 (void) nxge_classify_uninit(nxgep); 1790 1791 /* 1792 * Reset the transmit/receive DMA side. 1793 */ 1794 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 1795 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1796 1797 nxge_uninit_txdma_channels(nxgep); 1798 nxge_uninit_rxdma_channels(nxgep); 1799 1800 /* 1801 * Reset the transmit MAC side. 1802 */ 1803 (void) nxge_tx_mac_disable(nxgep); 1804 1805 nxge_free_mem_pool(nxgep); 1806 1807 /* 1808 * Start the timer if the reset flag is not set. 1809 * If this reset flag is set, the link monitor 1810 * will not be started in order to stop furthur bus 1811 * activities coming from this interface. 1812 * The driver will start the monitor function 1813 * if the interface was initialized again later. 1814 */ 1815 if (!nxge_peu_reset_enable) { 1816 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1817 } 1818 1819 nxgep->drv_state &= ~STATE_HW_INITIALIZED; 1820 1821 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: " 1822 "nxge_mblks_pending %d", nxge_mblks_pending)); 1823 } 1824 1825 void 1826 nxge_get64(p_nxge_t nxgep, p_mblk_t mp) 1827 { 1828 uint64_t reg; 1829 uint64_t regdata; 1830 int i, retry; 1831 1832 bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t)); 1833 regdata = 0; 1834 retry = 1; 1835 1836 for (i = 0; i < retry; i++) { 1837 NXGE_REG_RD64(nxgep->npi_handle, reg, ®data); 1838 } 1839 bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t)); 1840 } 1841 1842 void 1843 nxge_put64(p_nxge_t nxgep, p_mblk_t mp) 1844 { 1845 uint64_t reg; 1846 uint64_t buf[2]; 1847 1848 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t)); 1849 reg = buf[0]; 1850 1851 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]); 1852 } 1853 1854 /*ARGSUSED*/ 1855 /*VARARGS*/ 1856 void 1857 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...) 1858 { 1859 char msg_buffer[1048]; 1860 char prefix_buffer[32]; 1861 int instance; 1862 uint64_t debug_level; 1863 int cmn_level = CE_CONT; 1864 va_list ap; 1865 1866 if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) { 1867 /* In case a developer has changed nxge_debug_level. */ 1868 if (nxgep->nxge_debug_level != nxge_debug_level) 1869 nxgep->nxge_debug_level = nxge_debug_level; 1870 } 1871 1872 debug_level = (nxgep == NULL) ? 
	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);
	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
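/*
 * Example usage (illustrative): dump the first 60 bytes of a received
 * mblk as colon-separated hex. nxge_dump_packet() returns a pointer
 * to a static buffer, so the result must be consumed before the next
 * call:
 *
 *	NXGE_DEBUG_MSG((nxgep, RX_CTL, "pkt: %s",
 *	    nxge_dump_packet((char *)mp->b_rptr, 60)));
 */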
NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); 1997 1998 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1999 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 2000 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2001 "first 0x%llx second 0x%llx third 0x%llx " 2002 "last 0x%llx ", 2003 NXGE_PIO_READ64(dev_handle, 2004 (uint64_t *)(dev_ptr + 0), 0), 2005 NXGE_PIO_READ64(dev_handle, 2006 (uint64_t *)(dev_ptr + 8), 0), 2007 NXGE_PIO_READ64(dev_handle, 2008 (uint64_t *)(dev_ptr + 16), 0), 2009 NXGE_PIO_READ64(dev_handle, 2010 (uint64_t *)(dev_ptr + 24), 0))); 2011 } 2012 } 2013 2014 #endif 2015 2016 static void 2017 nxge_suspend(p_nxge_t nxgep) 2018 { 2019 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); 2020 2021 nxge_intrs_disable(nxgep); 2022 nxge_destroy_dev(nxgep); 2023 2024 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); 2025 } 2026 2027 static nxge_status_t 2028 nxge_resume(p_nxge_t nxgep) 2029 { 2030 nxge_status_t status = NXGE_OK; 2031 2032 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); 2033 2034 nxgep->suspended = DDI_RESUME; 2035 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 2036 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 2037 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 2038 (void) nxge_rx_mac_enable(nxgep); 2039 (void) nxge_tx_mac_enable(nxgep); 2040 nxge_intrs_enable(nxgep); 2041 nxgep->suspended = 0; 2042 2043 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2044 "<== nxge_resume status = 0x%x", status)); 2045 return (status); 2046 } 2047 2048 static nxge_status_t 2049 nxge_setup_dev(p_nxge_t nxgep) 2050 { 2051 nxge_status_t status = NXGE_OK; 2052 2053 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 2054 nxgep->mac.portnum)); 2055 2056 status = nxge_link_init(nxgep); 2057 2058 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 2059 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2060 "port%d Bad register acc handle", nxgep->mac.portnum)); 2061 status = NXGE_ERROR; 2062 } 2063 2064 if (status != NXGE_OK) { 2065 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2066 " nxge_setup_dev status " 2067 "(xcvr init 0x%08x)", status)); 2068 goto nxge_setup_dev_exit; 2069 } 2070 2071 nxge_setup_dev_exit: 2072 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2073 "<== nxge_setup_dev port %d status = 0x%08x", 2074 nxgep->mac.portnum, status)); 2075 2076 return (status); 2077 } 2078 2079 static void 2080 nxge_destroy_dev(p_nxge_t nxgep) 2081 { 2082 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 2083 2084 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 2085 2086 (void) nxge_hw_stop(nxgep); 2087 2088 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 2089 } 2090 2091 static nxge_status_t 2092 nxge_setup_system_dma_pages(p_nxge_t nxgep) 2093 { 2094 int ddi_status = DDI_SUCCESS; 2095 uint_t count; 2096 ddi_dma_cookie_t cookie; 2097 uint_t iommu_pagesize; 2098 nxge_status_t status = NXGE_OK; 2099 2100 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 2101 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 2102 if (nxgep->niu_type != N2_NIU) { 2103 iommu_pagesize = dvma_pagesize(nxgep->dip); 2104 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2105 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2106 " default_block_size %d iommu_pagesize %d", 2107 nxgep->sys_page_sz, 2108 ddi_ptob(nxgep->dip, (ulong_t)1), 2109 nxgep->rx_default_block_size, 2110 iommu_pagesize)); 2111 2112 if (iommu_pagesize != 0) { 2113 if (nxgep->sys_page_sz == iommu_pagesize) { 2114 if (iommu_pagesize > 0x4000) 2115 nxgep->sys_page_sz = 0x4000; 2116 } else { 2117 if (nxgep->sys_page_sz > 
iommu_pagesize) 2118 nxgep->sys_page_sz = iommu_pagesize; 2119 } 2120 } 2121 } 2122 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2123 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2124 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2125 "default_block_size %d page mask %d", 2126 nxgep->sys_page_sz, 2127 ddi_ptob(nxgep->dip, (ulong_t)1), 2128 nxgep->rx_default_block_size, 2129 nxgep->sys_page_mask)); 2130 2131 2132 switch (nxgep->sys_page_sz) { 2133 default: 2134 nxgep->sys_page_sz = 0x1000; 2135 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2136 nxgep->rx_default_block_size = 0x1000; 2137 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2138 break; 2139 case 0x1000: 2140 nxgep->rx_default_block_size = 0x1000; 2141 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2142 break; 2143 case 0x2000: 2144 nxgep->rx_default_block_size = 0x2000; 2145 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2146 break; 2147 case 0x4000: 2148 nxgep->rx_default_block_size = 0x4000; 2149 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 2150 break; 2151 case 0x8000: 2152 nxgep->rx_default_block_size = 0x8000; 2153 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 2154 break; 2155 } 2156 2157 #ifndef USE_RX_BIG_BUF 2158 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 2159 #else 2160 nxgep->rx_default_block_size = 0x2000; 2161 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2162 #endif 2163 /* 2164 * Get the system DMA burst size. 2165 */ 2166 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2167 DDI_DMA_DONTWAIT, 0, 2168 &nxgep->dmasparehandle); 2169 if (ddi_status != DDI_SUCCESS) { 2170 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2171 "ddi_dma_alloc_handle: failed " 2172 " status 0x%x", ddi_status)); 2173 goto nxge_get_soft_properties_exit; 2174 } 2175 2176 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2177 (caddr_t)nxgep->dmasparehandle, 2178 sizeof (nxgep->dmasparehandle), 2179 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2180 DDI_DMA_DONTWAIT, 0, 2181 &cookie, &count); 2182 if (ddi_status != DDI_DMA_MAPPED) { 2183 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2184 "Binding spare handle to find system" 2185 " burstsize failed.")); 2186 ddi_status = DDI_FAILURE; 2187 goto nxge_get_soft_properties_fail1; 2188 } 2189 2190 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2191 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2192 2193 nxge_get_soft_properties_fail1: 2194 ddi_dma_free_handle(&nxgep->dmasparehandle); 2195 2196 nxge_get_soft_properties_exit: 2197 2198 if (ddi_status != DDI_SUCCESS) 2199 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2200 2201 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2202 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2203 return (status); 2204 } 2205 2206 static nxge_status_t 2207 nxge_alloc_mem_pool(p_nxge_t nxgep) 2208 { 2209 nxge_status_t status = NXGE_OK; 2210 2211 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2212 2213 status = nxge_alloc_rx_mem_pool(nxgep); 2214 if (status != NXGE_OK) { 2215 return (NXGE_ERROR); 2216 } 2217 2218 status = nxge_alloc_tx_mem_pool(nxgep); 2219 if (status != NXGE_OK) { 2220 nxge_free_rx_mem_pool(nxgep); 2221 return (NXGE_ERROR); 2222 } 2223 2224 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2225 return (NXGE_OK); 2226 } 2227 2228 static void 2229 nxge_free_mem_pool(p_nxge_t nxgep) 2230 { 2231 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2232 2233 nxge_free_rx_mem_pool(nxgep); 2234 nxge_free_tx_mem_pool(nxgep); 2235 2236 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 2237 } 2238 2239 nxge_status_t 2240 
nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 2241 { 2242 uint32_t rdc_max; 2243 p_nxge_dma_pt_cfg_t p_all_cfgp; 2244 p_nxge_hw_pt_cfg_t p_cfgp; 2245 p_nxge_dma_pool_t dma_poolp; 2246 p_nxge_dma_common_t *dma_buf_p; 2247 p_nxge_dma_pool_t dma_cntl_poolp; 2248 p_nxge_dma_common_t *dma_cntl_p; 2249 uint32_t *num_chunks; /* per dma */ 2250 nxge_status_t status = NXGE_OK; 2251 2252 uint32_t nxge_port_rbr_size; 2253 uint32_t nxge_port_rbr_spare_size; 2254 uint32_t nxge_port_rcr_size; 2255 uint32_t rx_cntl_alloc_size; 2256 2257 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 2258 2259 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2260 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2261 rdc_max = NXGE_MAX_RDCS; 2262 2263 /* 2264 * Allocate memory for the common DMA data structures. 2265 */ 2266 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2267 KM_SLEEP); 2268 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2269 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2270 2271 dma_cntl_poolp = (p_nxge_dma_pool_t) 2272 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2273 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2274 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2275 2276 num_chunks = (uint32_t *)KMEM_ZALLOC( 2277 sizeof (uint32_t) * rdc_max, KM_SLEEP); 2278 2279 /* 2280 * Assume that each DMA channel will be configured with 2281 * the default block size. 2282 * RBR block counts are rounded up to a multiple of the batch count (16). 2283 */ 2284 nxge_port_rbr_size = p_all_cfgp->rbr_size; 2285 nxge_port_rcr_size = p_all_cfgp->rcr_size; 2286 2287 if (!nxge_port_rbr_size) { 2288 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 2289 } 2290 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 2291 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2292 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 2293 } 2294 2295 p_all_cfgp->rbr_size = nxge_port_rbr_size; 2296 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 2297 2298 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 2299 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2300 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 2301 } 2302 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 2303 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2304 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 2305 "set to default %d", 2306 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 2307 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 2308 } 2309 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 2310 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2311 "nxge_alloc_rx_mem_pool: RCR too high %d, " 2312 "set to default %d", 2313 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 2314 nxge_port_rcr_size = RCR_DEFAULT_MAX; 2315 } 2316 2317 /* 2318 * N2/NIU limits the descriptor sizes: data buffers must use 2319 * contiguous memory allocation (contig_mem_alloc, 4M maximum), 2320 * and control buffers must be little endian and use the ddi/dki 2321 * memory allocation functions. 2322 */ 2323 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2324 if (nxgep->niu_type == N2_NIU) { 2325 nxge_port_rbr_spare_size = 0; 2326 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2327 (!ISP2(nxge_port_rbr_size))) { 2328 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2329 } 2330 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2331 (!ISP2(nxge_port_rcr_size))) { 2332 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2333 } 2334 } 2335 #endif 2336 2337 /* 2338 * Addresses of the receive block ring, the receive completion ring, 2339 * and the mailbox must all be cache-aligned (64 bytes). 
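 *
 * Rough worked example (the entry sizes here are assumptions for
 * illustration, not values taken from the headers): with
 * rbr_size = 4096, spare = 0, rcr_size = 8192, and 8-byte
 * rx_desc_t and rcr_entry_t entries, the computation below
 * yields 4096 * 8 + 8192 * 8 + sizeof (rxdma_mailbox_t),
 * i.e. 96 KB plus the mailbox.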
2340 */ 2341 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2342 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2343 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2344 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2345 2346 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2347 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2348 "nxge_port_rcr_size = %d " 2349 "rx_cntl_alloc_size = %d", 2350 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2351 nxge_port_rcr_size, 2352 rx_cntl_alloc_size)); 2353 2354 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2355 if (nxgep->niu_type == N2_NIU) { 2356 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2357 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2358 2359 if (!ISP2(rx_buf_alloc_size)) { 2360 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2361 "==> nxge_alloc_rx_mem_pool: " 2362 " must be power of 2")); 2363 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2364 goto nxge_alloc_rx_mem_pool_exit; 2365 } 2366 2367 if (rx_buf_alloc_size > (1 << 22)) { 2368 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2369 "==> nxge_alloc_rx_mem_pool: " 2370 " limit size to 4M")); 2371 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2372 goto nxge_alloc_rx_mem_pool_exit; 2373 } 2374 2375 if (rx_cntl_alloc_size < 0x2000) { 2376 rx_cntl_alloc_size = 0x2000; 2377 } 2378 } 2379 #endif 2380 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2381 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2382 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2383 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2384 2385 dma_poolp->ndmas = p_cfgp->max_rdcs; 2386 dma_poolp->num_chunks = num_chunks; 2387 dma_poolp->buf_allocated = B_TRUE; 2388 nxgep->rx_buf_pool_p = dma_poolp; 2389 dma_poolp->dma_buf_pool_p = dma_buf_p; 2390 2391 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2392 dma_cntl_poolp->buf_allocated = B_TRUE; 2393 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2394 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2395 2396 /* Allocate the receive rings, too. */ 2397 nxgep->rx_rbr_rings = 2398 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2399 nxgep->rx_rbr_rings->rbr_rings = 2400 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2401 nxgep->rx_rcr_rings = 2402 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2403 nxgep->rx_rcr_rings->rcr_rings = 2404 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2405 nxgep->rx_mbox_areas_p = 2406 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2407 nxgep->rx_mbox_areas_p->rxmbox_areas = 2408 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2409 2410 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2411 p_cfgp->max_rdcs; 2412 2413 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2414 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2415 2416 nxge_alloc_rx_mem_pool_exit: 2417 return (status); 2418 } 2419 2420 /* 2421 * nxge_alloc_rxb 2422 * 2423 * Allocate buffers for an RDC. 2424 * 2425 * Arguments: 2426 * nxgep 2427 * channel The channel to map into our kernel space. 
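 *
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *	for (channel = 0; channel < nxgep->rx_buf_pool_p->ndmas; channel++)
 *		if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
 *			return (NXGE_ERROR);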
2428 * 2429 * Notes: 2430 * 2431 * NPI function calls: 2432 * 2433 * NXGE function calls: 2434 * 2435 * Registers accessed: 2436 * 2437 * Context: 2438 * 2439 * Taking apart: 2440 * 2441 * Open questions: 2442 * 2443 */ 2444 nxge_status_t 2445 nxge_alloc_rxb( 2446 p_nxge_t nxgep, 2447 int channel) 2448 { 2449 size_t rx_buf_alloc_size; 2450 nxge_status_t status = NXGE_OK; 2451 2452 nxge_dma_common_t **data; 2453 nxge_dma_common_t **control; 2454 uint32_t *num_chunks; 2455 2456 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb")); 2457 2458 /* 2459 * Allocate memory for the receive buffers and descriptor rings. 2460 * Replace these allocation functions with the interface functions 2461 * provided by the partition manager if/when they are available. 2462 */ 2463 2464 /* 2465 * Allocate memory for the receive buffer blocks. 2466 */ 2467 rx_buf_alloc_size = (nxgep->rx_default_block_size * 2468 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); 2469 2470 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2471 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; 2472 2473 if ((status = nxge_alloc_rx_buf_dma( 2474 nxgep, channel, data, rx_buf_alloc_size, 2475 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { 2476 return (status); 2477 } 2478 2479 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " 2480 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); 2481 2482 /* 2483 * Allocate memory for descriptor rings and mailbox. 2484 */ 2485 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2486 2487 if ((status = nxge_alloc_rx_cntl_dma( 2488 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 2489 != NXGE_OK) { 2490 nxge_free_rx_cntl_dma(nxgep, *control); 2491 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 2492 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 2493 return (status); 2494 } 2495 2496 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2497 "<== nxge_alloc_rxb: status 0x%08x", status)); 2498 2499 return (status); 2500 } 2501 2502 void 2503 nxge_free_rxb( 2504 p_nxge_t nxgep, 2505 int channel) 2506 { 2507 nxge_dma_common_t *data; 2508 nxge_dma_common_t *control; 2509 uint32_t num_chunks; 2510 2511 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb")); 2512 2513 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2514 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 2515 nxge_free_rx_buf_dma(nxgep, data, num_chunks); 2516 2517 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2518 nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 2519 2520 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2521 nxge_free_rx_cntl_dma(nxgep, control); 2522 2523 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2524 2525 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2526 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2527 2528 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb")); 2529 } 2530 2531 static void 2532 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2533 { 2534 int rdc_max = NXGE_MAX_RDCS; 2535 2536 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2537 2538 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 2539 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2540 "<== nxge_free_rx_mem_pool " 2541 "(null rx buf pool or buf not allocated)")); 2542 return; 2543 } 2544 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 2545 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2546 "<== nxge_free_rx_mem_pool " 2547 "(null rx cntl buf pool or cntl buf not allocated)")); 2548 return; 2549 } 2550 2551 
KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2552 sizeof (p_nxge_dma_common_t) * rdc_max); 2553 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2554 2555 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2556 sizeof (uint32_t) * rdc_max); 2557 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2558 sizeof (p_nxge_dma_common_t) * rdc_max); 2559 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2560 2561 nxgep->rx_buf_pool_p = 0; 2562 nxgep->rx_cntl_pool_p = 0; 2563 2564 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2565 sizeof (p_rx_rbr_ring_t) * rdc_max); 2566 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2567 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2568 sizeof (p_rx_rcr_ring_t) * rdc_max); 2569 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2570 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2571 sizeof (p_rx_mbox_t) * rdc_max); 2572 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2573 2574 nxgep->rx_rbr_rings = 0; 2575 nxgep->rx_rcr_rings = 0; 2576 nxgep->rx_mbox_areas_p = 0; 2577 2578 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2579 } 2580 2581 2582 static nxge_status_t 2583 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2584 p_nxge_dma_common_t *dmap, 2585 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2586 { 2587 p_nxge_dma_common_t rx_dmap; 2588 nxge_status_t status = NXGE_OK; 2589 size_t total_alloc_size; 2590 size_t allocated = 0; 2591 int i, size_index, array_size; 2592 boolean_t use_kmem_alloc = B_FALSE; 2593 2594 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2595 2596 rx_dmap = (p_nxge_dma_common_t) 2597 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2598 KM_SLEEP); 2599 2600 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2601 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2602 dma_channel, alloc_size, block_size, dmap)); 2603 2604 total_alloc_size = alloc_size; 2605 2606 #if defined(RX_USE_RECLAIM_POST) 2607 total_alloc_size = alloc_size + alloc_size/4; 2608 #endif 2609 2610 i = 0; 2611 size_index = 0; 2612 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2613 while ((size_index < array_size) && 2614 (alloc_sizes[size_index] < alloc_size)) 2615 size_index++; 2616 if (size_index >= array_size) { 2617 size_index = array_size - 1; 2618 } 2619 2620 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2621 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2622 use_kmem_alloc = B_TRUE; 2623 #if defined(__i386) || defined(__amd64) 2624 size_index = 0; 2625 #endif 2626 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2627 "==> nxge_alloc_rx_buf_dma: " 2628 "Neptune use kmem_alloc() - size_index %d", 2629 size_index)); 2630 } 2631 2632 while ((allocated < total_alloc_size) && 2633 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2634 rx_dmap[i].dma_chunk_index = i; 2635 rx_dmap[i].block_size = block_size; 2636 rx_dmap[i].alength = alloc_sizes[size_index]; 2637 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2638 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2639 rx_dmap[i].dma_channel = dma_channel; 2640 rx_dmap[i].contig_alloc_type = B_FALSE; 2641 rx_dmap[i].kmem_alloc_type = B_FALSE; 2642 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2643 2644 /* 2645 * N2/NIU: data buffers must be contiguous as the driver 2646 * needs to call Hypervisor api to set up 2647 * logical pages. 
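 * (When NXGE_DMA_BLOCK is 1 the whole buffer area is a single
 * chunk, so only one contiguous region has to be mapped into
 * logical pages.)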
2648 */ 2649 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2650 rx_dmap[i].contig_alloc_type = B_TRUE; 2651 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2652 } else if (use_kmem_alloc) { 2653 /* For Neptune, use kmem_alloc */ 2654 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2655 "==> nxge_alloc_rx_buf_dma: " 2656 "Neptune use kmem_alloc()")); 2657 rx_dmap[i].kmem_alloc_type = B_TRUE; 2658 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2659 } 2660 2661 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2662 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2663 "i %d nblocks %d alength %d", 2664 dma_channel, i, &rx_dmap[i], block_size, 2665 i, rx_dmap[i].nblocks, 2666 rx_dmap[i].alength)); 2667 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2668 &nxge_rx_dma_attr, 2669 rx_dmap[i].alength, 2670 &nxge_dev_buf_dma_acc_attr, 2671 DDI_DMA_READ | DDI_DMA_STREAMING, 2672 (p_nxge_dma_common_t)(&rx_dmap[i])); 2673 if (status != NXGE_OK) { 2674 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2675 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2676 "dma %d size_index %d size requested %d", 2677 dma_channel, 2678 size_index, 2679 rx_dmap[i].alength)); 2680 size_index--; 2681 } else { 2682 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2683 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2684 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2685 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2686 "buf_alloc_state %d alloc_type %d", 2687 dma_channel, 2688 &rx_dmap[i], 2689 rx_dmap[i].kaddrp, 2690 rx_dmap[i].alength, 2691 rx_dmap[i].buf_alloc_state, 2692 rx_dmap[i].buf_alloc_type)); 2693 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2694 " alloc_rx_buf_dma allocated rdc %d " 2695 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2696 dma_channel, i, rx_dmap[i].alength, 2697 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2698 rx_dmap[i].kaddrp)); 2699 i++; 2700 allocated += alloc_sizes[size_index]; 2701 } 2702 } 2703 2704 if (allocated < total_alloc_size) { 2705 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2706 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2707 "allocated 0x%x requested 0x%x", 2708 dma_channel, 2709 allocated, total_alloc_size)); 2710 status = NXGE_ERROR; 2711 goto nxge_alloc_rx_mem_fail1; 2712 } 2713 2714 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2715 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2716 "allocated 0x%x requested 0x%x", 2717 dma_channel, 2718 allocated, total_alloc_size)); 2719 2720 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2721 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2722 dma_channel, i)); 2723 *num_chunks = i; 2724 *dmap = rx_dmap; 2725 2726 goto nxge_alloc_rx_mem_exit; 2727 2728 nxge_alloc_rx_mem_fail1: 2729 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2730 2731 nxge_alloc_rx_mem_exit: 2732 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2733 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2734 2735 return (status); 2736 } 2737 2738 /*ARGSUSED*/ 2739 static void 2740 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2741 uint32_t num_chunks) 2742 { 2743 int i; 2744 2745 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2746 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2747 2748 if (dmap == 0) 2749 return; 2750 2751 for (i = 0; i < num_chunks; i++) { 2752 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2753 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2754 i, dmap)); 2755 nxge_dma_free_rx_data_buf(dmap++); 2756 } 2757 2758 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma")); 2759 } 2760 2761 /*ARGSUSED*/ 2762 static nxge_status_t 2763 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2764 p_nxge_dma_common_t *dmap, size_t 
size) 2765 { 2766 p_nxge_dma_common_t rx_dmap; 2767 nxge_status_t status = NXGE_OK; 2768 2769 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2770 2771 rx_dmap = (p_nxge_dma_common_t) 2772 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2773 2774 rx_dmap->contig_alloc_type = B_FALSE; 2775 rx_dmap->kmem_alloc_type = B_FALSE; 2776 2777 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2778 &nxge_desc_dma_attr, 2779 size, 2780 &nxge_dev_desc_dma_acc_attr, 2781 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2782 rx_dmap); 2783 if (status != NXGE_OK) { 2784 goto nxge_alloc_rx_cntl_dma_fail1; 2785 } 2786 2787 *dmap = rx_dmap; 2788 goto nxge_alloc_rx_cntl_dma_exit; 2789 2790 nxge_alloc_rx_cntl_dma_fail1: 2791 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2792 2793 nxge_alloc_rx_cntl_dma_exit: 2794 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2795 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2796 2797 return (status); 2798 } 2799 2800 /*ARGSUSED*/ 2801 static void 2802 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2803 { 2804 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2805 2806 if (dmap == 0) 2807 return; 2808 2809 nxge_dma_mem_free(dmap); 2810 2811 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2812 } 2813 2814 typedef struct { 2815 size_t tx_size; 2816 size_t cr_size; 2817 size_t threshhold; 2818 } nxge_tdc_sizes_t; 2819 2820 static 2821 nxge_status_t 2822 nxge_tdc_sizes( 2823 nxge_t *nxgep, 2824 nxge_tdc_sizes_t *sizes) 2825 { 2826 uint32_t threshhold; /* The bcopy() threshhold */ 2827 size_t tx_size; /* Transmit buffer size */ 2828 size_t cr_size; /* Completion ring size */ 2829 2830 /* 2831 * Assume that each DMA channel will be configured with the 2832 * default transmit buffer size for copying transmit data. 2833 * (If a packet is bigger than this, it will not be copied.) 2834 */ 2835 if (nxgep->niu_type == N2_NIU) { 2836 threshhold = TX_BCOPY_SIZE; 2837 } else { 2838 threshhold = nxge_bcopy_thresh; 2839 } 2840 tx_size = nxge_tx_ring_size * threshhold; 2841 2842 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 2843 cr_size += sizeof (txdma_mailbox_t); 2844 2845 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2846 if (nxgep->niu_type == N2_NIU) { 2847 if (!ISP2(tx_size)) { 2848 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2849 "==> nxge_tdc_sizes: Tx size" 2850 " must be power of 2")); 2851 return (NXGE_ERROR); 2852 } 2853 2854 if (tx_size > (1 << 22)) { 2855 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2856 "==> nxge_tdc_sizes: Tx size" 2857 " limited to 4M")); 2858 return (NXGE_ERROR); 2859 } 2860 2861 if (cr_size < 0x2000) 2862 cr_size = 0x2000; 2863 } 2864 #endif 2865 2866 sizes->threshhold = threshhold; 2867 sizes->tx_size = tx_size; 2868 sizes->cr_size = cr_size; 2869 2870 return (NXGE_OK); 2871 } 2872 /* 2873 * nxge_alloc_txb 2874 * 2875 * Allocate buffers for a TDC. 2876 * 2877 * Arguments: 2878 * nxgep 2879 * channel The channel to map into our kernel space. 
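 *
 * Rough sizing sketch (values assumed for illustration only):
 * with nxge_tx_ring_size = 1024 and a 2 KB bcopy threshold,
 * nxge_tdc_sizes() above would yield tx_size = 1024 * 2 KB = 2 MB
 * and cr_size = 1024 * sizeof (tx_desc_t) + sizeof (txdma_mailbox_t).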
2880 * 2881 * Notes: 2882 * 2883 * NPI function calls: 2884 * 2885 * NXGE function calls: 2886 * 2887 * Registers accessed: 2888 * 2889 * Context: 2890 * 2891 * Taking apart: 2892 * 2893 * Open questions: 2894 * 2895 */ 2896 nxge_status_t 2897 nxge_alloc_txb( 2898 p_nxge_t nxgep, 2899 int channel) 2900 { 2901 nxge_dma_common_t **dma_buf_p; 2902 nxge_dma_common_t **dma_cntl_p; 2903 uint32_t *num_chunks; 2904 nxge_status_t status = NXGE_OK; 2905 2906 nxge_tdc_sizes_t sizes; 2907 2908 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb")); 2909 2910 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 2911 return (NXGE_ERROR); 2912 2913 /* 2914 * Allocate memory for transmit buffers and descriptor rings. 2915 * Replace these allocation functions with the interface functions 2916 * provided by the partition manager when they are available. 2917 */ 2918 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2919 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 2920 2921 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2922 2923 /* 2928 * Allocate memory for the transmit buffer pool. 2929 */ 2930 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2931 "sizes: tx: %ld, cr:%ld, th:%ld", 2932 sizes.tx_size, sizes.cr_size, sizes.threshhold)); 2933 2934 *num_chunks = 0; 2935 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 2936 sizes.tx_size, sizes.threshhold, num_chunks); 2937 if (status != NXGE_OK) { 2938 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 2939 return (status); 2940 } 2941 2942 /* 2943 * Allocate memory for descriptor rings and mailbox. 2944 */ 2945 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 2946 sizes.cr_size); 2947 if (status != NXGE_OK) { 2948 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 2949 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 2950 return (status); 2951 } 2952 2953 return (NXGE_OK); 2954 } 2955 2956 void 2957 nxge_free_txb( 2958 p_nxge_t nxgep, 2959 int channel) 2960 { 2961 nxge_dma_common_t *data; 2962 nxge_dma_common_t *control; 2963 uint32_t num_chunks; 2964 2965 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 2966 2967 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2968 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2969 nxge_free_tx_buf_dma(nxgep, data, num_chunks); 2970 2971 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2972 nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 2973 2974 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2975 nxge_free_tx_cntl_dma(nxgep, control); 2976 2977 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2978 2979 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2980 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2981 2982 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 2983 } 2984 2985 /* 2986 * nxge_alloc_tx_mem_pool 2987 * 2988 * This function allocates all of the per-port TDC control data structures. 2989 * The per-channel (TDC) data structures are allocated when needed. 
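 * (See nxge_alloc_txb() above, which performs those per-channel
 * allocations.)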
2990 * 2991 * Arguments: 2992 * nxgep 2993 * 2994 * Notes: 2995 * 2996 * Context: 2997 * Any domain 2998 */ 2999 nxge_status_t 3000 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 3001 { 3002 nxge_hw_pt_cfg_t *p_cfgp; 3003 nxge_dma_pool_t *dma_poolp; 3004 nxge_dma_common_t **dma_buf_p; 3005 nxge_dma_pool_t *dma_cntl_poolp; 3006 nxge_dma_common_t **dma_cntl_p; 3007 uint32_t *num_chunks; /* per dma */ 3008 int tdc_max; 3009 3010 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 3011 3012 p_cfgp = &nxgep->pt_config.hw_config; 3013 tdc_max = NXGE_MAX_TDCS; 3014 3015 /* 3016 * Allocate memory for each transmit DMA channel. 3017 */ 3018 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 3019 KM_SLEEP); 3020 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 3021 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 3022 3023 dma_cntl_poolp = (p_nxge_dma_pool_t) 3024 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 3025 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 3026 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 3027 3028 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 3029 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3030 "nxge_alloc_tx_mem_pool: TDC too high %d, " 3031 "set to default %d", 3032 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 3033 nxge_tx_ring_size = TDC_DEFAULT_MAX; 3034 } 3035 3036 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3037 /* 3038 * N2/NIU limits the descriptor sizes: data buffers must use 3039 * contiguous memory allocation (contig_mem_alloc, 4M maximum), 3040 * and control buffers must be little endian and use the ddi/dki 3041 * memory allocation functions. The transmit ring is limited 3042 * to 8K (includes the mailbox). 3043 */ 3044 if (nxgep->niu_type == N2_NIU) { 3045 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 3046 (!ISP2(nxge_tx_ring_size))) { 3047 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 3048 } 3049 } 3050 #endif 3051 3052 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 3053 3054 num_chunks = (uint32_t *)KMEM_ZALLOC( 3055 sizeof (uint32_t) * tdc_max, KM_SLEEP); 3056 3057 dma_poolp->ndmas = p_cfgp->tdc.owned; 3058 dma_poolp->num_chunks = num_chunks; 3059 dma_poolp->dma_buf_pool_p = dma_buf_p; 3060 nxgep->tx_buf_pool_p = dma_poolp; 3061 3062 dma_poolp->buf_allocated = B_TRUE; 3063 3064 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 3065 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 3066 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 3067 3068 dma_cntl_poolp->buf_allocated = B_TRUE; 3069 3070 nxgep->tx_rings = 3071 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 3072 nxgep->tx_rings->rings = 3073 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 3074 nxgep->tx_mbox_areas_p = 3075 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 3076 nxgep->tx_mbox_areas_p->txmbox_areas_p = 3077 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 3078 3079 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 3080 3081 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3082 "==> nxge_alloc_tx_mem_pool: tdc_max %d poolp->ndmas %d", 3083 tdc_max, dma_poolp->ndmas)); 3084 3085 return (NXGE_OK); 3086 } 3087 3088 nxge_status_t 3089 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 3090 p_nxge_dma_common_t *dmap, size_t alloc_size, 3091 size_t block_size, uint32_t *num_chunks) 3092 { 3093 p_nxge_dma_common_t tx_dmap; 3094 nxge_status_t status = NXGE_OK; 3095 size_t total_alloc_size; 3096 size_t allocated = 0; 3097 int i, size_index, array_size; 3098 3099 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 3100 3101 tx_dmap = (p_nxge_dma_common_t) 3102 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * 
NXGE_DMA_BLOCK, 3103 KM_SLEEP); 3104 3105 total_alloc_size = alloc_size; 3106 i = 0; 3107 size_index = 0; 3108 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3109 while ((size_index < array_size) && 3110 (alloc_sizes[size_index] < alloc_size)) 3111 size_index++; 3112 if (size_index >= array_size) { 3113 size_index = array_size - 1; 3114 } 3115 3116 while ((allocated < total_alloc_size) && 3117 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3118 3119 tx_dmap[i].dma_chunk_index = i; 3120 tx_dmap[i].block_size = block_size; 3121 tx_dmap[i].alength = alloc_sizes[size_index]; 3122 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3123 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3124 tx_dmap[i].dma_channel = dma_channel; 3125 tx_dmap[i].contig_alloc_type = B_FALSE; 3126 tx_dmap[i].kmem_alloc_type = B_FALSE; 3127 3128 /* 3129 * N2/NIU: data buffers must be contiguous as the driver 3130 * needs to call the Hypervisor API to set up 3131 * logical pages. 3132 */ 3133 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3134 tx_dmap[i].contig_alloc_type = B_TRUE; 3135 } 3136 3137 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3138 &nxge_tx_dma_attr, 3139 tx_dmap[i].alength, 3140 &nxge_dev_buf_dma_acc_attr, 3141 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3142 (p_nxge_dma_common_t)(&tx_dmap[i])); 3143 if (status != NXGE_OK) { 3144 size_index--; 3145 } else { 3146 i++; 3147 allocated += alloc_sizes[size_index]; 3148 } 3149 } 3150 3151 if (allocated < total_alloc_size) { 3152 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3153 "==> nxge_alloc_tx_buf_dma: not enough for channel %d: " 3154 "allocated 0x%x requested 0x%x", 3155 dma_channel, 3156 allocated, total_alloc_size)); 3157 status = NXGE_ERROR; 3158 goto nxge_alloc_tx_mem_fail1; 3159 } 3160 3161 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3162 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3163 "allocated 0x%x requested 0x%x", 3164 dma_channel, 3165 allocated, total_alloc_size)); 3166 3167 *num_chunks = i; 3168 *dmap = tx_dmap; 3169 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3170 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3171 *dmap, i)); 3172 goto nxge_alloc_tx_mem_exit; 3173 3174 nxge_alloc_tx_mem_fail1: 3175 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3176 3177 nxge_alloc_tx_mem_exit: 3178 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3179 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3180 3181 return (status); 3182 } 3183 3184 /*ARGSUSED*/ 3185 static void 3186 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3187 uint32_t num_chunks) 3188 { 3189 int i; 3190 3191 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3192 3193 if (dmap == 0) 3194 return; 3195 3196 for (i = 0; i < num_chunks; i++) { 3197 nxge_dma_mem_free(dmap++); 3198 } 3199 3200 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3201 } 3202 3203 /*ARGSUSED*/ 3204 nxge_status_t 3205 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3206 p_nxge_dma_common_t *dmap, size_t size) 3207 { 3208 p_nxge_dma_common_t tx_dmap; 3209 nxge_status_t status = NXGE_OK; 3210 3211 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3212 tx_dmap = (p_nxge_dma_common_t) 3213 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3214 3215 tx_dmap->contig_alloc_type = B_FALSE; 3216 tx_dmap->kmem_alloc_type = B_FALSE; 3217 3218 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3219 &nxge_desc_dma_attr, 3220 size, 3221 &nxge_dev_desc_dma_acc_attr, 3222 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3223 tx_dmap); 3224 if (status != NXGE_OK) { 3225 
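		/*
		 * The descriptor/mailbox DMA allocation failed; the
		 * fail1 path below frees the bookkeeping structure.
		 */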
goto nxge_alloc_tx_cntl_dma_fail1; 3226 } 3227 3228 *dmap = tx_dmap; 3229 goto nxge_alloc_tx_cntl_dma_exit; 3230 3231 nxge_alloc_tx_cntl_dma_fail1: 3232 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3233 3234 nxge_alloc_tx_cntl_dma_exit: 3235 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3236 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3237 3238 return (status); 3239 } 3240 3241 /*ARGSUSED*/ 3242 static void 3243 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3244 { 3245 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3246 3247 if (dmap == 0) 3248 return; 3249 3250 nxge_dma_mem_free(dmap); 3251 3252 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3253 } 3254 3255 /* 3256 * nxge_free_tx_mem_pool 3257 * 3258 * This function frees all of the per-port TDC control data structures. 3259 * The per-channel (TDC) data structures are freed when the channel 3260 * is stopped. 3261 * 3262 * Arguments: 3263 * nxgep 3264 * 3265 * Notes: 3266 * 3267 * Context: 3268 * Any domain 3269 */ 3270 static void 3271 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3272 { 3273 int tdc_max = NXGE_MAX_TDCS; 3274 3275 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3276 3277 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3278 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3279 "<== nxge_free_tx_mem_pool " 3280 "(null tx buf pool or buf not allocated)")); 3281 return; 3282 } 3283 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3284 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3285 "<== nxge_free_tx_mem_pool " 3286 "(null tx cntl buf pool or cntl buf not allocated)")); 3287 return; 3288 } 3289 3290 /* 1. Free the mailboxes. */ 3291 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3292 sizeof (p_tx_mbox_t) * tdc_max); 3293 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3294 3295 nxgep->tx_mbox_areas_p = 0; 3296 3297 /* 2. Free the transmit ring arrays. */ 3298 KMEM_FREE(nxgep->tx_rings->rings, 3299 sizeof (p_tx_ring_t) * tdc_max); 3300 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3301 3302 nxgep->tx_rings = 0; 3303 3304 /* 3. Free the completion ring data structures. */ 3305 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3306 sizeof (p_nxge_dma_common_t) * tdc_max); 3307 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3308 3309 nxgep->tx_cntl_pool_p = 0; 3310 3311 /* 4. Free the data ring data structures. */ 3312 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3313 sizeof (uint32_t) * tdc_max); 3314 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3315 sizeof (p_nxge_dma_common_t) * tdc_max); 3316 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3317 3318 nxgep->tx_buf_pool_p = 0; 3319 3320 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3321 } 3322 3323 /*ARGSUSED*/ 3324 static nxge_status_t 3325 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3326 struct ddi_dma_attr *dma_attrp, 3327 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3328 p_nxge_dma_common_t dma_p) 3329 { 3330 caddr_t kaddrp; 3331 int ddi_status = DDI_SUCCESS; 3332 boolean_t contig_alloc_type; 3333 boolean_t kmem_alloc_type; 3334 3335 contig_alloc_type = dma_p->contig_alloc_type; 3336 3337 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3338 /* 3339 * contig_alloc_type (contiguous memory) is allowed 3340 * only for N2/NIU. 
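 *
 * Three allocation strategies are handled below:
 * contig_alloc_type uses contig_mem_alloc() (sun4v N2/NIU only),
 * kmem_alloc_type uses kmem_alloc() plus a DMA handle binding,
 * and the default path uses ddi_dma_mem_alloc().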
3341 */ 3342 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3343 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3344 dma_p->contig_alloc_type)); 3345 return (NXGE_ERROR | NXGE_DDI_FAILED); 3346 } 3347 3348 dma_p->dma_handle = NULL; 3349 dma_p->acc_handle = NULL; 3350 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3351 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3352 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3353 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3354 if (ddi_status != DDI_SUCCESS) { 3355 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3356 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3357 return (NXGE_ERROR | NXGE_DDI_FAILED); 3358 } 3359 3360 kmem_alloc_type = dma_p->kmem_alloc_type; 3361 3362 switch (contig_alloc_type) { 3363 case B_FALSE: 3364 switch (kmem_alloc_type) { 3365 case B_FALSE: 3366 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3367 length, 3368 acc_attr_p, 3369 xfer_flags, 3370 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3371 &dma_p->acc_handle); 3372 if (ddi_status != DDI_SUCCESS) { 3373 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3374 "nxge_dma_mem_alloc: " 3375 "ddi_dma_mem_alloc failed")); 3376 ddi_dma_free_handle(&dma_p->dma_handle); 3377 dma_p->dma_handle = NULL; 3378 return (NXGE_ERROR | NXGE_DDI_FAILED); 3379 } 3380 if (dma_p->alength < length) { 3381 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3382 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3383 "< length.")); 3384 ddi_dma_mem_free(&dma_p->acc_handle); 3385 ddi_dma_free_handle(&dma_p->dma_handle); 3386 dma_p->acc_handle = NULL; 3387 dma_p->dma_handle = NULL; 3388 return (NXGE_ERROR); 3389 } 3390 3391 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3392 NULL, 3393 kaddrp, dma_p->alength, xfer_flags, 3394 DDI_DMA_DONTWAIT, 3395 0, &dma_p->dma_cookie, &dma_p->ncookies); 3396 if (ddi_status != DDI_DMA_MAPPED) { 3397 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3398 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3399 "failed " 3400 "(status 0x%x ncookies %d.)", ddi_status, 3401 dma_p->ncookies)); 3402 if (dma_p->acc_handle) { 3403 ddi_dma_mem_free(&dma_p->acc_handle); 3404 dma_p->acc_handle = NULL; 3405 } 3406 ddi_dma_free_handle(&dma_p->dma_handle); 3407 dma_p->dma_handle = NULL; 3408 return (NXGE_ERROR | NXGE_DDI_FAILED); 3409 } 3410 3411 if (dma_p->ncookies != 1) { 3412 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3413 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3414 "> 1 cookie" 3415 "(status 0x%x ncookies %d.)", ddi_status, 3416 dma_p->ncookies)); 3417 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3418 if (dma_p->acc_handle) { 3419 ddi_dma_mem_free(&dma_p->acc_handle); 3420 dma_p->acc_handle = NULL; 3421 } 3422 ddi_dma_free_handle(&dma_p->dma_handle); 3423 dma_p->dma_handle = NULL; 3424 dma_p->acc_handle = NULL; 3425 return (NXGE_ERROR); 3426 } 3427 break; 3428 3429 case B_TRUE: 3430 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3431 if (kaddrp == NULL) { 3432 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3433 "nxge_dma_mem_alloc: " 3434 "kmem_alloc failed")); ddi_dma_free_handle(&dma_p->dma_handle); dma_p->dma_handle = NULL; 3435 return (NXGE_ERROR); 3436 } 3437 3438 dma_p->alength = length; 3439 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3440 NULL, kaddrp, dma_p->alength, xfer_flags, 3441 DDI_DMA_DONTWAIT, 0, 3442 &dma_p->dma_cookie, &dma_p->ncookies); 3443 if (ddi_status != DDI_DMA_MAPPED) { 3444 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3445 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3446 "(kmem_alloc) failed kaddrp $%p length %d " 3447 "(status 0x%x (%d) ncookies %d.)", 3448 kaddrp, length, 3449 ddi_status, ddi_status, dma_p->ncookies)); 3450 KMEM_FREE(kaddrp, length); 3451 
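			/*
			 * The kmem buffer was freed above; release the
			 * DMA handle and clear the state before
			 * returning the error.
			 */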
dma_p->acc_handle = NULL; 3452 ddi_dma_free_handle(&dma_p->dma_handle); 3453 dma_p->dma_handle = NULL; 3454 dma_p->kaddrp = NULL; 3455 return (NXGE_ERROR | NXGE_DDI_FAILED); 3456 } 3457 3458 if (dma_p->ncookies != 1) { 3459 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3460 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3461 "(kmem_alloc) > 1 cookie" 3462 "(status 0x%x ncookies %d.)", ddi_status, 3463 dma_p->ncookies)); 3464 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3465 KMEM_FREE(kaddrp, length); 3466 ddi_dma_free_handle(&dma_p->dma_handle); 3467 dma_p->dma_handle = NULL; 3468 dma_p->acc_handle = NULL; 3469 dma_p->kaddrp = NULL; 3470 return (NXGE_ERROR); 3471 } 3472 3473 dma_p->kaddrp = kaddrp; 3474 3475 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3476 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3477 "kaddr $%p alength %d", 3478 dma_p, 3479 kaddrp, 3480 dma_p->alength)); 3481 break; 3482 } 3483 break; 3484 3485 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3486 case B_TRUE: 3487 kaddrp = (caddr_t)contig_mem_alloc(length); 3488 if (kaddrp == NULL) { 3489 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3490 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3491 ddi_dma_free_handle(&dma_p->dma_handle); 3492 return (NXGE_ERROR | NXGE_DDI_FAILED); 3493 } 3494 3495 dma_p->alength = length; 3496 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3497 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3498 &dma_p->dma_cookie, &dma_p->ncookies); 3499 if (ddi_status != DDI_DMA_MAPPED) { 3500 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3501 "nxge_dma_mem_alloc:ddi_dma_addr_bind failed " 3502 "(status 0x%x ncookies %d.)", ddi_status, 3503 dma_p->ncookies)); 3504 3505 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3506 "==> nxge_dma_mem_alloc: (not mapped)" 3507 "length %lu (0x%x) " 3508 "free contig kaddrp $%p " 3509 "va_to_pa $%p", 3510 length, length, 3511 kaddrp, 3512 va_to_pa(kaddrp))); 3513 3514 3515 contig_mem_free((void *)kaddrp, length); 3516 ddi_dma_free_handle(&dma_p->dma_handle); 3517 3518 dma_p->dma_handle = NULL; 3519 dma_p->acc_handle = NULL; 3520 dma_p->alength = 0; 3521 dma_p->kaddrp = NULL; 3522 3523 return (NXGE_ERROR | NXGE_DDI_FAILED); 3524 } 3525 3526 if (dma_p->ncookies != 1 || 3527 (dma_p->dma_cookie.dmac_laddress == NULL)) { 3528 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3529 "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 " 3530 "cookie or " 3531 "dmac_laddress is NULL $%p size %d " 3532 " (status 0x%x ncookies %d.)", 3533 dma_p->dma_cookie.dmac_laddress, 3534 dma_p->dma_cookie.dmac_size, 3535 ddi_status, 3536 dma_p->ncookies)); 3537 3538 contig_mem_free((void *)kaddrp, length); 3539 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3540 ddi_dma_free_handle(&dma_p->dma_handle); 3541 3542 dma_p->alength = 0; 3543 dma_p->dma_handle = NULL; 3544 dma_p->acc_handle = NULL; 3545 dma_p->kaddrp = NULL; 3546 3547 return (NXGE_ERROR | NXGE_DDI_FAILED); 3548 } 3549 break; 3550 3551 #else 3552 case B_TRUE: 3553 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3554 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); ddi_dma_free_handle(&dma_p->dma_handle); dma_p->dma_handle = NULL; 3555 return (NXGE_ERROR | NXGE_DDI_FAILED); 3556 #endif 3557 } 3558 3559 dma_p->kaddrp = kaddrp; 3560 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3561 dma_p->alength - RXBUF_64B_ALIGNED; 3562 #if defined(__i386) 3563 dma_p->ioaddr_pp = 3564 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 3565 #else 3566 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3567 #endif 3568 dma_p->last_ioaddr_pp = 3569 #if defined(__i386) 3570 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress + 3571 #else 3572 
(unsigned char *)dma_p->dma_cookie.dmac_laddress + 3573 #endif 3574 dma_p->alength - RXBUF_64B_ALIGNED; 3575 3576 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 3577 3578 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3579 dma_p->orig_ioaddr_pp = 3580 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3581 dma_p->orig_alength = length; 3582 dma_p->orig_kaddrp = kaddrp; 3583 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 3584 #endif 3585 3586 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 3587 "dma buffer allocated: dma_p $%p " 3588 "return dmac_laddress from cookie $%p cookie dmac_size %d " 3589 "dma_p->ioaddr_p $%p " 3590 "dma_p->orig_ioaddr_p $%p " 3591 "orig_vatopa $%p " 3592 "alength %d (0x%x) " 3593 "kaddrp $%p " 3594 "length %d (0x%x)", 3595 dma_p, 3596 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 3597 dma_p->ioaddr_pp, 3598 dma_p->orig_ioaddr_pp, 3599 dma_p->orig_vatopa, 3600 dma_p->alength, dma_p->alength, 3601 kaddrp, 3602 length, length)); 3603 3604 return (NXGE_OK); 3605 } 3606 3607 static void 3608 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 3609 { 3610 if (dma_p->dma_handle != NULL) { 3611 if (dma_p->ncookies) { 3612 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3613 dma_p->ncookies = 0; 3614 } 3615 ddi_dma_free_handle(&dma_p->dma_handle); 3616 dma_p->dma_handle = NULL; 3617 } 3618 3619 if (dma_p->acc_handle != NULL) { 3620 ddi_dma_mem_free(&dma_p->acc_handle); 3621 dma_p->acc_handle = NULL; 3622 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3623 } 3624 3625 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3626 if (dma_p->contig_alloc_type && 3627 dma_p->orig_kaddrp && dma_p->orig_alength) { 3628 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3629 "kaddrp $%p (orig_kaddrp $%p) " 3630 "mem type %d " 3631 "orig_alength %d " 3632 "alength 0x%x (%d)", 3633 dma_p->kaddrp, 3634 dma_p->orig_kaddrp, 3635 dma_p->contig_alloc_type, 3636 dma_p->orig_alength, 3637 dma_p->alength, dma_p->alength)); 3638 3639 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 3640 dma_p->orig_alength = 0; 3641 dma_p->orig_kaddrp = NULL; 3642 dma_p->contig_alloc_type = B_FALSE; 3643 } 3644 #endif 3645 dma_p->kaddrp = NULL; 3646 dma_p->alength = 0; 3647 } 3648 3649 static void 3650 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p) 3651 { 3652 uint64_t kaddr; 3653 uint32_t buf_size; 3654 3655 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf")); 3656 3657 if (dma_p->dma_handle != NULL) { 3658 if (dma_p->ncookies) { 3659 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3660 dma_p->ncookies = 0; 3661 } 3662 ddi_dma_free_handle(&dma_p->dma_handle); 3663 dma_p->dma_handle = NULL; 3664 } 3665 3666 if (dma_p->acc_handle != NULL) { 3667 ddi_dma_mem_free(&dma_p->acc_handle); 3668 dma_p->acc_handle = NULL; 3669 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3670 } 3671 3672 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3673 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d", 3674 dma_p, 3675 dma_p->buf_alloc_state)); 3676 3677 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) { 3678 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3679 "<== nxge_dma_free_rx_data_buf: " 3680 "outstanding data buffers")); 3681 return; 3682 } 3683 3684 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3685 if (dma_p->contig_alloc_type && 3686 dma_p->orig_kaddrp && dma_p->orig_alength) { 3687 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: " 3688 "kaddrp $%p (orig_kaddrp $%p) " 3689 "mem type %d " 3690 "orig_alength %d " 3691 "alength 0x%x (%d)", 3692 dma_p->kaddrp, 3693 dma_p->orig_kaddrp, 
3694 dma_p->contig_alloc_type, 3695 dma_p->orig_alength, 3696 dma_p->alength, dma_p->alength)); 3697 3698 kaddr = (uint64_t)dma_p->orig_kaddrp; 3699 buf_size = dma_p->orig_alength; 3700 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size); 3701 dma_p->orig_alength = 0; 3702 dma_p->orig_kaddrp = NULL; 3703 dma_p->contig_alloc_type = B_FALSE; 3704 dma_p->kaddrp = NULL; 3705 dma_p->alength = 0; 3706 return; 3707 } 3708 #endif 3709 3710 if (dma_p->kmem_alloc_type) { 3711 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3712 "nxge_dma_free_rx_data_buf: free kmem " 3713 "kaddrp $%p (orig_kaddrp $%p)" 3714 "alloc type %d " 3715 "orig_alength %d " 3716 "alength 0x%x (%d)", 3717 dma_p->kaddrp, 3718 dma_p->orig_kaddrp, 3719 dma_p->kmem_alloc_type, 3720 dma_p->orig_alength, 3721 dma_p->alength, dma_p->alength)); 3722 #if defined(__i386) 3723 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp; 3724 #else 3725 kaddr = (uint64_t)dma_p->kaddrp; 3726 #endif 3727 buf_size = dma_p->orig_alength; 3728 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3729 "nxge_dma_free_rx_data_buf: free dmap $%p " 3730 "kaddr $%p buf_size %d", 3731 dma_p, 3732 kaddr, buf_size)); 3733 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size); 3734 dma_p->alength = 0; 3735 dma_p->orig_alength = 0; 3736 dma_p->kaddrp = NULL; 3737 dma_p->kmem_alloc_type = B_FALSE; 3738 } 3739 3740 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf")); 3741 } 3742 3743 /* 3744 * nxge_m_start() -- start transmitting and receiving. 3745 * 3746 * This function is called by the MAC layer when the first 3747 * stream is opened, to prepare the hardware for transmitting 3748 * and receiving packets. 3749 */ 3750 static int 3751 nxge_m_start(void *arg) 3752 { 3753 p_nxge_t nxgep = (p_nxge_t)arg; 3754 3755 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 3756 3757 /* 3758 * Are we already started? 3759 */ 3760 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 3761 return (0); 3762 } 3763 3764 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) { 3765 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 3766 } 3767 3768 /* 3769 * Make sure RX MAC is disabled while we initialize. 3770 */ 3771 if (!isLDOMguest(nxgep)) { 3772 (void) nxge_rx_mac_disable(nxgep); 3773 } 3774 3775 /* 3776 * Grab the global lock. 3777 */ 3778 MUTEX_ENTER(nxgep->genlock); 3779 3780 /* 3781 * Initialize the driver and hardware. 3782 */ 3783 if (nxge_init(nxgep) != NXGE_OK) { 3784 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3785 "<== nxge_m_start: initialization failed")); 3786 MUTEX_EXIT(nxgep->genlock); 3787 return (EIO); 3788 } 3789 3790 /* 3791 * Start the timer to check for system errors and tx hangs. 3792 */ 3793 if (!isLDOMguest(nxgep)) 3794 nxgep->nxge_timerid = nxge_start_timer(nxgep, 3795 nxge_check_hw_state, NXGE_CHECK_TIMER); 3796 #if defined(sun4v) 3797 else 3798 nxge_hio_start_timer(nxgep); 3799 #endif 3800 3801 nxgep->link_notify = B_TRUE; 3802 nxgep->link_check_count = 0; 3803 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 3804 3805 /* 3806 * Let the global lock go, since we are initialized. 3807 */ 3808 MUTEX_EXIT(nxgep->genlock); 3809 3810 /* 3811 * Let the MAC start receiving packets, now that 3812 * we are initialized. 3813 */ 3814 if (!isLDOMguest(nxgep)) { 3815 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 3816 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3817 "<== nxge_m_start: enable of RX mac failed")); 3818 return (EIO); 3819 } 3820 3821 /* 3822 * Enable hardware interrupts. 
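 * At this point the RX MAC is already enabled, so the interface
 * goes live as soon as interrupts are armed.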
3823 */ 3824 nxge_intr_hw_enable(nxgep); 3825 } 3826 #if defined(sun4v) 3827 else { 3828 /* 3829 * In guest domain we enable RDCs and their interrupts as 3830 * the last step. 3831 */ 3832 if (nxge_hio_rdc_enable(nxgep) != NXGE_OK) { 3833 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3834 "<== nxge_m_start: enable of RDCs failed")); 3835 return (EIO); 3836 } 3837 3838 if (nxge_hio_rdc_intr_arm(nxgep, B_TRUE) != NXGE_OK) { 3839 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3840 "<== nxge_m_start: intrs enable for RDCs failed")); 3841 return (EIO); 3842 } 3843 } 3844 #endif 3845 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3846 return (0); 3847 } 3848 3849 static boolean_t 3850 nxge_check_groups_stopped(p_nxge_t nxgep) 3851 { 3852 int i; 3853 3854 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 3855 if (nxgep->rx_hio_groups[i].started) 3856 return (B_FALSE); 3857 } 3858 3859 return (B_TRUE); 3860 } 3861 3862 /* 3863 * nxge_m_stop(): stop transmitting and receiving. 3864 */ 3865 static void 3866 nxge_m_stop(void *arg) 3867 { 3868 p_nxge_t nxgep = (p_nxge_t)arg; 3869 boolean_t groups_stopped; 3870 3871 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3872 3873 /* 3874 * Are the groups stopped? 3875 */ 3876 groups_stopped = nxge_check_groups_stopped(nxgep); 3877 ASSERT(groups_stopped == B_TRUE); 3878 if (!groups_stopped) { 3879 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n", 3880 nxgep->instance); 3881 return; 3882 } 3883 3884 if (!isLDOMguest(nxgep)) { 3885 /* 3886 * Disable the RX mac. 3887 */ 3888 (void) nxge_rx_mac_disable(nxgep); 3889 3890 /* 3891 * Wait for the IPP to drain. 3892 */ 3893 (void) nxge_ipp_drain(nxgep); 3894 3895 /* 3896 * Disable hardware interrupts. 3897 */ 3898 nxge_intr_hw_disable(nxgep); 3899 } 3900 #if defined(sun4v) 3901 else { 3902 (void) nxge_hio_rdc_intr_arm(nxgep, B_FALSE); 3903 } 3904 #endif 3905 3906 /* 3907 * Grab the global lock. 3908 */ 3909 MUTEX_ENTER(nxgep->genlock); 3910 3911 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3912 if (nxgep->nxge_timerid) { 3913 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3914 nxgep->nxge_timerid = 0; 3915 } 3916 3917 /* 3918 * Clean up. 3919 */ 3920 nxge_uninit(nxgep); 3921 3922 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3923 3924 /* 3925 * Let go of the global lock. 
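 * Teardown is complete and nxge_mac_state is now NXGE_MAC_STOPPED.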
3926 */ 3927 MUTEX_EXIT(nxgep->genlock); 3928 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3929 } 3930 3931 static int 3932 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3933 { 3934 p_nxge_t nxgep = (p_nxge_t)arg; 3935 struct ether_addr addrp; 3936 3937 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3938 "==> nxge_m_multicst: add %d", add)); 3939 3940 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3941 if (add) { 3942 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3943 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3944 "<== nxge_m_multicst: add multicast failed")); 3945 return (EINVAL); 3946 } 3947 } else { 3948 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3949 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3950 "<== nxge_m_multicst: del multicast failed")); 3951 return (EINVAL); 3952 } 3953 } 3954 3955 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3956 3957 return (0); 3958 } 3959 3960 static int 3961 nxge_m_promisc(void *arg, boolean_t on) 3962 { 3963 p_nxge_t nxgep = (p_nxge_t)arg; 3964 3965 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3966 "==> nxge_m_promisc: on %d", on)); 3967 3968 if (nxge_set_promisc(nxgep, on)) { 3969 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3970 "<== nxge_m_promisc: set promisc failed")); 3971 return (EINVAL); 3972 } 3973 3974 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3975 "<== nxge_m_promisc: on %d", on)); 3976 3977 return (0); 3978 } 3979 3980 static void 3981 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3982 { 3983 p_nxge_t nxgep = (p_nxge_t)arg; 3984 struct iocblk *iocp; 3985 boolean_t need_privilege; 3986 int err; 3987 int cmd; 3988 3989 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3990 3991 iocp = (struct iocblk *)mp->b_rptr; 3992 iocp->ioc_error = 0; 3993 need_privilege = B_TRUE; 3994 cmd = iocp->ioc_cmd; 3995 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3996 switch (cmd) { 3997 default: 3998 miocnak(wq, mp, 0, EINVAL); 3999 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 4000 return; 4001 4002 case LB_GET_INFO_SIZE: 4003 case LB_GET_INFO: 4004 case LB_GET_MODE: 4005 need_privilege = B_FALSE; 4006 break; 4007 case LB_SET_MODE: 4008 break; 4009 4010 4011 case NXGE_GET_MII: 4012 case NXGE_PUT_MII: 4013 case NXGE_GET64: 4014 case NXGE_PUT64: 4015 case NXGE_GET_TX_RING_SZ: 4016 case NXGE_GET_TX_DESC: 4017 case NXGE_TX_SIDE_RESET: 4018 case NXGE_RX_SIDE_RESET: 4019 case NXGE_GLOBAL_RESET: 4020 case NXGE_RESET_MAC: 4021 case NXGE_TX_REGS_DUMP: 4022 case NXGE_RX_REGS_DUMP: 4023 case NXGE_INT_REGS_DUMP: 4024 case NXGE_VIR_INT_REGS_DUMP: 4025 case NXGE_PUT_TCAM: 4026 case NXGE_GET_TCAM: 4027 case NXGE_RTRACE: 4028 case NXGE_RDUMP: 4029 case NXGE_RX_CLASS: 4030 case NXGE_RX_HASH: 4031 4032 need_privilege = B_FALSE; 4033 break; 4034 case NXGE_INJECT_ERR: 4035 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 4036 nxge_err_inject(nxgep, wq, mp); 4037 break; 4038 } 4039 4040 if (need_privilege) { 4041 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 4042 if (err != 0) { 4043 miocnak(wq, mp, 0, err); 4044 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4045 "<== nxge_m_ioctl: no priv")); 4046 return; 4047 } 4048 } 4049 4050 switch (cmd) { 4051 4052 case LB_GET_MODE: 4053 case LB_SET_MODE: 4054 case LB_GET_INFO_SIZE: 4055 case LB_GET_INFO: 4056 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 4057 break; 4058 4059 case NXGE_GET_MII: 4060 case NXGE_PUT_MII: 4061 case NXGE_PUT_TCAM: 4062 case NXGE_GET_TCAM: 4063 case NXGE_GET64: 4064 case NXGE_PUT64: 4065 case NXGE_GET_TX_RING_SZ: 4066 case NXGE_GET_TX_DESC: 4067 case NXGE_TX_SIDE_RESET: 4068 case NXGE_RX_SIDE_RESET: 
4069 case NXGE_GLOBAL_RESET: 4070 case NXGE_RESET_MAC: 4071 case NXGE_TX_REGS_DUMP: 4072 case NXGE_RX_REGS_DUMP: 4073 case NXGE_INT_REGS_DUMP: 4074 case NXGE_VIR_INT_REGS_DUMP: 4075 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4076 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 4077 nxge_hw_ioctl(nxgep, wq, mp, iocp); 4078 break; 4079 case NXGE_RX_CLASS: 4080 if (nxge_rxclass_ioctl(nxgep, wq, mp->b_cont) < 0) 4081 miocnak(wq, mp, 0, EINVAL); 4082 else 4083 miocack(wq, mp, sizeof (rx_class_cfg_t), 0); 4084 break; 4085 case NXGE_RX_HASH: 4086 4087 if (nxge_rxhash_ioctl(nxgep, wq, mp->b_cont) < 0) 4088 miocnak(wq, mp, 0, EINVAL); 4089 else 4090 miocack(wq, mp, sizeof (cfg_cmd_t), 0); 4091 break; 4092 } 4093 4094 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 4095 } 4096 4097 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 4098 4099 void 4100 nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory) 4101 { 4102 p_nxge_mmac_stats_t mmac_stats; 4103 int i; 4104 nxge_mmac_t *mmac_info; 4105 4106 mmac_info = &nxgep->nxge_mmac_info; 4107 4108 mmac_stats = &nxgep->statsp->mmac_stats; 4109 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 4110 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 4111 4112 for (i = 0; i < ETHERADDRL; i++) { 4113 if (factory) { 4114 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4115 = mmac_info->factory_mac_pool[slot][ 4116 (ETHERADDRL-1) - i]; 4117 } else { 4118 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4119 = mmac_info->mac_pool[slot].addr[ 4120 (ETHERADDRL - 1) - i]; 4121 } 4122 } 4123 } 4124 4125 /* 4126 * nxge_altmac_set() -- Set an alternate MAC address 4127 */ 4128 static int 4129 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot, 4130 int rdctbl, boolean_t usetbl) 4131 { 4132 uint8_t addrn; 4133 uint8_t portn; 4134 npi_mac_addr_t altmac; 4135 hostinfo_t mac_rdc; 4136 p_nxge_class_pt_cfg_t clscfgp; 4137 4138 4139 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 4140 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 4141 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 4142 4143 portn = nxgep->mac.portnum; 4144 addrn = (uint8_t)slot - 1; 4145 4146 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, 4147 nxgep->function_num, addrn, &altmac) != NPI_SUCCESS) 4148 return (EIO); 4149 4150 /* 4151 * Set the rdc table number for the host info entry 4152 * for this mac address slot. 4153 */ 4154 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 4155 mac_rdc.value = 0; 4156 if (usetbl) 4157 mac_rdc.bits.w0.rdc_tbl_num = rdctbl; 4158 else 4159 mac_rdc.bits.w0.rdc_tbl_num = 4160 clscfgp->mac_host_info[addrn].rdctbl; 4161 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 4162 4163 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 4164 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 4165 return (EIO); 4166 } 4167 4168 /* 4169 * Enable comparison with the alternate MAC address. 4170 * The first alternate addr is enabled by bit 1 of register 4171 * BMAC_ALTAD_CMPEN but by bit 0 of register 4172 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 4173 * accordingly before calling npi_mac_altaddr_enable.
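 * For example (a sketch of the mapping coded below): on the XMAC
 * ports (XMAC_PORT_0/XMAC_PORT_1) slot 1 maps to addrn 0 and slot 2
 * to addrn 1, while on the other (BMAC) ports slot 1 maps to addrn 1
 * and slot 2 to addrn 2.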
4174 */ 4175 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4176 addrn = (uint8_t)slot - 1; 4177 else 4178 addrn = (uint8_t)slot; 4179 4180 if (npi_mac_altaddr_enable(nxgep->npi_handle, 4181 nxgep->function_num, addrn) != NPI_SUCCESS) { 4182 return (EIO); 4183 } 4184 4185 return (0); 4186 } 4187 4188 /* 4189 * nxge_m_mmac_add_g() - find an unused address slot, set the address 4190 * value to the one specified, and enable the port to start filtering on 4191 * the new MAC address. Returns 0 on success. 4192 */ 4193 int 4194 nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl, 4195 boolean_t usetbl) 4196 { 4197 p_nxge_t nxgep = arg; 4198 int slot; 4199 nxge_mmac_t *mmac_info; 4200 int err; 4201 nxge_status_t status; 4202 4203 mutex_enter(nxgep->genlock); 4204 4205 /* 4206 * Make sure that nxge is initialized if _start() has 4207 * not been called. 4208 */ 4209 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4210 status = nxge_init(nxgep); 4211 if (status != NXGE_OK) { 4212 mutex_exit(nxgep->genlock); 4213 return (ENXIO); 4214 } 4215 } 4216 4217 mmac_info = &nxgep->nxge_mmac_info; 4218 if (mmac_info->naddrfree == 0) { 4219 mutex_exit(nxgep->genlock); 4220 return (ENOSPC); 4221 } 4222 4223 /* 4224 * Search for the first available slot. Because naddrfree 4225 * is not zero, we are guaranteed to find one. 4226 * Each of the first two ports of Neptune has 16 alternate 4227 * MAC slots but only the first 7 (of 15) slots have assigned factory 4228 * MAC addresses. We first search among the slots without bundled 4229 * factory MACs. If we fail to find one in that range, then we 4230 * search the slots with bundled factory MACs. A factory MAC is 4231 * wasted while its slot is used with a user MAC address, but the 4232 * slot can be used by the factory MAC again after calling 4233 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 4234 */ 4235 for (slot = 0; slot <= mmac_info->num_mmac; slot++) { 4236 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4237 break; 4238 } 4239 4240 ASSERT(slot <= mmac_info->num_mmac); 4241 4242 if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl, 4243 usetbl)) != 0) { 4244 mutex_exit(nxgep->genlock); 4245 return (err); 4246 } 4247 4248 bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 4249 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 4250 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4251 mmac_info->naddrfree--; 4252 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4253 4254 mutex_exit(nxgep->genlock); 4255 return (0); 4256 } 4257 4258 /* 4259 * Remove the specified mac address and update the HW not to filter 4260 * the mac address anymore. 4261 */ 4262 int 4263 nxge_m_mmac_remove(void *arg, int slot) 4264 { 4265 p_nxge_t nxgep = arg; 4266 nxge_mmac_t *mmac_info; 4267 uint8_t addrn; 4268 uint8_t portn; 4269 int err = 0; 4270 nxge_status_t status; 4271 4272 mutex_enter(nxgep->genlock); 4273 4274 /* 4275 * Make sure that nxge is initialized if _start() has 4276 * not been called.
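 * (Callers can reach this entry point before nxge_m_start() has run,
 * e.g. when an address is configured on a stopped interface, so the
 * hardware is initialized on demand here.)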
4277 */ 4278 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4279 status = nxge_init(nxgep); 4280 if (status != NXGE_OK) { 4281 mutex_exit(nxgep->genlock); 4282 return (ENXIO); 4283 } 4284 } 4285 4286 mmac_info = &nxgep->nxge_mmac_info; 4287 if (slot < 1 || slot > mmac_info->num_mmac) { 4288 mutex_exit(nxgep->genlock); 4289 return (EINVAL); 4290 } 4291 4292 portn = nxgep->mac.portnum; 4293 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4294 addrn = (uint8_t)slot - 1; 4295 else 4296 addrn = (uint8_t)slot; 4297 4298 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4299 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 4300 == NPI_SUCCESS) { 4301 mmac_info->naddrfree++; 4302 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 4303 /* 4304 * Regardless of whether the MAC we just stopped 4305 * filtering is a user addr or a factory addr, we must 4306 * set the MMAC_VENDOR_ADDR flag if this slot has an 4307 * associated factory MAC to indicate that a factory 4308 * MAC is available. 4309 */ 4310 if (slot <= mmac_info->num_factory_mmac) { 4311 mmac_info->mac_pool[slot].flags 4312 |= MMAC_VENDOR_ADDR; 4313 } 4314 /* 4315 * Clear mac_pool[slot].addr so that kstat shows a zero 4316 * alternate MAC address when the slot is not used. 4317 * (But nxge_m_mmac_get returns the factory MAC even 4318 * when the slot is not used!) 4319 */ 4320 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 4321 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4322 } else { 4323 err = EIO; 4324 } 4325 } else { 4326 err = EINVAL; 4327 } 4328 4329 mutex_exit(nxgep->genlock); 4330 return (err); 4331 } 4332 4333 /* 4334 * The callback to query all the factory addresses. naddr must be the same as 4335 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and 4336 * mcm_addr is the space allocated for keeping all the addresses, whose size is 4337 * naddr * MAXMACADDRLEN.
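 * Note that factory addresses occupy factory_mac_pool slots 1 through
 * num_factory_mmac; slot 0 is unused, which is why the copy loop below
 * reads entry (i + 1).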
4338 */ 4339 static void 4340 nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr) 4341 { 4342 nxge_t *nxgep = arg; 4343 nxge_mmac_t *mmac_info; 4344 int i; 4345 4346 mutex_enter(nxgep->genlock); 4347 4348 mmac_info = &nxgep->nxge_mmac_info; 4349 ASSERT(naddr == mmac_info->num_factory_mmac); 4350 4351 for (i = 0; i < naddr; i++) { 4352 bcopy(mmac_info->factory_mac_pool[i + 1], 4353 addr + i * MAXMACADDRLEN, ETHERADDRL); 4354 } 4355 4356 mutex_exit(nxgep->genlock); 4357 } 4358 4359 4360 static boolean_t 4361 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4362 { 4363 nxge_t *nxgep = arg; 4364 uint32_t *txflags = cap_data; 4365 4366 switch (cap) { 4367 case MAC_CAPAB_HCKSUM: 4368 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4369 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload)); 4370 if (nxge_cksum_offload <= 1) { 4371 *txflags = HCKSUM_INET_PARTIAL; 4372 } 4373 break; 4374 4375 case MAC_CAPAB_MULTIFACTADDR: { 4376 mac_capab_multifactaddr_t *mfacp = cap_data; 4377 4378 if (!isLDOMguest(nxgep)) { 4379 mutex_enter(nxgep->genlock); 4380 mfacp->mcm_naddr = 4381 nxgep->nxge_mmac_info.num_factory_mmac; 4382 mfacp->mcm_getaddr = nxge_m_getfactaddr; 4383 mutex_exit(nxgep->genlock); 4384 } 4385 break; 4386 } 4387 4388 case MAC_CAPAB_LSO: { 4389 mac_capab_lso_t *cap_lso = cap_data; 4390 4391 if (nxgep->soft_lso_enable) { 4392 if (nxge_cksum_offload <= 1) { 4393 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4394 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 4395 nxge_lso_max = NXGE_LSO_MAXLEN; 4396 } 4397 cap_lso->lso_basic_tcp_ipv4.lso_max = 4398 nxge_lso_max; 4399 } 4400 break; 4401 } else { 4402 return (B_FALSE); 4403 } 4404 } 4405 4406 case MAC_CAPAB_RINGS: { 4407 mac_capab_rings_t *cap_rings = cap_data; 4408 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 4409 4410 mutex_enter(nxgep->genlock); 4411 if (cap_rings->mr_type == MAC_RING_TYPE_RX) { 4412 if (isLDOMguest(nxgep)) { 4413 cap_rings->mr_group_type = 4414 MAC_GROUP_TYPE_STATIC; 4415 cap_rings->mr_rnum = 4416 NXGE_HIO_SHARE_MAX_CHANNELS; 4417 cap_rings->mr_rget = nxge_fill_ring; 4418 cap_rings->mr_gnum = 1; 4419 cap_rings->mr_gget = nxge_hio_group_get; 4420 cap_rings->mr_gaddring = NULL; 4421 cap_rings->mr_gremring = NULL; 4422 } else { 4423 /* 4424 * Service Domain. 4425 */ 4426 cap_rings->mr_group_type = 4427 MAC_GROUP_TYPE_DYNAMIC; 4428 cap_rings->mr_rnum = p_cfgp->max_rdcs; 4429 cap_rings->mr_rget = nxge_fill_ring; 4430 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids; 4431 cap_rings->mr_gget = nxge_hio_group_get; 4432 cap_rings->mr_gaddring = nxge_group_add_ring; 4433 cap_rings->mr_gremring = nxge_group_rem_ring; 4434 } 4435 4436 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4437 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]", 4438 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids)); 4439 } else { 4440 /* 4441 * TX Rings. 4442 */ 4443 if (isLDOMguest(nxgep)) { 4444 cap_rings->mr_group_type = 4445 MAC_GROUP_TYPE_STATIC; 4446 cap_rings->mr_rnum = 4447 NXGE_HIO_SHARE_MAX_CHANNELS; 4448 cap_rings->mr_rget = nxge_fill_ring; 4449 cap_rings->mr_gnum = 0; 4450 cap_rings->mr_gget = NULL; 4451 cap_rings->mr_gaddring = NULL; 4452 cap_rings->mr_gremring = NULL; 4453 } else { 4454 /* 4455 * Service Domain. 4456 */ 4457 cap_rings->mr_group_type = 4458 MAC_GROUP_TYPE_DYNAMIC; 4459 cap_rings->mr_rnum = p_cfgp->tdc.count; 4460 cap_rings->mr_rget = nxge_fill_ring; 4461 4462 /* 4463 * Share capable. 
4464 * 4465 * Do not report the default group: hence -1 4466 */ 4467 cap_rings->mr_gnum = 4468 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1; 4469 cap_rings->mr_gget = nxge_hio_group_get; 4470 cap_rings->mr_gaddring = nxge_group_add_ring; 4471 cap_rings->mr_gremring = nxge_group_rem_ring; 4472 } 4473 4474 NXGE_DEBUG_MSG((nxgep, TX_CTL, 4475 "==> nxge_m_getcapab: tx rings # of rings %d", 4476 p_cfgp->tdc.count)); 4477 } 4478 mutex_exit(nxgep->genlock); 4479 break; 4480 } 4481 4482 #if defined(sun4v) 4483 case MAC_CAPAB_SHARES: { 4484 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4485 4486 /* 4487 * Only the service domain driver responds to 4488 * this capability request. 4489 */ 4490 mutex_enter(nxgep->genlock); 4491 if (isLDOMservice(nxgep)) { 4492 mshares->ms_snum = 3; 4493 mshares->ms_handle = (void *)nxgep; 4494 mshares->ms_salloc = nxge_hio_share_alloc; 4495 mshares->ms_sfree = nxge_hio_share_free; 4496 mshares->ms_sadd = nxge_hio_share_add_group; 4497 mshares->ms_sremove = nxge_hio_share_rem_group; 4498 mshares->ms_squery = nxge_hio_share_query; 4499 mshares->ms_sbind = nxge_hio_share_bind; 4500 mshares->ms_sunbind = nxge_hio_share_unbind; 4501 mutex_exit(nxgep->genlock); 4502 } else { 4503 mutex_exit(nxgep->genlock); 4504 return (B_FALSE); 4505 } 4506 break; 4507 } 4508 #endif 4509 default: 4510 return (B_FALSE); 4511 } 4512 return (B_TRUE); 4513 } 4514 4515 static boolean_t 4516 nxge_param_locked(mac_prop_id_t pr_num) 4517 { 4518 /* 4519 * All adv_* parameters are locked (read-only) while 4520 * the device is in any sort of loopback mode ... 4521 */ 4522 switch (pr_num) { 4523 case MAC_PROP_ADV_1000FDX_CAP: 4524 case MAC_PROP_EN_1000FDX_CAP: 4525 case MAC_PROP_ADV_1000HDX_CAP: 4526 case MAC_PROP_EN_1000HDX_CAP: 4527 case MAC_PROP_ADV_100FDX_CAP: 4528 case MAC_PROP_EN_100FDX_CAP: 4529 case MAC_PROP_ADV_100HDX_CAP: 4530 case MAC_PROP_EN_100HDX_CAP: 4531 case MAC_PROP_ADV_10FDX_CAP: 4532 case MAC_PROP_EN_10FDX_CAP: 4533 case MAC_PROP_ADV_10HDX_CAP: 4534 case MAC_PROP_EN_10HDX_CAP: 4535 case MAC_PROP_AUTONEG: 4536 case MAC_PROP_FLOWCTRL: 4537 return (B_TRUE); 4538 } 4539 return (B_FALSE); 4540 } 4541 4542 /* 4543 * callback functions for set/get of properties 4544 */ 4545 static int 4546 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4547 uint_t pr_valsize, const void *pr_val) 4548 { 4549 nxge_t *nxgep = barg; 4550 p_nxge_param_t param_arr; 4551 p_nxge_stats_t statsp; 4552 int err = 0; 4553 uint8_t val; 4554 uint32_t cur_mtu, new_mtu, old_framesize; 4555 link_flowctrl_t fl; 4556 4557 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4558 param_arr = nxgep->param_arr; 4559 statsp = nxgep->statsp; 4560 mutex_enter(nxgep->genlock); 4561 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4562 nxge_param_locked(pr_num)) { 4563 /* 4564 * All adv_* parameters are locked (read-only) 4565 * while the device is in any sort of loopback mode. 
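 * Such requests are rejected below with EBUSY until the device leaves
 * loopback mode.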
4566 */ 4567 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4568 "==> nxge_m_setprop: loopback mode: read only")); 4569 mutex_exit(nxgep->genlock); 4570 return (EBUSY); 4571 } 4572 4573 val = *(uint8_t *)pr_val; 4574 switch (pr_num) { 4575 case MAC_PROP_EN_1000FDX_CAP: 4576 nxgep->param_en_1000fdx = val; 4577 param_arr[param_anar_1000fdx].value = val; 4578 4579 goto reprogram; 4580 4581 case MAC_PROP_EN_100FDX_CAP: 4582 nxgep->param_en_100fdx = val; 4583 param_arr[param_anar_100fdx].value = val; 4584 4585 goto reprogram; 4586 4587 case MAC_PROP_EN_10FDX_CAP: 4588 nxgep->param_en_10fdx = val; 4589 param_arr[param_anar_10fdx].value = val; 4590 4591 goto reprogram; 4592 4593 case MAC_PROP_EN_1000HDX_CAP: 4594 case MAC_PROP_EN_100HDX_CAP: 4595 case MAC_PROP_EN_10HDX_CAP: 4596 case MAC_PROP_ADV_1000FDX_CAP: 4597 case MAC_PROP_ADV_1000HDX_CAP: 4598 case MAC_PROP_ADV_100FDX_CAP: 4599 case MAC_PROP_ADV_100HDX_CAP: 4600 case MAC_PROP_ADV_10FDX_CAP: 4601 case MAC_PROP_ADV_10HDX_CAP: 4602 case MAC_PROP_STATUS: 4603 case MAC_PROP_SPEED: 4604 case MAC_PROP_DUPLEX: 4605 err = EINVAL; /* cannot set read-only properties */ 4606 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4607 "==> nxge_m_setprop: read only property %d", 4608 pr_num)); 4609 break; 4610 4611 case MAC_PROP_AUTONEG: 4612 param_arr[param_autoneg].value = val; 4613 4614 goto reprogram; 4615 4616 case MAC_PROP_MTU: 4617 cur_mtu = nxgep->mac.default_mtu; 4618 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4619 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4620 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4621 new_mtu, nxgep->mac.is_jumbo)); 4622 4623 if (new_mtu == cur_mtu) { 4624 err = 0; 4625 break; 4626 } 4627 4628 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4629 err = EBUSY; 4630 break; 4631 } 4632 4633 if ((new_mtu < NXGE_DEFAULT_MTU) || 4634 (new_mtu > NXGE_MAXIMUM_MTU)) { 4635 err = EINVAL; 4636 break; 4637 } 4638 4639 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4640 nxgep->mac.maxframesize = (uint16_t) 4641 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4642 if (nxge_mac_set_framesize(nxgep)) { 4643 nxgep->mac.maxframesize = 4644 (uint16_t)old_framesize; 4645 err = EINVAL; 4646 break; 4647 } 4648 4649 err = mac_maxsdu_update(nxgep->mach, new_mtu); 4650 if (err) { 4651 nxgep->mac.maxframesize = 4652 (uint16_t)old_framesize; 4653 err = EINVAL; 4654 break; 4655 } 4656 4657 nxgep->mac.default_mtu = new_mtu; 4658 if (new_mtu > NXGE_DEFAULT_MTU) 4659 nxgep->mac.is_jumbo = B_TRUE; 4660 else 4661 nxgep->mac.is_jumbo = B_FALSE; 4662 4663 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4664 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4665 new_mtu, nxgep->mac.maxframesize)); 4666 break; 4667 4668 case MAC_PROP_FLOWCTRL: 4669 bcopy(pr_val, &fl, sizeof (fl)); 4670 switch (fl) { 4671 default: 4672 err = EINVAL; 4673 break; 4674 4675 case LINK_FLOWCTRL_NONE: 4676 param_arr[param_anar_pause].value = 0; 4677 break; 4678 4679 case LINK_FLOWCTRL_RX: 4680 param_arr[param_anar_pause].value = 1; 4681 break; 4682 4683 case LINK_FLOWCTRL_TX: 4684 case LINK_FLOWCTRL_BI: 4685 err = EINVAL; 4686 break; 4687 } 4688 4689 reprogram: 4690 if (err == 0) { 4691 if (!nxge_param_link_update(nxgep)) { 4692 err = EINVAL; 4693 } 4694 } 4695 break; 4696 case MAC_PROP_PRIVATE: 4697 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4698 "==> nxge_m_setprop: private property")); 4699 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 4700 pr_val); 4701 break; 4702 4703 default: 4704 err = ENOTSUP; 4705 break; 4706 } 4707 4708 mutex_exit(nxgep->genlock); 4709 4710 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4711 "<== nxge_m_setprop (return %d)", err)); 4712 
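	/*
	 * For reference (illustrative commands, not driver output): these
	 * properties are normally driven from userland through dladm(1M),
	 * e.g. "dladm set-linkprop -p mtu=9000 nxge0" or
	 * "dladm set-linkprop -p flowctrl=rx nxge0", both of which arrive
	 * here through the MAC framework.
	 */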
return (err); 4713 } 4714 4715 static int 4716 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4717 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 4718 { 4719 nxge_t *nxgep = barg; 4720 p_nxge_param_t param_arr = nxgep->param_arr; 4721 p_nxge_stats_t statsp = nxgep->statsp; 4722 int err = 0; 4723 link_flowctrl_t fl; 4724 uint64_t tmp = 0; 4725 link_state_t ls; 4726 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4727 4728 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4729 "==> nxge_m_getprop: pr_num %d", pr_num)); 4730 4731 if (pr_valsize == 0) 4732 return (EINVAL); 4733 4734 *perm = MAC_PROP_PERM_RW; 4735 4736 if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) { 4737 err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4738 return (err); 4739 } 4740 4741 bzero(pr_val, pr_valsize); 4742 switch (pr_num) { 4743 case MAC_PROP_DUPLEX: 4744 *perm = MAC_PROP_PERM_READ; 4745 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4746 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4747 "==> nxge_m_getprop: duplex mode %d", 4748 *(uint8_t *)pr_val)); 4749 break; 4750 4751 case MAC_PROP_SPEED: 4752 if (pr_valsize < sizeof (uint64_t)) 4753 return (EINVAL); 4754 *perm = MAC_PROP_PERM_READ; 4755 tmp = statsp->mac_stats.link_speed * 1000000ull; 4756 bcopy(&tmp, pr_val, sizeof (tmp)); 4757 break; 4758 4759 case MAC_PROP_STATUS: 4760 if (pr_valsize < sizeof (link_state_t)) 4761 return (EINVAL); 4762 *perm = MAC_PROP_PERM_READ; 4763 if (!statsp->mac_stats.link_up) 4764 ls = LINK_STATE_DOWN; 4765 else 4766 ls = LINK_STATE_UP; 4767 bcopy(&ls, pr_val, sizeof (ls)); 4768 break; 4769 4770 case MAC_PROP_AUTONEG: 4771 *(uint8_t *)pr_val = 4772 param_arr[param_autoneg].value; 4773 break; 4774 4775 case MAC_PROP_FLOWCTRL: 4776 if (pr_valsize < sizeof (link_flowctrl_t)) 4777 return (EINVAL); 4778 4779 fl = LINK_FLOWCTRL_NONE; 4780 if (param_arr[param_anar_pause].value) { 4781 fl = LINK_FLOWCTRL_RX; 4782 } 4783 bcopy(&fl, pr_val, sizeof (fl)); 4784 break; 4785 4786 case MAC_PROP_ADV_1000FDX_CAP: 4787 *perm = MAC_PROP_PERM_READ; 4788 *(uint8_t *)pr_val = 4789 param_arr[param_anar_1000fdx].value; 4790 break; 4791 4792 case MAC_PROP_EN_1000FDX_CAP: 4793 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4794 break; 4795 4796 case MAC_PROP_ADV_100FDX_CAP: 4797 *perm = MAC_PROP_PERM_READ; 4798 *(uint8_t *)pr_val = 4799 param_arr[param_anar_100fdx].value; 4800 break; 4801 4802 case MAC_PROP_EN_100FDX_CAP: 4803 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4804 break; 4805 4806 case MAC_PROP_ADV_10FDX_CAP: 4807 *perm = MAC_PROP_PERM_READ; 4808 *(uint8_t *)pr_val = 4809 param_arr[param_anar_10fdx].value; 4810 break; 4811 4812 case MAC_PROP_EN_10FDX_CAP: 4813 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4814 break; 4815 4816 case MAC_PROP_EN_1000HDX_CAP: 4817 case MAC_PROP_EN_100HDX_CAP: 4818 case MAC_PROP_EN_10HDX_CAP: 4819 case MAC_PROP_ADV_1000HDX_CAP: 4820 case MAC_PROP_ADV_100HDX_CAP: 4821 case MAC_PROP_ADV_10HDX_CAP: 4822 err = ENOTSUP; 4823 break; 4824 4825 case MAC_PROP_PRIVATE: 4826 err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4827 pr_valsize, pr_val, perm); 4828 break; 4829 4830 case MAC_PROP_MTU: { 4831 mac_propval_range_t range; 4832 4833 if (!(pr_flags & MAC_PROP_POSSIBLE)) 4834 return (ENOTSUP); 4835 if (pr_valsize < sizeof (mac_propval_range_t)) 4836 return (EINVAL); 4837 range.mpr_count = 1; 4838 range.mpr_type = MAC_PROPVAL_UINT32; 4839 range.range_uint32[0].mpur_min = 4840 range.range_uint32[0].mpur_max = NXGE_DEFAULT_MTU; 4841 range.range_uint32[0].mpur_max = NXGE_MAXIMUM_MTU; 4842 
bcopy(&range, pr_val, sizeof (range)); 4843 break; 4844 } 4845 default: 4846 err = EINVAL; 4847 break; 4848 } 4849 4850 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4851 4852 return (err); 4853 } 4854 4855 /* ARGSUSED */ 4856 static int 4857 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4858 const void *pr_val) 4859 { 4860 p_nxge_param_t param_arr = nxgep->param_arr; 4861 int err = 0; 4862 long result; 4863 4864 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4865 "==> nxge_set_priv_prop: name %s", pr_name)); 4866 4867 /* Blanking */ 4868 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4869 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4870 (char *)pr_val, 4871 (caddr_t)&param_arr[param_rxdma_intr_time]); 4872 if (err) { 4873 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4874 "<== nxge_set_priv_prop: " 4875 "unable to set (%s)", pr_name)); 4876 err = EINVAL; 4877 } else { 4878 err = 0; 4879 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4880 "<== nxge_set_priv_prop: " 4881 "set (%s)", pr_name)); 4882 } 4883 4884 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4885 "<== nxge_set_priv_prop: name %s (value %s)", 4886 pr_name, (char *)pr_val)); 4887 4888 return (err); 4889 } 4890 4891 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4892 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4893 (char *)pr_val, 4894 (caddr_t)&param_arr[param_rxdma_intr_pkts]); 4895 if (err) { 4896 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4897 "<== nxge_set_priv_prop: " 4898 "unable to set (%s)", pr_name)); 4899 err = EINVAL; 4900 } else { 4901 err = 0; 4902 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4903 "<== nxge_set_priv_prop: " 4904 "set (%s)", pr_name)); 4905 } 4906 4907 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4908 "<== nxge_set_priv_prop: name %s (value %s)", 4909 pr_name, (char *)pr_val)); 4910 4911 return (err); 4912 } 4913 4914 /* Classification */ 4915 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4916 if (pr_val == NULL) { 4917 err = EINVAL; 4918 return (err); 4919 } 4920 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4921 4922 err = nxge_param_set_ip_opt(nxgep, NULL, 4923 NULL, (char *)pr_val, 4924 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]); 4925 4926 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4927 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4928 pr_name, result)); 4929 4930 return (err); 4931 } 4932 4933 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 4934 if (pr_val == NULL) { 4935 err = EINVAL; 4936 return (err); 4937 } 4938 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4939 4940 err = nxge_param_set_ip_opt(nxgep, NULL, 4941 NULL, (char *)pr_val, 4942 (caddr_t)&param_arr[param_class_opt_ipv4_udp]); 4943 4944 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4945 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4946 pr_name, result)); 4947 4948 return (err); 4949 } 4950 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 4951 if (pr_val == NULL) { 4952 err = EINVAL; 4953 return (err); 4954 } 4955 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4956 4957 err = nxge_param_set_ip_opt(nxgep, NULL, 4958 NULL, (char *)pr_val, 4959 (caddr_t)&param_arr[param_class_opt_ipv4_ah]); 4960 4961 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4962 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4963 pr_name, result)); 4964 4965 return (err); 4966 } 4967 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 4968 if (pr_val == NULL) { 4969 err = EINVAL; 4970 return (err); 4971 } 4972 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4973 4974 err = nxge_param_set_ip_opt(nxgep, NULL, 4975 NULL, (char *)pr_val, 4976 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]); 4977 4978 NXGE_DEBUG_MSG((nxgep,
NXGE_CTL, 4979 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4980 pr_name, result)); 4981 4982 return (err); 4983 } 4984 4985 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 4986 if (pr_val == NULL) { 4987 err = EINVAL; 4988 return (err); 4989 } 4990 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4991 4992 err = nxge_param_set_ip_opt(nxgep, NULL, 4993 NULL, (char *)pr_val, 4994 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]); 4995 4996 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4997 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4998 pr_name, result)); 4999 5000 return (err); 5001 } 5002 5003 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5004 if (pr_val == NULL) { 5005 err = EINVAL; 5006 return (err); 5007 } 5008 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5009 5010 err = nxge_param_set_ip_opt(nxgep, NULL, 5011 NULL, (char *)pr_val, 5012 (caddr_t)&param_arr[param_class_opt_ipv6_udp]); 5013 5014 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5015 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5016 pr_name, result)); 5017 5018 return (err); 5019 } 5020 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5021 if (pr_val == NULL) { 5022 err = EINVAL; 5023 return (err); 5024 } 5025 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5026 5027 err = nxge_param_set_ip_opt(nxgep, NULL, 5028 NULL, (char *)pr_val, 5029 (caddr_t)&param_arr[param_class_opt_ipv6_ah]); 5030 5031 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5032 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5033 pr_name, result)); 5034 5035 return (err); 5036 } 5037 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5038 if (pr_val == NULL) { 5039 err = EINVAL; 5040 return (err); 5041 } 5042 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5043 5044 err = nxge_param_set_ip_opt(nxgep, NULL, 5045 NULL, (char *)pr_val, 5046 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]); 5047 5048 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5049 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5050 pr_name, result)); 5051 5052 return (err); 5053 } 5054 5055 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5056 if (pr_val == NULL) { 5057 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5058 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 5059 err = EINVAL; 5060 return (err); 5061 } 5062 5063 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5064 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5065 "<== nxge_set_priv_prop: name %s " 5066 "(lso %d pr_val %s value %d)", 5067 pr_name, nxgep->soft_lso_enable, pr_val, result)); 5068 5069 if (result > 1 || result < 0) { 5070 err = EINVAL; 5071 } else { 5072 if (nxgep->soft_lso_enable == (uint32_t)result) { 5073 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5074 "no change (%d %d)", 5075 nxgep->soft_lso_enable, result)); 5076 return (0); 5077 } 5078 5079 nxgep->soft_lso_enable = (int)result; 5080 } 5081 5082 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5083 "<== nxge_set_priv_prop: name %s (value %d)", 5084 pr_name, result)); 5085 5086 return (err); 5087 } 5088 /* 5089 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the 5090 * following code to be executed.
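 * The equivalent private link property interface can also be used,
 * e.g. "dladm set-linkprop -p _adv_10gfdx_cap=1 nxge0" (illustrative).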
5091 */ 5092 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5093 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5094 (caddr_t)&param_arr[param_anar_10gfdx]); 5095 return (err); 5096 } 5097 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5098 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5099 (caddr_t)&param_arr[param_anar_pause]); 5100 return (err); 5101 } 5102 5103 return (EINVAL); 5104 } 5105 5106 static int 5107 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags, 5108 uint_t pr_valsize, void *pr_val, uint_t *perm) 5109 { 5110 p_nxge_param_t param_arr = nxgep->param_arr; 5111 char valstr[MAXNAMELEN]; 5112 int err = EINVAL; 5113 uint_t strsize; 5114 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 5115 5116 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5117 "==> nxge_get_priv_prop: property %s", pr_name)); 5118 5119 /* function number */ 5120 if (strcmp(pr_name, "_function_number") == 0) { 5121 if (is_default) 5122 return (ENOTSUP); 5123 *perm = MAC_PROP_PERM_READ; 5124 (void) snprintf(valstr, sizeof (valstr), "%d", 5125 nxgep->function_num); 5126 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5127 "==> nxge_get_priv_prop: name %s " 5128 "(value %d valstr %s)", 5129 pr_name, nxgep->function_num, valstr)); 5130 5131 err = 0; 5132 goto done; 5133 } 5134 5135 /* Neptune firmware version */ 5136 if (strcmp(pr_name, "_fw_version") == 0) { 5137 if (is_default) 5138 return (ENOTSUP); 5139 *perm = MAC_PROP_PERM_READ; 5140 (void) snprintf(valstr, sizeof (valstr), "%s", 5141 nxgep->vpd_info.ver); 5142 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5143 "==> nxge_get_priv_prop: name %s " 5144 "(value %s valstr %s)", 5145 pr_name, nxgep->vpd_info.ver, valstr)); 5146 5147 err = 0; 5148 goto done; 5149 } 5150 5151 /* port PHY mode */ 5152 if (strcmp(pr_name, "_port_mode") == 0) { 5153 if (is_default) 5154 return (ENOTSUP); 5155 *perm = MAC_PROP_PERM_READ; 5156 switch (nxgep->mac.portmode) { 5157 case PORT_1G_COPPER: 5158 (void) snprintf(valstr, sizeof (valstr), "1G copper %s", 5159 nxgep->hot_swappable_phy ? 5160 "[hot swappable]" : ""); 5161 break; 5162 case PORT_1G_FIBER: 5163 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s", 5164 nxgep->hot_swappable_phy ? 5165 "[hot swappable]" : ""); 5166 break; 5167 case PORT_10G_COPPER: 5168 (void) snprintf(valstr, sizeof (valstr), 5169 "10G copper %s", 5170 nxgep->hot_swappable_phy ? 5171 "[hot swappable]" : ""); 5172 break; 5173 case PORT_10G_FIBER: 5174 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s", 5175 nxgep->hot_swappable_phy ? 5176 "[hot swappable]" : ""); 5177 break; 5178 case PORT_10G_SERDES: 5179 (void) snprintf(valstr, sizeof (valstr), 5180 "10G serdes %s", nxgep->hot_swappable_phy ? 5181 "[hot swappable]" : ""); 5182 break; 5183 case PORT_1G_SERDES: 5184 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s", 5185 nxgep->hot_swappable_phy ? 5186 "[hot swappable]" : ""); 5187 break; 5188 case PORT_1G_TN1010: 5189 (void) snprintf(valstr, sizeof (valstr), 5190 "1G TN1010 copper %s", nxgep->hot_swappable_phy ? 5191 "[hot swappable]" : ""); 5192 break; 5193 case PORT_10G_TN1010: 5194 (void) snprintf(valstr, sizeof (valstr), 5195 "10G TN1010 copper %s", nxgep->hot_swappable_phy ? 5196 "[hot swappable]" : ""); 5197 break; 5198 case PORT_1G_RGMII_FIBER: 5199 (void) snprintf(valstr, sizeof (valstr), 5200 "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
5201 "[hot swappable]" : ""); 5202 break; 5203 case PORT_HSP_MODE: 5204 (void) snprintf(valstr, sizeof (valstr), 5205 "phy not present[hot swappable]"); 5206 break; 5207 default: 5208 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5209 nxgep->hot_swappable_phy ? 5210 "[hot swappable]" : ""); 5211 break; 5212 } 5213 5214 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5215 "==> nxge_get_priv_prop: name %s (value %s)", 5216 pr_name, valstr)); 5217 5218 err = 0; 5219 goto done; 5220 } 5221 5222 /* Hot swappable PHY */ 5223 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5224 if (is_default) 5225 return (ENOTSUP); 5226 *perm = MAC_PROP_PERM_READ; 5227 (void) snprintf(valstr, sizeof (valstr), "%s", 5228 nxgep->hot_swappable_phy ? 5229 "yes" : "no"); 5230 5231 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5232 "==> nxge_get_priv_prop: name %s " 5233 "(value %d valstr %s)", 5234 pr_name, nxgep->hot_swappable_phy, valstr)); 5235 5236 err = 0; 5237 goto done; 5238 } 5239 5240 5241 /* Receive Interrupt Blanking Parameters */ 5242 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5243 err = 0; 5244 if (is_default) { 5245 (void) snprintf(valstr, sizeof (valstr), 5246 "%d", RXDMA_RCR_TO_DEFAULT); 5247 goto done; 5248 } 5249 5250 (void) snprintf(valstr, sizeof (valstr), "%d", 5251 nxgep->intr_timeout); 5252 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5253 "==> nxge_get_priv_prop: name %s (value %d)", 5254 pr_name, 5255 (uint32_t)nxgep->intr_timeout)); 5256 goto done; 5257 } 5258 5259 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5260 err = 0; 5261 if (is_default) { 5262 (void) snprintf(valstr, sizeof (valstr), 5263 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5264 goto done; 5265 } 5266 (void) snprintf(valstr, sizeof (valstr), "%d", 5267 nxgep->intr_threshold); 5268 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5269 "==> nxge_get_priv_prop: name %s (value %d)", 5270 pr_name, (uint32_t)nxgep->intr_threshold)); 5271 5272 goto done; 5273 } 5274 5275 /* Classification and Load Distribution Configuration */ 5276 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5277 if (is_default) { 5278 (void) snprintf(valstr, sizeof (valstr), "%x", 5279 NXGE_CLASS_FLOW_GEN_SERVER); 5280 err = 0; 5281 goto done; 5282 } 5283 err = nxge_dld_get_ip_opt(nxgep, 5284 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5285 5286 (void) snprintf(valstr, sizeof (valstr), "%x", 5287 (int)param_arr[param_class_opt_ipv4_tcp].value); 5288 5289 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5290 "==> nxge_get_priv_prop: %s", valstr)); 5291 goto done; 5292 } 5293 5294 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5295 if (is_default) { 5296 (void) snprintf(valstr, sizeof (valstr), "%x", 5297 NXGE_CLASS_FLOW_GEN_SERVER); 5298 err = 0; 5299 goto done; 5300 } 5301 err = nxge_dld_get_ip_opt(nxgep, 5302 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5303 5304 (void) snprintf(valstr, sizeof (valstr), "%x", 5305 (int)param_arr[param_class_opt_ipv4_udp].value); 5306 5307 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5308 "==> nxge_get_priv_prop: %s", valstr)); 5309 goto done; 5310 } 5311 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5312 if (is_default) { 5313 (void) snprintf(valstr, sizeof (valstr), "%x", 5314 NXGE_CLASS_FLOW_GEN_SERVER); 5315 err = 0; 5316 goto done; 5317 } 5318 err = nxge_dld_get_ip_opt(nxgep, 5319 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 5320 5321 (void) snprintf(valstr, sizeof (valstr), "%x", 5322 (int)param_arr[param_class_opt_ipv4_ah].value); 5323 5324 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5325 "==> nxge_get_priv_prop: %s", valstr)); 5326 goto done; 5327 } 5328 5329 if (strcmp(pr_name, "_class_opt_ipv4_sctp") 
== 0) { 5330 if (is_default) { 5331 (void) snprintf(valstr, sizeof (valstr), "%x", 5332 NXGE_CLASS_FLOW_GEN_SERVER); 5333 err = 0; 5334 goto done; 5335 } 5336 err = nxge_dld_get_ip_opt(nxgep, 5337 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]); 5338 5339 (void) snprintf(valstr, sizeof (valstr), "%x", 5340 (int)param_arr[param_class_opt_ipv4_sctp].value); 5341 5342 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5343 "==> nxge_get_priv_prop: %s", valstr)); 5344 goto done; 5345 } 5346 5347 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5348 if (is_default) { 5349 (void) snprintf(valstr, sizeof (valstr), "%x", 5350 NXGE_CLASS_FLOW_GEN_SERVER); 5351 err = 0; 5352 goto done; 5353 } 5354 err = nxge_dld_get_ip_opt(nxgep, 5355 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]); 5356 5357 (void) snprintf(valstr, sizeof (valstr), "%x", 5358 (int)param_arr[param_class_opt_ipv6_tcp].value); 5359 5360 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5361 "==> nxge_get_priv_prop: %s", valstr)); 5362 goto done; 5363 } 5364 5365 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5366 if (is_default) { 5367 (void) snprintf(valstr, sizeof (valstr), "%x", 5368 NXGE_CLASS_FLOW_GEN_SERVER); 5369 err = 0; 5370 goto done; 5371 } 5372 err = nxge_dld_get_ip_opt(nxgep, 5373 (caddr_t)&param_arr[param_class_opt_ipv6_udp]); 5374 5375 (void) snprintf(valstr, sizeof (valstr), "%x", 5376 (int)param_arr[param_class_opt_ipv6_udp].value); 5377 5378 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5379 "==> nxge_get_priv_prop: %s", valstr)); 5380 goto done; 5381 } 5382 5383 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5384 if (is_default) { 5385 (void) snprintf(valstr, sizeof (valstr), "%x", 5386 NXGE_CLASS_FLOW_GEN_SERVER); 5387 err = 0; 5388 goto done; 5389 } 5390 err = nxge_dld_get_ip_opt(nxgep, 5391 (caddr_t)&param_arr[param_class_opt_ipv6_ah]); 5392 5393 (void) snprintf(valstr, sizeof (valstr), "%x", 5394 (int)param_arr[param_class_opt_ipv6_ah].value); 5395 5396 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5397 "==> nxge_get_priv_prop: %s", valstr)); 5398 goto done; 5399 } 5400 5401 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5402 if (is_default) { 5403 (void) snprintf(valstr, sizeof (valstr), "%x", 5404 NXGE_CLASS_FLOW_GEN_SERVER); 5405 err = 0; 5406 goto done; 5407 } 5408 err = nxge_dld_get_ip_opt(nxgep, 5409 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]); 5410 5411 (void) snprintf(valstr, sizeof (valstr), "%x", 5412 (int)param_arr[param_class_opt_ipv6_sctp].value); 5413 5414 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5415 "==> nxge_get_priv_prop: %s", valstr)); 5416 goto done; 5417 } 5418 5419 /* Software LSO */ 5420 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5421 if (is_default) { 5422 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5423 err = 0; 5424 goto done; 5425 } 5426 (void) snprintf(valstr, sizeof (valstr), 5427 "%d", nxgep->soft_lso_enable); 5428 err = 0; 5429 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5430 "==> nxge_get_priv_prop: name %s (value %d)", 5431 pr_name, nxgep->soft_lso_enable)); 5432 5433 goto done; 5434 } 5435 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5436 err = 0; 5437 if (is_default || 5438 nxgep->param_arr[param_anar_10gfdx].value != 0) { 5439 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5440 goto done; 5441 } else { 5442 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5443 goto done; 5444 } 5445 } 5446 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5447 err = 0; 5448 if (is_default || 5449 nxgep->param_arr[param_anar_pause].value != 0) { 5450 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5451 goto done; 5452 } else { 5453 (void) snprintf(valstr,
sizeof (valstr), "%d", 0); 5454 goto done; 5455 } 5456 } 5457 5458 done: 5459 if (err == 0) { 5460 strsize = (uint_t)strlen(valstr); 5461 if (pr_valsize < strsize) { 5462 err = ENOBUFS; 5463 } else { 5464 (void) strlcpy(pr_val, valstr, pr_valsize); 5465 } 5466 } 5467 5468 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5469 "<== nxge_get_priv_prop: return %d", err)); 5470 return (err); 5471 } 5472 5473 /* 5474 * Module loading and removing entry points. 5475 */ 5476 5477 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach, 5478 nodev, NULL, D_MP, NULL, nxge_quiesce); 5479 5480 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5481 5482 /* 5483 * Module linkage information for the kernel. 5484 */ 5485 static struct modldrv nxge_modldrv = { 5486 &mod_driverops, 5487 NXGE_DESC_VER, 5488 &nxge_dev_ops 5489 }; 5490 5491 static struct modlinkage modlinkage = { 5492 MODREV_1, (void *) &nxge_modldrv, NULL 5493 }; 5494 5495 int 5496 _init(void) 5497 { 5498 int status; 5499 5500 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 5501 5502 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5503 5504 mac_init_ops(&nxge_dev_ops, "nxge"); 5505 5506 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5507 if (status != 0) { 5508 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5509 "failed to init device soft state")); 5510 goto _init_exit; 5511 } 5512 5513 status = mod_install(&modlinkage); 5514 if (status != 0) { 5515 ddi_soft_state_fini(&nxge_list); 5516 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5517 goto _init_exit; 5518 } 5519 5520 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5521 5522 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5523 return (status); 5524 5525 _init_exit: 5526 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5527 MUTEX_DESTROY(&nxgedebuglock); 5528 return (status); 5529 } 5530 5531 int 5532 _fini(void) 5533 { 5534 int status; 5535 5536 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5537 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5538 5539 if (nxge_mblks_pending) 5540 return (EBUSY); 5541 5542 status = mod_remove(&modlinkage); 5543 if (status != DDI_SUCCESS) { 5544 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5545 "Module removal failed 0x%08x", 5546 status)); 5547 goto _fini_exit; 5548 } 5549 5550 mac_fini_ops(&nxge_dev_ops); 5551 5552 ddi_soft_state_fini(&nxge_list); 5553 5554 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5555 5556 MUTEX_DESTROY(&nxge_common_lock); 5557 MUTEX_DESTROY(&nxgedebuglock); 5558 return (status); 5559 5560 _fini_exit: 5561 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5562 return (status); 5563 } 5564 5565 int 5566 _info(struct modinfo *modinfop) 5567 { 5568 int status; 5569 5570 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5571 status = mod_info(&modlinkage, modinfop); 5572 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5573 5574 return (status); 5575 } 5576 5577 /*ARGSUSED*/ 5578 static int 5579 nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5580 { 5581 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5582 p_nxge_t nxgep = rhp->nxgep; 5583 uint32_t channel; 5584 p_tx_ring_t ring; 5585 5586 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5587 ring = nxgep->tx_rings->rings[channel]; 5588 5589 MUTEX_ENTER(&ring->lock); 5590 ring->tx_ring_handle = rhp->ring_handle; 5591 MUTEX_EXIT(&ring->lock); 5592 5593 return (0); 5594 } 5595 5596 static void 5597 
nxge_tx_ring_stop(mac_ring_driver_t rdriver) 5598 { 5599 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5600 p_nxge_t nxgep = rhp->nxgep; 5601 uint32_t channel; 5602 p_tx_ring_t ring; 5603 5604 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5605 ring = nxgep->tx_rings->rings[channel]; 5606 5607 MUTEX_ENTER(&ring->lock); 5608 ring->tx_ring_handle = (mac_ring_handle_t)NULL; 5609 MUTEX_EXIT(&ring->lock); 5610 } 5611 5612 static int 5613 nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5614 { 5615 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5616 p_nxge_t nxgep = rhp->nxgep; 5617 uint32_t channel; 5618 p_rx_rcr_ring_t ring; 5619 int i; 5620 5621 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index; 5622 ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 5623 5624 MUTEX_ENTER(&ring->lock); 5625 5626 if (nxgep->rx_channel_started[channel] == B_TRUE) { 5627 MUTEX_EXIT(&ring->lock); 5628 return (0); 5629 } 5630 5631 /* set rcr_ring */ 5632 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) { 5633 if ((nxgep->ldgvp->ldvp[i].is_rxdma == 1) && 5634 (nxgep->ldgvp->ldvp[i].channel == channel)) { 5635 ring->ldvp = &nxgep->ldgvp->ldvp[i]; 5636 ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp; 5637 } 5638 } 5639 5640 nxgep->rx_channel_started[channel] = B_TRUE; 5641 ring->rcr_mac_handle = rhp->ring_handle; 5642 ring->rcr_gen_num = mr_gen_num; 5643 MUTEX_EXIT(&ring->lock); 5644 5645 return (0); 5646 } 5647 5648 static void 5649 nxge_rx_ring_stop(mac_ring_driver_t rdriver) 5650 { 5651 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5652 p_nxge_t nxgep = rhp->nxgep; 5653 uint32_t channel; 5654 p_rx_rcr_ring_t ring; 5655 5656 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index; 5657 ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 5658 5659 MUTEX_ENTER(&ring->lock); 5660 nxgep->rx_channel_started[channel] = B_FALSE; 5661 ring->rcr_mac_handle = NULL; 5662 MUTEX_EXIT(&ring->lock); 5663 } 5664 5665 /* 5666 * Callback function for the MAC layer to register all rings. 5667 */ 5668 static void 5669 nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index, 5670 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh) 5671 { 5672 p_nxge_t nxgep = (p_nxge_t)arg; 5673 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 5674 5675 NXGE_DEBUG_MSG((nxgep, TX_CTL, 5676 "==> nxge_fill_ring 0x%x index %d", rtype, index)); 5677 5678 switch (rtype) { 5679 case MAC_RING_TYPE_TX: { 5680 p_nxge_ring_handle_t rhandlep; 5681 5682 NXGE_DEBUG_MSG((nxgep, TX_CTL, 5683 "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d", 5684 rtype, index, p_cfgp->tdc.count)); 5685 5686 ASSERT((index >= 0) && (index < p_cfgp->tdc.count)); 5687 rhandlep = &nxgep->tx_ring_handles[index]; 5688 rhandlep->nxgep = nxgep; 5689 rhandlep->index = index; 5690 rhandlep->ring_handle = rh; 5691 5692 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5693 infop->mri_start = nxge_tx_ring_start; 5694 infop->mri_stop = nxge_tx_ring_stop; 5695 infop->mri_tx = nxge_tx_ring_send; 5696 5697 break; 5698 } 5699 case MAC_RING_TYPE_RX: { 5700 p_nxge_ring_handle_t rhandlep; 5701 int nxge_rindex; 5702 mac_intr_t nxge_mac_intr; 5703 5704 NXGE_DEBUG_MSG((nxgep, RX_CTL, 5705 "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d", 5706 rtype, index, p_cfgp->max_rdcs)); 5707 5708 /* 5709 * 'index' is the ring index within the group. 5710 * Find the ring index in the nxge instance.
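 * For example (illustrative): if group 0 owns 4 rings, then
 * (rg_index 1, index 2) resolves to nxge ring index 6.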
5711 */ 5712 nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index); 5713 5714 ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs)); 5715 rhandlep = &nxgep->rx_ring_handles[nxge_rindex]; 5716 rhandlep->nxgep = nxgep; 5717 rhandlep->index = nxge_rindex; 5718 rhandlep->ring_handle = rh; 5719 5720 /* 5721 * Entrypoint to enable interrupt (disable poll) and 5722 * disable interrupt (enable poll). 5723 */ 5724 nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep; 5725 nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll; 5726 nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll; 5727 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5728 infop->mri_start = nxge_rx_ring_start; 5729 infop->mri_stop = nxge_rx_ring_stop; 5730 infop->mri_intr = nxge_mac_intr; /* ??? */ 5731 infop->mri_poll = nxge_rx_poll; 5732 5733 break; 5734 } 5735 default: 5736 break; 5737 } 5738 5739 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x", 5740 rtype)); 5741 } 5742 5743 static void 5744 nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5745 mac_ring_type_t type) 5746 { 5747 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5748 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5749 nxge_t *nxge; 5750 nxge_grp_t *grp; 5751 nxge_rdc_grp_t *rdc_grp; 5752 uint16_t channel; /* device-wise ring id */ 5753 int dev_gindex; 5754 int rv; 5755 5756 nxge = rgroup->nxgep; 5757 5758 switch (type) { 5759 case MAC_RING_TYPE_TX: 5760 /* 5761 * nxge_grp_dc_add takes a channel number which is a 5762 * "device" ring ID. 5763 */ 5764 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5765 5766 /* 5767 * Remove the ring from the default group 5768 */ 5769 if (rgroup->gindex != 0) { 5770 (void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5771 } 5772 5773 /* 5774 * nxge->tx_set.group[] is an array of groups indexed by 5775 * a "port" group ID. 5776 */ 5777 grp = nxge->tx_set.group[rgroup->gindex]; 5778 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5779 if (rv != 0) { 5780 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5781 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5782 } 5783 break; 5784 5785 case MAC_RING_TYPE_RX: 5786 /* 5787 * nxge->rx_set.group[] is an array of groups indexed by 5788 * a "port" group ID. 5789 */ 5790 grp = nxge->rx_set.group[rgroup->gindex]; 5791 5792 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5793 rgroup->gindex; 5794 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5795 5796 /* 5797 * nxge_grp_dc_add takes a channel number which is a 5798 * "device" ring ID.
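 * For example (illustrative): with start_rdc 8, ring handle index 2
 * maps to RDC channel 10 in the computation below.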
5799 */ 5800 channel = nxge->pt_config.hw_config.start_rdc + rhandle->index; 5801 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel); 5802 if (rv != 0) { 5803 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5804 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5805 } 5806 5807 rdc_grp->map |= (1 << channel); 5808 rdc_grp->max_rdcs++; 5809 5810 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl); 5811 break; 5812 } 5813 } 5814 5815 static void 5816 nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5817 mac_ring_type_t type) 5818 { 5819 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5820 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5821 nxge_t *nxge; 5822 uint16_t channel; /* device-wise ring id */ 5823 nxge_rdc_grp_t *rdc_grp; 5824 int dev_gindex; 5825 5826 nxge = rgroup->nxgep; 5827 5828 switch (type) { 5829 case MAC_RING_TYPE_TX: 5830 dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid + 5831 rgroup->gindex; 5832 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5833 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5834 5835 /* 5836 * Add the ring back to the default group 5837 */ 5838 if (rgroup->gindex != 0) { 5839 nxge_grp_t *grp; 5840 grp = nxge->tx_set.group[0]; 5841 (void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5842 } 5843 break; 5844 5845 case MAC_RING_TYPE_RX: 5846 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5847 rgroup->gindex; 5848 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5849 channel = rdc_grp->start_rdc + rhandle->index; 5850 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 5851 5852 rdc_grp->map &= ~(1 << channel); 5853 rdc_grp->max_rdcs--; 5854 5855 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl); 5856 break; 5857 } 5858 } 5859 5860 5861 /*ARGSUSED*/ 5862 static nxge_status_t 5863 nxge_add_intrs(p_nxge_t nxgep) 5864 { 5865 5866 int intr_types; 5867 int type = 0; 5868 int ddi_status = DDI_SUCCESS; 5869 nxge_status_t status = NXGE_OK; 5870 5871 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 5872 5873 nxgep->nxge_intr_type.intr_registered = B_FALSE; 5874 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 5875 nxgep->nxge_intr_type.msi_intx_cnt = 0; 5876 nxgep->nxge_intr_type.intr_added = 0; 5877 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 5878 nxgep->nxge_intr_type.intr_type = 0; 5879 5880 if (nxgep->niu_type == N2_NIU) { 5881 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5882 } else if (nxge_msi_enable) { 5883 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5884 } 5885 5886 /* Get the supported interrupt types */ 5887 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 5888 != DDI_SUCCESS) { 5889 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 5890 "ddi_intr_get_supported_types failed: status 0x%08x", 5891 ddi_status)); 5892 return (NXGE_ERROR | NXGE_DDI_FAILED); 5893 } 5894 nxgep->nxge_intr_type.intr_types = intr_types; 5895 5896 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5897 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5898 5899 /* 5900 * Solaris MSIX is not supported yet. use MSI for now. 
5901 * nxge_msi_enable (1): 5902 * 1 - MSI 2 - MSI-X others - FIXED 5903 */ 5904 switch (nxge_msi_enable) { 5905 default: 5906 type = DDI_INTR_TYPE_FIXED; 5907 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5908 "use fixed (intx emulation) type %08x", 5909 type)); 5910 break; 5911 5912 case 2: 5913 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5914 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5915 if (intr_types & DDI_INTR_TYPE_MSIX) { 5916 type = DDI_INTR_TYPE_MSIX; 5917 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5918 "ddi_intr_get_supported_types: MSIX 0x%08x", 5919 type)); 5920 } else if (intr_types & DDI_INTR_TYPE_MSI) { 5921 type = DDI_INTR_TYPE_MSI; 5922 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5923 "ddi_intr_get_supported_types: MSI 0x%08x", 5924 type)); 5925 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5926 type = DDI_INTR_TYPE_FIXED; 5927 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5928 "ddi_intr_get_supported_types: FIXED 0x%08x", 5929 type)); 5930 } 5931 break; 5932 5933 case 1: 5934 if (intr_types & DDI_INTR_TYPE_MSI) { 5935 type = DDI_INTR_TYPE_MSI; 5936 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5937 "ddi_intr_get_supported_types: MSI 0x%08x", 5938 type)); 5939 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 5940 type = DDI_INTR_TYPE_MSIX; 5941 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5942 "ddi_intr_get_supported_types: MSIX 0x%08x", 5943 type)); 5944 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5945 type = DDI_INTR_TYPE_FIXED; 5946 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5947 "ddi_intr_get_supported_types: FIXED 0x%08x", 5948 type)); 5949 } 5950 } 5951 5952 nxgep->nxge_intr_type.intr_type = type; 5953 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 5954 type == DDI_INTR_TYPE_FIXED) && 5955 nxgep->nxge_intr_type.niu_msi_enable) { 5956 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 5957 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5958 " nxge_add_intrs: " 5959 " nxge_add_intrs_adv failed: status 0x%08x", 5960 status)); 5961 return (status); 5962 } else { 5963 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5964 "interrupts registered: type %d", type)); 5965 nxgep->nxge_intr_type.intr_registered = B_TRUE; 5966 5967 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5968 "\nAdded advanced nxge add_intr_adv " 5969 "intr type 0x%x\n", type)); 5970 5971 return (status); 5972 } 5973 } 5974 5975 if (!nxgep->nxge_intr_type.intr_registered) { 5976 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 5977 "failed to register interrupts")); 5978 return (NXGE_ERROR | NXGE_DDI_FAILED); 5979 } 5980 5981 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 5982 return (status); 5983 } 5984 5985 static nxge_status_t 5986 nxge_add_intrs_adv(p_nxge_t nxgep) 5987 { 5988 int intr_type; 5989 p_nxge_intr_t intrp; 5990 5991 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 5992 5993 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5994 intr_type = intrp->intr_type; 5995 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 5996 intr_type)); 5997 5998 switch (intr_type) { 5999 case DDI_INTR_TYPE_MSI: /* 0x2 */ 6000 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 6001 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 6002 6003 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 6004 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 6005 6006 default: 6007 return (NXGE_ERROR); 6008 } 6009 } 6010 6011 6012 /*ARGSUSED*/ 6013 static nxge_status_t 6014
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail, nrequest;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = nxge_create_msi_property(nxgep);
		if (nrequest < navail) {
			navail = nrequest;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/*
		 * MSI requires a power-of-two vector count: round navail
		 * down to its highest set power-of-two bit (e.g. a navail
		 * of 13 is trimmed to 8).
		 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d", ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d", ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 $%p arg2 $%p: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2, x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 $%p arg2 $%p: "
			    "nldevs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2, ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated interrupts */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	(void) nxge_intr_ldgv_init(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d", ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d", ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		if (nxgep->niu_type != N2_NIU) {
			ldgp->intdata = SID_DATA(ldgp->func, x);
		}

		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
"1-1 int handler(%d) ldg %d ldv %d " 6311 "arg1 $%p arg2 $%p\n", 6312 x, ldgp->ldg, ldgp->ldvp->ldv, 6313 arg1, arg2)); 6314 } else if (ldgp->nldvs > 1) { 6315 inthandler = (uint_t *)ldgp->sys_intr_handler; 6316 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6317 "nxge_add_intrs_adv_type_fix: " 6318 "shared ldv %d int handler(%d) ldv %d ldg %d" 6319 "arg1 0x%016llx arg2 0x%016llx\n", 6320 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6321 arg1, arg2)); 6322 } 6323 6324 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6325 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6326 != DDI_SUCCESS) { 6327 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6328 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6329 "status 0x%x", x, ddi_status)); 6330 for (y = 0; y < intrp->intr_added; y++) { 6331 (void) ddi_intr_remove_handler( 6332 intrp->htable[y]); 6333 } 6334 for (y = 0; y < nactual; y++) { 6335 (void) ddi_intr_free(intrp->htable[y]); 6336 } 6337 /* Free already allocated intr */ 6338 kmem_free(intrp->htable, intrp->intr_size); 6339 6340 (void) nxge_ldgv_uninit(nxgep); 6341 6342 return (NXGE_ERROR | NXGE_DDI_FAILED); 6343 } 6344 intrp->intr_added++; 6345 } 6346 6347 intrp->msi_intx_cnt = nactual; 6348 6349 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6350 6351 status = nxge_intr_ldgv_init(nxgep); 6352 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6353 6354 return (status); 6355 } 6356 6357 static void 6358 nxge_remove_intrs(p_nxge_t nxgep) 6359 { 6360 int i, inum; 6361 p_nxge_intr_t intrp; 6362 6363 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6364 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6365 if (!intrp->intr_registered) { 6366 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6367 "<== nxge_remove_intrs: interrupts not registered")); 6368 return; 6369 } 6370 6371 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 6372 6373 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6374 (void) ddi_intr_block_disable(intrp->htable, 6375 intrp->intr_added); 6376 } else { 6377 for (i = 0; i < intrp->intr_added; i++) { 6378 (void) ddi_intr_disable(intrp->htable[i]); 6379 } 6380 } 6381 6382 for (inum = 0; inum < intrp->intr_added; inum++) { 6383 if (intrp->htable[inum]) { 6384 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6385 } 6386 } 6387 6388 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6389 if (intrp->htable[inum]) { 6390 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6391 "nxge_remove_intrs: ddi_intr_free inum %d " 6392 "msi_intx_cnt %d intr_added %d", 6393 inum, 6394 intrp->msi_intx_cnt, 6395 intrp->intr_added)); 6396 6397 (void) ddi_intr_free(intrp->htable[inum]); 6398 } 6399 } 6400 6401 kmem_free(intrp->htable, intrp->intr_size); 6402 intrp->intr_registered = B_FALSE; 6403 intrp->intr_enabled = B_FALSE; 6404 intrp->msi_intx_cnt = 0; 6405 intrp->intr_added = 0; 6406 6407 (void) nxge_ldgv_uninit(nxgep); 6408 6409 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6410 "#msix-request"); 6411 6412 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6413 } 6414 6415 /*ARGSUSED*/ 6416 static void 6417 nxge_intrs_enable(p_nxge_t nxgep) 6418 { 6419 p_nxge_intr_t intrp; 6420 int i; 6421 int status; 6422 6423 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6424 6425 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6426 6427 if (!intrp->intr_registered) { 6428 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6429 "interrupts are not registered")); 6430 return; 6431 } 6432 6433 if (intrp->intr_enabled) { 6434 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6435 "<== 

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t	*macp;
	int		status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	if (!isLDOMguest(nxgep)) {
		macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	} else {
		macp->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
		macp->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
		(void) memset(macp->m_src_addr, 0xff, MAXMACADDRLEN);
	}
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	/*
	 * The advertised SDU is the current frame size less the Ethernet
	 * header, VLAN tag, and CRC; e.g. a 1522-byte maxframesize with a
	 * 22-byte NXGE_EHEADER_VLAN_CRC yields the standard 1500-byte MTU.
	 */
	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
	    NXGE_EHEADER_VLAN_CRC;
	macp->m_max_sdu = nxgep->mac.default_mtu;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = nxge_priv_props;
	macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS;
	if (isLDOMguest(nxgep)) {
		macp->m_v12n = MAC_VIRT_LEVEL1 | MAC_VIRT_SERIALIZE;
	} else {
		macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1 |
		    MAC_VIRT_SERIALIZE;
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_mac_register: instance %d "
	    "max_sdu %d margin %d maxframe %d (header %d)",
	    nxgep->instance,
	    macp->m_max_sdu, macp->m_margin,
	    nxgep->mac.maxframesize,
	    NXGE_EHEADER_VLAN_CRC));

	status = mac_register(macp, &nxgep->mach);
	if (isLDOMguest(nxgep)) {
		KMEM_FREE(macp->m_src_addr, MAXMACADDRLEN);
		KMEM_FREE(macp->m_dst_addr, MAXMACADDRLEN);
	}
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}
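/*
 * Error-injection entry point, driven by a debug ioctl: the mblk
 * payload is an err_inject_t whose blk_id selects the hardware block,
 * err_id the specific error to inject, and chan the DMA channel for
 * the TXDMA/RXDMA cases. Only the IPP, TXC, TXDMA, RXDMA, and ZCP
 * blocks have injectors wired up below; the remaining block ids are
 * accepted but ignored.
 */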
void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t		size;
	mblk_t		*nmp;
	uint8_t		blk_id;
	uint8_t		chan;
	uint32_t	err_id;
	err_inject_t	*eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;
	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
	switch (blk_id) {
	case MAC_BLK_ID:
		break;
	case TXMAC_BLK_ID:
		break;
	case RXMAC_BLK_ID:
		break;
	case MIF_BLK_ID:
		break;
	case IPP_BLK_ID:
		nxge_ipp_inject_err(nxgep, err_id);
		break;
	case TXC_BLK_ID:
		nxge_txc_inject_err(nxgep, err_id);
		break;
	case TXDMA_BLK_ID:
		nxge_txdma_inject_err(nxgep, err_id, chan);
		break;
	case RXDMA_BLK_ID:
		nxge_rxdma_inject_err(nxgep, err_id, chan);
		break;
	case ZCP_BLK_ID:
		nxge_zcp_inject_err(nxgep, err_id);
		break;
	case ESPC_BLK_ID:
		break;
	case FFLP_BLK_ID:
		break;
	case PHY_BLK_ID:
		break;
	case ETHER_SERDES_BLK_ID:
		break;
	case PCIE_SERDES_BLK_ID:
		break;
	case VIR_BLK_ID:
		break;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

	miocack(wq, mp, (int)size, 0);
}

static int
nxge_init_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	ASSERT(nxgep != NULL);

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

	p_dip = nxgep->p_dip;
	MUTEX_ENTER(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_dev: func # %d",
	    nxgep->function_num));
	/*
	 * Loop through the existing per-Neptune hardware list.
	 */
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device: func # %d "
		    "hw_p $%p parent dip $%p",
		    nxgep->function_num, hw_p, p_dip));
		if (hw_p->parent_devp == p_dip) {
			nxgep->nxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->nxge_p[nxgep->function_num] = nxgep;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_init_common_device: func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num, hw_p, p_dip, hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {

		char **prop_val;
		uint_t prop_len;
		int i;

		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device: func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num, p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;
		if (nxgep->niu_type == N2_NIU) {
			hw_p->niu_type = N2_NIU;
			hw_p->platform_type = P_NEPTUNE_NIU;
			hw_p->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
		} else {
			hw_p->niu_type = NIU_TYPE_NONE;
			hw_p->platform_type = P_NEPTUNE_NONE;
			hw_p->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
		}

		hw_p->tcam = KMEM_ZALLOC(sizeof (tcam_flow_spec_t) *
		    hw_p->tcam_size, KM_SLEEP);

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;

		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
			for (i = 0; i < prop_len; i++) {
				if ((strcmp((caddr_t)prop_val[i],
				    NXGE_ROCK_COMPATIBLE) == 0)) {
					hw_p->platform_type = P_NEPTUNE_ROCK;
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "ROCK hw_p->platform_type %d",
					    hw_p->platform_type));
					break;
				}
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "nxge_init_common_dev: read compatible"
				    " property[%d] val[%s]",
				    i, (caddr_t)prop_val[i]));
			}
			/*
			 * Free the property only when the lookup succeeded;
			 * prop_val is undefined otherwise.
			 */
			ddi_prop_free(prop_val);
		}

		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
	}

	MUTEX_EXIT(&nxge_common_lock);

	nxgep->platform_type = hw_p->platform_type;
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d",
	    nxgep->platform_type));
	if (nxgep->niu_type != N2_NIU) {
		nxgep->niu_type = hw_p->niu_type;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}
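/*
 * Counterpart to nxge_init_common_dev(): drops this instance from the
 * shared per-Neptune hardware entry and, when the last of the sibling
 * functions (up to NXGE_MAX_PORTS) detaches, tears the entry down and
 * unlinks it from nxge_hw_list.
 */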
static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p, h_hw_p;
	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	dev_info_t		*p_dip;

	ASSERT(nxgep != NULL);

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device: func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num, hw_p, p_dip, hw_p->ndevs));

			/*
			 * Release the RDC table, a shared resource
			 * of the nxge hardware. The RDC table was
			 * assigned to this instance of nxge in
			 * nxge_use_cfg_dma_config().
			 */
			if (!isLDOMguest(nxgep)) {
				p_dma_cfgp =
				    (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
				p_cfgp =
				    (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
				(void) nxge_fzc_rdc_tbl_unbind(nxgep,
				    p_cfgp->def_mac_rxdma_grpid);

				/* Cleanup any outstanding groups. */
				nxge_grp_cleanup(nxgep);
			}

			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				KMEM_FREE(hw_p->tcam,
				    sizeof (tcam_flow_spec_t) *
				    hw_p->tcam_size);
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num, hw_p, p_dip,
				    hw_p->ndevs));

				nxge_hio_uninit(nxgep);

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device: "
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num, hw_p, p_dip,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device: "
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num, hw_p, p_dip,
					    hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				nxgep->nxge_hw_p = NULL;
				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}

/*
 * Determines the number of ports from the niu_type or the platform type.
 * Returns the number of ports, or zero on failure.
 */
int
nxge_get_nports(p_nxge_t nxgep)
{
	int	nports = 0;

	switch (nxgep->niu_type) {
	case N2_NIU:
	case NEPTUNE_2_10GF:
		nports = 2;
		break;
	case NEPTUNE_4_1GC:
	case NEPTUNE_2_10GF_2_1GC:
	case NEPTUNE_1_10GF_3_1GC:
	case NEPTUNE_1_1GC_1_10GF_2_1GC:
	case NEPTUNE_2_10GF_2_1GRF:
		nports = 4;
		break;
	default:
		switch (nxgep->platform_type) {
		case P_NEPTUNE_NIU:
		case P_NEPTUNE_ATLAS_2PORT:
			nports = 2;
			break;
		case P_NEPTUNE_ATLAS_4PORT:
		case P_NEPTUNE_MARAMBA_P0:
		case P_NEPTUNE_MARAMBA_P1:
		case P_NEPTUNE_ROCK:
		case P_NEPTUNE_ALONSO:
			nports = 4;
			break;
		default:
			break;
		}
		break;
	}

	return (nports);
}

/*
 * The following two functions are to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
	int	nmsi;
	extern	int ncpus;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));

	switch (nxgep->mac.portmode) {
	case PORT_10G_COPPER:
	case PORT_10G_FIBER:
	case PORT_10G_TN1010:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		/*
		 * The maximum number of MSI-X vectors requested is 8.
		 * If the system has fewer than 8 CPUs, the request is
		 * scaled down to the number of CPUs (the default).
		 */
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (10G): "
		    "nxge_msix_10g_intrs %d",
		    nxge_msix_10g_intrs));
		if ((nxge_msix_10g_intrs == 0) ||
		    (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_10G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): reset to 8"));
		} else {
			nmsi = nxge_msix_10g_intrs;
		}

		/*
		 * Only when the number of interrupts requested is the
		 * default of 8 is the request capped by the number of
		 * CPUs.
		 */
		if ((nmsi == NXGE_MSIX_REQUEST_10G) &&
		    (ncpus < nmsi)) {
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): "
			    "reset to #CPUs %d", ncpus));
			nmsi = ncpus;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;

	default:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d",
		    nxge_msix_1g_intrs));
		if ((nxge_msix_1g_intrs == 0) ||
		    (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_1G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (1G): reset to 2"));
		} else {
			nmsi = nxge_msix_1g_intrs;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
	return (nmsi);
}
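/*
 * Illustrative tuning example (added comment, not compiled in): the
 * per-port-type vector counts consumed above can be overridden in
 * /etc/system, subject to the NXGE_MSIX_MAX_ALLOWED cap, e.g.:
 *
 *	set nxge:nxge_msix_10g_intrs = 4
 *	set nxge:nxge_msix_1g_intrs = 2
 */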
/* ARGSUSED */
static int
nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
    void *pr_val)
{
	int		err = 0;
	link_flowctrl_t	fl;

	switch (pr_num) {
	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (link_flowctrl_t))
			return (EINVAL);
		fl = LINK_FLOWCTRL_RX;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	default:
		err = ENOTSUP;
		break;
	}
	return (err);
}


/*
 * The following is a software workaround for the Neptune hardware's
 * interrupt bugs: the hardware may generate spurious interrupts after
 * an interrupt handler is removed.
 */
#define	NXGE_PCI_PORT_LOGIC_OFFSET	0x98
#define	NXGE_PIM_RESET			(1ULL << 29)
#define	NXGE_GLU_RESET			(1ULL << 30)
#define	NXGE_NIU_RESET			(1ULL << 31)
#define	NXGE_PCI_RESET_ALL		(NXGE_PIM_RESET |	\
					NXGE_GLU_RESET |	\
					NXGE_NIU_RESET)

#define	NXGE_WAIT_QUITE_TIME		200000
#define	NXGE_WAIT_QUITE_RETRY		40
#define	NXGE_PCI_RESET_WAIT		1000000 /* one second */

static void
nxge_niu_peu_reset(p_nxge_t nxgep)
{
	uint32_t	rvalue;
	p_nxge_hw_list_t hw_p;
	p_nxge_t	fnxgep;
	int		i, j;

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: NULL hardware pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
	    hw_p->flags, nxgep->nxge_link_poll_timerid,
	    nxgep->nxge_timerid));

	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
	/*
	 * Make sure other instances on the same hardware have stopped
	 * sending PIOs and are in a quiescent state.
	 */
	for (i = 0; i < NXGE_MAX_PORTS; i++) {
		fnxgep = hw_p->nxge_p[i];
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: checking entry %d "
		    "nxgep $%p", i, fnxgep));
#ifdef NXGE_DEBUG
		if (fnxgep) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: entry %d (function %d) "
			    "link timer id %d hw timer id %d",
			    i, fnxgep->function_num,
			    fnxgep->nxge_link_poll_timerid,
			    fnxgep->nxge_timerid));
		}
#endif
		if (fnxgep && fnxgep != nxgep &&
		    (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: checking $%p "
			    "(function %d) timer ids",
			    fnxgep, fnxgep->function_num));
			for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
				NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
				    "==> nxge_niu_peu_reset: waiting"));
				NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
				if (!fnxgep->nxge_timerid &&
				    !fnxgep->nxge_link_poll_timerid) {
					break;
				}
			}
			NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
			if (fnxgep->nxge_timerid ||
			    fnxgep->nxge_link_poll_timerid) {
				MUTEX_EXIT(&hw_p->nxge_cfg_lock);
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "<== nxge_niu_peu_reset: cannot reset "
				    "hardware (devices are still in use)"));
				return;
			}
		}
	}

	if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
		hw_p->flags |= COMMON_RESET_NIU_PCI;
		rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: read offset 0x%x (%d) "
		    "(data 0x%x)",
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    rvalue));

		rvalue |= NXGE_PCI_RESET_ALL;
		pci_config_put32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
		    rvalue));

		NXGE_DELAY(NXGE_PCI_RESET_WAIT);
	}

	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
}

static void
nxge_set_pci_replay_timeout(p_nxge_t nxgep)
{
	p_dev_regs_t	dev_regs;
	uint32_t	value;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));

	if (!nxge_set_replay_timer) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: will not change "
		    "the timeout"));
		return;
	}

	dev_regs = nxgep->dev_regs;

	/*
	 * Check for a usable configuration-space handle before the
	 * debug message below dereferences it.
	 */
	if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
		    "no PCI handle",
		    dev_regs));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
	    dev_regs, dev_regs->nxge_pciregh));

	value = (pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
	    (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value before set "
	    "0x%x (timeout value to set 0x%x at offset 0x%x) value 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));

	pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
	    value);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET)));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
}
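/*
 * Note on the write above: the new timeout is OR-ed into bits 18:14 of
 * the config register rather than read-modify-written under a mask, so
 * a pre-existing non-zero field combines with nxge_replay_timeout
 * (e.g. with the default of 0xc the driver ORs in 0xc << 14 = 0x30000).
 */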
/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
nxge_quiesce(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);

	if (nxgep == NULL)
		return (DDI_FAILURE);

	/* Turn off debugging */
	nxge_debug_level = NO_DEBUG;
	nxgep->nxge_debug_level = NO_DEBUG;
	npi_debug_level = NO_DEBUG;

	/*
	 * Stop the link monitor only when the link check mode is
	 * interrupt based.
	 */
	if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	}

	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	return (DDI_SUCCESS);
}