/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_hio.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use OBP-published properties */
uint32_t nxge_use_rdc_intr = 1;		/* debug flag to assign RDC interrupts */
/*
 * PSARC/2007/453 MSI-X interrupt limit override
 */
uint32_t nxge_msi_enable = 2;

/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed. If this flag is enabled, the
 * driver will reset the hardware when devices are being detached.
 */
uint32_t nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware
 * checksum bugs that affect packet transmission
 * and receive:
 *
 * Usage of nxge_cksum_offload:
 *
 *  (1) nxge_cksum_offload = 0 (default):
 *	- transmit packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: driver will compute the software checksum
 *	       based on the partial checksum computed
 *	       by the IP layer.
 *	- receive packets:
 *	  TCP: marks packet checksum flags based on hardware result.
 *	  UDP: will not mark checksum flags.
 *
 *  (2) nxge_cksum_offload = 1:
 *	- transmit packets:
 *	  TCP/UDP: uses the hardware checksum feature.
 *	- receive packets:
 *	  TCP/UDP: marks packet checksum flags based on hardware result.
 *
 *  (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksums for both TCP and UDP will be computed
 *	  by the stack.
 *	- Software LSO is not allowed in this case.
 *
 *  (4) nxge_cksum_offload > 2:
 *	- Will be treated as if it were set to 2
 *	  (stack will compute the checksum).
 *
 *  (5) If the hardware bug is fixed, this workaround
 *	needs to be updated accordingly to reflect
 *	the new hardware revision.
 */
uint32_t nxge_cksum_offload = 0;
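/*
 * Illustrative note (added, not from the original source): like the
 * tunables below, this workaround flag is normally set from
 * /etc/system, e.g.
 *
 *	set nxge:nxge_cksum_offload = 1
 */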
/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/* MAX LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;


/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if defined(_BIG_ENDIAN)
uint32_t nxge_use_kmem_alloc = 1;
#else
uint32_t nxge_use_kmem_alloc = 0;
#endif

rtrace_t npi_rtracebuf;

/*
 * The hardware sometimes fails to allow enough time for the link
 * partner to send an acknowledgement for packets that the hardware
 * sent to it. In those instances the hardware resends the packets
 * earlier than it should. This behavior caused some switches to
 * acknowledge the wrong packets, which triggered a fatal error.
 * This software workaround sets the replay timer to a value
 * suggested by the hardware team.
 *
 * PCI config space replay timer register:
 * the replay timeout value below is 0xc
 * for bits 14:18.
 */
#define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
#define	PCI_REPLAY_TIMEOUT_SHIFT	14

uint32_t nxge_set_replay_timer = 1;
uint32_t nxge_replay_timeout = 0xc;
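/*
 * Illustrative sketch only (assumed details, not the authoritative
 * implementation): nxge_set_pci_replay_timeout(), defined later in this
 * file, effectively performs a read-modify-write of the 5-bit field at
 * bits 18:14 of the config register at offset 0xb8:
 *
 *	uint32_t val;
 *
 *	val = pci_config_get32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	val &= ~(0x1f << PCI_REPLAY_TIMEOUT_SHIFT);
 *	val |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, val);
 */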
/*
 * The transmit serialization sometimes causes the driver to sleep
 * longer than it should before calling the transmit function. The
 * performance group suggested a tunable to cap the maximum wait time;
 * the default is set to 1 tick.
 */
uint32_t nxge_tx_serial_maxsleep = 1;

#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);
static int nxge_quiesce(dev_info_t *);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_remove(void *arg, int slot);
extern void nxge_grp_cleanup(p_nxge_t nxge);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *,
	size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *,
	size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
	char *, caddr_t);
#if defined(sun4v)
extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep);
extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm);
#endif

/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	int slot, int rdctbl, boolean_t usetbl);
void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
	boolean_t factory);
#if defined(sun4v)
extern mblk_t *nxge_m_tx(void *arg, mblk_t *mp);
#endif

static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
	uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
	uint_t, uint_t, void *, uint_t *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
	const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
	void *, uint_t *);
static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);
static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
	mac_ring_info_t *, mac_ring_handle_t);
static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
	mac_ring_type_t);
static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
	mac_ring_type_t);

static void nxge_niu_peu_reset(p_nxge_t nxgep);
static void nxge_set_pci_replay_timeout(nxge_t *);

mac_priv_prop_t nxge_priv_props[] = {
	{"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW},
	{"_function_number", MAC_PROP_PERM_READ},
	{"_fw_version", MAC_PROP_PERM_READ},
	{"_port_mode", MAC_PROP_PERM_READ},
	{"_hot_swap_phy", MAC_PROP_PERM_READ},
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
	{"_soft_lso_enable", MAC_PROP_PERM_RW}
};

#define	NXGE_MAX_PRIV_PROPS	\
	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ 256

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	NULL,
	NULL,
	nxge_m_ioctl,
	nxge_m_getcapab,
	NULL,
	NULL,
	nxge_m_setprop,
	nxge_m_getprop
};
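/*
 * Note (added for clarity): nxge_attach() fills in the mc_tx callback
 * of this table, one of the NULL entries above, with nxge_m_tx when
 * running as an LDOMs guest (see the sun4v code in nxge_attach()).
 */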
void
nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);
/*
 * For latency-sensitive applications, PAE and customers requested
 * tunables that allow the user to raise the number of interrupts in
 * order to spread them among multiple channels. The DDI framework
 * limits the maximum number of MSI-X resources to allocate to 8
 * (ddi_msix_alloc_limit). If more than 8 are requested,
 * ddi_msix_alloc_limit must be raised accordingly. The default number
 * of MSI interrupts is 8 for a 10G link and 2 for a 1G link.
 */
#define	NXGE_MSIX_MAX_ALLOWED	32
uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
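/*
 * Example (added for illustration): to request 16 vectors on a 10G
 * port, both the driver tunable and the DDI limit would need to be
 * raised in /etc/system:
 *
 *	set nxge:nxge_msix_10g_intrs = 16
 *	set ddi_msix_alloc_limit = 16
 */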
/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;
void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;
nxge_os_mutex_t nxgedebuglock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t,
	ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *,
	ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes [] = {0x2000};
#else
size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
    0x10000, 0x20000, 0x40000, 0x80000,
    0x100000, 0x200000, 0x400000, 0x800000,
    0x1000000, 0x2000000, 0x4000000};
#endif
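/*
 * Illustrative note (added): the sizes above are powers of two from
 * 4 KB up to 64 MB, so a 4 MB buffer pool, for example, can be carved
 * from a single 0x400000 chunk instead of 1024 separate 4 KB chunks.
 */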
/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);
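/*
 * Summary (added for clarity): on DDI_ATTACH, nxge_attach() maps the
 * device registers, registers hypervisor services (sun4v), initializes
 * the Hybrid I/O module, ndd parameters, kstats, system DMA pages and
 * mutexes, adds interrupts, and finally registers with the MAC layer.
 * DDI_RESUME and DDI_PM_RESUME re-enable an already-attached instance.
 */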
static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}


	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
	    &nxge_dev_desc_dma_acc_attr,
	    &nxge_rx_dma_attr);

	/* Create & initialize the per-Neptune data structure */
	/* (even if we're a guest). */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	/*
	 * Software workaround: set the replay timer.
	 */
	if (nxgep->niu_type != N2_NIU) {
		nxge_set_pci_replay_timeout(nxgep);
	}
#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		nxge_m_callbacks.mc_tx = nxge_m_tx;
	}
#endif

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail4;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
		 * internally; the other 2 ports use BMAC (1G "Big" MAC).
		 * The two types of MACs have different characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Setup the ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);

		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * The address_hi (the first int in the reg property)
		 * contains the config handle, but we need to mask off
		 * bits 28-31, which are OBP-specific info.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
		ddi_prop_free(regp);
	}

	/*
	 * Set the defaults for the MTU size.
	 */
	nxge_hw_id_init(nxgep);

	if (isLDOMguest(nxgep)) {
		uchar_t *prop_val;
		uint_t prop_len;
		uint32_t max_frame_size;

		extern void nxge_get_logical_props(p_nxge_t);

		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
		nxgep->mac.portmode = PORT_LOGICAL;
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
		    "phy-type", "virtual transceiver");

		nxgep->nports = 1;
		nxgep->board_ver = 0;	/* XXX What? */

		/*
		 * local-mac-address property gives us info on which
		 * specific MAC address the Hybrid resource is associated
		 * with.
		 */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "local-mac-address", &prop_val,
		    &prop_len) != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		if (prop_len != ETHERADDRL) {
			ddi_prop_free(prop_val);
			goto nxge_attach_fail5;
		}
		ether_copy(prop_val, nxgep->hio_mac_addr);
		ddi_prop_free(prop_val);
		nxge_get_logical_props(nxgep);

		/*
		 * Enable Jumbo property based on the "max-frame-size"
		 * property value.
		 */
		max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
		    nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "max-frame-size", NXGE_MTU_DEFAULT_MAX);
		if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
		    (max_frame_size <= TX_JUMBO_MTU)) {
			nxgep->mac.is_jumbo = B_TRUE;
			nxgep->mac.maxframesize = (uint16_t)max_frame_size;
			nxgep->mac.default_mtu = nxgep->mac.maxframesize -
			    NXGE_EHEADER_VLAN_CRC;
		}
	} else {
		status = nxge_xcvr_find(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
			    " Couldn't determine card type"
			    " .... exit "));
			goto nxge_attach_fail5;
		}

		status = nxge_get_config_properties(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "get_hw create failed"));
			goto nxge_attach_fail;
		}
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}


	if (!isLDOMguest(nxgep))
		nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		/* Find our VR & channel sets. */
		status = nxge_hio_vr_add(nxgep);
		if (status != NXGE_OK) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_hio_vr_add failed"));
			(void) hsvc_unregister(&nxgep->niu_hsvc);
			nxgep->niu_hsvc_available = B_FALSE;
		}
		goto nxge_attach_exit;
	}
#endif

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}

	/* If a guest, register with vio_net instead.
	 */
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	/* nxge_link_monitor calls xcvr.check_link recursively */
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int status = DDI_SUCCESS;
	int instance;
	p_nxge_t nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (isLDOMguest(nxgep)) {
		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
			nxge_m_stop((void *)nxgep);
		nxge_hio_unregister(nxgep);
	} else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	/*
	 * If this flag is set, it will affect the Neptune
	 * only.
	 */
	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
		nxge_niu_peu_reset(nxgep);
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
int
nxge_hsvc_register(nxge_t *nxgep)
{
	nxge_status_t status;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			return (DDI_FAILURE);
		}
		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}

	return (DDI_SUCCESS);
}
#endif

static char n2_siu_name[] = "niu";
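/*
 * Register set layout (summary added for clarity, derived from the
 * mappings below):
 *	Neptune (PCI-E):  reg 0 = PCI config space, reg 1 = device PIO,
 *			  reg 2 = MSI/MSI-X, reg 3 = virtualization region.
 *	N2/NIU:           reg 1 = device PIO (FWARC 2006/556),
 *			  regs 2 and 3 = virtualization regions.
 */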
static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	p_dev_regs_t dev_regs;
	char buf[MAXPATHLEN + 1];
	char *devname;
#ifdef NXGE_DEBUG
	char *sysname;
#endif
	off_t regsize;
	nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	/*
	 * The driver is running on an N2-NIU system if devname is
	 * something like "/niu@80/network@0".
	 */
	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int *prop_val;
		uint_t prop_len;
		uint8_t func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				return (NXGE_OK);
			}
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMPORTANT: workaround for a hardware bit-swapping bug
		 * that ends up with no-snoop = yes, resulting in DMA
		 * that is not synchronized properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = pci_config_get16(dev_regs->nxge_pciregh,
		    pci_offset);
		pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
"nxge_map_reg: hardware addr 0x%0llx " 1353 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh)); 1354 1355 goto nxge_map_regs_exit; 1356 nxge_map_regs_fail3: 1357 if (dev_regs->nxge_msix_regh) { 1358 ddi_regs_map_free(&dev_regs->nxge_msix_regh); 1359 } 1360 if (dev_regs->nxge_vir_regh) { 1361 ddi_regs_map_free(&dev_regs->nxge_regh); 1362 } 1363 nxge_map_regs_fail2: 1364 if (dev_regs->nxge_regh) { 1365 ddi_regs_map_free(&dev_regs->nxge_regh); 1366 } 1367 nxge_map_regs_fail1: 1368 if (dev_regs->nxge_pciregh) { 1369 ddi_regs_map_free(&dev_regs->nxge_pciregh); 1370 } 1371 nxge_map_regs_fail0: 1372 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory")); 1373 kmem_free(dev_regs, sizeof (dev_regs_t)); 1374 1375 nxge_map_regs_exit: 1376 if (ddi_status != DDI_SUCCESS) 1377 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1378 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs")); 1379 return (status); 1380 } 1381 1382 static void 1383 nxge_unmap_regs(p_nxge_t nxgep) 1384 { 1385 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs")); 1386 1387 if (isLDOMguest(nxgep)) { 1388 nxge_guest_regs_map_free(nxgep); 1389 return; 1390 } 1391 1392 if (nxgep->dev_regs) { 1393 if (nxgep->dev_regs->nxge_pciregh) { 1394 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1395 "==> nxge_unmap_regs: bus")); 1396 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh); 1397 nxgep->dev_regs->nxge_pciregh = NULL; 1398 } 1399 if (nxgep->dev_regs->nxge_regh) { 1400 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1401 "==> nxge_unmap_regs: device registers")); 1402 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1403 nxgep->dev_regs->nxge_regh = NULL; 1404 } 1405 if (nxgep->dev_regs->nxge_msix_regh) { 1406 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1407 "==> nxge_unmap_regs: device interrupts")); 1408 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); 1409 nxgep->dev_regs->nxge_msix_regh = NULL; 1410 } 1411 if (nxgep->dev_regs->nxge_vir_regh) { 1412 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1413 "==> nxge_unmap_regs: vio region")); 1414 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); 1415 nxgep->dev_regs->nxge_vir_regh = NULL; 1416 } 1417 if (nxgep->dev_regs->nxge_vir2_regh) { 1418 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1419 "==> nxge_unmap_regs: vio2 region")); 1420 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); 1421 nxgep->dev_regs->nxge_vir2_regh = NULL; 1422 } 1423 1424 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); 1425 nxgep->dev_regs = NULL; 1426 } 1427 1428 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); 1429 } 1430 1431 static nxge_status_t 1432 nxge_setup_mutexes(p_nxge_t nxgep) 1433 { 1434 int ddi_status = DDI_SUCCESS; 1435 nxge_status_t status = NXGE_OK; 1436 nxge_classify_t *classify_ptr; 1437 int partition; 1438 1439 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); 1440 1441 /* 1442 * Get the interrupt cookie so the mutexes can be 1443 * Initialized. 1444 */ 1445 if (isLDOMguest(nxgep)) { 1446 nxgep->interrupt_cookie = 0; 1447 } else { 1448 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, 1449 &nxgep->interrupt_cookie); 1450 1451 if (ddi_status != DDI_SUCCESS) { 1452 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1453 "<== nxge_setup_mutexes: failed 0x%x", 1454 ddi_status)); 1455 goto nxge_setup_mutexes_exit; 1456 } 1457 } 1458 1459 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL); 1460 MUTEX_INIT(&nxgep->poll_lock, NULL, 1461 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1462 1463 /* 1464 * Initialize mutexes for this device. 
1465 */ 1466 MUTEX_INIT(nxgep->genlock, NULL, 1467 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1468 MUTEX_INIT(&nxgep->ouraddr_lock, NULL, 1469 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1470 MUTEX_INIT(&nxgep->mif_lock, NULL, 1471 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1472 MUTEX_INIT(&nxgep->group_lock, NULL, 1473 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1474 RW_INIT(&nxgep->filter_lock, NULL, 1475 RW_DRIVER, (void *)nxgep->interrupt_cookie); 1476 1477 classify_ptr = &nxgep->classifier; 1478 /* 1479 * FFLP Mutexes are never used in interrupt context 1480 * as fflp operation can take very long time to 1481 * complete and hence not suitable to invoke from interrupt 1482 * handlers. 1483 */ 1484 MUTEX_INIT(&classify_ptr->tcam_lock, NULL, 1485 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1486 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1487 MUTEX_INIT(&classify_ptr->fcram_lock, NULL, 1488 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1489 for (partition = 0; partition < MAX_PARTITION; partition++) { 1490 MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL, 1491 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1492 } 1493 } 1494 1495 nxge_setup_mutexes_exit: 1496 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1497 "<== nxge_setup_mutexes status = %x", status)); 1498 1499 if (ddi_status != DDI_SUCCESS) 1500 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1501 1502 return (status); 1503 } 1504 1505 static void 1506 nxge_destroy_mutexes(p_nxge_t nxgep) 1507 { 1508 int partition; 1509 nxge_classify_t *classify_ptr; 1510 1511 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes")); 1512 RW_DESTROY(&nxgep->filter_lock); 1513 MUTEX_DESTROY(&nxgep->group_lock); 1514 MUTEX_DESTROY(&nxgep->mif_lock); 1515 MUTEX_DESTROY(&nxgep->ouraddr_lock); 1516 MUTEX_DESTROY(nxgep->genlock); 1517 1518 classify_ptr = &nxgep->classifier; 1519 MUTEX_DESTROY(&classify_ptr->tcam_lock); 1520 1521 /* Destroy all polling resources. */ 1522 MUTEX_DESTROY(&nxgep->poll_lock); 1523 cv_destroy(&nxgep->poll_cv); 1524 1525 /* free data structures, based on HW type */ 1526 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1527 MUTEX_DESTROY(&classify_ptr->fcram_lock); 1528 for (partition = 0; partition < MAX_PARTITION; partition++) { 1529 MUTEX_DESTROY(&classify_ptr->hash_lock[partition]); 1530 } 1531 } 1532 1533 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes")); 1534 } 1535 1536 nxge_status_t 1537 nxge_init(p_nxge_t nxgep) 1538 { 1539 nxge_status_t status = NXGE_OK; 1540 1541 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init")); 1542 1543 if (nxgep->drv_state & STATE_HW_INITIALIZED) { 1544 return (status); 1545 } 1546 1547 /* 1548 * Allocate system memory for the receive/transmit buffer blocks 1549 * and receive/transmit descriptor rings. 1550 */ 1551 status = nxge_alloc_mem_pool(nxgep); 1552 if (status != NXGE_OK) { 1553 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n")); 1554 goto nxge_init_fail1; 1555 } 1556 1557 if (!isLDOMguest(nxgep)) { 1558 /* 1559 * Initialize and enable the TXC registers. 1560 * (Globally enable the Tx controller, 1561 * enable the port, configure the dma channel bitmap, 1562 * configure the max burst size). 1563 */ 1564 status = nxge_txc_init(nxgep); 1565 if (status != NXGE_OK) { 1566 NXGE_ERROR_MSG((nxgep, 1567 NXGE_ERR_CTL, "init txc failed\n")); 1568 goto nxge_init_fail2; 1569 } 1570 } 1571 1572 /* 1573 * Initialize and enable TXDMA channels. 
1574 */ 1575 status = nxge_init_txdma_channels(nxgep); 1576 if (status != NXGE_OK) { 1577 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n")); 1578 goto nxge_init_fail3; 1579 } 1580 1581 /* 1582 * Initialize and enable RXDMA channels. 1583 */ 1584 status = nxge_init_rxdma_channels(nxgep); 1585 if (status != NXGE_OK) { 1586 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n")); 1587 goto nxge_init_fail4; 1588 } 1589 1590 /* 1591 * The guest domain is now done. 1592 */ 1593 if (isLDOMguest(nxgep)) { 1594 nxgep->drv_state |= STATE_HW_INITIALIZED; 1595 goto nxge_init_exit; 1596 } 1597 1598 /* 1599 * Initialize TCAM and FCRAM (Neptune). 1600 */ 1601 status = nxge_classify_init(nxgep); 1602 if (status != NXGE_OK) { 1603 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n")); 1604 goto nxge_init_fail5; 1605 } 1606 1607 /* 1608 * Initialize ZCP 1609 */ 1610 status = nxge_zcp_init(nxgep); 1611 if (status != NXGE_OK) { 1612 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n")); 1613 goto nxge_init_fail5; 1614 } 1615 1616 /* 1617 * Initialize IPP. 1618 */ 1619 status = nxge_ipp_init(nxgep); 1620 if (status != NXGE_OK) { 1621 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n")); 1622 goto nxge_init_fail5; 1623 } 1624 1625 /* 1626 * Initialize the MAC block. 1627 */ 1628 status = nxge_mac_init(nxgep); 1629 if (status != NXGE_OK) { 1630 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n")); 1631 goto nxge_init_fail5; 1632 } 1633 1634 /* 1635 * Enable the interrrupts for DDI. 1636 */ 1637 nxge_intrs_enable(nxgep); 1638 1639 nxgep->drv_state |= STATE_HW_INITIALIZED; 1640 1641 goto nxge_init_exit; 1642 1643 nxge_init_fail5: 1644 nxge_uninit_rxdma_channels(nxgep); 1645 nxge_init_fail4: 1646 nxge_uninit_txdma_channels(nxgep); 1647 nxge_init_fail3: 1648 if (!isLDOMguest(nxgep)) { 1649 (void) nxge_txc_uninit(nxgep); 1650 } 1651 nxge_init_fail2: 1652 nxge_free_mem_pool(nxgep); 1653 nxge_init_fail1: 1654 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1655 "<== nxge_init status (failed) = 0x%08x", status)); 1656 return (status); 1657 1658 nxge_init_exit: 1659 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x", 1660 status)); 1661 return (status); 1662 } 1663 1664 1665 timeout_id_t 1666 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec) 1667 { 1668 if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) { 1669 return (timeout(func, (caddr_t)nxgep, 1670 drv_usectohz(1000 * msec))); 1671 } 1672 return (NULL); 1673 } 1674 1675 /*ARGSUSED*/ 1676 void 1677 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid) 1678 { 1679 if (timerid) { 1680 (void) untimeout(timerid); 1681 } 1682 } 1683 1684 void 1685 nxge_uninit(p_nxge_t nxgep) 1686 { 1687 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit")); 1688 1689 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1690 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1691 "==> nxge_uninit: not initialized")); 1692 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1693 "<== nxge_uninit")); 1694 return; 1695 } 1696 1697 if (!isLDOMguest(nxgep)) { 1698 /* 1699 * Reset the receive MAC side. 1700 */ 1701 (void) nxge_rx_mac_disable(nxgep); 1702 1703 /* 1704 * Drain the IPP. 
1705 */ 1706 (void) nxge_ipp_drain(nxgep); 1707 } 1708 1709 /* stop timer */ 1710 if (nxgep->nxge_timerid) { 1711 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 1712 nxgep->nxge_timerid = 0; 1713 } 1714 1715 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1716 (void) nxge_intr_hw_disable(nxgep); 1717 1718 1719 /* Disable and soft reset the IPP */ 1720 if (!isLDOMguest(nxgep)) 1721 (void) nxge_ipp_disable(nxgep); 1722 1723 /* Free classification resources */ 1724 (void) nxge_classify_uninit(nxgep); 1725 1726 /* 1727 * Reset the transmit/receive DMA side. 1728 */ 1729 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 1730 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1731 1732 nxge_uninit_txdma_channels(nxgep); 1733 nxge_uninit_rxdma_channels(nxgep); 1734 1735 /* 1736 * Reset the transmit MAC side. 1737 */ 1738 (void) nxge_tx_mac_disable(nxgep); 1739 1740 nxge_free_mem_pool(nxgep); 1741 1742 /* 1743 * Start the timer if the reset flag is not set. 1744 * If this reset flag is set, the link monitor 1745 * will not be started in order to stop furthur bus 1746 * activities coming from this interface. 1747 * The driver will start the monitor function 1748 * if the interface was initialized again later. 1749 */ 1750 if (!nxge_peu_reset_enable) { 1751 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1752 } 1753 1754 nxgep->drv_state &= ~STATE_HW_INITIALIZED; 1755 1756 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: " 1757 "nxge_mblks_pending %d", nxge_mblks_pending)); 1758 } 1759 1760 void 1761 nxge_get64(p_nxge_t nxgep, p_mblk_t mp) 1762 { 1763 uint64_t reg; 1764 uint64_t regdata; 1765 int i, retry; 1766 1767 bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t)); 1768 regdata = 0; 1769 retry = 1; 1770 1771 for (i = 0; i < retry; i++) { 1772 NXGE_REG_RD64(nxgep->npi_handle, reg, ®data); 1773 } 1774 bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t)); 1775 } 1776 1777 void 1778 nxge_put64(p_nxge_t nxgep, p_mblk_t mp) 1779 { 1780 uint64_t reg; 1781 uint64_t buf[2]; 1782 1783 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t)); 1784 reg = buf[0]; 1785 1786 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]); 1787 } 1788 1789 /*ARGSUSED*/ 1790 /*VARARGS*/ 1791 void 1792 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...) 1793 { 1794 char msg_buffer[1048]; 1795 char prefix_buffer[32]; 1796 int instance; 1797 uint64_t debug_level; 1798 int cmn_level = CE_CONT; 1799 va_list ap; 1800 1801 if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) { 1802 /* In case a developer has changed nxge_debug_level. */ 1803 if (nxgep->nxge_debug_level != nxge_debug_level) 1804 nxgep->nxge_debug_level = nxge_debug_level; 1805 } 1806 1807 debug_level = (nxgep == NULL) ? 
/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) {
		/* In case a developer has changed nxge_debug_level. */
		nxgep->nxge_debug_level = nxge_debug_level;
	}

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);

	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}

#ifdef NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char *dev_ptr;
	ddi_acc_handle_t pci_config_handle;
	uint32_t regval;
	int i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));
1932
1933
1934
1935 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1936 "first 0x%llx second 0x%llx third 0x%llx "
1937 "last 0x%llx ",
1938 NXGE_PIO_READ64(dev_handle,
1939 (uint64_t *)(dev_ptr + 0), 0),
1940 NXGE_PIO_READ64(dev_handle,
1941 (uint64_t *)(dev_ptr + 8), 0),
1942 NXGE_PIO_READ64(dev_handle,
1943 (uint64_t *)(dev_ptr + 16), 0),
1944 NXGE_PIO_READ64(dev_handle,
1945 (uint64_t *)(dev_ptr + 24), 0)));
1946 }
1947 }
1948
1949 #endif
1950
1951 static void
1952 nxge_suspend(p_nxge_t nxgep)
1953 {
1954 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));
1955
1956 nxge_intrs_disable(nxgep);
1957 nxge_destroy_dev(nxgep);
1958
1959 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
1960 }
1961
1962 static nxge_status_t
1963 nxge_resume(p_nxge_t nxgep)
1964 {
1965 nxge_status_t status = NXGE_OK;
1966
1967 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
1968
1969 nxgep->suspended = DDI_RESUME;
1970 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1971 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
1972 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
1973 (void) nxge_rx_mac_enable(nxgep);
1974 (void) nxge_tx_mac_enable(nxgep);
1975 nxge_intrs_enable(nxgep);
1976 nxgep->suspended = 0;
1977
1978 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1979 "<== nxge_resume status = 0x%x", status));
1980 return (status);
1981 }
1982
1983 static nxge_status_t
1984 nxge_setup_dev(p_nxge_t nxgep)
1985 {
1986 nxge_status_t status = NXGE_OK;
1987
1988 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
1989 nxgep->mac.portnum));
1990
1991 status = nxge_link_init(nxgep);
1992
1993 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
1994 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1995 "port%d Bad register acc handle", nxgep->mac.portnum));
1996 status = NXGE_ERROR;
1997 }
1998
1999 if (status != NXGE_OK) {
2000 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2001 " nxge_setup_dev status "
2002 "(xcvr init 0x%08x)", status));
2003 goto nxge_setup_dev_exit;
2004 }
2005
2006 nxge_setup_dev_exit:
2007 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2008 "<== nxge_setup_dev port %d status = 0x%08x",
2009 nxgep->mac.portnum, status));
2010
2011 return (status);
2012 }
2013
2014 static void
2015 nxge_destroy_dev(p_nxge_t nxgep)
2016 {
2017 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));
2018
2019 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
2020
2021 (void) nxge_hw_stop(nxgep);
2022
2023 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
2024 }
2025
2026 static nxge_status_t
2027 nxge_setup_system_dma_pages(p_nxge_t nxgep)
2028 {
2029 int ddi_status = DDI_SUCCESS;
2030 uint_t count;
2031 ddi_dma_cookie_t cookie;
2032 uint_t iommu_pagesize;
2033 nxge_status_t status = NXGE_OK;
2034
2035 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
2036 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
2037 if (nxgep->niu_type != N2_NIU) {
2038 iommu_pagesize = dvma_pagesize(nxgep->dip);
2039 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2040 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
2041 " default_block_size %d iommu_pagesize %d",
2042 nxgep->sys_page_sz,
2043 ddi_ptob(nxgep->dip, (ulong_t)1),
2044 nxgep->rx_default_block_size,
2045 iommu_pagesize));
2046
2047 if (iommu_pagesize != 0) {
2048 if (nxgep->sys_page_sz == iommu_pagesize) {
2049 if (iommu_pagesize > 0x4000)
2050 nxgep->sys_page_sz = 0x4000;
2051 } else {
2052 if (nxgep->sys_page_sz >
iommu_pagesize) 2053 nxgep->sys_page_sz = iommu_pagesize; 2054 } 2055 } 2056 } 2057 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2058 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2059 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2060 "default_block_size %d page mask %d", 2061 nxgep->sys_page_sz, 2062 ddi_ptob(nxgep->dip, (ulong_t)1), 2063 nxgep->rx_default_block_size, 2064 nxgep->sys_page_mask)); 2065 2066 2067 switch (nxgep->sys_page_sz) { 2068 default: 2069 nxgep->sys_page_sz = 0x1000; 2070 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2071 nxgep->rx_default_block_size = 0x1000; 2072 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2073 break; 2074 case 0x1000: 2075 nxgep->rx_default_block_size = 0x1000; 2076 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2077 break; 2078 case 0x2000: 2079 nxgep->rx_default_block_size = 0x2000; 2080 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2081 break; 2082 case 0x4000: 2083 nxgep->rx_default_block_size = 0x4000; 2084 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 2085 break; 2086 case 0x8000: 2087 nxgep->rx_default_block_size = 0x8000; 2088 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 2089 break; 2090 } 2091 2092 #ifndef USE_RX_BIG_BUF 2093 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 2094 #else 2095 nxgep->rx_default_block_size = 0x2000; 2096 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2097 #endif 2098 /* 2099 * Get the system DMA burst size. 2100 */ 2101 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2102 DDI_DMA_DONTWAIT, 0, 2103 &nxgep->dmasparehandle); 2104 if (ddi_status != DDI_SUCCESS) { 2105 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2106 "ddi_dma_alloc_handle: failed " 2107 " status 0x%x", ddi_status)); 2108 goto nxge_get_soft_properties_exit; 2109 } 2110 2111 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2112 (caddr_t)nxgep->dmasparehandle, 2113 sizeof (nxgep->dmasparehandle), 2114 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2115 DDI_DMA_DONTWAIT, 0, 2116 &cookie, &count); 2117 if (ddi_status != DDI_DMA_MAPPED) { 2118 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2119 "Binding spare handle to find system" 2120 " burstsize failed.")); 2121 ddi_status = DDI_FAILURE; 2122 goto nxge_get_soft_properties_fail1; 2123 } 2124 2125 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2126 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2127 2128 nxge_get_soft_properties_fail1: 2129 ddi_dma_free_handle(&nxgep->dmasparehandle); 2130 2131 nxge_get_soft_properties_exit: 2132 2133 if (ddi_status != DDI_SUCCESS) 2134 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2135 2136 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2137 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2138 return (status); 2139 } 2140 2141 static nxge_status_t 2142 nxge_alloc_mem_pool(p_nxge_t nxgep) 2143 { 2144 nxge_status_t status = NXGE_OK; 2145 2146 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2147 2148 status = nxge_alloc_rx_mem_pool(nxgep); 2149 if (status != NXGE_OK) { 2150 return (NXGE_ERROR); 2151 } 2152 2153 status = nxge_alloc_tx_mem_pool(nxgep); 2154 if (status != NXGE_OK) { 2155 nxge_free_rx_mem_pool(nxgep); 2156 return (NXGE_ERROR); 2157 } 2158 2159 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2160 return (NXGE_OK); 2161 } 2162 2163 static void 2164 nxge_free_mem_pool(p_nxge_t nxgep) 2165 { 2166 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2167 2168 nxge_free_rx_mem_pool(nxgep); 2169 nxge_free_tx_mem_pool(nxgep); 2170 2171 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 2172 } 2173 2174 nxge_status_t 2175 
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
2176 {
2177 uint32_t rdc_max;
2178 p_nxge_dma_pt_cfg_t p_all_cfgp;
2179 p_nxge_hw_pt_cfg_t p_cfgp;
2180 p_nxge_dma_pool_t dma_poolp;
2181 p_nxge_dma_common_t *dma_buf_p;
2182 p_nxge_dma_pool_t dma_cntl_poolp;
2183 p_nxge_dma_common_t *dma_cntl_p;
2184 uint32_t *num_chunks; /* per dma */
2185 nxge_status_t status = NXGE_OK;
2186
2187 uint32_t nxge_port_rbr_size;
2188 uint32_t nxge_port_rbr_spare_size;
2189 uint32_t nxge_port_rcr_size;
2190 uint32_t rx_cntl_alloc_size;
2191
2192 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
2193
2194 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2195 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2196 rdc_max = NXGE_MAX_RDCS;
2197
2198 /*
2199 * Allocate memory for the common DMA data structures.
2200 */
2201 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2202 KM_SLEEP);
2203 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2204 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2205
2206 dma_cntl_poolp = (p_nxge_dma_pool_t)
2207 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2208 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2209 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2210
2211 num_chunks = (uint32_t *)KMEM_ZALLOC(
2212 sizeof (uint32_t) * rdc_max, KM_SLEEP);
2213
2214 /*
2215 * Assume that each DMA channel will be configured with
2216 * the default block size.
2217 * RBR block counts are rounded up to a multiple of the batch count (16).
2218 */
2219 nxge_port_rbr_size = p_all_cfgp->rbr_size;
2220 nxge_port_rcr_size = p_all_cfgp->rcr_size;
2221
2222 if (!nxge_port_rbr_size) {
2223 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
2224 }
2225 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
2226 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
2227 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
2228 }
2229
2230 p_all_cfgp->rbr_size = nxge_port_rbr_size;
2231 nxge_port_rbr_spare_size = nxge_rbr_spare_size;
2232
2233 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
2234 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
2235 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
2236 }
2237 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
2238 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2239 "nxge_alloc_rx_mem_pool: RBR size too high %d, "
2240 "set to default %d",
2241 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
2242 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
2243 }
2244 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
2245 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2246 "nxge_alloc_rx_mem_pool: RCR size too high %d, "
2247 "set to default %d",
2248 nxge_port_rcr_size, RCR_DEFAULT_MAX));
2249 nxge_port_rcr_size = RCR_DEFAULT_MAX;
2250 }
2251
2252 /*
2253 * N2/NIU limits the descriptor ring sizes: contiguous memory
2254 * allocation for data buffers is limited to 4M (contig_mem_alloc),
2255 * and control buffers must be allocated with the ddi/dki mem alloc
2256 * function.
2257 */
2258 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2259 if (nxgep->niu_type == N2_NIU) {
2260 nxge_port_rbr_spare_size = 0;
2261 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
2262 (!ISP2(nxge_port_rbr_size))) {
2263 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
2264 }
2265 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
2266 (!ISP2(nxge_port_rcr_size))) {
2267 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
2268 }
2269 }
2270 #endif
2271
2272 /*
2273 * Addresses of receive block ring, receive completion ring and the
2274 * mailbox must all be cache-aligned (64 bytes).
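 *
 * As an illustrative sketch (example values only, not necessarily
 * the driver defaults): a channel with 4096 RBR blocks, no spare
 * blocks, and 8192 RCR entries needs
 *
 *	4096 * sizeof (rx_desc_t) +
 *	8192 * sizeof (rcr_entry_t) +
 *	sizeof (rxdma_mailbox_t)
 *
 * bytes of control memory, which is what rx_cntl_alloc_size
 * works out to below.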
2275 */ 2276 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2277 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2278 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2279 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2280 2281 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2282 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2283 "nxge_port_rcr_size = %d " 2284 "rx_cntl_alloc_size = %d", 2285 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2286 nxge_port_rcr_size, 2287 rx_cntl_alloc_size)); 2288 2289 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2290 if (nxgep->niu_type == N2_NIU) { 2291 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2292 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2293 2294 if (!ISP2(rx_buf_alloc_size)) { 2295 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2296 "==> nxge_alloc_rx_mem_pool: " 2297 " must be power of 2")); 2298 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2299 goto nxge_alloc_rx_mem_pool_exit; 2300 } 2301 2302 if (rx_buf_alloc_size > (1 << 22)) { 2303 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2304 "==> nxge_alloc_rx_mem_pool: " 2305 " limit size to 4M")); 2306 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2307 goto nxge_alloc_rx_mem_pool_exit; 2308 } 2309 2310 if (rx_cntl_alloc_size < 0x2000) { 2311 rx_cntl_alloc_size = 0x2000; 2312 } 2313 } 2314 #endif 2315 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2316 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2317 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2318 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2319 2320 dma_poolp->ndmas = p_cfgp->max_rdcs; 2321 dma_poolp->num_chunks = num_chunks; 2322 dma_poolp->buf_allocated = B_TRUE; 2323 nxgep->rx_buf_pool_p = dma_poolp; 2324 dma_poolp->dma_buf_pool_p = dma_buf_p; 2325 2326 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2327 dma_cntl_poolp->buf_allocated = B_TRUE; 2328 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2329 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2330 2331 /* Allocate the receive rings, too. */ 2332 nxgep->rx_rbr_rings = 2333 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2334 nxgep->rx_rbr_rings->rbr_rings = 2335 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2336 nxgep->rx_rcr_rings = 2337 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2338 nxgep->rx_rcr_rings->rcr_rings = 2339 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2340 nxgep->rx_mbox_areas_p = 2341 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2342 nxgep->rx_mbox_areas_p->rxmbox_areas = 2343 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2344 2345 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2346 p_cfgp->max_rdcs; 2347 2348 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2349 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2350 2351 nxge_alloc_rx_mem_pool_exit: 2352 return (status); 2353 } 2354 2355 /* 2356 * nxge_alloc_rxb 2357 * 2358 * Allocate buffers for an RDC. 2359 * 2360 * Arguments: 2361 * nxgep 2362 * channel The channel to map into our kernel space. 
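 *
 * A minimal usage sketch (the channel number is illustrative);
 * allocation is paired with nxge_free_rxb() below:
 *
 *	if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
 *		return (NXGE_ERROR);
 *	... use the RDC ...
 *	nxge_free_rxb(nxgep, channel);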
2363 *
2364 * Notes:
2365 *
2366 * NPI function calls:
2367 *
2368 * NXGE function calls:
2369 *
2370 * Registers accessed:
2371 *
2372 * Context:
2373 *
2374 * Taking apart:
2375 *
2376 * Open questions:
2377 *
2378 */
2379 nxge_status_t
2380 nxge_alloc_rxb(
2381 p_nxge_t nxgep,
2382 int channel)
2383 {
2384 size_t rx_buf_alloc_size;
2385 nxge_status_t status = NXGE_OK;
2386
2387 nxge_dma_common_t **data;
2388 nxge_dma_common_t **control;
2389 uint32_t *num_chunks;
2390
2391 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb"));
2392
2393 /*
2394 * Allocate memory for the receive buffers and descriptor rings.
2395 * Replace these allocation functions with the interface functions
2396 * provided by the partition manager if/when they are available.
2397 */
2398
2399 /*
2400 * Allocate memory for the receive buffer blocks.
2401 */
2402 rx_buf_alloc_size = (nxgep->rx_default_block_size *
2403 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));
2404
2405 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2406 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];
2407
2408 if ((status = nxge_alloc_rx_buf_dma(
2409 nxgep, channel, data, rx_buf_alloc_size,
2410 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
2411 return (status);
2412 }
2413
2414 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rxb(): "
2415 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));
2416
2417 /*
2418 * Allocate memory for descriptor rings and mailbox.
2419 */
2420 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2421
2422 if ((status = nxge_alloc_rx_cntl_dma(
2423 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size))
2424 != NXGE_OK) {
2425 nxge_free_rx_cntl_dma(nxgep, *control);
2426 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE;
2427 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks);
2428 return (status);
2429 }
2430
2431 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2432 "<== nxge_alloc_rxb: status 0x%08x", status));
2433
2434 return (status);
2435 }
2436
2437 void
2438 nxge_free_rxb(
2439 p_nxge_t nxgep,
2440 int channel)
2441 {
2442 nxge_dma_common_t *data;
2443 nxge_dma_common_t *control;
2444 uint32_t num_chunks;
2445
2446 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb"));
2447
2448 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2449 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
2450 nxge_free_rx_buf_dma(nxgep, data, num_chunks);
2451
2452 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2453 nxgep->rx_buf_pool_p->num_chunks[channel] = 0;
2454
2455 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2456 nxge_free_rx_cntl_dma(nxgep, control);
2457
2458 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2459
2460 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2461 KMEM_FREE(control, sizeof (nxge_dma_common_t));
2462
2463 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb"));
2464 }
2465
2466 static void
2467 nxge_free_rx_mem_pool(p_nxge_t nxgep)
2468 {
2469 int rdc_max = NXGE_MAX_RDCS;
2470
2471 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
2472
2473 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
2474 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2475 "<== nxge_free_rx_mem_pool "
2476 "(null rx buf pool or buf not allocated)"));
2477 return;
2478 }
2479 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
2480 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2481 "<== nxge_free_rx_mem_pool "
2482 "(null rx cntl buf pool or cntl buf not allocated)"));
2483 return;
2484 }
2485
2486
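	/*
	 * Free the per-port bookkeeping in the reverse order of its
	 * allocation in nxge_alloc_rx_mem_pool().  Only the pool
	 * shells, chunk counts, and ring/mailbox pointer arrays are
	 * freed here; the per-channel DMA memory itself is released
	 * by nxge_free_rxb().
	 */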
KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2487 sizeof (p_nxge_dma_common_t) * rdc_max); 2488 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2489 2490 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2491 sizeof (uint32_t) * rdc_max); 2492 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2493 sizeof (p_nxge_dma_common_t) * rdc_max); 2494 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2495 2496 nxgep->rx_buf_pool_p = 0; 2497 nxgep->rx_cntl_pool_p = 0; 2498 2499 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2500 sizeof (p_rx_rbr_ring_t) * rdc_max); 2501 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2502 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2503 sizeof (p_rx_rcr_ring_t) * rdc_max); 2504 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2505 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2506 sizeof (p_rx_mbox_t) * rdc_max); 2507 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2508 2509 nxgep->rx_rbr_rings = 0; 2510 nxgep->rx_rcr_rings = 0; 2511 nxgep->rx_mbox_areas_p = 0; 2512 2513 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2514 } 2515 2516 2517 static nxge_status_t 2518 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2519 p_nxge_dma_common_t *dmap, 2520 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2521 { 2522 p_nxge_dma_common_t rx_dmap; 2523 nxge_status_t status = NXGE_OK; 2524 size_t total_alloc_size; 2525 size_t allocated = 0; 2526 int i, size_index, array_size; 2527 boolean_t use_kmem_alloc = B_FALSE; 2528 2529 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2530 2531 rx_dmap = (p_nxge_dma_common_t) 2532 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2533 KM_SLEEP); 2534 2535 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2536 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2537 dma_channel, alloc_size, block_size, dmap)); 2538 2539 total_alloc_size = alloc_size; 2540 2541 #if defined(RX_USE_RECLAIM_POST) 2542 total_alloc_size = alloc_size + alloc_size/4; 2543 #endif 2544 2545 i = 0; 2546 size_index = 0; 2547 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2548 while ((size_index < array_size) && 2549 (alloc_sizes[size_index] < alloc_size)) 2550 size_index++; 2551 if (size_index >= array_size) { 2552 size_index = array_size - 1; 2553 } 2554 2555 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2556 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2557 use_kmem_alloc = B_TRUE; 2558 #if defined(__i386) || defined(__amd64) 2559 size_index = 0; 2560 #endif 2561 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2562 "==> nxge_alloc_rx_buf_dma: " 2563 "Neptune use kmem_alloc() - size_index %d", 2564 size_index)); 2565 } 2566 2567 while ((allocated < total_alloc_size) && 2568 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2569 rx_dmap[i].dma_chunk_index = i; 2570 rx_dmap[i].block_size = block_size; 2571 rx_dmap[i].alength = alloc_sizes[size_index]; 2572 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2573 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2574 rx_dmap[i].dma_channel = dma_channel; 2575 rx_dmap[i].contig_alloc_type = B_FALSE; 2576 rx_dmap[i].kmem_alloc_type = B_FALSE; 2577 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2578 2579 /* 2580 * N2/NIU: data buffers must be contiguous as the driver 2581 * needs to call Hypervisor api to set up 2582 * logical pages. 
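 * (The logical-page hypervisor interface maps one physically
 * contiguous region per page, so buffers backed by scattered
 * pages cannot be handed to it; this is why contig_alloc_type
 * is forced on in the N2_NIU case below.)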
2583 */
2584 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2585 rx_dmap[i].contig_alloc_type = B_TRUE;
2586 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC;
2587 } else if (use_kmem_alloc) {
2588 /* For Neptune, use kmem_alloc */
2589 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2590 "==> nxge_alloc_rx_buf_dma: "
2591 "Neptune use kmem_alloc()"));
2592 rx_dmap[i].kmem_alloc_type = B_TRUE;
2593 rx_dmap[i].buf_alloc_type = KMEM_ALLOC;
2594 }
2595
2596 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2597 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
2598 "i %d nblocks %d alength %d",
2599 dma_channel, i, &rx_dmap[i], block_size,
2600 i, rx_dmap[i].nblocks,
2601 rx_dmap[i].alength));
2602 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2603 &nxge_rx_dma_attr,
2604 rx_dmap[i].alength,
2605 &nxge_dev_buf_dma_acc_attr,
2606 DDI_DMA_READ | DDI_DMA_STREAMING,
2607 (p_nxge_dma_common_t)(&rx_dmap[i]));
2608 if (status != NXGE_OK) {
2609 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2610 "nxge_alloc_rx_buf_dma: Alloc Failed: "
2611 "dma %d size_index %d size requested %d",
2612 dma_channel,
2613 size_index,
2614 rx_dmap[i].alength));
2615 size_index--;
2616 } else {
2617 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED;
2618 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2619 " nxge_alloc_rx_buf_dma DONE alloc mem: "
2620 "dma %d dma_buf_p $%p kaddrp $%p alength %d "
2621 "buf_alloc_state %d alloc_type %d",
2622 dma_channel,
2623 &rx_dmap[i],
2624 rx_dmap[i].kaddrp,
2625 rx_dmap[i].alength,
2626 rx_dmap[i].buf_alloc_state,
2627 rx_dmap[i].buf_alloc_type));
2628 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2629 " alloc_rx_buf_dma allocated rdc %d "
2630 "chunk %d size %x dvma %x bufp %llx kaddrp $%p",
2631 dma_channel, i, rx_dmap[i].alength,
2632 rx_dmap[i].ioaddr_pp, &rx_dmap[i],
2633 rx_dmap[i].kaddrp));
2634 i++;
2635 allocated += alloc_sizes[size_index];
2636 }
2637 }
2638
2639 if (allocated < total_alloc_size) {
2640 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2641 "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
2642 "allocated 0x%x requested 0x%x",
2643 dma_channel,
2644 allocated, total_alloc_size));
2645 status = NXGE_ERROR;
2646 goto nxge_alloc_rx_mem_fail1;
2647 }
2648
2649 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2650 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
2651 "allocated 0x%x requested 0x%x",
2652 dma_channel,
2653 allocated, total_alloc_size));
2654
2655 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2656 " alloc_rx_buf_dma rdc %d allocated %d chunks",
2657 dma_channel, i));
2658 *num_chunks = i;
2659 *dmap = rx_dmap;
2660
2661 goto nxge_alloc_rx_mem_exit;
2662
2663 nxge_alloc_rx_mem_fail1:
2664 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2665
2666 nxge_alloc_rx_mem_exit:
2667 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2668 "<== nxge_alloc_rx_buf_dma status 0x%08x", status));
2669
2670 return (status);
2671 }
2672
2673 /*ARGSUSED*/
2674 static void
2675 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2676 uint32_t num_chunks)
2677 {
2678 int i;
2679
2680 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2681 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
2682
2683 if (dmap == 0)
2684 return;
2685
2686 for (i = 0; i < num_chunks; i++) {
2687 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2688 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
2689 i, dmap));
2690 nxge_dma_free_rx_data_buf(dmap++);
2691 }
2692
2693 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma"));
2694 }
2695
2696 /*ARGSUSED*/
2697 static nxge_status_t
2698 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2699 p_nxge_dma_common_t *dmap, size_t
size)
2700 {
2701 p_nxge_dma_common_t rx_dmap;
2702 nxge_status_t status = NXGE_OK;
2703
2704 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
2705
2706 rx_dmap = (p_nxge_dma_common_t)
2707 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2708
2709 rx_dmap->contig_alloc_type = B_FALSE;
2710 rx_dmap->kmem_alloc_type = B_FALSE;
2711
2712 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2713 &nxge_desc_dma_attr,
2714 size,
2715 &nxge_dev_desc_dma_acc_attr,
2716 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2717 rx_dmap);
2718 if (status != NXGE_OK) {
2719 goto nxge_alloc_rx_cntl_dma_fail1;
2720 }
2721
2722 *dmap = rx_dmap;
2723 goto nxge_alloc_rx_cntl_dma_exit;
2724
2725 nxge_alloc_rx_cntl_dma_fail1:
2726 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
2727
2728 nxge_alloc_rx_cntl_dma_exit:
2729 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2730 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
2731
2732 return (status);
2733 }
2734
2735 /*ARGSUSED*/
2736 static void
2737 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2738 {
2739 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
2740
2741 if (dmap == 0)
2742 return;
2743
2744 nxge_dma_mem_free(dmap);
2745
2746 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
2747 }
2748
2749 typedef struct {
2750 size_t tx_size;
2751 size_t cr_size;
2752 size_t threshold;
2753 } nxge_tdc_sizes_t;
2754
2755 static
2756 nxge_status_t
2757 nxge_tdc_sizes(
2758 nxge_t *nxgep,
2759 nxge_tdc_sizes_t *sizes)
2760 {
2761 uint32_t threshold; /* The bcopy() threshold */
2762 size_t tx_size; /* Transmit buffer size */
2763 size_t cr_size; /* Completion ring size */
2764
2765 /*
2766 * Assume that each DMA channel will be configured with the
2767 * default transmit buffer size for copying transmit data.
2768 * (If a packet is bigger than this, it will not be copied.)
2769 */
2770 if (nxgep->niu_type == N2_NIU) {
2771 threshold = TX_BCOPY_SIZE;
2772 } else {
2773 threshold = nxge_bcopy_thresh;
2774 }
2775 tx_size = nxge_tx_ring_size * threshold;
2776
2777 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t);
2778 cr_size += sizeof (txdma_mailbox_t);
2779
2780 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2781 if (nxgep->niu_type == N2_NIU) {
2782 if (!ISP2(tx_size)) {
2783 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2784 "==> nxge_tdc_sizes: Tx size"
2785 " must be power of 2"));
2786 return (NXGE_ERROR);
2787 }
2788
2789 if (tx_size > (1 << 22)) {
2790 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2791 "==> nxge_tdc_sizes: Tx size"
2792 " limited to 4M"));
2793 return (NXGE_ERROR);
2794 }
2795
2796 if (cr_size < 0x2000)
2797 cr_size = 0x2000;
2798 }
2799 #endif
2800
2801 sizes->threshold = threshold;
2802 sizes->tx_size = tx_size;
2803 sizes->cr_size = cr_size;
2804
2805 return (NXGE_OK);
2806 }
2807 /*
2808 * nxge_alloc_txb
2809 *
2810 * Allocate buffers for a TDC.
2811 *
2812 * Arguments:
2813 * nxgep
2814 * channel The channel to map into our kernel space.
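 *
 * The sizes used here come from nxge_tdc_sizes() above.  As an
 * illustrative sketch (example values, not necessarily the
 * defaults): a 1024-entry transmit ring with a 2048-byte bcopy
 * threshold pre-maps 1024 * 2048 bytes (2 MB) of copy space,
 * while the completion ring needs 1024 * sizeof (tx_desc_t)
 * bytes plus sizeof (txdma_mailbox_t).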
2815 *
2816 * Notes:
2817 *
2818 * NPI function calls:
2819 *
2820 * NXGE function calls:
2821 *
2822 * Registers accessed:
2823 *
2824 * Context:
2825 *
2826 * Taking apart:
2827 *
2828 * Open questions:
2829 *
2830 */
2831 nxge_status_t
2832 nxge_alloc_txb(
2833 p_nxge_t nxgep,
2834 int channel)
2835 {
2836 nxge_dma_common_t **dma_buf_p;
2837 nxge_dma_common_t **dma_cntl_p;
2838 uint32_t *num_chunks;
2839 nxge_status_t status = NXGE_OK;
2840
2841 nxge_tdc_sizes_t sizes;
2842
2843 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb"));
2844
2845 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK)
2846 return (NXGE_ERROR);
2847
2848 /*
2849 * Allocate memory for transmit buffers and descriptor rings.
2850 * Replace these allocation functions with the interface functions
2851 * provided by the partition manager if/when they are available.
2852 */
2853 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2854 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel];
2855
2856 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2857
2858 /*
2859 * Allocate memory for the transmit buffer pool.
2860 */
2861
2862
2863
2864
2865 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2866 "sizes: tx: %ld, cr:%ld, th:%ld",
2867 sizes.tx_size, sizes.cr_size, sizes.threshold));
2868
2869 *num_chunks = 0;
2870 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p,
2871 sizes.tx_size, sizes.threshold, num_chunks);
2872 if (status != NXGE_OK) {
2873 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!");
2874 return (status);
2875 }
2876
2877 /*
2878 * Allocate memory for descriptor rings and mailbox.
2879 */
2880 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p,
2881 sizes.cr_size);
2882 if (status != NXGE_OK) {
2883 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks);
2884 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!");
2885 return (status);
2886 }
2887
2888 return (NXGE_OK);
2889 }
2890
2891 void
2892 nxge_free_txb(
2893 p_nxge_t nxgep,
2894 int channel)
2895 {
2896 nxge_dma_common_t *data;
2897 nxge_dma_common_t *control;
2898 uint32_t num_chunks;
2899
2900 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb"));
2901
2902 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2903 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2904 nxge_free_tx_buf_dma(nxgep, data, num_chunks);
2905
2906 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2907 nxgep->tx_buf_pool_p->num_chunks[channel] = 0;
2908
2909 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2910 nxge_free_tx_cntl_dma(nxgep, control);
2911
2912 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2913
2914 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2915 KMEM_FREE(control, sizeof (nxge_dma_common_t));
2916
2917 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb"));
2918 }
2919
2920 /*
2921 * nxge_alloc_tx_mem_pool
2922 *
2923 * This function allocates all of the per-port TDC control data structures.
2924 * The per-channel (TDC) data structures are allocated when needed.
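 * (That per-channel work is done by nxge_alloc_txb() above, at
 * the time a TDC is actually mapped in.)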
2925 *
2926 * Arguments:
2927 * nxgep
2928 *
2929 * Notes:
2930 *
2931 * Context:
2932 * Any domain
2933 */
2934 nxge_status_t
2935 nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
2936 {
2937 nxge_hw_pt_cfg_t *p_cfgp;
2938 nxge_dma_pool_t *dma_poolp;
2939 nxge_dma_common_t **dma_buf_p;
2940 nxge_dma_pool_t *dma_cntl_poolp;
2941 nxge_dma_common_t **dma_cntl_p;
2942 uint32_t *num_chunks; /* per dma */
2943 int tdc_max;
2944
2945 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
2946
2947 p_cfgp = &nxgep->pt_config.hw_config;
2948 tdc_max = NXGE_MAX_TDCS;
2949
2950 /*
2951 * Allocate memory for each transmit DMA channel.
2952 */
2953 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2954 KM_SLEEP);
2955 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2956 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);
2957
2958 dma_cntl_poolp = (p_nxge_dma_pool_t)
2959 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2960 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2961 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);
2962
2963 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
2964 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2965 "nxge_alloc_tx_mem_pool: tx ring size too high %d, "
2966 "set to default %d",
2967 nxge_tx_ring_size, TDC_DEFAULT_MAX));
2968 nxge_tx_ring_size = TDC_DEFAULT_MAX;
2969 }
2970
2971 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2972 /*
2973 * N2/NIU limits the descriptor ring sizes: contiguous memory
2974 * allocation for data buffers is limited to 4M (contig_mem_alloc),
2975 * and control buffers must be allocated with the ddi/dki mem alloc
2976 * function. The transmit ring is limited to 8K entries (including
2977 * the mailbox).
2978 */
2979 if (nxgep->niu_type == N2_NIU) {
2980 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
2981 (!ISP2(nxge_tx_ring_size))) {
2982 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
2983 }
2984 }
2985 #endif
2986
2987 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
2988
2989 num_chunks = (uint32_t *)KMEM_ZALLOC(
2990 sizeof (uint32_t) * tdc_max, KM_SLEEP);
2991
2992 dma_poolp->ndmas = p_cfgp->tdc.owned;
2993 dma_poolp->num_chunks = num_chunks;
2994 dma_poolp->dma_buf_pool_p = dma_buf_p;
2995 nxgep->tx_buf_pool_p = dma_poolp;
2996
2997 dma_poolp->buf_allocated = B_TRUE;
2998
2999 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned;
3000 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
3001 nxgep->tx_cntl_pool_p = dma_cntl_poolp;
3002
3003 dma_cntl_poolp->buf_allocated = B_TRUE;
3004
3005 nxgep->tx_rings =
3006 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
3007 nxgep->tx_rings->rings =
3008 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP);
3009 nxgep->tx_mbox_areas_p =
3010 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
3011 nxgep->tx_mbox_areas_p->txmbox_areas_p =
3012 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP);
3013
3014 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned;
3015
3016 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
3017 "==> nxge_alloc_tx_mem_pool: tdc_max %d poolp->ndmas %d",
3018 tdc_max, dma_poolp->ndmas));
3019
3020 return (NXGE_OK);
3021 }
3022
3023 nxge_status_t
3024 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
3025 p_nxge_dma_common_t *dmap, size_t alloc_size,
3026 size_t block_size, uint32_t *num_chunks)
3027 {
3028 p_nxge_dma_common_t tx_dmap;
3029 nxge_status_t status = NXGE_OK;
3030 size_t total_alloc_size;
3031 size_t allocated = 0;
3032 int i, size_index, array_size;
3033
3034 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));
3035
3036 tx_dmap = (p_nxge_dma_common_t)
3037 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * 
NXGE_DMA_BLOCK,
3038 KM_SLEEP);
3039
3040 total_alloc_size = alloc_size;
3041 i = 0;
3042 size_index = 0;
3043 array_size = sizeof (alloc_sizes) / sizeof (size_t);
3044 while ((size_index < array_size) &&
3045 (alloc_sizes[size_index] < alloc_size))
3046 size_index++;
3047 if (size_index >= array_size) {
3048 size_index = array_size - 1;
3049 }
3050
3051 while ((allocated < total_alloc_size) &&
3052 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
3053
3054 tx_dmap[i].dma_chunk_index = i;
3055 tx_dmap[i].block_size = block_size;
3056 tx_dmap[i].alength = alloc_sizes[size_index];
3057 tx_dmap[i].orig_alength = tx_dmap[i].alength;
3058 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
3059 tx_dmap[i].dma_channel = dma_channel;
3060 tx_dmap[i].contig_alloc_type = B_FALSE;
3061 tx_dmap[i].kmem_alloc_type = B_FALSE;
3062
3063 /*
3064 * N2/NIU: data buffers must be contiguous as the driver
3065 * needs to call the hypervisor API to set up
3066 * logical pages.
3067 */
3068 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
3069 tx_dmap[i].contig_alloc_type = B_TRUE;
3070 }
3071
3072 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
3073 &nxge_tx_dma_attr,
3074 tx_dmap[i].alength,
3075 &nxge_dev_buf_dma_acc_attr,
3076 DDI_DMA_WRITE | DDI_DMA_STREAMING,
3077 (p_nxge_dma_common_t)(&tx_dmap[i]));
3078 if (status != NXGE_OK) {
3079 size_index--;
3080 } else {
3081 i++;
3082 allocated += alloc_sizes[size_index];
3083 }
3084 }
3085
3086 if (allocated < total_alloc_size) {
3087 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3088 "==> nxge_alloc_tx_buf_dma: not enough memory for channel %d: "
3089 "allocated 0x%x requested 0x%x",
3090 dma_channel,
3091 allocated, total_alloc_size));
3092 status = NXGE_ERROR;
3093 goto nxge_alloc_tx_mem_fail1;
3094 }
3095
3096 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3097 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
3098 "allocated 0x%x requested 0x%x",
3099 dma_channel,
3100 allocated, total_alloc_size));
3101
3102 *num_chunks = i;
3103 *dmap = tx_dmap;
3104 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3105 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
3106 *dmap, i));
3107 goto nxge_alloc_tx_mem_exit;
3108
3109 nxge_alloc_tx_mem_fail1:
3110 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
3111
3112 nxge_alloc_tx_mem_exit:
3113 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3114 "<== nxge_alloc_tx_buf_dma status 0x%08x", status));
3115
3116 return (status);
3117 }
3118
3119 /*ARGSUSED*/
3120 static void
3121 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
3122 uint32_t num_chunks)
3123 {
3124 int i;
3125
3126 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));
3127
3128 if (dmap == 0)
3129 return;
3130
3131 for (i = 0; i < num_chunks; i++) {
3132 nxge_dma_mem_free(dmap++);
3133 }
3134
3135 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
3136 }
3137
3138 /*ARGSUSED*/
3139 nxge_status_t
3140 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
3141 p_nxge_dma_common_t *dmap, size_t size)
3142 {
3143 p_nxge_dma_common_t tx_dmap;
3144 nxge_status_t status = NXGE_OK;
3145
3146 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
3147 tx_dmap = (p_nxge_dma_common_t)
3148 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
3149
3150 tx_dmap->contig_alloc_type = B_FALSE;
3151 tx_dmap->kmem_alloc_type = B_FALSE;
3152
3153 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
3154 &nxge_desc_dma_attr,
3155 size,
3156 &nxge_dev_desc_dma_acc_attr,
3157 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3158 tx_dmap);
3159 if (status != NXGE_OK) {
3160
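		/*
		 * The descriptor-area DMA allocation failed;
		 * fall through to release the shell structure.
		 */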
goto nxge_alloc_tx_cntl_dma_fail1;
3161 }
3162
3163 *dmap = tx_dmap;
3164 goto nxge_alloc_tx_cntl_dma_exit;
3165
3166 nxge_alloc_tx_cntl_dma_fail1:
3167 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
3168
3169 nxge_alloc_tx_cntl_dma_exit:
3170 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3171 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
3172
3173 return (status);
3174 }
3175
3176 /*ARGSUSED*/
3177 static void
3178 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
3179 {
3180 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
3181
3182 if (dmap == 0)
3183 return;
3184
3185 nxge_dma_mem_free(dmap);
3186
3187 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
3188 }
3189
3190 /*
3191 * nxge_free_tx_mem_pool
3192 *
3193 * This function frees all of the per-port TDC control data structures.
3194 * The per-channel (TDC) data structures are freed when the channel
3195 * is stopped.
3196 *
3197 * Arguments:
3198 * nxgep
3199 *
3200 * Notes:
3201 *
3202 * Context:
3203 * Any domain
3204 */
3205 static void
3206 nxge_free_tx_mem_pool(p_nxge_t nxgep)
3207 {
3208 int tdc_max = NXGE_MAX_TDCS;
3209
3210 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool"));
3211
3212 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) {
3213 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3214 "<== nxge_free_tx_mem_pool "
3215 "(null tx buf pool or buf not allocated)"));
3216 return;
3217 }
3218 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) {
3219 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3220 "<== nxge_free_tx_mem_pool "
3221 "(null tx cntl buf pool or cntl buf not allocated)"));
3222 return;
3223 }
3224
3225 /* 1. Free the mailboxes. */
3226 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p,
3227 sizeof (p_tx_mbox_t) * tdc_max);
3228 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
3229
3230 nxgep->tx_mbox_areas_p = 0;
3231
3232 /* 2. Free the transmit ring arrays. */
3233 KMEM_FREE(nxgep->tx_rings->rings,
3234 sizeof (p_tx_ring_t) * tdc_max);
3235 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t));
3236
3237 nxgep->tx_rings = 0;
3238
3239 /* 3. Free the completion ring data structures. */
3240 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p,
3241 sizeof (p_nxge_dma_common_t) * tdc_max);
3242 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t));
3243
3244 nxgep->tx_cntl_pool_p = 0;
3245
3246 /* 4. Free the data ring data structures. */
3247 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks,
3248 sizeof (uint32_t) * tdc_max);
3249 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p,
3250 sizeof (p_nxge_dma_common_t) * tdc_max);
3251 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t));
3252
3253 nxgep->tx_buf_pool_p = 0;
3254
3255 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool"));
3256 }
3257
3258 /*ARGSUSED*/
3259 static nxge_status_t
3260 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
3261 struct ddi_dma_attr *dma_attrp,
3262 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
3263 p_nxge_dma_common_t dma_p)
3264 {
3265 caddr_t kaddrp;
3266 int ddi_status = DDI_SUCCESS;
3267 boolean_t contig_alloc_type;
3268 boolean_t kmem_alloc_type;
3269
3270 contig_alloc_type = dma_p->contig_alloc_type;
3271
3272 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
3273 /*
3274 * contig_alloc_type (contiguous memory) is allowed
3275 * only for N2/NIU.
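 * (contig_mem_alloc() is a sun4v-only interface, which is why
 * the contiguous path further down is compiled only under
 * #if defined(sun4v) && defined(NIU_LP_WORKAROUND); other
 * platforms use the ddi or kmem paths instead.)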
3276 */
3277 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3278 "nxge_dma_mem_alloc: alloc type not allowed (%d)",
3279 dma_p->contig_alloc_type));
3280 return (NXGE_ERROR | NXGE_DDI_FAILED);
3281 }
3282
3283 dma_p->dma_handle = NULL;
3284 dma_p->acc_handle = NULL;
3285 dma_p->kaddrp = dma_p->last_kaddrp = NULL;
3286 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
3287 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
3288 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
3289 if (ddi_status != DDI_SUCCESS) {
3290 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3291 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
3292 return (NXGE_ERROR | NXGE_DDI_FAILED);
3293 }
3294
3295 kmem_alloc_type = dma_p->kmem_alloc_type;
3296
3297 switch (contig_alloc_type) {
3298 case B_FALSE:
3299 switch (kmem_alloc_type) {
3300 case B_FALSE:
3301 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle,
3302 length,
3303 acc_attr_p,
3304 xfer_flags,
3305 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
3306 &dma_p->acc_handle);
3307 if (ddi_status != DDI_SUCCESS) {
3308 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3309 "nxge_dma_mem_alloc: "
3310 "ddi_dma_mem_alloc failed"));
3311 ddi_dma_free_handle(&dma_p->dma_handle);
3312 dma_p->dma_handle = NULL;
3313 return (NXGE_ERROR | NXGE_DDI_FAILED);
3314 }
3315 if (dma_p->alength < length) {
3316 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3317 "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
3318 "< length."));
3319 ddi_dma_mem_free(&dma_p->acc_handle);
3320 ddi_dma_free_handle(&dma_p->dma_handle);
3321 dma_p->acc_handle = NULL;
3322 dma_p->dma_handle = NULL;
3323 return (NXGE_ERROR);
3324 }
3325
3326 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3327 NULL,
3328 kaddrp, dma_p->alength, xfer_flags,
3329 DDI_DMA_DONTWAIT,
3330 0, &dma_p->dma_cookie, &dma_p->ncookies);
3331 if (ddi_status != DDI_DMA_MAPPED) {
3332 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3333 "nxge_dma_mem_alloc: ddi_dma_addr_bind "
3334 "failed "
3335 "(status 0x%x ncookies %d.)", ddi_status,
3336 dma_p->ncookies));
3337 if (dma_p->acc_handle) {
3338 ddi_dma_mem_free(&dma_p->acc_handle);
3339 dma_p->acc_handle = NULL;
3340 }
3341 ddi_dma_free_handle(&dma_p->dma_handle);
3342 dma_p->dma_handle = NULL;
3343 return (NXGE_ERROR | NXGE_DDI_FAILED);
3344 }
3345
3346 if (dma_p->ncookies != 1) {
3347 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3348 "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3349 "> 1 cookie "
3350 "(status 0x%x ncookies %d.)", ddi_status,
3351 dma_p->ncookies));
3352 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3353 if (dma_p->acc_handle) {
3354 ddi_dma_mem_free(&dma_p->acc_handle);
3355 dma_p->acc_handle = NULL;
3356 }
3357 ddi_dma_free_handle(&dma_p->dma_handle);
3358 dma_p->dma_handle = NULL;
3359 dma_p->acc_handle = NULL;
3360 return (NXGE_ERROR);
3361 }
3362 break;
3363
3364 case B_TRUE:
3365 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP);
3366 if (kaddrp == NULL) {
3367 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3368 "nxge_dma_mem_alloc: "
3369 "kmem_alloc failed"));
3370 return (NXGE_ERROR);
3371 }
3372
3373 dma_p->alength = length;
3374 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3375 NULL, kaddrp, dma_p->alength, xfer_flags,
3376 DDI_DMA_DONTWAIT, 0,
3377 &dma_p->dma_cookie, &dma_p->ncookies);
3378 if (ddi_status != DDI_DMA_MAPPED) {
3379 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3380 "nxge_dma_mem_alloc:ddi_dma_addr_bind: "
3381 "(kmem_alloc) failed kaddrp $%p length %d "
3382 "(status 0x%x (%d) ncookies %d.)",
3383 kaddrp, length,
3384 ddi_status, ddi_status, dma_p->ncookies));
3385 KMEM_FREE(kaddrp, length);
3386
dma_p->acc_handle = NULL;
3387 ddi_dma_free_handle(&dma_p->dma_handle);
3388 dma_p->dma_handle = NULL;
3389 dma_p->kaddrp = NULL;
3390 return (NXGE_ERROR | NXGE_DDI_FAILED);
3391 }
3392
3393 if (dma_p->ncookies != 1) {
3394 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3395 "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3396 "(kmem_alloc) > 1 cookie "
3397 "(status 0x%x ncookies %d.)", ddi_status,
3398 dma_p->ncookies));
3399 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3400 KMEM_FREE(kaddrp, length);
3401 ddi_dma_free_handle(&dma_p->dma_handle);
3402 dma_p->dma_handle = NULL;
3403 dma_p->acc_handle = NULL;
3404 dma_p->kaddrp = NULL;
3405 return (NXGE_ERROR);
3406 }
3407
3408 dma_p->kaddrp = kaddrp;
3409
3410 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3411 "nxge_dma_mem_alloc: kmem_alloc dmap $%p "
3412 "kaddr $%p alength %d",
3413 dma_p,
3414 kaddrp,
3415 dma_p->alength));
3416 break;
3417 }
3418 break;
3419
3420 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3421 case B_TRUE:
3422 kaddrp = (caddr_t)contig_mem_alloc(length);
3423 if (kaddrp == NULL) {
3424 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3425 "nxge_dma_mem_alloc:contig_mem_alloc failed."));
3426 ddi_dma_free_handle(&dma_p->dma_handle);
3427 return (NXGE_ERROR | NXGE_DDI_FAILED);
3428 }
3429
3430 dma_p->alength = length;
3431 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
3432 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
3433 &dma_p->dma_cookie, &dma_p->ncookies);
3434 if (ddi_status != DDI_DMA_MAPPED) {
3435 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3436 "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
3437 "(status 0x%x ncookies %d.)", ddi_status,
3438 dma_p->ncookies));
3439
3440 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3441 "==> nxge_dma_mem_alloc: (not mapped)"
3442 "length %lu (0x%x) "
3443 "free contig kaddrp $%p "
3444 "va_to_pa $%p",
3445 length, length,
3446 kaddrp,
3447 va_to_pa(kaddrp)));
3448
3449
3450 contig_mem_free((void *)kaddrp, length);
3451 ddi_dma_free_handle(&dma_p->dma_handle);
3452
3453 dma_p->dma_handle = NULL;
3454 dma_p->acc_handle = NULL;
3455 dma_p->alength = 0;
3456 dma_p->kaddrp = NULL;
3457
3458 return (NXGE_ERROR | NXGE_DDI_FAILED);
3459 }
3460
3461 if (dma_p->ncookies != 1 ||
3462 (dma_p->dma_cookie.dmac_laddress == 0)) {
3463 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3464 "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
3465 "cookie or "
3466 "dmac_laddress is NULL $%p size %d "
3467 " (status 0x%x ncookies %d.)",
3468 dma_p->dma_cookie.dmac_laddress,
3469 dma_p->dma_cookie.dmac_size,
3470 ddi_status,
3471 dma_p->ncookies));
3472
3473 contig_mem_free((void *)kaddrp, length);
3474 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3475 ddi_dma_free_handle(&dma_p->dma_handle);
3476
3477 dma_p->alength = 0;
3478 dma_p->dma_handle = NULL;
3479 dma_p->acc_handle = NULL;
3480 dma_p->kaddrp = NULL;
3481
3482 return (NXGE_ERROR | NXGE_DDI_FAILED);
3483 }
3484 break;
3485
3486 #else
3487 case B_TRUE:
3488 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3489 "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
3490 return (NXGE_ERROR | NXGE_DDI_FAILED);
3491 #endif
3492 }
3493
3494 dma_p->kaddrp = kaddrp;
3495 dma_p->last_kaddrp = (unsigned char *)kaddrp +
3496 dma_p->alength - RXBUF_64B_ALIGNED;
3497 #if defined(__i386)
3498 dma_p->ioaddr_pp =
3499 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
3500 #else
3501 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3502 #endif
3503 dma_p->last_ioaddr_pp =
3504 #if defined(__i386)
3505 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
3506 #else
3507
(unsigned char *)dma_p->dma_cookie.dmac_laddress +
3508 #endif
3509 dma_p->alength - RXBUF_64B_ALIGNED;
3510
3511 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
3512
3513 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3514 dma_p->orig_ioaddr_pp =
3515 (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3516 dma_p->orig_alength = length;
3517 dma_p->orig_kaddrp = kaddrp;
3518 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
3519 #endif
3520
3521 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
3522 "dma buffer allocated: dma_p $%p "
3523 "return dmac_laddress from cookie $%p cookie dmac_size %d "
3524 "dma_p->ioaddr_p $%p "
3525 "dma_p->orig_ioaddr_p $%p "
3526 "orig_vatopa $%p "
3527 "alength %d (0x%x) "
3528 "kaddrp $%p "
3529 "length %d (0x%x)",
3530 dma_p,
3531 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
3532 dma_p->ioaddr_pp,
3533 dma_p->orig_ioaddr_pp,
3534 dma_p->orig_vatopa,
3535 dma_p->alength, dma_p->alength,
3536 kaddrp,
3537 length, length));
3538
3539 return (NXGE_OK);
3540 }
3541
3542 static void
3543 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
3544 {
3545 if (dma_p->dma_handle != NULL) {
3546 if (dma_p->ncookies) {
3547 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3548 dma_p->ncookies = 0;
3549 }
3550 ddi_dma_free_handle(&dma_p->dma_handle);
3551 dma_p->dma_handle = NULL;
3552 }
3553
3554 if (dma_p->acc_handle != NULL) {
3555 ddi_dma_mem_free(&dma_p->acc_handle);
3556 dma_p->acc_handle = NULL;
3557 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3558 }
3559
3560 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3561 if (dma_p->contig_alloc_type &&
3562 dma_p->orig_kaddrp && dma_p->orig_alength) {
3563 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
3564 "kaddrp $%p (orig_kaddrp $%p) "
3565 "mem type %d "
3566 "orig_alength %d "
3567 "alength 0x%x (%d)",
3568 dma_p->kaddrp,
3569 dma_p->orig_kaddrp,
3570 dma_p->contig_alloc_type,
3571 dma_p->orig_alength,
3572 dma_p->alength, dma_p->alength));
3573
3574 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
3575 dma_p->orig_alength = 0;
3576 dma_p->orig_kaddrp = NULL;
3577 dma_p->contig_alloc_type = B_FALSE;
3578 }
3579 #endif
3580 dma_p->kaddrp = NULL;
3581 dma_p->alength = 0;
3582 }
3583
3584 static void
3585 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
3586 {
3587 uint64_t kaddr;
3588 uint32_t buf_size;
3589
3590 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));
3591
3592 if (dma_p->dma_handle != NULL) {
3593 if (dma_p->ncookies) {
3594 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3595 dma_p->ncookies = 0;
3596 }
3597 ddi_dma_free_handle(&dma_p->dma_handle);
3598 dma_p->dma_handle = NULL;
3599 }
3600
3601 if (dma_p->acc_handle != NULL) {
3602 ddi_dma_mem_free(&dma_p->acc_handle);
3603 dma_p->acc_handle = NULL;
3604 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3605 }
3606
3607 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3608 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
3609 dma_p,
3610 dma_p->buf_alloc_state));
3611
3612 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
3613 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3614 "<== nxge_dma_free_rx_data_buf: "
3615 "outstanding data buffers"));
3616 return;
3617 }
3618
3619 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3620 if (dma_p->contig_alloc_type &&
3621 dma_p->orig_kaddrp && dma_p->orig_alength) {
3622 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
3623 "kaddrp $%p (orig_kaddrp $%p) "
3624 "mem type %d "
3625 "orig_alength %d "
3626 "alength 0x%x (%d)",
3627 dma_p->kaddrp,
3628 dma_p->orig_kaddrp,
3629 dma_p->contig_alloc_type,
3630 dma_p->orig_alength,
3631 dma_p->alength, dma_p->alength));
3632
3633 kaddr = (uint64_t)dma_p->orig_kaddrp;
3634 buf_size = dma_p->orig_alength;
3635 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
3636 dma_p->orig_alength = 0;
3637 dma_p->orig_kaddrp = NULL;
3638 dma_p->contig_alloc_type = B_FALSE;
3639 dma_p->kaddrp = NULL;
3640 dma_p->alength = 0;
3641 return;
3642 }
3643 #endif
3644
3645 if (dma_p->kmem_alloc_type) {
3646 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3647 "nxge_dma_free_rx_data_buf: free kmem "
3648 "kaddrp $%p (orig_kaddrp $%p) "
3649 "alloc type %d "
3650 "orig_alength %d "
3651 "alength 0x%x (%d)",
3652 dma_p->kaddrp,
3653 dma_p->orig_kaddrp,
3654 dma_p->kmem_alloc_type,
3655 dma_p->orig_alength,
3656 dma_p->alength, dma_p->alength));
3657 #if defined(__i386)
3658 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
3659 #else
3660 kaddr = (uint64_t)dma_p->kaddrp;
3661 #endif
3662 buf_size = dma_p->orig_alength;
3663 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3664 "nxge_dma_free_rx_data_buf: free dmap $%p "
3665 "kaddr $%p buf_size %d",
3666 dma_p,
3667 kaddr, buf_size));
3668 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
3669 dma_p->alength = 0;
3670 dma_p->orig_alength = 0;
3671 dma_p->kaddrp = NULL;
3672 dma_p->kmem_alloc_type = B_FALSE;
3673 }
3674
3675 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
3676 }
3677
3678 /*
3679 * nxge_m_start() -- start transmitting and receiving.
3680 *
3681 * This function is called by the MAC layer when the first
3682 * stream is opened, to prepare the hardware for transmitting
3683 * and receiving packets.
3684 */
3685 static int
3686 nxge_m_start(void *arg)
3687 {
3688 p_nxge_t nxgep = (p_nxge_t)arg;
3689
3690 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3691
3692 /*
3693 * Are we already started?
3694 */
3695 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
3696 return (0);
3697 }
3698
3699 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) {
3700 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
3701 }
3702
3703 /*
3704 * Make sure RX MAC is disabled while we initialize.
3705 */
3706 if (!isLDOMguest(nxgep)) {
3707 (void) nxge_rx_mac_disable(nxgep);
3708 }
3709
3710 /*
3711 * Grab the global lock.
3712 */
3713 MUTEX_ENTER(nxgep->genlock);
3714
3715 /*
3716 * Initialize the driver and hardware.
3717 */
3718 if (nxge_init(nxgep) != NXGE_OK) {
3719 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3720 "<== nxge_m_start: initialization failed"));
3721 MUTEX_EXIT(nxgep->genlock);
3722 return (EIO);
3723 }
3724
3725 /*
3726 * Start the timer to check for system errors and tx hangs.
3727 */
3728 if (!isLDOMguest(nxgep))
3729 nxgep->nxge_timerid = nxge_start_timer(nxgep,
3730 nxge_check_hw_state, NXGE_CHECK_TIMER);
3731 #if defined(sun4v)
3732 else
3733 nxge_hio_start_timer(nxgep);
3734 #endif
3735
3736 nxgep->link_notify = B_TRUE;
3737 nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3738
3739 /*
3740 * Let the global lock go, since we are initialized.
3741 */
3742 MUTEX_EXIT(nxgep->genlock);
3743
3744 /*
3745 * Let the MAC start receiving packets, now that
3746 * we are initialized.
3747 */
3748 if (!isLDOMguest(nxgep)) {
3749 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
3750 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3751 "<== nxge_m_start: enable of RX mac failed"));
3752 return (EIO);
3753 }
3754
3755 /*
3756 * Enable hardware interrupts.
3757 */
3758 nxge_intr_hw_enable(nxgep);
3759 }
3760 #if defined(sun4v)
3761 else {
3762 /*
3763 * In a guest domain, we enable RDCs and their interrupts
3764 * as the last step.
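 * (This mirrors the non-guest path above, where the RX MAC and
 * the hardware interrupts were enabled directly instead.)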
3765 */ 3766 if (nxge_hio_rdc_enable(nxgep) != NXGE_OK) { 3767 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3768 "<== nxge_m_start: enable of RDCs failed")); 3769 return (EIO); 3770 } 3771 3772 if (nxge_hio_rdc_intr_arm(nxgep, B_TRUE) != NXGE_OK) { 3773 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3774 "<== nxge_m_start: intrs enable for RDCs failed")); 3775 return (EIO); 3776 } 3777 } 3778 #endif 3779 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3780 return (0); 3781 } 3782 3783 static boolean_t 3784 nxge_check_groups_stopped(p_nxge_t nxgep) 3785 { 3786 int i; 3787 3788 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 3789 if (nxgep->rx_hio_groups[i].started) 3790 return (B_FALSE); 3791 } 3792 3793 return (B_TRUE); 3794 } 3795 3796 /* 3797 * nxge_m_stop(): stop transmitting and receiving. 3798 */ 3799 static void 3800 nxge_m_stop(void *arg) 3801 { 3802 p_nxge_t nxgep = (p_nxge_t)arg; 3803 boolean_t groups_stopped; 3804 3805 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3806 3807 /* 3808 * Are the groups stopped? 3809 */ 3810 groups_stopped = nxge_check_groups_stopped(nxgep); 3811 ASSERT(groups_stopped == B_TRUE); 3812 if (!groups_stopped) { 3813 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n", 3814 nxgep->instance); 3815 return; 3816 } 3817 3818 if (!isLDOMguest(nxgep)) { 3819 /* 3820 * Disable the RX mac. 3821 */ 3822 (void) nxge_rx_mac_disable(nxgep); 3823 3824 /* 3825 * Wait for the IPP to drain. 3826 */ 3827 (void) nxge_ipp_drain(nxgep); 3828 3829 /* 3830 * Disable hardware interrupts. 3831 */ 3832 nxge_intr_hw_disable(nxgep); 3833 } 3834 #if defined(sun4v) 3835 else { 3836 (void) nxge_hio_rdc_intr_arm(nxgep, B_FALSE); 3837 } 3838 #endif 3839 3840 /* 3841 * Grab the global lock. 3842 */ 3843 MUTEX_ENTER(nxgep->genlock); 3844 3845 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3846 if (nxgep->nxge_timerid) { 3847 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3848 nxgep->nxge_timerid = 0; 3849 } 3850 3851 /* 3852 * Clean up. 3853 */ 3854 nxge_uninit(nxgep); 3855 3856 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3857 3858 /* 3859 * Let go of the global lock. 
3860 */ 3861 MUTEX_EXIT(nxgep->genlock); 3862 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3863 } 3864 3865 static int 3866 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3867 { 3868 p_nxge_t nxgep = (p_nxge_t)arg; 3869 struct ether_addr addrp; 3870 3871 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3872 "==> nxge_m_multicst: add %d", add)); 3873 3874 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3875 if (add) { 3876 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3877 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3878 "<== nxge_m_multicst: add multicast failed")); 3879 return (EINVAL); 3880 } 3881 } else { 3882 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3883 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3884 "<== nxge_m_multicst: del multicast failed")); 3885 return (EINVAL); 3886 } 3887 } 3888 3889 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3890 3891 return (0); 3892 } 3893 3894 static int 3895 nxge_m_promisc(void *arg, boolean_t on) 3896 { 3897 p_nxge_t nxgep = (p_nxge_t)arg; 3898 3899 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3900 "==> nxge_m_promisc: on %d", on)); 3901 3902 if (nxge_set_promisc(nxgep, on)) { 3903 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3904 "<== nxge_m_promisc: set promisc failed")); 3905 return (EINVAL); 3906 } 3907 3908 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3909 "<== nxge_m_promisc: on %d", on)); 3910 3911 return (0); 3912 } 3913 3914 static void 3915 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3916 { 3917 p_nxge_t nxgep = (p_nxge_t)arg; 3918 struct iocblk *iocp; 3919 boolean_t need_privilege; 3920 int err; 3921 int cmd; 3922 3923 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3924 3925 iocp = (struct iocblk *)mp->b_rptr; 3926 iocp->ioc_error = 0; 3927 need_privilege = B_TRUE; 3928 cmd = iocp->ioc_cmd; 3929 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3930 switch (cmd) { 3931 default: 3932 miocnak(wq, mp, 0, EINVAL); 3933 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3934 return; 3935 3936 case LB_GET_INFO_SIZE: 3937 case LB_GET_INFO: 3938 case LB_GET_MODE: 3939 need_privilege = B_FALSE; 3940 break; 3941 case LB_SET_MODE: 3942 break; 3943 3944 3945 case NXGE_GET_MII: 3946 case NXGE_PUT_MII: 3947 case NXGE_GET64: 3948 case NXGE_PUT64: 3949 case NXGE_GET_TX_RING_SZ: 3950 case NXGE_GET_TX_DESC: 3951 case NXGE_TX_SIDE_RESET: 3952 case NXGE_RX_SIDE_RESET: 3953 case NXGE_GLOBAL_RESET: 3954 case NXGE_RESET_MAC: 3955 case NXGE_TX_REGS_DUMP: 3956 case NXGE_RX_REGS_DUMP: 3957 case NXGE_INT_REGS_DUMP: 3958 case NXGE_VIR_INT_REGS_DUMP: 3959 case NXGE_PUT_TCAM: 3960 case NXGE_GET_TCAM: 3961 case NXGE_RTRACE: 3962 case NXGE_RDUMP: 3963 3964 need_privilege = B_FALSE; 3965 break; 3966 case NXGE_INJECT_ERR: 3967 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3968 nxge_err_inject(nxgep, wq, mp); 3969 break; 3970 } 3971 3972 if (need_privilege) { 3973 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3974 if (err != 0) { 3975 miocnak(wq, mp, 0, err); 3976 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3977 "<== nxge_m_ioctl: no priv")); 3978 return; 3979 } 3980 } 3981 3982 switch (cmd) { 3983 3984 case LB_GET_MODE: 3985 case LB_SET_MODE: 3986 case LB_GET_INFO_SIZE: 3987 case LB_GET_INFO: 3988 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3989 break; 3990 3991 case NXGE_GET_MII: 3992 case NXGE_PUT_MII: 3993 case NXGE_PUT_TCAM: 3994 case NXGE_GET_TCAM: 3995 case NXGE_GET64: 3996 case NXGE_PUT64: 3997 case NXGE_GET_TX_RING_SZ: 3998 case NXGE_GET_TX_DESC: 3999 case NXGE_TX_SIDE_RESET: 4000 case NXGE_RX_SIDE_RESET: 4001 case NXGE_GLOBAL_RESET: 4002 case 
NXGE_RESET_MAC: 4003 case NXGE_TX_REGS_DUMP: 4004 case NXGE_RX_REGS_DUMP: 4005 case NXGE_INT_REGS_DUMP: 4006 case NXGE_VIR_INT_REGS_DUMP: 4007 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4008 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 4009 nxge_hw_ioctl(nxgep, wq, mp, iocp); 4010 break; 4011 } 4012 4013 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 4014 } 4015 4016 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 4017 4018 void 4019 nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory) 4020 { 4021 p_nxge_mmac_stats_t mmac_stats; 4022 int i; 4023 nxge_mmac_t *mmac_info; 4024 4025 mmac_info = &nxgep->nxge_mmac_info; 4026 4027 mmac_stats = &nxgep->statsp->mmac_stats; 4028 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 4029 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 4030 4031 for (i = 0; i < ETHERADDRL; i++) { 4032 if (factory) { 4033 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4034 = mmac_info->factory_mac_pool[slot][ 4035 (ETHERADDRL-1) - i]; 4036 } else { 4037 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4038 = mmac_info->mac_pool[slot].addr[ 4039 (ETHERADDRL - 1) - i]; 4040 } 4041 } 4042 } 4043 4044 /* 4045 * nxge_altmac_set() -- Set an alternate MAC address 4046 */ 4047 static int 4048 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot, 4049 int rdctbl, boolean_t usetbl) 4050 { 4051 uint8_t addrn; 4052 uint8_t portn; 4053 npi_mac_addr_t altmac; 4054 hostinfo_t mac_rdc; 4055 p_nxge_class_pt_cfg_t clscfgp; 4056 4057 4058 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 4059 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 4060 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 4061 4062 portn = nxgep->mac.portnum; 4063 addrn = (uint8_t)slot - 1; 4064 4065 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, 4066 nxgep->function_num, addrn, &altmac) != NPI_SUCCESS) 4067 return (EIO); 4068 4069 /* 4070 * Set the rdc table number for the host info entry 4071 * for this mac address slot. 4072 */ 4073 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 4074 mac_rdc.value = 0; 4075 if (usetbl) 4076 mac_rdc.bits.w0.rdc_tbl_num = rdctbl; 4077 else 4078 mac_rdc.bits.w0.rdc_tbl_num = 4079 clscfgp->mac_host_info[addrn].rdctbl; 4080 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 4081 4082 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 4083 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 4084 return (EIO); 4085 } 4086 4087 /* 4088 * Enable comparison with the alternate MAC address. 4089 * The first alternate address is enabled by bit 1 of register 4090 * BMAC_ALTAD_CMPEN but by bit 0 of register 4091 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 4092 * accordingly before calling npi_mac_altaddr_enable. 4093 */ 4094 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4095 addrn = (uint8_t)slot - 1; 4096 else 4097 addrn = (uint8_t)slot; 4098 4099 if (npi_mac_altaddr_enable(nxgep->npi_handle, 4100 nxgep->function_num, addrn) != NPI_SUCCESS) { 4101 return (EIO); 4102 } 4103 4104 return (0); 4105 } 4106 4107 /* 4108 * nxge_m_mmac_add_g() - find an unused address slot, set the address 4109 * value to the one specified, enable the port to start filtering on 4110 * the new MAC address. Returns 0 on success.
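 *
 * A minimal usage sketch (hypothetical address and RDC table id):
 *
 *	uint8_t mac[ETHERADDRL] = { 0x0, 0x14, 0x4f, 0x2a, 0x3b, 0x4c };
 *	int rv = nxge_m_mmac_add_g(nxgep, mac, rdctbl, B_TRUE);
 *
 * rv is 0 on success, ENOSPC when no slot is free, or EIO if
 * programming the hardware fails.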
4111 */ 4112 int 4113 nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl, 4114 boolean_t usetbl) 4115 { 4116 p_nxge_t nxgep = arg; 4117 int slot; 4118 nxge_mmac_t *mmac_info; 4119 int err; 4120 nxge_status_t status; 4121 4122 mutex_enter(nxgep->genlock); 4123 4124 /* 4125 * Make sure that nxge is initialized if _start() has 4126 * not been called. 4127 */ 4128 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4129 status = nxge_init(nxgep); 4130 if (status != NXGE_OK) { 4131 mutex_exit(nxgep->genlock); 4132 return (ENXIO); 4133 } 4134 } 4135 4136 mmac_info = &nxgep->nxge_mmac_info; 4137 if (mmac_info->naddrfree == 0) { 4138 mutex_exit(nxgep->genlock); 4139 return (ENOSPC); 4140 } 4141 4142 /* 4143 * Search for the first available slot. Because naddrfree 4144 * is not zero, we are guaranteed to find one. 4145 * Each of the first two ports of Neptune has 16 alternate 4146 * MAC slots but only the first 7 (of 15) slots have assigned factory 4147 * MAC addresses. We first search among the slots without bundled 4148 * factory MACs. If we fail to find one in that range, then we 4149 * search the slots with bundled factory MACs. A factory MAC 4150 * will be wasted while the slot is used with a user MAC address. 4151 * But the slot can be used with its factory MAC again after 4152 * nxge_m_mmac_remove and nxge_m_mmac_reserve are called. 4153 */ 4154 for (slot = 0; slot <= mmac_info->num_mmac; slot++) { 4155 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4156 break; 4157 } 4158 4159 ASSERT(slot <= mmac_info->num_mmac); 4160 4161 if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl, 4162 usetbl)) != 0) { 4163 mutex_exit(nxgep->genlock); 4164 return (err); 4165 } 4166 4167 bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 4168 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 4169 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4170 mmac_info->naddrfree--; 4171 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4172 4173 mutex_exit(nxgep->genlock); 4174 return (0); 4175 } 4176 4177 /* 4178 * Remove the specified mac address and update the HW not to filter 4179 * the mac address anymore. 4180 */ 4181 int 4182 nxge_m_mmac_remove(void *arg, int slot) 4183 { 4184 p_nxge_t nxgep = arg; 4185 nxge_mmac_t *mmac_info; 4186 uint8_t addrn; 4187 uint8_t portn; 4188 int err = 0; 4189 nxge_status_t status; 4190 4191 mutex_enter(nxgep->genlock); 4192 4193 /* 4194 * Make sure that nxge is initialized if _start() has 4195 * not been called. 4196 */ 4197 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4198 status = nxge_init(nxgep); 4199 if (status != NXGE_OK) { 4200 mutex_exit(nxgep->genlock); 4201 return (ENXIO); 4202 } 4203 } 4204 4205 mmac_info = &nxgep->nxge_mmac_info; 4206 if (slot < 1 || slot > mmac_info->num_mmac) { 4207 mutex_exit(nxgep->genlock); 4208 return (EINVAL); 4209 } 4210 4211 portn = nxgep->mac.portnum; 4212 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4213 addrn = (uint8_t)slot - 1; 4214 else 4215 addrn = (uint8_t)slot; 4216 4217 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4218 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 4219 == NPI_SUCCESS) { 4220 mmac_info->naddrfree++; 4221 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 4222 /* 4223 * Regardless of whether the MAC we just stopped filtering 4224 * is a user addr or a factory addr, we must set 4225 * the MMAC_VENDOR_ADDR flag if this slot has an 4226 * associated factory MAC to indicate that a factory 4227 * MAC is available.
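 * (Slots are 1-based, so "slot <= num_factory_mmac" below is the
 * test for an associated factory address.)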
4228 */ 4229 if (slot <= mmac_info->num_factory_mmac) { 4230 mmac_info->mac_pool[slot].flags 4231 |= MMAC_VENDOR_ADDR; 4232 } 4233 /* 4234 * Clear mac_pool[slot].addr so that kstat shows 0 4235 * alternate MAC address if the slot is not used. 4236 * (But nxge_m_mmac_get returns the factory MAC even 4237 * when the slot is not used!) 4238 */ 4239 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 4240 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4241 } else { 4242 err = EIO; 4243 } 4244 } else { 4245 err = EINVAL; 4246 } 4247 4248 mutex_exit(nxgep->genlock); 4249 return (err); 4250 } 4251 4252 /* 4253 * The callback to query all the factory addresses. naddr must be the same as 4254 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and 4255 * mcm_addr is the space allocated to hold all the addresses, whose size is 4256 * naddr * MAXMACADDRLEN. 4257 */ 4258 static void 4259 nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr) 4260 { 4261 nxge_t *nxgep = arg; 4262 nxge_mmac_t *mmac_info; 4263 int i; 4264 4265 mutex_enter(nxgep->genlock); 4266 4267 mmac_info = &nxgep->nxge_mmac_info; 4268 ASSERT(naddr == mmac_info->num_factory_mmac); 4269 4270 for (i = 0; i < naddr; i++) { 4271 bcopy(mmac_info->factory_mac_pool[i + 1], 4272 addr + i * MAXMACADDRLEN, ETHERADDRL); 4273 } 4274 4275 mutex_exit(nxgep->genlock); 4276 } 4277 4278 4279 static boolean_t 4280 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4281 { 4282 nxge_t *nxgep = arg; 4283 uint32_t *txflags = cap_data; 4284 4285 switch (cap) { 4286 case MAC_CAPAB_HCKSUM: 4287 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4288 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload)); 4289 if (nxge_cksum_offload <= 1) { 4290 *txflags = HCKSUM_INET_PARTIAL; 4291 } 4292 break; 4293 4294 case MAC_CAPAB_MULTIFACTADDR: { 4295 mac_capab_multifactaddr_t *mfacp = cap_data; 4296 4297 mutex_enter(nxgep->genlock); 4298 mfacp->mcm_naddr = nxgep->nxge_mmac_info.num_factory_mmac; 4299 mfacp->mcm_getaddr = nxge_m_getfactaddr; 4300 mutex_exit(nxgep->genlock); 4301 break; 4302 } 4303 4304 case MAC_CAPAB_LSO: { 4305 mac_capab_lso_t *cap_lso = cap_data; 4306 4307 if (nxgep->soft_lso_enable) { 4308 if (nxge_cksum_offload <= 1) { 4309 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4310 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 4311 nxge_lso_max = NXGE_LSO_MAXLEN; 4312 } 4313 cap_lso->lso_basic_tcp_ipv4.lso_max = 4314 nxge_lso_max; 4315 } 4316 break; 4317 } else { 4318 return (B_FALSE); 4319 } 4320 } 4321 4322 case MAC_CAPAB_RINGS: { 4323 mac_capab_rings_t *cap_rings = cap_data; 4324 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 4325 4326 mutex_enter(nxgep->genlock); 4327 if (cap_rings->mr_type == MAC_RING_TYPE_RX) { 4328 cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC; 4329 cap_rings->mr_rnum = p_cfgp->max_rdcs; 4330 cap_rings->mr_rget = nxge_fill_ring; 4331 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids; 4332 cap_rings->mr_gget = nxge_hio_group_get; 4333 cap_rings->mr_gaddring = nxge_group_add_ring; 4334 cap_rings->mr_gremring = nxge_group_rem_ring; 4335 4336 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4337 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]", 4338 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids)); 4339 } else { 4340 cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC; 4341 cap_rings->mr_rnum = p_cfgp->tdc.count; 4342 cap_rings->mr_rget = nxge_fill_ring; 4343 if (isLDOMservice(nxgep)) { 4344 /* share capable */ 4345 /* Do not report the default ring: hence -1 */ 4346 cap_rings->mr_gnum = 4347 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1; 4348 } else {
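/* Not a service domain: no TX ring groups are advertised. */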
4349 cap_rings->mr_gnum = 0; 4350 } 4351 4352 cap_rings->mr_gget = nxge_hio_group_get; 4353 cap_rings->mr_gaddring = nxge_group_add_ring; 4354 cap_rings->mr_gremring = nxge_group_rem_ring; 4355 4356 NXGE_DEBUG_MSG((nxgep, TX_CTL, 4357 "==> nxge_m_getcapab: tx rings # of rings %d", 4358 p_cfgp->tdc.count)); 4359 } 4360 mutex_exit(nxgep->genlock); 4361 break; 4362 } 4363 4364 #if defined(sun4v) 4365 case MAC_CAPAB_SHARES: { 4366 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4367 4368 /* 4369 * Only the service domain driver responds to 4370 * this capability request. 4371 */ 4372 mutex_enter(nxgep->genlock); 4373 if (isLDOMservice(nxgep)) { 4374 mshares->ms_snum = 3; 4375 mshares->ms_handle = (void *)nxgep; 4376 mshares->ms_salloc = nxge_hio_share_alloc; 4377 mshares->ms_sfree = nxge_hio_share_free; 4378 mshares->ms_sadd = nxge_hio_share_add_group; 4379 mshares->ms_sremove = nxge_hio_share_rem_group; 4380 mshares->ms_squery = nxge_hio_share_query; 4381 mshares->ms_sbind = nxge_hio_share_bind; 4382 mshares->ms_sunbind = nxge_hio_share_unbind; 4383 mutex_exit(nxgep->genlock); 4384 } else { 4385 mutex_exit(nxgep->genlock); 4386 return (B_FALSE); 4387 } 4388 break; 4389 } 4390 #endif 4391 default: 4392 return (B_FALSE); 4393 } 4394 return (B_TRUE); 4395 } 4396 4397 static boolean_t 4398 nxge_param_locked(mac_prop_id_t pr_num) 4399 { 4400 /* 4401 * All adv_* parameters are locked (read-only) while 4402 * the device is in any sort of loopback mode ... 4403 */ 4404 switch (pr_num) { 4405 case MAC_PROP_ADV_1000FDX_CAP: 4406 case MAC_PROP_EN_1000FDX_CAP: 4407 case MAC_PROP_ADV_1000HDX_CAP: 4408 case MAC_PROP_EN_1000HDX_CAP: 4409 case MAC_PROP_ADV_100FDX_CAP: 4410 case MAC_PROP_EN_100FDX_CAP: 4411 case MAC_PROP_ADV_100HDX_CAP: 4412 case MAC_PROP_EN_100HDX_CAP: 4413 case MAC_PROP_ADV_10FDX_CAP: 4414 case MAC_PROP_EN_10FDX_CAP: 4415 case MAC_PROP_ADV_10HDX_CAP: 4416 case MAC_PROP_EN_10HDX_CAP: 4417 case MAC_PROP_AUTONEG: 4418 case MAC_PROP_FLOWCTRL: 4419 return (B_TRUE); 4420 } 4421 return (B_FALSE); 4422 } 4423 4424 /* 4425 * callback functions for set/get of properties 4426 */ 4427 static int 4428 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4429 uint_t pr_valsize, const void *pr_val) 4430 { 4431 nxge_t *nxgep = barg; 4432 p_nxge_param_t param_arr; 4433 p_nxge_stats_t statsp; 4434 int err = 0; 4435 uint8_t val; 4436 uint32_t cur_mtu, new_mtu, old_framesize; 4437 link_flowctrl_t fl; 4438 4439 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4440 param_arr = nxgep->param_arr; 4441 statsp = nxgep->statsp; 4442 mutex_enter(nxgep->genlock); 4443 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4444 nxge_param_locked(pr_num)) { 4445 /* 4446 * All adv_* parameters are locked (read-only) 4447 * while the device is in any sort of loopback mode. 
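 * The full list of locked properties is in nxge_param_locked()
 * above.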
4448 */ 4449 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4450 "==> nxge_m_setprop: loopback mode: read only")); 4451 mutex_exit(nxgep->genlock); 4452 return (EBUSY); 4453 } 4454 4455 val = *(uint8_t *)pr_val; 4456 switch (pr_num) { 4457 case MAC_PROP_EN_1000FDX_CAP: 4458 nxgep->param_en_1000fdx = val; 4459 param_arr[param_anar_1000fdx].value = val; 4460 4461 goto reprogram; 4462 4463 case MAC_PROP_EN_100FDX_CAP: 4464 nxgep->param_en_100fdx = val; 4465 param_arr[param_anar_100fdx].value = val; 4466 4467 goto reprogram; 4468 4469 case MAC_PROP_EN_10FDX_CAP: 4470 nxgep->param_en_10fdx = val; 4471 param_arr[param_anar_10fdx].value = val; 4472 4473 goto reprogram; 4474 4475 case MAC_PROP_EN_1000HDX_CAP: 4476 case MAC_PROP_EN_100HDX_CAP: 4477 case MAC_PROP_EN_10HDX_CAP: 4478 case MAC_PROP_ADV_1000FDX_CAP: 4479 case MAC_PROP_ADV_1000HDX_CAP: 4480 case MAC_PROP_ADV_100FDX_CAP: 4481 case MAC_PROP_ADV_100HDX_CAP: 4482 case MAC_PROP_ADV_10FDX_CAP: 4483 case MAC_PROP_ADV_10HDX_CAP: 4484 case MAC_PROP_STATUS: 4485 case MAC_PROP_SPEED: 4486 case MAC_PROP_DUPLEX: 4487 err = EINVAL; /* cannot set read-only properties */ 4488 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4489 "==> nxge_m_setprop: read only property %d", 4490 pr_num)); 4491 break; 4492 4493 case MAC_PROP_AUTONEG: 4494 param_arr[param_autoneg].value = val; 4495 4496 goto reprogram; 4497 4498 case MAC_PROP_MTU: 4499 cur_mtu = nxgep->mac.default_mtu; 4500 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4501 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4502 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4503 new_mtu, nxgep->mac.is_jumbo)); 4504 4505 if (new_mtu == cur_mtu) { 4506 err = 0; 4507 break; 4508 } 4509 4510 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4511 err = EBUSY; 4512 break; 4513 } 4514 4515 if ((new_mtu < NXGE_DEFAULT_MTU) || 4516 (new_mtu > NXGE_MAXIMUM_MTU)) { 4517 err = EINVAL; 4518 break; 4519 } 4520 4521 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4522 nxgep->mac.maxframesize = (uint16_t) 4523 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4524 if (nxge_mac_set_framesize(nxgep)) { 4525 nxgep->mac.maxframesize = 4526 (uint16_t)old_framesize; 4527 err = EINVAL; 4528 break; 4529 } 4530 4531 err = mac_maxsdu_update(nxgep->mach, new_mtu); 4532 if (err) { 4533 nxgep->mac.maxframesize = 4534 (uint16_t)old_framesize; 4535 err = EINVAL; 4536 break; 4537 } 4538 4539 nxgep->mac.default_mtu = new_mtu; 4540 if (new_mtu > NXGE_DEFAULT_MTU) 4541 nxgep->mac.is_jumbo = B_TRUE; 4542 else 4543 nxgep->mac.is_jumbo = B_FALSE; 4544 4545 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4546 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4547 new_mtu, nxgep->mac.maxframesize)); 4548 break; 4549 4550 case MAC_PROP_FLOWCTRL: 4551 bcopy(pr_val, &fl, sizeof (fl)); 4552 switch (fl) { 4553 default: 4554 err = EINVAL; 4555 break; 4556 4557 case LINK_FLOWCTRL_NONE: 4558 param_arr[param_anar_pause].value = 0; 4559 break; 4560 4561 case LINK_FLOWCTRL_RX: 4562 param_arr[param_anar_pause].value = 1; 4563 break; 4564 4565 case LINK_FLOWCTRL_TX: 4566 case LINK_FLOWCTRL_BI: 4567 err = EINVAL; 4568 break; 4569 } 4570 4571 reprogram: 4572 if (err == 0) { 4573 if (!nxge_param_link_update(nxgep)) { 4574 err = EINVAL; 4575 } 4576 } 4577 break; 4578 case MAC_PROP_PRIVATE: 4579 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4580 "==> nxge_m_setprop: private property")); 4581 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 4582 pr_val); 4583 break; 4584 4585 default: 4586 err = ENOTSUP; 4587 break; 4588 } 4589 4590 mutex_exit(nxgep->genlock); 4591 4592 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4593 "<== nxge_m_setprop (return %d)", err)); 4594 
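/*
 * Example flow (hypothetical link name): a request such as
 * "dladm set-linkprop -p mtu=9000 nxge0" arrives above as
 * MAC_PROP_MTU, which validates the range, reprograms the MAC
 * frame size and updates the SDU before err is returned here.
 */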
return (err); 4595 } 4596 4597 static int 4598 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4599 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 4600 { 4601 nxge_t *nxgep = barg; 4602 p_nxge_param_t param_arr = nxgep->param_arr; 4603 p_nxge_stats_t statsp = nxgep->statsp; 4604 int err = 0; 4605 link_flowctrl_t fl; 4606 uint64_t tmp = 0; 4607 link_state_t ls; 4608 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4609 4610 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4611 "==> nxge_m_getprop: pr_num %d", pr_num)); 4612 4613 if (pr_valsize == 0) 4614 return (EINVAL); 4615 4616 *perm = MAC_PROP_PERM_RW; 4617 4618 if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) { 4619 err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4620 return (err); 4621 } 4622 4623 bzero(pr_val, pr_valsize); 4624 switch (pr_num) { 4625 case MAC_PROP_DUPLEX: 4626 *perm = MAC_PROP_PERM_READ; 4627 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4628 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4629 "==> nxge_m_getprop: duplex mode %d", 4630 *(uint8_t *)pr_val)); 4631 break; 4632 4633 case MAC_PROP_SPEED: 4634 if (pr_valsize < sizeof (uint64_t)) 4635 return (EINVAL); 4636 *perm = MAC_PROP_PERM_READ; 4637 tmp = statsp->mac_stats.link_speed * 1000000ull; 4638 bcopy(&tmp, pr_val, sizeof (tmp)); 4639 break; 4640 4641 case MAC_PROP_STATUS: 4642 if (pr_valsize < sizeof (link_state_t)) 4643 return (EINVAL); 4644 *perm = MAC_PROP_PERM_READ; 4645 if (!statsp->mac_stats.link_up) 4646 ls = LINK_STATE_DOWN; 4647 else 4648 ls = LINK_STATE_UP; 4649 bcopy(&ls, pr_val, sizeof (ls)); 4650 break; 4651 4652 case MAC_PROP_AUTONEG: 4653 *(uint8_t *)pr_val = 4654 param_arr[param_autoneg].value; 4655 break; 4656 4657 case MAC_PROP_FLOWCTRL: 4658 if (pr_valsize < sizeof (link_flowctrl_t)) 4659 return (EINVAL); 4660 4661 fl = LINK_FLOWCTRL_NONE; 4662 if (param_arr[param_anar_pause].value) { 4663 fl = LINK_FLOWCTRL_RX; 4664 } 4665 bcopy(&fl, pr_val, sizeof (fl)); 4666 break; 4667 4668 case MAC_PROP_ADV_1000FDX_CAP: 4669 *perm = MAC_PROP_PERM_READ; 4670 *(uint8_t *)pr_val = 4671 param_arr[param_anar_1000fdx].value; 4672 break; 4673 4674 case MAC_PROP_EN_1000FDX_CAP: 4675 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4676 break; 4677 4678 case MAC_PROP_ADV_100FDX_CAP: 4679 *perm = MAC_PROP_PERM_READ; 4680 *(uint8_t *)pr_val = 4681 param_arr[param_anar_100fdx].value; 4682 break; 4683 4684 case MAC_PROP_EN_100FDX_CAP: 4685 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4686 break; 4687 4688 case MAC_PROP_ADV_10FDX_CAP: 4689 *perm = MAC_PROP_PERM_READ; 4690 *(uint8_t *)pr_val = 4691 param_arr[param_anar_10fdx].value; 4692 break; 4693 4694 case MAC_PROP_EN_10FDX_CAP: 4695 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4696 break; 4697 4698 case MAC_PROP_EN_1000HDX_CAP: 4699 case MAC_PROP_EN_100HDX_CAP: 4700 case MAC_PROP_EN_10HDX_CAP: 4701 case MAC_PROP_ADV_1000HDX_CAP: 4702 case MAC_PROP_ADV_100HDX_CAP: 4703 case MAC_PROP_ADV_10HDX_CAP: 4704 err = ENOTSUP; 4705 break; 4706 4707 case MAC_PROP_PRIVATE: 4708 err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4709 pr_valsize, pr_val, perm); 4710 break; 4711 4712 case MAC_PROP_MTU: { 4713 mac_propval_range_t range; 4714 4715 if (!(pr_flags & MAC_PROP_POSSIBLE)) 4716 return (ENOTSUP); 4717 if (pr_valsize < sizeof (mac_propval_range_t)) 4718 return (EINVAL); 4719 range.mpr_count = 1; 4720 range.mpr_type = MAC_PROPVAL_UINT32; 4721 range.range_uint32[0].mpur_min = 4722 range.range_uint32[0].mpur_max = NXGE_DEFAULT_MTU; 4723 if (nxgep->mac.is_jumbo) 4724 range.range_uint32[0].mpur_max = 
4725 NXGE_MAXIMUM_MTU; 4726 bcopy(&range, pr_val, sizeof (range)); 4727 break; 4728 } 4729 default: 4730 err = EINVAL; 4731 break; 4732 } 4733 4734 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4735 4736 return (err); 4737 } 4738 4739 /* ARGSUSED */ 4740 static int 4741 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4742 const void *pr_val) 4743 { 4744 p_nxge_param_t param_arr = nxgep->param_arr; 4745 int err = 0; 4746 long result; 4747 4748 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4749 "==> nxge_set_priv_prop: name %s", pr_name)); 4750 4751 /* Blanking */ 4752 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4753 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4754 (char *)pr_val, 4755 (caddr_t)&param_arr[param_rxdma_intr_time]); 4756 if (err) { 4757 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4758 "<== nxge_set_priv_prop: " 4759 "unable to set (%s)", pr_name)); 4760 err = EINVAL; 4761 } else { 4762 err = 0; 4763 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4764 "<== nxge_set_priv_prop: " 4765 "set (%s)", pr_name)); 4766 } 4767 4768 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4769 "<== nxge_set_priv_prop: name %s (value %d)", 4770 pr_name, result)); 4771 4772 return (err); 4773 } 4774 4775 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4776 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4777 (char *)pr_val, 4778 (caddr_t)&param_arr[param_rxdma_intr_pkts]); 4779 if (err) { 4780 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4781 "<== nxge_set_priv_prop: " 4782 "unable to set (%s)", pr_name)); 4783 err = EINVAL; 4784 } else { 4785 err = 0; 4786 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4787 "<== nxge_set_priv_prop: " 4788 "set (%s)", pr_name)); 4789 } 4790 4791 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4792 "<== nxge_set_priv_prop: name %s (value %d)", 4793 pr_name, result)); 4794 4795 return (err); 4796 } 4797 4798 /* Classification */ 4799 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4800 if (pr_val == NULL) { 4801 err = EINVAL; 4802 return (err); 4803 } 4804 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4805 4806 err = nxge_param_set_ip_opt(nxgep, NULL, 4807 NULL, (char *)pr_val, 4808 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]); 4809 4810 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4811 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4812 pr_name, result)); 4813 4814 return (err); 4815 } 4816 4817 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 4818 if (pr_val == NULL) { 4819 err = EINVAL; 4820 return (err); 4821 } 4822 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4823 4824 err = nxge_param_set_ip_opt(nxgep, NULL, 4825 NULL, (char *)pr_val, 4826 (caddr_t)&param_arr[param_class_opt_ipv4_udp]); 4827 4828 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4829 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4830 pr_name, result)); 4831 4832 return (err); 4833 } 4834 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 4835 if (pr_val == NULL) { 4836 err = EINVAL; 4837 return (err); 4838 } 4839 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4840 4841 err = nxge_param_set_ip_opt(nxgep, NULL, 4842 NULL, (char *)pr_val, 4843 (caddr_t)&param_arr[param_class_opt_ipv4_ah]); 4844 4845 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4846 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4847 pr_name, result)); 4848 4849 return (err); 4850 } 4851 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 4852 if (pr_val == NULL) { 4853 err = EINVAL; 4854 return (err); 4855 } 4856 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4857 4858 err = nxge_param_set_ip_opt(nxgep, NULL, 4859 NULL, (char *)pr_val, 4860 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]); 4861 4862
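/*
 * As in the other class-option cases, the value string is passed
 * to nxge_param_set_ip_opt() unparsed; the ddi_strtol() result
 * above only feeds the debug trace.
 */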
NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4863 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4864 pr_name, result)); 4865 4866 return (err); 4867 } 4868 4869 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 4870 if (pr_val == NULL) { 4871 err = EINVAL; 4872 return (err); 4873 } 4874 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4875 4876 err = nxge_param_set_ip_opt(nxgep, NULL, 4877 NULL, (char *)pr_val, 4878 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]); 4879 4880 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4881 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4882 pr_name, result)); 4883 4884 return (err); 4885 } 4886 4887 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 4888 if (pr_val == NULL) { 4889 err = EINVAL; 4890 return (err); 4891 } 4892 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4893 4894 err = nxge_param_set_ip_opt(nxgep, NULL, 4895 NULL, (char *)pr_val, 4896 (caddr_t)&param_arr[param_class_opt_ipv6_udp]); 4897 4898 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4899 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4900 pr_name, result)); 4901 4902 return (err); 4903 } 4904 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 4905 if (pr_val == NULL) { 4906 err = EINVAL; 4907 return (err); 4908 } 4909 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4910 4911 err = nxge_param_set_ip_opt(nxgep, NULL, 4912 NULL, (char *)pr_val, 4913 (caddr_t)&param_arr[param_class_opt_ipv6_ah]); 4914 4915 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4916 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4917 pr_name, result)); 4918 4919 return (err); 4920 } 4921 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 4922 if (pr_val == NULL) { 4923 err = EINVAL; 4924 return (err); 4925 } 4926 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4927 4928 err = nxge_param_set_ip_opt(nxgep, NULL, 4929 NULL, (char *)pr_val, 4930 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]); 4931 4932 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4933 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4934 pr_name, result)); 4935 4936 return (err); 4937 } 4938 4939 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 4940 if (pr_val == NULL) { 4941 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4942 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 4943 err = EINVAL; 4944 return (err); 4945 } 4946 4947 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4948 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4949 "<== nxge_set_priv_prop: name %s " 4950 "(lso %d pr_val %s value %d)", 4951 pr_name, nxgep->soft_lso_enable, pr_val, result)); 4952 4953 if (result > 1 || result < 0) { 4954 err = EINVAL; 4955 } else { 4956 if (nxgep->soft_lso_enable == (uint32_t)result) { 4957 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4958 "no change (%d %d)", 4959 nxgep->soft_lso_enable, result)); 4960 return (0); 4961 } 4962 /* Apply the new setting only when it validated. */ 4963 nxgep->soft_lso_enable = (int)result; 4964 } 4965 4966 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4967 "<== nxge_set_priv_prop: name %s (value %d)", 4968 pr_name, result)); 4969 4970 return (err); 4971 } 4972 /* 4973 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the 4974 * following code to be executed.
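 * The same param_arr[] handlers therefore back both the legacy
 * ndd interface and these private MAC properties.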
4975 */ 4976 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 4977 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 4978 (caddr_t)&param_arr[param_anar_10gfdx]); 4979 return (err); 4980 } 4981 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 4982 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 4983 (caddr_t)&param_arr[param_anar_pause]); 4984 return (err); 4985 } 4986 4987 return (EINVAL); 4988 } 4989 4990 static int 4991 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags, 4992 uint_t pr_valsize, void *pr_val, uint_t *perm) 4993 { 4994 p_nxge_param_t param_arr = nxgep->param_arr; 4995 char valstr[MAXNAMELEN]; 4996 int err = EINVAL; 4997 uint_t strsize; 4998 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4999 5000 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5001 "==> nxge_get_priv_prop: property %s", pr_name)); 5002 5003 /* function number */ 5004 if (strcmp(pr_name, "_function_number") == 0) { 5005 if (is_default) 5006 return (ENOTSUP); 5007 *perm = MAC_PROP_PERM_READ; 5008 (void) snprintf(valstr, sizeof (valstr), "%d", 5009 nxgep->function_num); 5010 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5011 "==> nxge_get_priv_prop: name %s " 5012 "(value %d valstr %s)", 5013 pr_name, nxgep->function_num, valstr)); 5014 5015 err = 0; 5016 goto done; 5017 } 5018 5019 /* Neptune firmware version */ 5020 if (strcmp(pr_name, "_fw_version") == 0) { 5021 if (is_default) 5022 return (ENOTSUP); 5023 *perm = MAC_PROP_PERM_READ; 5024 (void) snprintf(valstr, sizeof (valstr), "%s", 5025 nxgep->vpd_info.ver); 5026 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5027 "==> nxge_get_priv_prop: name %s " 5028 "(value %s valstr %s)", 5029 pr_name, nxgep->vpd_info.ver, valstr)); 5030 5031 err = 0; 5032 goto done; 5033 } 5034 5035 /* port PHY mode */ 5036 if (strcmp(pr_name, "_port_mode") == 0) { 5037 if (is_default) 5038 return (ENOTSUP); 5039 *perm = MAC_PROP_PERM_READ; 5040 switch (nxgep->mac.portmode) { 5041 case PORT_1G_COPPER: 5042 (void) snprintf(valstr, sizeof (valstr), "1G copper %s", 5043 nxgep->hot_swappable_phy ? 5044 "[hot swappable]" : ""); 5045 break; 5046 case PORT_1G_FIBER: 5047 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s", 5048 nxgep->hot_swappable_phy ? 5049 "[hot swappable]" : ""); 5050 break; 5051 case PORT_10G_COPPER: 5052 (void) snprintf(valstr, sizeof (valstr), 5053 "10G copper %s", 5054 nxgep->hot_swappable_phy ? 5055 "[hot swappable]" : ""); 5056 break; 5057 case PORT_10G_FIBER: 5058 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s", 5059 nxgep->hot_swappable_phy ? 5060 "[hot swappable]" : ""); 5061 break; 5062 case PORT_10G_SERDES: 5063 (void) snprintf(valstr, sizeof (valstr), 5064 "10G serdes %s", nxgep->hot_swappable_phy ? 5065 "[hot swappable]" : ""); 5066 break; 5067 case PORT_1G_SERDES: 5068 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s", 5069 nxgep->hot_swappable_phy ? 5070 "[hot swappable]" : ""); 5071 break; 5072 case PORT_1G_TN1010: 5073 (void) snprintf(valstr, sizeof (valstr), 5074 "1G TN1010 copper %s", nxgep->hot_swappable_phy ? 5075 "[hot swappable]" : ""); 5076 break; 5077 case PORT_10G_TN1010: 5078 (void) snprintf(valstr, sizeof (valstr), 5079 "10G TN1010 copper %s", nxgep->hot_swappable_phy ? 5080 "[hot swappable]" : ""); 5081 break; 5082 case PORT_1G_RGMII_FIBER: 5083 (void) snprintf(valstr, sizeof (valstr), 5084 "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
5085 "[hot swappable]" : ""); 5086 break; 5087 case PORT_HSP_MODE: 5088 (void) snprintf(valstr, sizeof (valstr), 5089 "phy not present[hot swappable]"); 5090 break; 5091 default: 5092 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5093 nxgep->hot_swappable_phy ? 5094 "[hot swappable]" : ""); 5095 break; 5096 } 5097 5098 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5099 "==> nxge_get_priv_prop: name %s (value %s)", 5100 pr_name, valstr)); 5101 5102 err = 0; 5103 goto done; 5104 } 5105 5106 /* Hot swappable PHY */ 5107 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5108 if (is_default) 5109 return (ENOTSUP); 5110 *perm = MAC_PROP_PERM_READ; 5111 (void) snprintf(valstr, sizeof (valstr), "%s", 5112 nxgep->hot_swappable_phy ? 5113 "yes" : "no"); 5114 5115 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5116 "==> nxge_get_priv_prop: name %s " 5117 "(value %d valstr %s)", 5118 pr_name, nxgep->hot_swappable_phy, valstr)); 5119 5120 err = 0; 5121 goto done; 5122 } 5123 5124 5125 /* Receive Interrupt Blanking Parameters */ 5126 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5127 err = 0; 5128 if (is_default) { 5129 (void) snprintf(valstr, sizeof (valstr), 5130 "%d", RXDMA_RCR_TO_DEFAULT); 5131 goto done; 5132 } 5133 5134 (void) snprintf(valstr, sizeof (valstr), "%d", 5135 nxgep->intr_timeout); 5136 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5137 "==> nxge_get_priv_prop: name %s (value %d)", 5138 pr_name, 5139 (uint32_t)nxgep->intr_timeout)); 5140 goto done; 5141 } 5142 5143 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5144 err = 0; 5145 if (is_default) { 5146 (void) snprintf(valstr, sizeof (valstr), 5147 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5148 goto done; 5149 } 5150 (void) snprintf(valstr, sizeof (valstr), "%d", 5151 nxgep->intr_threshold); 5152 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5153 "==> nxge_get_priv_prop: name %s (value %d)", 5154 pr_name, (uint32_t)nxgep->intr_threshold)); 5155 5156 goto done; 5157 } 5158 5159 /* Classification and Load Distribution Configuration */ 5160 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5161 if (is_default) { 5162 (void) snprintf(valstr, sizeof (valstr), "%x", 5163 NXGE_CLASS_FLOW_GEN_SERVER); 5164 err = 0; 5165 goto done; 5166 } 5167 err = nxge_dld_get_ip_opt(nxgep, 5168 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5169 5170 (void) snprintf(valstr, sizeof (valstr), "%x", 5171 (int)param_arr[param_class_opt_ipv4_tcp].value); 5172 5173 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5174 "==> nxge_get_priv_prop: %s", valstr)); 5175 goto done; 5176 } 5177 5178 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5179 if (is_default) { 5180 (void) snprintf(valstr, sizeof (valstr), "%x", 5181 NXGE_CLASS_FLOW_GEN_SERVER); 5182 err = 0; 5183 goto done; 5184 } 5185 err = nxge_dld_get_ip_opt(nxgep, 5186 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5187 5188 (void) snprintf(valstr, sizeof (valstr), "%x", 5189 (int)param_arr[param_class_opt_ipv4_udp].value); 5190 5191 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5192 "==> nxge_get_priv_prop: %s", valstr)); 5193 goto done; 5194 } 5195 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5196 if (is_default) { 5197 (void) snprintf(valstr, sizeof (valstr), "%x", 5198 NXGE_CLASS_FLOW_GEN_SERVER); 5199 err = 0; 5200 goto done; 5201 } 5202 err = nxge_dld_get_ip_opt(nxgep, 5203 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 5204 5205 (void) snprintf(valstr, sizeof (valstr), "%x", 5206 (int)param_arr[param_class_opt_ipv4_ah].value); 5207 5208 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5209 "==> nxge_get_priv_prop: %s", valstr)); 5210 goto done; 5211 } 5212 5213 if (strcmp(pr_name, "_class_opt_ipv4_sctp") 
== 0) { 5214 if (is_default) { 5215 (void) snprintf(valstr, sizeof (valstr), "%x", 5216 NXGE_CLASS_FLOW_GEN_SERVER); 5217 err = 0; 5218 goto done; 5219 } 5220 err = nxge_dld_get_ip_opt(nxgep, 5221 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]); 5222 5223 (void) snprintf(valstr, sizeof (valstr), "%x", 5224 (int)param_arr[param_class_opt_ipv4_sctp].value); 5225 5226 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5227 "==> nxge_get_priv_prop: %s", valstr)); 5228 goto done; 5229 } 5230 5231 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5232 if (is_default) { 5233 (void) snprintf(valstr, sizeof (valstr), "%x", 5234 NXGE_CLASS_FLOW_GEN_SERVER); 5235 err = 0; 5236 goto done; 5237 } 5238 err = nxge_dld_get_ip_opt(nxgep, 5239 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]); 5240 5241 (void) snprintf(valstr, sizeof (valstr), "%x", 5242 (int)param_arr[param_class_opt_ipv6_tcp].value); 5243 5244 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5245 "==> nxge_get_priv_prop: %s", valstr)); 5246 goto done; 5247 } 5248 5249 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5250 if (is_default) { 5251 (void) snprintf(valstr, sizeof (valstr), "%x", 5252 NXGE_CLASS_FLOW_GEN_SERVER); 5253 err = 0; 5254 goto done; 5255 } 5256 err = nxge_dld_get_ip_opt(nxgep, 5257 (caddr_t)&param_arr[param_class_opt_ipv6_udp]); 5258 5259 (void) snprintf(valstr, sizeof (valstr), "%x", 5260 (int)param_arr[param_class_opt_ipv6_udp].value); 5261 5262 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5263 "==> nxge_get_priv_prop: %s", valstr)); 5264 goto done; 5265 } 5266 5267 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5268 if (is_default) { 5269 (void) snprintf(valstr, sizeof (valstr), "%x", 5270 NXGE_CLASS_FLOW_GEN_SERVER); 5271 err = 0; 5272 goto done; 5273 } 5274 err = nxge_dld_get_ip_opt(nxgep, 5275 (caddr_t)&param_arr[param_class_opt_ipv6_ah]); 5276 5277 (void) snprintf(valstr, sizeof (valstr), "%x", 5278 (int)param_arr[param_class_opt_ipv6_ah].value); 5279 5280 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5281 "==> nxge_get_priv_prop: %s", valstr)); 5282 goto done; 5283 } 5284 5285 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5286 if (is_default) { 5287 (void) snprintf(valstr, sizeof (valstr), "%x", 5288 NXGE_CLASS_FLOW_GEN_SERVER); 5289 err = 0; 5290 goto done; 5291 } 5292 err = nxge_dld_get_ip_opt(nxgep, 5293 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]); 5294 5295 (void) snprintf(valstr, sizeof (valstr), "%x", 5296 (int)param_arr[param_class_opt_ipv6_sctp].value); 5297 5298 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5299 "==> nxge_get_priv_prop: %s", valstr)); 5300 goto done; 5301 } 5302 5303 /* Software LSO */ 5304 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5305 if (is_default) { 5306 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5307 err = 0; 5308 goto done; 5309 } 5310 (void) snprintf(valstr, sizeof (valstr), 5311 "%d", nxgep->soft_lso_enable); 5312 err = 0; 5313 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5314 "==> nxge_get_priv_prop: name %s (value %d)", 5315 pr_name, nxgep->soft_lso_enable)); 5316 5317 goto done; 5318 } 5319 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5320 err = 0; 5321 if (is_default || 5322 nxgep->param_arr[param_anar_10gfdx].value != 0) { 5323 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5324 goto done; 5325 } else { 5326 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5327 goto done; 5328 } 5329 } 5330 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5331 err = 0; 5332 if (is_default || 5333 nxgep->param_arr[param_anar_pause].value != 0) { 5334 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5335 goto done; 5336 } else { 5337 (void) snprintf(valstr,
sizeof (valstr), "%d", 0); 5338 goto done; 5339 } 5340 } 5341 5342 done: 5343 if (err == 0) { 5344 strsize = (uint_t)strlen(valstr); 5345 if (pr_valsize < strsize) { 5346 err = ENOBUFS; 5347 } else { 5348 (void) strlcpy(pr_val, valstr, pr_valsize); 5349 } 5350 } 5351 5352 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5353 "<== nxge_get_priv_prop: return %d", err)); 5354 return (err); 5355 } 5356 5357 /* 5358 * Module loading and removing entry points. 5359 */ 5360 5361 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach, 5362 nodev, NULL, D_MP, NULL, nxge_quiesce); 5363 5364 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5365 5366 /* 5367 * Module linkage information for the kernel. 5368 */ 5369 static struct modldrv nxge_modldrv = { 5370 &mod_driverops, 5371 NXGE_DESC_VER, 5372 &nxge_dev_ops 5373 }; 5374 5375 static struct modlinkage modlinkage = { 5376 MODREV_1, (void *) &nxge_modldrv, NULL 5377 }; 5378 5379 int 5380 _init(void) 5381 { 5382 int status; 5383 5384 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 5385 5386 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5387 5388 mac_init_ops(&nxge_dev_ops, "nxge"); 5389 5390 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5391 if (status != 0) { 5392 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5393 "failed to init device soft state")); 5394 goto _init_exit; 5395 } 5396 5397 status = mod_install(&modlinkage); 5398 if (status != 0) { 5399 ddi_soft_state_fini(&nxge_list); 5400 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5401 goto _init_exit; 5402 } 5403 5404 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5405 5406 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5407 return (status); 5408 5409 _init_exit: 5410 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5411 MUTEX_DESTROY(&nxgedebuglock); 5412 return (status); 5413 } 5414 5415 int 5416 _fini(void) 5417 { 5418 int status; 5419 5420 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5421 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5422 5423 if (nxge_mblks_pending) 5424 return (EBUSY); 5425 5426 status = mod_remove(&modlinkage); 5427 if (status != DDI_SUCCESS) { 5428 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5429 "Module removal failed 0x%08x", 5430 status)); 5431 goto _fini_exit; 5432 } 5433 5434 mac_fini_ops(&nxge_dev_ops); 5435 5436 ddi_soft_state_fini(&nxge_list); 5437 5438 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5439 5440 MUTEX_DESTROY(&nxge_common_lock); 5441 MUTEX_DESTROY(&nxgedebuglock); 5442 return (status); 5443 5444 _fini_exit: 5445 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5446 return (status); 5447 } 5448 5449 int 5450 _info(struct modinfo *modinfop) 5451 { 5452 int status; 5453 5454 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5455 status = mod_info(&modlinkage, modinfop); 5456 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5457 5458 return (status); 5459 } 5460 5461 /*ARGSUSED*/ 5462 static int 5463 nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5464 { 5465 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5466 p_nxge_t nxgep = rhp->nxgep; 5467 uint32_t channel; 5468 p_tx_ring_t ring; 5469 5470 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5471 ring = nxgep->tx_rings->rings[channel]; 5472 5473 MUTEX_ENTER(&ring->lock); 5474 ring->tx_ring_handle = rhp->ring_handle; 5475 MUTEX_EXIT(&ring->lock); 5476 5477 return (0); 5478 } 5479 5480 static void 5481 
nxge_tx_ring_stop(mac_ring_driver_t rdriver) 5482 { 5483 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5484 p_nxge_t nxgep = rhp->nxgep; 5485 uint32_t channel; 5486 p_tx_ring_t ring; 5487 5488 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5489 ring = nxgep->tx_rings->rings[channel]; 5490 5491 MUTEX_ENTER(&ring->lock); 5492 ring->tx_ring_handle = (mac_ring_handle_t)NULL; 5493 MUTEX_EXIT(&ring->lock); 5494 } 5495 5496 static int 5497 nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5498 { 5499 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5500 p_nxge_t nxgep = rhp->nxgep; 5501 uint32_t channel; 5502 p_rx_rcr_ring_t ring; 5503 int i; 5504 5505 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index; 5506 ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 5507 5508 MUTEX_ENTER(&ring->lock); 5509 5510 if (nxgep->rx_channel_started[channel] == B_TRUE) { 5511 MUTEX_EXIT(&ring->lock); 5512 return (0); 5513 } 5514 5515 /* set rcr_ring */ 5516 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) { 5517 if ((nxgep->ldgvp->ldvp[i].is_rxdma == 1) && 5518 (nxgep->ldgvp->ldvp[i].channel == channel)) { 5519 ring->ldvp = &nxgep->ldgvp->ldvp[i]; 5520 ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp; 5521 } 5522 } 5523 5524 nxgep->rx_channel_started[channel] = B_TRUE; 5525 ring->rcr_mac_handle = rhp->ring_handle; 5526 ring->rcr_gen_num = mr_gen_num; 5527 MUTEX_EXIT(&ring->lock); 5528 5529 return (0); 5530 } 5531 5532 static void 5533 nxge_rx_ring_stop(mac_ring_driver_t rdriver) 5534 { 5535 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5536 p_nxge_t nxgep = rhp->nxgep; 5537 uint32_t channel; 5538 p_rx_rcr_ring_t ring; 5539 5540 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index; 5541 ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 5542 5543 MUTEX_ENTER(&ring->lock); 5544 nxgep->rx_channel_started[channel] = B_FALSE; 5545 ring->rcr_mac_handle = NULL; 5546 MUTEX_EXIT(&ring->lock); 5547 } 5548 5549 /* 5550 * Callback function for MAC layer to register all rings. 5551 */ 5552 static void 5553 nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index, 5554 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh) 5555 { 5556 p_nxge_t nxgep = (p_nxge_t)arg; 5557 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 5558 5559 NXGE_DEBUG_MSG((nxgep, TX_CTL, 5560 "==> nxge_fill_ring 0x%x index %d", rtype, index)); 5561 5562 switch (rtype) { 5563 case MAC_RING_TYPE_TX: { 5564 p_nxge_ring_handle_t rhandlep; 5565 5566 NXGE_DEBUG_MSG((nxgep, TX_CTL, 5567 "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d", 5568 rtype, index, p_cfgp->tdc.count)); 5569 5570 ASSERT((index >= 0) && (index < p_cfgp->tdc.count)); 5571 rhandlep = &nxgep->tx_ring_handles[index]; 5572 rhandlep->nxgep = nxgep; 5573 rhandlep->index = index; 5574 rhandlep->ring_handle = rh; 5575 5576 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5577 infop->mri_start = nxge_tx_ring_start; 5578 infop->mri_stop = nxge_tx_ring_stop; 5579 infop->mri_tx = nxge_tx_ring_send; 5580 5581 break; 5582 } 5583 case MAC_RING_TYPE_RX: { 5584 p_nxge_ring_handle_t rhandlep; 5585 int nxge_rindex; 5586 mac_intr_t nxge_mac_intr; 5587 5588 NXGE_DEBUG_MSG((nxgep, RX_CTL, 5589 "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d", 5590 rtype, index, p_cfgp->max_rdcs)); 5591 5592 /* 5593 * 'index' is the ring index within the group. 5594 * Find the ring index in the nxge instance.
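 * For example (hypothetical layout), if group 1 owns rings 4-7,
 * (rg_index 1, index 2) resolves to nxge_rindex 6.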
5595 */ 5596 nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index); 5597 5598 ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs)); 5599 rhandlep = &nxgep->rx_ring_handles[nxge_rindex]; 5600 rhandlep->nxgep = nxgep; 5601 rhandlep->index = nxge_rindex; 5602 rhandlep->ring_handle = rh; 5603 5604 /* 5605 * Entry points to enable interrupts (disable polling) and 5606 * disable interrupts (enable polling). 5607 */ 5608 nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep; 5609 nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll; 5610 nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll; 5611 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5612 infop->mri_start = nxge_rx_ring_start; 5613 infop->mri_stop = nxge_rx_ring_stop; 5614 infop->mri_intr = nxge_mac_intr; /* ??? */ 5615 infop->mri_poll = nxge_rx_poll; 5616 5617 break; 5618 } 5619 default: 5620 break; 5621 } 5622 5623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x", 5624 rtype)); 5625 } 5626 5627 static void 5628 nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5629 mac_ring_type_t type) 5630 { 5631 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5632 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5633 nxge_t *nxge; 5634 nxge_grp_t *grp; 5635 nxge_rdc_grp_t *rdc_grp; 5636 uint16_t channel; /* device-wise ring id */ 5637 int dev_gindex; 5638 int rv; 5639 5640 nxge = rgroup->nxgep; 5641 5642 switch (type) { 5643 case MAC_RING_TYPE_TX: 5644 /* 5645 * nxge_grp_dc_add takes a channel number which is a 5646 * "device" ring ID. 5647 */ 5648 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5649 5650 /* 5651 * Remove the ring from the default group 5652 */ 5653 if (rgroup->gindex != 0) { 5654 (void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5655 } 5656 5657 /* 5658 * nxge->tx_set.group[] is an array of groups indexed by 5659 * a "port" group ID. 5660 */ 5661 grp = nxge->tx_set.group[rgroup->gindex]; 5662 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5663 if (rv != 0) { 5664 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5665 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5666 } 5667 break; 5668 5669 case MAC_RING_TYPE_RX: 5670 /* 5671 * nxge->rx_set.group[] is an array of groups indexed by 5672 * a "port" group ID. 5673 */ 5674 grp = nxge->rx_set.group[rgroup->gindex]; 5675 5676 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5677 rgroup->gindex; 5678 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5679 5680 /* 5681 * nxge_grp_dc_add takes a channel number which is a 5682 * "device" ring ID.
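 * For RX, that device-wide id is start_rdc plus the ring's
 * index, as computed below.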
5683 */ 5684 channel = nxge->pt_config.hw_config.start_rdc + rhandle->index; 5685 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel); 5686 if (rv != 0) { 5687 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5688 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5689 } 5690 5691 rdc_grp->map |= (1 << channel); 5692 rdc_grp->max_rdcs++; 5693 5694 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl); 5695 break; 5696 } 5697 } 5698 5699 static void 5700 nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5701 mac_ring_type_t type) 5702 { 5703 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5704 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5705 nxge_t *nxge; 5706 uint16_t channel; /* device-wise ring id */ 5707 nxge_rdc_grp_t *rdc_grp; 5708 int dev_gindex; 5709 5710 nxge = rgroup->nxgep; 5711 5712 switch (type) { 5713 case MAC_RING_TYPE_TX: 5714 dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid + 5715 rgroup->gindex; 5716 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5717 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5718 5719 /* 5720 * Add the ring back to the default group 5721 */ 5722 if (rgroup->gindex != 0) { 5723 nxge_grp_t *grp; 5724 grp = nxge->tx_set.group[0]; 5725 (void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5726 } 5727 break; 5728 5729 case MAC_RING_TYPE_RX: 5730 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5731 rgroup->gindex; 5732 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5733 channel = rdc_grp->start_rdc + rhandle->index; 5734 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 5735 5736 rdc_grp->map &= ~(1 << channel); 5737 rdc_grp->max_rdcs--; 5738 5739 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl); 5740 break; 5741 } 5742 } 5743 5744 5745 /*ARGSUSED*/ 5746 static nxge_status_t 5747 nxge_add_intrs(p_nxge_t nxgep) 5748 { 5749 5750 int intr_types; 5751 int type = 0; 5752 int ddi_status = DDI_SUCCESS; 5753 nxge_status_t status = NXGE_OK; 5754 5755 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 5756 5757 nxgep->nxge_intr_type.intr_registered = B_FALSE; 5758 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 5759 nxgep->nxge_intr_type.msi_intx_cnt = 0; 5760 nxgep->nxge_intr_type.intr_added = 0; 5761 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 5762 nxgep->nxge_intr_type.intr_type = 0; 5763 5764 if (nxgep->niu_type == N2_NIU) { 5765 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5766 } else if (nxge_msi_enable) { 5767 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5768 } 5769 5770 /* Get the supported interrupt types */ 5771 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 5772 != DDI_SUCCESS) { 5773 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 5774 "ddi_intr_get_supported_types failed: status 0x%08x", 5775 ddi_status)); 5776 return (NXGE_ERROR | NXGE_DDI_FAILED); 5777 } 5778 nxgep->nxge_intr_type.intr_types = intr_types; 5779 5780 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5781 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5782 5783 /* 5784 * Solaris MSIX is not supported yet. use MSI for now. 
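 *
 * (Usage sketch, following the /etc/system tunable conventions
 * above: "set nxge:nxge_msi_enable = 1" requests MSI where
 * supported, per the mapping below.)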
5785 * nxge_msi_enable: 5786 * 1 - MSI, 2 - MSI-X, others - FIXED 5787 */ 5788 switch (nxge_msi_enable) { 5789 default: 5790 type = DDI_INTR_TYPE_FIXED; 5791 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5792 "use fixed (intx emulation) type %08x", 5793 type)); 5794 break; 5795 5796 case 2: 5797 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5798 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5799 if (intr_types & DDI_INTR_TYPE_MSIX) { 5800 type = DDI_INTR_TYPE_MSIX; 5801 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5802 "ddi_intr_get_supported_types: MSIX 0x%08x", 5803 type)); 5804 } else if (intr_types & DDI_INTR_TYPE_MSI) { 5805 type = DDI_INTR_TYPE_MSI; 5806 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5807 "ddi_intr_get_supported_types: MSI 0x%08x", 5808 type)); 5809 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5810 type = DDI_INTR_TYPE_FIXED; 5811 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5812 "ddi_intr_get_supported_types: FIXED 0x%08x", 5813 type)); 5814 } 5815 break; 5816 5817 case 1: 5818 if (intr_types & DDI_INTR_TYPE_MSI) { 5819 type = DDI_INTR_TYPE_MSI; 5820 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5821 "ddi_intr_get_supported_types: MSI 0x%08x", 5822 type)); 5823 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 5824 type = DDI_INTR_TYPE_MSIX; 5825 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5826 "ddi_intr_get_supported_types: MSIX 0x%08x", 5827 type)); 5828 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5829 type = DDI_INTR_TYPE_FIXED; 5830 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5831 "ddi_intr_get_supported_types: FIXED 0x%08x", 5832 type)); 5833 } 5834 } 5835 5836 nxgep->nxge_intr_type.intr_type = type; 5837 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 5838 type == DDI_INTR_TYPE_FIXED) && 5839 nxgep->nxge_intr_type.niu_msi_enable) { 5840 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 5841 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5842 " nxge_add_intrs: " 5843 " nxge_add_intrs_adv failed: status 0x%08x", 5844 status)); 5845 return (status); 5846 } else { 5847 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5848 "interrupts registered : type %d", type)); 5849 nxgep->nxge_intr_type.intr_registered = B_TRUE; 5850 5851 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5852 "\nAdded advanced nxge add_intr_adv " 5853 "intr type 0x%x\n", type)); 5854 5855 return (status); 5856 } 5857 } 5858 5859 if (!nxgep->nxge_intr_type.intr_registered) { 5860 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 5861 "failed to register interrupts")); 5862 return (NXGE_ERROR | NXGE_DDI_FAILED); 5863 } 5864 5865 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 5866 return (status); 5867 } 5868 5869 static nxge_status_t 5870 nxge_add_intrs_adv(p_nxge_t nxgep) 5871 { 5872 int intr_type; 5873 p_nxge_intr_t intrp; 5874 5875 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 5876 5877 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5878 intr_type = intrp->intr_type; 5879 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 5880 intr_type)); 5881 5882 switch (intr_type) { 5883 case DDI_INTR_TYPE_MSI: /* 0x2 */ 5884 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 5885 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 5886 5887 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 5888 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 5889 5890 default: 5891 return (NXGE_ERROR); 5892 } 5893 } 5894 5895 5896 /*ARGSUSED*/ 5897 static nxge_status_t 5898
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 5899 { 5900 dev_info_t *dip = nxgep->dip; 5901 p_nxge_ldg_t ldgp; 5902 p_nxge_intr_t intrp; 5903 uint_t *inthandler; 5904 void *arg1, *arg2; 5905 int behavior; 5906 int nintrs, navail, nrequest; 5907 int nactual, nrequired; 5908 int inum = 0; 5909 int x, y; 5910 int ddi_status = DDI_SUCCESS; 5911 nxge_status_t status = NXGE_OK; 5912 5913 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 5914 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5915 intrp->start_inum = 0; 5916 5917 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 5918 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 5919 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5920 "ddi_intr_get_nintrs() failed, status: 0x%x, " 5921 "nintrs: %d", ddi_status, nintrs)); 5922 return (NXGE_ERROR | NXGE_DDI_FAILED); 5923 } 5924 5925 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 5926 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 5927 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5928 "ddi_intr_get_navail() failed, status: 0x%x, " 5929 "navail: %d", ddi_status, navail)); 5930 return (NXGE_ERROR | NXGE_DDI_FAILED); 5931 } 5932 5933 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5934 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 5935 nintrs, navail)); 5936 5937 /* PSARC/2007/453 MSI-X interrupt limit override */ 5938 if (int_type == DDI_INTR_TYPE_MSIX) { 5939 nrequest = nxge_create_msi_property(nxgep); 5940 if (nrequest < navail) { 5941 navail = nrequest; 5942 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5943 "nxge_add_intrs_adv_type: nintrs %d " 5944 "navail %d (nrequest %d)", 5945 nintrs, navail, nrequest)); 5946 } 5947 } 5948 5949 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 5950 /* The MSI count must be a power of 2; round navail down */ 5951 if ((navail & 16) == 16) { 5952 navail = 16; 5953 } else if ((navail & 8) == 8) { 5954 navail = 8; 5955 } else if ((navail & 4) == 4) { 5956 navail = 4; 5957 } else if ((navail & 2) == 2) { 5958 navail = 2; 5959 } else { 5960 navail = 1; 5961 } 5962 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5963 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 5964 "navail %d", nintrs, navail)); 5965 } 5966 5967 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ?
DDI_INTR_ALLOC_STRICT : 5968 DDI_INTR_ALLOC_NORMAL); 5969 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 5970 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 5971 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 5972 navail, &nactual, behavior); 5973 if (ddi_status != DDI_SUCCESS || nactual == 0) { 5974 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5975 " ddi_intr_alloc() failed: %d", 5976 ddi_status)); 5977 kmem_free(intrp->htable, intrp->intr_size); 5978 return (NXGE_ERROR | NXGE_DDI_FAILED); 5979 } 5980 5981 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 5982 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 5983 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5984 " ddi_intr_get_pri() failed: %d", 5985 ddi_status)); 5986 /* Free already allocated interrupts */ 5987 for (y = 0; y < nactual; y++) { 5988 (void) ddi_intr_free(intrp->htable[y]); 5989 } 5990 5991 kmem_free(intrp->htable, intrp->intr_size); 5992 return (NXGE_ERROR | NXGE_DDI_FAILED); 5993 } 5994 5995 nrequired = 0; 5996 switch (nxgep->niu_type) { 5997 default: 5998 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 5999 break; 6000 6001 case N2_NIU: 6002 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 6003 break; 6004 } 6005 6006 if (status != NXGE_OK) { 6007 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6008 "nxge_add_intrs_adv_type: nxge_ldgv_init " 6009 "failed: 0x%x", status)); 6010 /* Free already allocated interrupts */ 6011 for (y = 0; y < nactual; y++) { 6012 (void) ddi_intr_free(intrp->htable[y]); 6013 } 6014 6015 kmem_free(intrp->htable, intrp->intr_size); 6016 return (status); 6017 } 6018 6019 ldgp = nxgep->ldgvp->ldgp; 6020 for (x = 0; x < nrequired; x++, ldgp++) { 6021 ldgp->vector = (uint8_t)x; 6022 ldgp->intdata = SID_DATA(ldgp->func, x); 6023 arg1 = ldgp->ldvp; 6024 arg2 = nxgep; 6025 if (ldgp->nldvs == 1) { 6026 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 6027 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6028 "nxge_add_intrs_adv_type: " 6029 "arg1 0x%x arg2 0x%x: " 6030 "1-1 int handler (entry %d intdata 0x%x)\n", 6031 arg1, arg2, 6032 x, ldgp->intdata)); 6033 } else if (ldgp->nldvs > 1) { 6034 inthandler = (uint_t *)ldgp->sys_intr_handler; 6035 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6036 "nxge_add_intrs_adv_type: " 6037 "arg1 0x%x arg2 0x%x: " 6038 "nldvs %d int handler " 6039 "(entry %d intdata 0x%x)\n", 6040 arg1, arg2, 6041 ldgp->nldvs, x, ldgp->intdata)); 6042 } 6043 6044 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6045 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 6046 "htable 0x%llx", x, intrp->htable[x])); 6047 6048 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6049 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6050 != DDI_SUCCESS) { 6051 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6052 "==> nxge_add_intrs_adv_type: failed #%d " 6053 "status 0x%x", x, ddi_status)); 6054 for (y = 0; y < intrp->intr_added; y++) { 6055 (void) ddi_intr_remove_handler( 6056 intrp->htable[y]); 6057 } 6058 /* Free already allocated intr */ 6059 for (y = 0; y < nactual; y++) { 6060 (void) ddi_intr_free(intrp->htable[y]); 6061 } 6062 kmem_free(intrp->htable, intrp->intr_size); 6063 6064 (void) nxge_ldgv_uninit(nxgep); 6065 6066 return (NXGE_ERROR | NXGE_DDI_FAILED); 6067 } 6068 intrp->intr_added++; 6069 } 6070 6071 intrp->msi_intx_cnt = nactual; 6072 6073 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6074 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 6075 navail, nactual, 6076 intrp->msi_intx_cnt, 6077 intrp->intr_added)); 6078 6079 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6080 6081 (void)
nxge_intr_ldgv_init(nxgep); 6082 6083 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type")); 6084 6085 return (status); 6086 } 6087 6088 /*ARGSUSED*/ 6089 static nxge_status_t 6090 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type) 6091 { 6092 dev_info_t *dip = nxgep->dip; 6093 p_nxge_ldg_t ldgp; 6094 p_nxge_intr_t intrp; 6095 uint_t *inthandler; 6096 void *arg1, *arg2; 6097 int behavior; 6098 int nintrs, navail; 6099 int nactual, nrequired; 6100 int inum = 0; 6101 int x, y; 6102 int ddi_status = DDI_SUCCESS; 6103 nxge_status_t status = NXGE_OK; 6104 6105 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix")); 6106 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6107 intrp->start_inum = 0; 6108 6109 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 6110 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 6111 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6112 "ddi_intr_get_nintrs() failed, status: 0x%x, " 6113 "nintrs: %d", ddi_status, nintrs)); 6114 return (NXGE_ERROR | NXGE_DDI_FAILED); 6115 } 6116 6117 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 6118 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 6119 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6120 "ddi_intr_get_navail() failed, status: 0x%x, " 6121 "navail: %d", ddi_status, navail)); 6122 return (NXGE_ERROR | NXGE_DDI_FAILED); 6123 } 6124 6125 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6126 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 6127 nintrs, navail)); 6128 6129 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 6130 DDI_INTR_ALLOC_NORMAL); 6131 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 6132 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 6133 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 6134 navail, &nactual, behavior); 6135 if (ddi_status != DDI_SUCCESS || nactual == 0) { 6136 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6137 " ddi_intr_alloc() failed: %d", 6138 ddi_status)); 6139 kmem_free(intrp->htable, intrp->intr_size); 6140 return (NXGE_ERROR | NXGE_DDI_FAILED); 6141 } 6142 6143 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 6144 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 6145 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6146 " ddi_intr_get_pri() failed: %d", 6147 ddi_status)); 6148 /* Free already allocated interrupts */ 6149 for (y = 0; y < nactual; y++) { 6150 (void) ddi_intr_free(intrp->htable[y]); 6151 } 6152 6153 kmem_free(intrp->htable, intrp->intr_size); 6154 return (NXGE_ERROR | NXGE_DDI_FAILED); 6155 } 6156 6157 nrequired = 0; 6158 switch (nxgep->niu_type) { 6159 default: 6160 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 6161 break; 6162 6163 case N2_NIU: 6164 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 6165 break; 6166 } 6167 6168 if (status != NXGE_OK) { 6169 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6170 "nxge_add_intrs_adv_type_fix: nxge_ldgv_init " 6171 "failed: 0x%x", status)); 6172 /* Free already allocated interrupts */ 6173 for (y = 0; y < nactual; y++) { 6174 (void) ddi_intr_free(intrp->htable[y]); 6175 } 6176 6177 kmem_free(intrp->htable, intrp->intr_size); 6178 return (status); 6179 } 6180 6181 ldgp = nxgep->ldgvp->ldgp; 6182 for (x = 0; x < nrequired; x++, ldgp++) { 6183 ldgp->vector = (uint8_t)x; 6184 if (nxgep->niu_type != N2_NIU) { 6185 ldgp->intdata = SID_DATA(ldgp->func, x); 6186 } 6187 6188 arg1 = ldgp->ldvp; 6189 arg2 = nxgep; 6190 if (ldgp->nldvs == 1) { 6191 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 6192 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6193 "nxge_add_intrs_adv_type_fix: " 6194
"1-1 int handler(%d) ldg %d ldv %d " 6195 "arg1 $%p arg2 $%p\n", 6196 x, ldgp->ldg, ldgp->ldvp->ldv, 6197 arg1, arg2)); 6198 } else if (ldgp->nldvs > 1) { 6199 inthandler = (uint_t *)ldgp->sys_intr_handler; 6200 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6201 "nxge_add_intrs_adv_type_fix: " 6202 "shared ldv %d int handler(%d) ldv %d ldg %d" 6203 "arg1 0x%016llx arg2 0x%016llx\n", 6204 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6205 arg1, arg2)); 6206 } 6207 6208 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6209 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6210 != DDI_SUCCESS) { 6211 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6212 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6213 "status 0x%x", x, ddi_status)); 6214 for (y = 0; y < intrp->intr_added; y++) { 6215 (void) ddi_intr_remove_handler( 6216 intrp->htable[y]); 6217 } 6218 for (y = 0; y < nactual; y++) { 6219 (void) ddi_intr_free(intrp->htable[y]); 6220 } 6221 /* Free already allocated intr */ 6222 kmem_free(intrp->htable, intrp->intr_size); 6223 6224 (void) nxge_ldgv_uninit(nxgep); 6225 6226 return (NXGE_ERROR | NXGE_DDI_FAILED); 6227 } 6228 intrp->intr_added++; 6229 } 6230 6231 intrp->msi_intx_cnt = nactual; 6232 6233 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6234 6235 status = nxge_intr_ldgv_init(nxgep); 6236 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6237 6238 return (status); 6239 } 6240 6241 static void 6242 nxge_remove_intrs(p_nxge_t nxgep) 6243 { 6244 int i, inum; 6245 p_nxge_intr_t intrp; 6246 6247 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6248 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6249 if (!intrp->intr_registered) { 6250 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6251 "<== nxge_remove_intrs: interrupts not registered")); 6252 return; 6253 } 6254 6255 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 6256 6257 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6258 (void) ddi_intr_block_disable(intrp->htable, 6259 intrp->intr_added); 6260 } else { 6261 for (i = 0; i < intrp->intr_added; i++) { 6262 (void) ddi_intr_disable(intrp->htable[i]); 6263 } 6264 } 6265 6266 for (inum = 0; inum < intrp->intr_added; inum++) { 6267 if (intrp->htable[inum]) { 6268 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6269 } 6270 } 6271 6272 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6273 if (intrp->htable[inum]) { 6274 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6275 "nxge_remove_intrs: ddi_intr_free inum %d " 6276 "msi_intx_cnt %d intr_added %d", 6277 inum, 6278 intrp->msi_intx_cnt, 6279 intrp->intr_added)); 6280 6281 (void) ddi_intr_free(intrp->htable[inum]); 6282 } 6283 } 6284 6285 kmem_free(intrp->htable, intrp->intr_size); 6286 intrp->intr_registered = B_FALSE; 6287 intrp->intr_enabled = B_FALSE; 6288 intrp->msi_intx_cnt = 0; 6289 intrp->intr_added = 0; 6290 6291 (void) nxge_ldgv_uninit(nxgep); 6292 6293 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6294 "#msix-request"); 6295 6296 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6297 } 6298 6299 /*ARGSUSED*/ 6300 static void 6301 nxge_intrs_enable(p_nxge_t nxgep) 6302 { 6303 p_nxge_intr_t intrp; 6304 int i; 6305 int status; 6306 6307 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6308 6309 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6310 6311 if (!intrp->intr_registered) { 6312 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6313 "interrupts are not registered")); 6314 return; 6315 } 6316 6317 if (intrp->intr_enabled) { 6318 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6319 "<== 
nxge_intrs_enable: already enabled")); 6320 return; 6321 } 6322 6323 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6324 status = ddi_intr_block_enable(intrp->htable, 6325 intrp->intr_added); 6326 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6327 "block enable - status 0x%x total inums #%d\n", 6328 status, intrp->intr_added)); 6329 } else { 6330 for (i = 0; i < intrp->intr_added; i++) { 6331 status = ddi_intr_enable(intrp->htable[i]); 6332 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6333 "ddi_intr_enable:enable - status 0x%x " 6334 "total inums %d enable inum #%d\n", 6335 status, intrp->intr_added, i)); 6336 if (status == DDI_SUCCESS) { 6337 intrp->intr_enabled = B_TRUE; 6338 } 6339 } 6340 } 6341 6342 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable")); 6343 } 6344 6345 /*ARGSUSED*/ 6346 static void 6347 nxge_intrs_disable(p_nxge_t nxgep) 6348 { 6349 p_nxge_intr_t intrp; 6350 int i; 6351 6352 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable")); 6353 6354 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6355 6356 if (!intrp->intr_registered) { 6357 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: " 6358 "interrupts are not registered")); 6359 return; 6360 } 6361 6362 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6363 (void) ddi_intr_block_disable(intrp->htable, 6364 intrp->intr_added); 6365 } else { 6366 for (i = 0; i < intrp->intr_added; i++) { 6367 (void) ddi_intr_disable(intrp->htable[i]); 6368 } 6369 } 6370 6371 intrp->intr_enabled = B_FALSE; 6372 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable")); 6373 } 6374 6375 static nxge_status_t 6376 nxge_mac_register(p_nxge_t nxgep) 6377 { 6378 mac_register_t *macp; 6379 int status; 6380 6381 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register")); 6382 6383 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 6384 return (NXGE_ERROR); 6385 6386 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 6387 macp->m_driver = nxgep; 6388 macp->m_dip = nxgep->dip; 6389 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; 6390 macp->m_callbacks = &nxge_m_callbacks; 6391 macp->m_min_sdu = 0; 6392 nxgep->mac.default_mtu = nxgep->mac.maxframesize - 6393 NXGE_EHEADER_VLAN_CRC; 6394 macp->m_max_sdu = nxgep->mac.default_mtu; 6395 macp->m_margin = VLAN_TAGSZ; 6396 macp->m_priv_props = nxge_priv_props; 6397 macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS; 6398 macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1 | MAC_VIRT_SERIALIZE; 6399 6400 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 6401 "==> nxge_mac_register: instance %d " 6402 "max_sdu %d margin %d maxframe %d (header %d)", 6403 nxgep->instance, 6404 macp->m_max_sdu, macp->m_margin, 6405 nxgep->mac.maxframesize, 6406 NXGE_EHEADER_VLAN_CRC)); 6407 6408 status = mac_register(macp, &nxgep->mach); 6409 mac_free(macp); 6410 6411 if (status != 0) { 6412 cmn_err(CE_WARN, 6413 "!nxge_mac_register failed (status %d instance %d)", 6414 status, nxgep->instance); 6415 return (NXGE_ERROR); 6416 } 6417 6418 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success " 6419 "(instance %d)", nxgep->instance)); 6420 6421 return (NXGE_OK); 6422 } 6423 6424 void 6425 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp) 6426 { 6427 ssize_t size; 6428 mblk_t *nmp; 6429 uint8_t blk_id; 6430 uint8_t chan; 6431 uint32_t err_id; 6432 err_inject_t *eip; 6433 6434 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject")); 6435 6436 size = 1024; 6437 nmp = mp->b_cont; 6438 eip = (err_inject_t *)nmp->b_rptr; 6439 blk_id = eip->blk_id; 6440 err_id = eip->err_id; 6441 chan = eip->chan; 6442 cmn_err(CE_NOTE, "!blk_id = 
0x%x\n", blk_id); 6443 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id); 6444 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 6445 switch (blk_id) { 6446 case MAC_BLK_ID: 6447 break; 6448 case TXMAC_BLK_ID: 6449 break; 6450 case RXMAC_BLK_ID: 6451 break; 6452 case MIF_BLK_ID: 6453 break; 6454 case IPP_BLK_ID: 6455 nxge_ipp_inject_err(nxgep, err_id); 6456 break; 6457 case TXC_BLK_ID: 6458 nxge_txc_inject_err(nxgep, err_id); 6459 break; 6460 case TXDMA_BLK_ID: 6461 nxge_txdma_inject_err(nxgep, err_id, chan); 6462 break; 6463 case RXDMA_BLK_ID: 6464 nxge_rxdma_inject_err(nxgep, err_id, chan); 6465 break; 6466 case ZCP_BLK_ID: 6467 nxge_zcp_inject_err(nxgep, err_id); 6468 break; 6469 case ESPC_BLK_ID: 6470 break; 6471 case FFLP_BLK_ID: 6472 break; 6473 case PHY_BLK_ID: 6474 break; 6475 case ETHER_SERDES_BLK_ID: 6476 break; 6477 case PCIE_SERDES_BLK_ID: 6478 break; 6479 case VIR_BLK_ID: 6480 break; 6481 } 6482 6483 nmp->b_wptr = nmp->b_rptr + size; 6484 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 6485 6486 miocack(wq, mp, (int)size, 0); 6487 } 6488 6489 static int 6490 nxge_init_common_dev(p_nxge_t nxgep) 6491 { 6492 p_nxge_hw_list_t hw_p; 6493 dev_info_t *p_dip; 6494 6495 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 6496 6497 p_dip = nxgep->p_dip; 6498 MUTEX_ENTER(&nxge_common_lock); 6499 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6500 "==> nxge_init_common_dev:func # %d", 6501 nxgep->function_num)); 6502 /* 6503 * Loop through existing per neptune hardware list. 6504 */ 6505 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6506 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6507 "==> nxge_init_common_device:func # %d " 6508 "hw_p $%p parent dip $%p", 6509 nxgep->function_num, 6510 hw_p, 6511 p_dip)); 6512 if (hw_p->parent_devp == p_dip) { 6513 nxgep->nxge_hw_p = hw_p; 6514 hw_p->ndevs++; 6515 hw_p->nxge_p[nxgep->function_num] = nxgep; 6516 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6517 "==> nxge_init_common_device:func # %d " 6518 "hw_p $%p parent dip $%p " 6519 "ndevs %d (found)", 6520 nxgep->function_num, 6521 hw_p, 6522 p_dip, 6523 hw_p->ndevs)); 6524 break; 6525 } 6526 } 6527 6528 if (hw_p == NULL) { 6529 6530 char **prop_val; 6531 uint_t prop_len; 6532 int i; 6533 6534 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6535 "==> nxge_init_common_device:func # %d " 6536 "parent dip $%p (new)", 6537 nxgep->function_num, 6538 p_dip)); 6539 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 6540 hw_p->parent_devp = p_dip; 6541 hw_p->magic = NXGE_NEPTUNE_MAGIC; 6542 nxgep->nxge_hw_p = hw_p; 6543 hw_p->ndevs++; 6544 hw_p->nxge_p[nxgep->function_num] = nxgep; 6545 hw_p->next = nxge_hw_list; 6546 if (nxgep->niu_type == N2_NIU) { 6547 hw_p->niu_type = N2_NIU; 6548 hw_p->platform_type = P_NEPTUNE_NIU; 6549 } else { 6550 hw_p->niu_type = NIU_TYPE_NONE; 6551 hw_p->platform_type = P_NEPTUNE_NONE; 6552 } 6553 6554 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 6555 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 6556 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 6557 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 6558 6559 nxge_hw_list = hw_p; 6560 6561 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0, 6562 "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) { 6563 for (i = 0; i < prop_len; i++) { 6564 if ((strcmp((caddr_t)prop_val[i], 6565 NXGE_ROCK_COMPATIBLE) == 0)) { 6566 hw_p->platform_type = P_NEPTUNE_ROCK; 6567 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6568 "ROCK hw_p->platform_type %d", 6569 hw_p->platform_type)); 6570 break; 6571 } 6572 
NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6573 "nxge_init_common_dev: read compatible" 6574 " property[%d] val[%s]", 6575 i, (caddr_t)prop_val[i])); 6576 } 6577 ddi_prop_free(prop_val); 6578 } 6579 6580 6581 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list); 6582 } 6583 6584 MUTEX_EXIT(&nxge_common_lock); 6585 6586 nxgep->platform_type = hw_p->platform_type; 6587 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d", 6588 nxgep->platform_type)); 6589 if (nxgep->niu_type != N2_NIU) { 6590 nxgep->niu_type = hw_p->niu_type; 6591 } 6592 6593 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6594 "==> nxge_init_common_device (nxge_hw_list) $%p", 6595 nxge_hw_list)); 6596 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device")); 6597 6598 return (NXGE_OK); 6599 } 6600 6601 static void 6602 nxge_uninit_common_dev(p_nxge_t nxgep) 6603 { 6604 p_nxge_hw_list_t hw_p, h_hw_p; 6605 p_nxge_dma_pt_cfg_t p_dma_cfgp; 6606 p_nxge_hw_pt_cfg_t p_cfgp; 6607 dev_info_t *p_dip; 6608 6609 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device")); 6610 if (nxgep->nxge_hw_p == NULL) { 6611 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6612 "<== nxge_uninit_common_device (no common)")); 6613 return; 6614 } 6615 6616 MUTEX_ENTER(&nxge_common_lock); 6617 h_hw_p = nxge_hw_list; 6618 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6619 p_dip = hw_p->parent_devp; 6620 if (nxgep->nxge_hw_p == hw_p && 6621 p_dip == nxgep->p_dip && 6622 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC && 6623 hw_p->magic == NXGE_NEPTUNE_MAGIC) { 6624 6625 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6626 "==> nxge_uninit_common_device:func # %d " 6627 "hw_p $%p parent dip $%p " 6628 "ndevs %d (found)", 6629 nxgep->function_num, 6630 hw_p, 6631 p_dip, 6632 hw_p->ndevs)); 6633 6634 /* 6635 * Release the RDC table, a shared resource 6636 * of the nxge hardware. The RDC table was 6637 * assigned to this instance of nxge in 6638 * nxge_use_cfg_dma_config(). 6639 */ 6640 if (!isLDOMguest(nxgep)) { 6641 p_dma_cfgp = 6642 (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 6643 p_cfgp = 6644 (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config; 6645 (void) nxge_fzc_rdc_tbl_unbind(nxgep, 6646 p_cfgp->def_mac_rxdma_grpid); 6647 6648 /* Cleanup any outstanding groups. 
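nxge_grp_cleanup() below tears down any TDC/RDC group state still held by this instance; the RDC table itself was unbound just above.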
*/ 6649 nxge_grp_cleanup(nxgep); 6650 } 6651 6652 if (hw_p->ndevs) { 6653 hw_p->ndevs--; 6654 } 6655 hw_p->nxge_p[nxgep->function_num] = NULL; 6656 if (!hw_p->ndevs) { 6657 MUTEX_DESTROY(&hw_p->nxge_vlan_lock); 6658 MUTEX_DESTROY(&hw_p->nxge_tcam_lock); 6659 MUTEX_DESTROY(&hw_p->nxge_cfg_lock); 6660 MUTEX_DESTROY(&hw_p->nxge_mdio_lock); 6661 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6662 "==> nxge_uninit_common_device: " 6663 "func # %d " 6664 "hw_p $%p parent dip $%p " 6665 "ndevs %d (last)", 6666 nxgep->function_num, 6667 hw_p, 6668 p_dip, 6669 hw_p->ndevs)); 6670 6671 nxge_hio_uninit(nxgep); 6672 6673 if (hw_p == nxge_hw_list) { 6674 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6675 "==> nxge_uninit_common_device:" 6676 "remove head func # %d " 6677 "hw_p $%p parent dip $%p " 6678 "ndevs %d (head)", 6679 nxgep->function_num, 6680 hw_p, 6681 p_dip, 6682 hw_p->ndevs)); 6683 nxge_hw_list = hw_p->next; 6684 } else { 6685 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6686 "==> nxge_uninit_common_device:" 6687 "remove middle func # %d " 6688 "hw_p $%p parent dip $%p " 6689 "ndevs %d (middle)", 6690 nxgep->function_num, 6691 hw_p, 6692 p_dip, 6693 hw_p->ndevs)); 6694 h_hw_p->next = hw_p->next; 6695 } 6696 6697 nxgep->nxge_hw_p = NULL; 6698 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t)); 6699 } 6700 break; 6701 } else { 6702 h_hw_p = hw_p; 6703 } 6704 } 6705 6706 MUTEX_EXIT(&nxge_common_lock); 6707 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6708 "==> nxge_uninit_common_device (nxge_hw_list) $%p", 6709 nxge_hw_list)); 6710 6711 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device")); 6712 } 6713 6714 /* 6715 * Determines the number of ports from the niu_type or the platform type. 6716 * Returns the number of ports, or returns zero on failure. 6717 */ 6718 6719 int 6720 nxge_get_nports(p_nxge_t nxgep) 6721 { 6722 int nports = 0; 6723 6724 switch (nxgep->niu_type) { 6725 case N2_NIU: 6726 case NEPTUNE_2_10GF: 6727 nports = 2; 6728 break; 6729 case NEPTUNE_4_1GC: 6730 case NEPTUNE_2_10GF_2_1GC: 6731 case NEPTUNE_1_10GF_3_1GC: 6732 case NEPTUNE_1_1GC_1_10GF_2_1GC: 6733 case NEPTUNE_2_10GF_2_1GRF: 6734 nports = 4; 6735 break; 6736 default: 6737 switch (nxgep->platform_type) { 6738 case P_NEPTUNE_NIU: 6739 case P_NEPTUNE_ATLAS_2PORT: 6740 nports = 2; 6741 break; 6742 case P_NEPTUNE_ATLAS_4PORT: 6743 case P_NEPTUNE_MARAMBA_P0: 6744 case P_NEPTUNE_MARAMBA_P1: 6745 case P_NEPTUNE_ROCK: 6746 case P_NEPTUNE_ALONSO: 6747 nports = 4; 6748 break; 6749 default: 6750 break; 6751 } 6752 break; 6753 } 6754 6755 return (nports); 6756 } 6757 6758 /* 6759 * The following two functions are to support 6760 * PSARC/2007/453 MSI-X interrupt limit override. 6761 */ 6762 static int 6763 nxge_create_msi_property(p_nxge_t nxgep) 6764 { 6765 int nmsi; 6766 extern int ncpus; 6767 6768 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property")); 6769 6770 switch (nxgep->mac.portmode) { 6771 case PORT_10G_COPPER: 6772 case PORT_10G_FIBER: 6773 case PORT_10G_TN1010: 6774 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 6775 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 6776 /* 6777 * The maximum number of MSI-X vectors requested will be 8. 6778 * If the # of CPUs is less than 8, the request 6779 * will be based on the # of CPUs (default). 
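* A sketch of the clamping implemented below, using names from this file: a zero or out-of-range nxge_msix_10g_intrs falls back to NXGE_MSIX_REQUEST_10G (8), and a default-sized request is further capped at ncpus.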
6780 */ 6781 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6782 "==>nxge_create_msi_property (10G): nxge_msix_10g_intrs %d", 6783 nxge_msix_10g_intrs)); 6784 if ((nxge_msix_10g_intrs == 0) || 6785 (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) { 6786 nmsi = NXGE_MSIX_REQUEST_10G; 6787 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6788 "==>nxge_create_msi_property (10G): reset to 8")); 6789 } else { 6790 nmsi = nxge_msix_10g_intrs; 6791 } 6792 6793 /* 6794 * If the # of interrupts requested is 8 (the default), 6795 * the number of CPUs is still checked and used 6796 * as the upper bound. 6797 */ 6798 if ((nmsi == NXGE_MSIX_REQUEST_10G) && 6799 (ncpus < nmsi)) { 6800 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6801 "==>nxge_create_msi_property (10G): reset to # of CPUs")); 6802 nmsi = ncpus; 6803 } 6804 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6805 "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)", 6806 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6807 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6808 break; 6809 6810 default: 6811 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 6812 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 6813 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6814 "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d", 6815 nxge_msix_1g_intrs)); 6816 if ((nxge_msix_1g_intrs == 0) || 6817 (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) { 6818 nmsi = NXGE_MSIX_REQUEST_1G; 6819 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6820 "==>nxge_create_msi_property (1G): reset to 2")); 6821 } else { 6822 nmsi = nxge_msix_1g_intrs; 6823 } 6824 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6825 "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)", 6826 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6827 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6828 break; 6829 } 6830 6831 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property")); 6832 return (nmsi); 6833 } 6834 6835 /* ARGSUSED */ 6836 static int 6837 nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize, 6838 void *pr_val) 6839 { 6840 int err = 0; 6841 link_flowctrl_t fl; 6842 6843 switch (pr_num) { 6844 case MAC_PROP_AUTONEG: 6845 *(uint8_t *)pr_val = 1; 6846 break; 6847 case MAC_PROP_FLOWCTRL: 6848 if (pr_valsize < sizeof (link_flowctrl_t)) 6849 return (EINVAL); 6850 fl = LINK_FLOWCTRL_RX; 6851 bcopy(&fl, pr_val, sizeof (fl)); 6852 break; 6853 case MAC_PROP_ADV_1000FDX_CAP: 6854 case MAC_PROP_EN_1000FDX_CAP: 6855 *(uint8_t *)pr_val = 1; 6856 break; 6857 case MAC_PROP_ADV_100FDX_CAP: 6858 case MAC_PROP_EN_100FDX_CAP: 6859 *(uint8_t *)pr_val = 1; 6860 break; 6861 default: 6862 err = ENOTSUP; 6863 break; 6864 } 6865 return (err); 6866 } 6867 6868 6869 /* 6870 * The following is a software workaround for the Neptune hardware's 6871 * interrupt bugs; the hardware may generate spurious interrupts after 6872 * an interrupt handler is removed. 
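* The workaround implemented in nxge_niu_peu_reset() below waits for the sibling functions on the same hardware to quiesce, then sets the PIM/GLU/NIU reset bits in the PEU port-logic register (offset 0x98) and waits roughly one second for the reset to settle.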
*/ 6874 #define NXGE_PCI_PORT_LOGIC_OFFSET 0x98 6875 #define NXGE_PIM_RESET (1ULL << 29) 6876 #define NXGE_GLU_RESET (1ULL << 30) 6877 #define NXGE_NIU_RESET (1ULL << 31) 6878 #define NXGE_PCI_RESET_ALL (NXGE_PIM_RESET | \ 6879 NXGE_GLU_RESET | \ 6880 NXGE_NIU_RESET) 6881 6882 #define NXGE_WAIT_QUITE_TIME 200000 6883 #define NXGE_WAIT_QUITE_RETRY 40 6884 #define NXGE_PCI_RESET_WAIT 1000000 /* one second */ 6885 6886 static void 6887 nxge_niu_peu_reset(p_nxge_t nxgep) 6888 { 6889 uint32_t rvalue; 6890 p_nxge_hw_list_t hw_p; 6891 p_nxge_t fnxgep; 6892 int i, j; 6893 6894 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset")); 6895 if ((hw_p = nxgep->nxge_hw_p) == NULL) { 6896 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6897 "==> nxge_niu_peu_reset: NULL hardware pointer")); 6898 return; 6899 } 6900 6901 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6902 "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d", 6903 hw_p->flags, nxgep->nxge_link_poll_timerid, 6904 nxgep->nxge_timerid)); 6905 6906 MUTEX_ENTER(&hw_p->nxge_cfg_lock); 6907 /* 6908 * Make sure other instances on the same hardware 6909 * stop sending PIOs and are in a quiescent state. 6910 */ 6911 for (i = 0; i < NXGE_MAX_PORTS; i++) { 6912 fnxgep = hw_p->nxge_p[i]; 6913 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6914 "==> nxge_niu_peu_reset: checking entry %d " 6915 "nxgep $%p", i, fnxgep)); 6916 #ifdef NXGE_DEBUG 6917 if (fnxgep) { 6918 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6919 "==> nxge_niu_peu_reset: entry %d (function %d) " 6920 "link timer id %d hw timer id %d", 6921 i, fnxgep->function_num, 6922 fnxgep->nxge_link_poll_timerid, 6923 fnxgep->nxge_timerid)); 6924 } 6925 #endif 6926 if (fnxgep && fnxgep != nxgep && 6927 (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) { 6928 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6929 "==> nxge_niu_peu_reset: checking $%p " 6930 "(function %d) timer ids", 6931 fnxgep, fnxgep->function_num)); 6932 for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) { 6933 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6934 "==> nxge_niu_peu_reset: waiting")); 6935 NXGE_DELAY(NXGE_WAIT_QUITE_TIME); 6936 if (!fnxgep->nxge_timerid && 6937 !fnxgep->nxge_link_poll_timerid) { 6938 break; 6939 } 6940 } 6941 NXGE_DELAY(NXGE_WAIT_QUITE_TIME); 6942 if (fnxgep->nxge_timerid || 6943 fnxgep->nxge_link_poll_timerid) { 6944 MUTEX_EXIT(&hw_p->nxge_cfg_lock); 6945 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6946 "<== nxge_niu_peu_reset: cannot reset " 6947 "hardware (devices are still in use)")); 6948 return; 6949 } 6950 } 6951 } 6952 6953 if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) { 6954 hw_p->flags |= COMMON_RESET_NIU_PCI; 6955 rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh, 6956 NXGE_PCI_PORT_LOGIC_OFFSET); 6957 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6958 "nxge_niu_peu_reset: read offset 0x%x (%d) " 6959 "(data 0x%x)", 6960 NXGE_PCI_PORT_LOGIC_OFFSET, 6961 NXGE_PCI_PORT_LOGIC_OFFSET, 6962 rvalue)); 6963 6964 rvalue |= NXGE_PCI_RESET_ALL; 6965 pci_config_put32(nxgep->dev_regs->nxge_pciregh, 6966 NXGE_PCI_PORT_LOGIC_OFFSET, rvalue); 6967 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6968 "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x", 6969 rvalue)); 6970 6971 NXGE_DELAY(NXGE_PCI_RESET_WAIT); 6972 } 6973 6974 MUTEX_EXIT(&hw_p->nxge_cfg_lock); 6975 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset")); 6976 } 6977 6978 static void 6979 nxge_set_pci_replay_timeout(p_nxge_t nxgep) 6980 { 6981 p_dev_regs_t dev_regs; 6982 uint32_t value; 6983 6984 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout")); 
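/* The new timeout below is OR-ed into bits 18:14 of the PCI config register at PCI_REPLAY_TIMEOUT_CFG_OFFSET (0xb8); the field is not masked out first, so this assumes those bits are still at their reset value of zero. */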
6985 6986 if (!nxge_set_replay_timer) { 6987 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6988 "==> nxge_set_pci_replay_timeout: will not change " 6989 "the timeout")); 6990 return; 6991 } 6992 6993 dev_regs = nxgep->dev_regs; 6994 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6995 "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p", 6996 dev_regs, dev_regs->nxge_pciregh)); 6997 6998 if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) { 6999 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7000 "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or " 7001 "no PCI handle", 7002 dev_regs)); 7003 return; 7004 } 7005 value = (pci_config_get32(dev_regs->nxge_pciregh, 7006 PCI_REPLAY_TIMEOUT_CFG_OFFSET) | 7007 (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT)); 7008 7009 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7010 "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x " 7011 "(timeout value to set 0x%x at offset 0x%x) value 0x%x", 7012 pci_config_get32(dev_regs->nxge_pciregh, 7013 PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout, 7014 PCI_REPLAY_TIMEOUT_CFG_OFFSET, value)); 7015 7016 pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET, 7017 value); 7018 7019 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 7020 "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x", 7021 pci_config_get32(dev_regs->nxge_pciregh, 7022 PCI_REPLAY_TIMEOUT_CFG_OFFSET))); 7023 7024 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout")); 7025 } 7026 7027 /* 7028 * quiesce(9E) entry point. 7029 * 7030 * This function is called when the system is single-threaded at high 7031 * PIL with preemption disabled. Therefore, this function must not 7032 * block. 7033 * 7034 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 7035 * DDI_FAILURE indicates an error condition and should almost never happen. 7036 */ 7037 static int 7038 nxge_quiesce(dev_info_t *dip) 7039 { 7040 int instance = ddi_get_instance(dip); 7041 p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 7042 7043 if (nxgep == NULL) 7044 return (DDI_FAILURE); 7045 7046 /* Turn off debugging */ 7047 nxge_debug_level = NO_DEBUG; 7048 nxgep->nxge_debug_level = NO_DEBUG; 7049 npi_debug_level = NO_DEBUG; 7050 7051 /* 7052 * Stop the link monitor only when linkchkmode is interrupt based 7053 */ 7054 if (nxgep->mac.linkchkmode == LINKCHK_INTR) { 7055 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 7056 } 7057 7058 (void) nxge_intr_hw_disable(nxgep); 7059 7060 /* 7061 * Reset the receive MAC side. 7062 */ 7063 (void) nxge_rx_mac_disable(nxgep); 7064 7065 /* Disable and soft reset the IPP */ 7066 if (!isLDOMguest(nxgep)) 7067 (void) nxge_ipp_disable(nxgep); 7068 7069 /* 7070 * Reset the transmit/receive DMA side. 7071 */ 7072 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 7073 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 7074 7075 /* 7076 * Reset the transmit MAC side. 7077 */ 7078 (void) nxge_tx_mac_disable(nxgep); 7079 7080 return (DDI_SUCCESS); 7081 } 7082
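/* Illustrative only: the tunable globals defined in this file can be overridden from /etc/system with the standard "set <module>:<variable>" syntax, for example: set nxge:nxge_msi_enable = 1 or set nxge:nxge_set_replay_timer = 0 */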