/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_hio.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use OBP-published properties */
uint32_t nxge_use_rdc_intr = 1;		/* debug: assign RDC interrupts */
/*
 * PSARC/2007/453 MSI-X interrupt limit override
 */
uint32_t nxge_msi_enable = 2;

/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed. If this flag is enabled, the
 * driver will reset the hardware when the device is being detached.
 */
uint32_t nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware checksum bugs that affect
 * packet transmission and reception:
 *
 * Usage of nxge_cksum_offload:
 *
 * (1) nxge_cksum_offload = 0 (default):
 *	- transmitted packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: the driver computes a software checksum based on the
 *	       partial checksum computed by the IP layer.
 *	- received packets:
 *	  TCP: marks the packet checksum flags based on the hardware result.
 *	  UDP: does not mark checksum flags.
 *
 * (2) nxge_cksum_offload = 1:
 *	- transmitted packets:
 *	  TCP/UDP: use the hardware checksum feature.
 *	- received packets:
 *	  TCP/UDP: mark the packet checksum flags based on the hardware
 *	  result.
 *
 * (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksums for both TCP and UDP will be computed by the stack.
 *	- Software LSO is not allowed in this case.
 *
 * (4) nxge_cksum_offload > 2:
 *	- Treated as if it were set to 2 (the stack computes the checksum).
 *
 * (5) If the hardware bug is fixed, this workaround needs to be updated
 *     accordingly to reflect the new hardware revision.
 */
uint32_t nxge_cksum_offload = 0;
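/*
 * For example, the workaround mode can be selected at boot time from
 * /etc/system (an illustrative sketch; the other tunables declared in
 * this file can be set the same way):
 *
 *	set nxge:nxge_cksum_offload = 1
 */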

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t nxge_jumbo_enable = B_FALSE;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/* Maximum LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_buf_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to the tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if defined(_BIG_ENDIAN)
uint32_t nxge_use_kmem_alloc = 1;
#else
uint32_t nxge_use_kmem_alloc = 0;
#endif

rtrace_t npi_rtracebuf;

/*
 * The hardware sometimes fails to allow enough time for the link partner
 * to send an acknowledgement for packets that the hardware sent to it.
 * In those instances the hardware resends the packets earlier than it
 * should. This behavior caused some switches to acknowledge the wrong
 * packets, which triggered a fatal error. This software workaround sets
 * the replay timer to a value suggested by the hardware team.
 *
 * PCI config space replay timer register:
 * the replay timeout value below is 0xc for bits 14:18.
 */
#define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
#define	PCI_REPLAY_TIMEOUT_SHIFT	14

uint32_t nxge_set_replay_timer = 1;
uint32_t nxge_replay_timeout = 0xc;
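/*
 * A minimal sketch of how these two values combine (illustrative only;
 * the actual update is performed by nxge_set_pci_replay_timeout(), and a
 * careful update would also clear the old field before OR-ing in the
 * new value):
 *
 *	value = pci_config_get32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	value |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, value);
 */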

/*
 * Transmit serialization can sometimes cause the caller to sleep longer
 * than it should before the driver transmit function is called. The
 * performance group suggested a tunable to set the maximum wait time
 * when needed; the default is 1 tick.
 */
uint32_t nxge_tx_serial_maxsleep = 1;

#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);
static int nxge_quiesce(dev_info_t *);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_remove(void *arg, int slot);
extern void nxge_grp_cleanup(p_nxge_t nxge);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
    struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
    p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
    char *, caddr_t);

/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
    int slot, int rdctbl, boolean_t usetbl);
void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
    boolean_t factory);
#if defined(sun4v)
extern mblk_t *nxge_m_tx(void *arg, mblk_t *mp);
#endif

static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *, uint_t *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
    const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
    void *, uint_t *);
static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);
static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
    mac_ring_info_t *, mac_ring_handle_t);
static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);
static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);

static void nxge_niu_peu_reset(p_nxge_t nxgep);
static void nxge_set_pci_replay_timeout(nxge_t *);

mac_priv_prop_t nxge_priv_props[] = {
	{"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW},
	{"_function_number", MAC_PROP_PERM_READ},
	{"_fw_version", MAC_PROP_PERM_READ},
	{"_port_mode", MAC_PROP_PERM_READ},
	{"_hot_swap_phy", MAC_PROP_PERM_READ},
	{"_accept_jumbo", MAC_PROP_PERM_RW},
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
	{"_soft_lso_enable", MAC_PROP_PERM_RW}
};

#define	NXGE_MAX_PRIV_PROPS	\
	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	NULL,
	NULL,
	nxge_m_ioctl,
	nxge_m_getcapab,
	NULL,
	NULL,
	nxge_m_setprop,
	nxge_m_getprop
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);
/*
 * For latency-sensitive applications, PAE and customers requested
 * tunables that allow the user to request a larger number of interrupts
 * and so spread the interrupt load across multiple channels. The DDI
 * framework limits the maximum number of MSI-X resources that may be
 * allocated to 8 (ddi_msix_alloc_limit). If more than 8 are requested
 * here, ddi_msix_alloc_limit must be raised accordingly. The default
 * number of MSI interrupts is 8 for a 10G link and 2 for a 1G link.
 */
#define	NXGE_MSIX_MAX_ALLOWED	32
uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
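/*
 * An illustrative /etc/system fragment (a sketch, not a tested
 * recommendation) requesting 16 MSI-X interrupts on a 10G port; note
 * that the framework-wide limit must be raised to match:
 *
 *	set ddi_msix_alloc_limit = 16
 *	set nxge:nxge_msix_10g_intrs = 16
 */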

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;
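/*
 * For example (an illustrative sketch; the actual flag values are the
 * DBG_xxx/STR_xxx and NXGE debug masks defined in the driver headers,
 * not spelled out here):
 *
 *	set nxge:nxge_dbgmsg_out = <DBG_CONSOLE | STR_LOG>
 *	set nxge:nxge_debug_level = <bitwise OR of the NXGE debug masks>
 */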

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t, ddi_device_acc_attr_t *,
    ddi_device_acc_attr_t *, ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes[] = {0x2000};
#else
size_t alloc_sizes[] = {0x1000, 0x2000, 0x4000, 0x8000,
    0x10000, 0x20000, 0x40000, 0x80000,
    0x100000, 0x200000, 0x400000, 0x800000,
    0x1000000, 0x2000000, 0x4000000};
#endif

/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);

static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;
	p_nxge_param_t param_arr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
	    &nxge_dev_desc_dma_acc_attr, &nxge_rx_dma_attr);

	/*
	 * Create & initialize the per-Neptune data structure
	 * (even if we're a guest).
	 */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	/*
	 * Software workaround: set the replay timer.
	 */
	if (nxgep->niu_type != N2_NIU) {
		nxge_set_pci_replay_timeout(nxgep);
	}
#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		nxge_m_callbacks.mc_tx = nxge_m_tx;
	}
#endif

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail4;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
		 * internally; the other 2 ports use BMAC (1G "Big" MAC).
		 * The two types of MACs have different characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);

		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * The address_hi, i.e. the first int in the "reg"
		 * property, contains the config handle; bits 28-31
		 * are OBP-specific info and must be removed.
		 */
727 */ 728 nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF; 729 ddi_prop_free(regp); 730 } 731 732 if (isLDOMguest(nxgep)) { 733 uchar_t *prop_val; 734 uint_t prop_len; 735 uint32_t max_frame_size; 736 737 extern void nxge_get_logical_props(p_nxge_t); 738 739 nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR; 740 nxgep->mac.portmode = PORT_LOGICAL; 741 (void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip, 742 "phy-type", "virtual transceiver"); 743 744 nxgep->nports = 1; 745 nxgep->board_ver = 0; /* XXX What? */ 746 747 /* 748 * local-mac-address property gives us info on which 749 * specific MAC address the Hybrid resource is associated 750 * with. 751 */ 752 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0, 753 "local-mac-address", &prop_val, 754 &prop_len) != DDI_PROP_SUCCESS) { 755 goto nxge_attach_fail5; 756 } 757 if (prop_len != ETHERADDRL) { 758 ddi_prop_free(prop_val); 759 goto nxge_attach_fail5; 760 } 761 ether_copy(prop_val, nxgep->hio_mac_addr); 762 ddi_prop_free(prop_val); 763 nxge_get_logical_props(nxgep); 764 765 /* 766 * Enable Jumbo property based on the "max-frame-size" 767 * property value. 768 */ 769 max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY, 770 nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 771 "max-frame-size", NXGE_MTU_DEFAULT_MAX); 772 if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) && 773 (max_frame_size <= TX_JUMBO_MTU)) { 774 param_arr = nxgep->param_arr; 775 776 param_arr[param_accept_jumbo].value = 1; 777 nxgep->mac.is_jumbo = B_TRUE; 778 nxgep->mac.maxframesize = (uint16_t)max_frame_size; 779 nxgep->mac.default_mtu = nxgep->mac.maxframesize - 780 NXGE_EHEADER_VLAN_CRC; 781 } 782 } else { 783 status = nxge_xcvr_find(nxgep); 784 785 if (status != NXGE_OK) { 786 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: " 787 " Couldn't determine card type" 788 " .... exit ")); 789 goto nxge_attach_fail5; 790 } 791 792 status = nxge_get_config_properties(nxgep); 793 794 if (status != NXGE_OK) { 795 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 796 "get_hw create failed")); 797 goto nxge_attach_fail; 798 } 799 } 800 801 /* 802 * Setup the Kstats for the driver. 803 */ 804 nxge_setup_kstats(nxgep); 805 806 if (!isLDOMguest(nxgep)) 807 nxge_setup_param(nxgep); 808 809 status = nxge_setup_system_dma_pages(nxgep); 810 if (status != NXGE_OK) { 811 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed")); 812 goto nxge_attach_fail; 813 } 814 815 nxge_hw_id_init(nxgep); 816 817 if (!isLDOMguest(nxgep)) 818 nxge_hw_init_niu_common(nxgep); 819 820 status = nxge_setup_mutexes(nxgep); 821 if (status != NXGE_OK) { 822 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed")); 823 goto nxge_attach_fail; 824 } 825 826 #if defined(sun4v) 827 if (isLDOMguest(nxgep)) { 828 /* Find our VR & channel sets. */ 829 status = nxge_hio_vr_add(nxgep); 830 if (status != NXGE_OK) { 831 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 832 "nxge_hio_vr_add failed")); 833 (void) hsvc_unregister(&nxgep->niu_hsvc); 834 nxgep->niu_hsvc_available = B_FALSE; 835 } 836 goto nxge_attach_exit; 837 } 838 #endif 839 840 status = nxge_setup_dev(nxgep); 841 if (status != DDI_SUCCESS) { 842 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed")); 843 goto nxge_attach_fail; 844 } 845 846 status = nxge_add_intrs(nxgep); 847 if (status != DDI_SUCCESS) { 848 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed")); 849 goto nxge_attach_fail; 850 } 851 852 /* If a guest, register with vio_net instead. 
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	/* nxge_link_monitor calls xcvr.check_link recursively */
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int status = DDI_SUCCESS;
	int instance;
	p_nxge_t nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (isLDOMguest(nxgep)) {
		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
			nxge_m_stop((void *)nxgep);
		nxge_hio_unregister(nxgep);
	} else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	/*
	 * If this flag is set, it will affect the Neptune
	 * hardware only.
	 */
	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
		nxge_niu_peu_reset(nxgep);
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were set up during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
int
nxge_hsvc_register(nxge_t *nxgep)
{
	nxge_status_t status;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			return (DDI_FAILURE);
		}
		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}

	return (DDI_SUCCESS);
}
#endif

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	p_dev_regs_t dev_regs;
	char buf[MAXPATHLEN + 1];
	char *devname;
#ifdef NXGE_DEBUG
	char *sysname;
#endif
	off_t regsize;
	nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	/*
	 * The driver is running on an N2-NIU system if devname is
	 * something like "/niu@80/network@0".
	 */
	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int *prop_val;
		uint_t prop_len;
		uint8_t func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				return (NXGE_OK);
			}
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMPORTANT: workaround for a bit-swapping bug in HW
		 * which ends up with no-snoop = yes, resulting in DMA
		 * not being synched properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));

	if (isLDOMguest(nxgep)) {
		nxge_guest_regs_map_free(nxgep);
		return;
	}

	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->interrupt_cookie = 0;
	} else {
		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
		    &nxgep->interrupt_cookie);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_setup_mutexes: failed 0x%x",
			    ddi_status));
			goto nxge_setup_mutexes_exit;
		}
	}

	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);

	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context,
	 * as FFLP operations can take a very long time to
	 * complete and hence are not suitable to invoke from
	 * interrupt handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 * enable the port, configure the dma channel bitmap,
		 * configure the max burst size).
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep); /* XXX What changes do I need to make here? */

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}

timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}

void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	/*
	 * Start the timer if the reset flag is not set.
	 * If the reset flag is set, the link monitor
	 * is not started, in order to stop further bus
	 * activity coming from this interface.
	 * The driver will start the monitor function again
	 * if the interface is initialized later.
	 */
	if (!nxge_peu_reset_enable) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t regdata;
	int i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
	reg = buf[0];

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}

nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	/* In case a developer has changed nxge_debug_level. */
	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level)
		nxgep->nxge_debug_level = nxge_debug_level;

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);

	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
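
/*
 * Illustrative use of nxge_dump_packet() (a sketch, assuming RX_CTL-level
 * debugging is enabled): dump the first 60 bytes of a received frame as
 * colon-separated hex:
 *
 *	NXGE_DEBUG_MSG((nxgep, RX_CTL, "rx pkt: %s",
 *	    nxge_dump_packet((char *)mp->b_rptr, 60)));
 */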
NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0))); 1927 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1928 "\nNeptune PCI BAR: base30 0x%x\n", 1929 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); 1930 1931 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1932 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1933 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1934 "first 0x%llx second 0x%llx third 0x%llx " 1935 "last 0x%llx ", 1936 NXGE_PIO_READ64(dev_handle, 1937 (uint64_t *)(dev_ptr + 0), 0), 1938 NXGE_PIO_READ64(dev_handle, 1939 (uint64_t *)(dev_ptr + 8), 0), 1940 NXGE_PIO_READ64(dev_handle, 1941 (uint64_t *)(dev_ptr + 16), 0), 1942 NXGE_PIO_READ64(cfg_handle, 1943 (uint64_t *)(dev_ptr + 24), 0))); 1944 } 1945 } 1946 1947 #endif 1948 1949 static void 1950 nxge_suspend(p_nxge_t nxgep) 1951 { 1952 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); 1953 1954 nxge_intrs_disable(nxgep); 1955 nxge_destroy_dev(nxgep); 1956 1957 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); 1958 } 1959 1960 static nxge_status_t 1961 nxge_resume(p_nxge_t nxgep) 1962 { 1963 nxge_status_t status = NXGE_OK; 1964 1965 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); 1966 1967 nxgep->suspended = DDI_RESUME; 1968 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1969 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1970 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 1971 (void) nxge_rx_mac_enable(nxgep); 1972 (void) nxge_tx_mac_enable(nxgep); 1973 nxge_intrs_enable(nxgep); 1974 nxgep->suspended = 0; 1975 1976 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1977 "<== nxge_resume status = 0x%x", status)); 1978 return (status); 1979 } 1980 1981 static nxge_status_t 1982 nxge_setup_dev(p_nxge_t nxgep) 1983 { 1984 nxge_status_t status = NXGE_OK; 1985 1986 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 1987 nxgep->mac.portnum)); 1988 1989 status = nxge_link_init(nxgep); 1990 1991 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 1992 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1993 "port%d Bad register acc handle", nxgep->mac.portnum)); 1994 status = NXGE_ERROR; 1995 } 1996 1997 if (status != NXGE_OK) { 1998 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1999 " nxge_setup_dev status " 2000 "(xcvr init 0x%08x)", status)); 2001 goto nxge_setup_dev_exit; 2002 } 2003 2004 nxge_setup_dev_exit: 2005 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2006 "<== nxge_setup_dev port %d status = 0x%08x", 2007 nxgep->mac.portnum, status)); 2008 2009 return (status); 2010 } 2011 2012 static void 2013 nxge_destroy_dev(p_nxge_t nxgep) 2014 { 2015 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 2016 2017 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 2018 2019 (void) nxge_hw_stop(nxgep); 2020 2021 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 2022 } 2023 2024 static nxge_status_t 2025 nxge_setup_system_dma_pages(p_nxge_t nxgep) 2026 { 2027 int ddi_status = DDI_SUCCESS; 2028 uint_t count; 2029 ddi_dma_cookie_t cookie; 2030 uint_t iommu_pagesize; 2031 nxge_status_t status = NXGE_OK; 2032 2033 NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 2034 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 2035 if (nxgep->niu_type != N2_NIU) { 2036 iommu_pagesize = dvma_pagesize(nxgep->dip); 2037 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2038 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2039 " default_block_size %d iommu_pagesize %d", 2040 nxgep->sys_page_sz, 2041 ddi_ptob(nxgep->dip, (ulong_t)1), 2042 nxgep->rx_default_block_size, 2043 iommu_pagesize)); 2044 2045 if (iommu_pagesize != 0) { 2046 if (nxgep->sys_page_sz == 
iommu_pagesize) { 2047 if (iommu_pagesize > 0x4000) 2048 nxgep->sys_page_sz = 0x4000; 2049 } else { 2050 if (nxgep->sys_page_sz > iommu_pagesize) 2051 nxgep->sys_page_sz = iommu_pagesize; 2052 } 2053 } 2054 } 2055 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2056 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2057 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2058 "default_block_size %d page mask %d", 2059 nxgep->sys_page_sz, 2060 ddi_ptob(nxgep->dip, (ulong_t)1), 2061 nxgep->rx_default_block_size, 2062 nxgep->sys_page_mask)); 2063 2064 2065 switch (nxgep->sys_page_sz) { 2066 default: 2067 nxgep->sys_page_sz = 0x1000; 2068 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2069 nxgep->rx_default_block_size = 0x1000; 2070 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2071 break; 2072 case 0x1000: 2073 nxgep->rx_default_block_size = 0x1000; 2074 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2075 break; 2076 case 0x2000: 2077 nxgep->rx_default_block_size = 0x2000; 2078 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2079 break; 2080 case 0x4000: 2081 nxgep->rx_default_block_size = 0x4000; 2082 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 2083 break; 2084 case 0x8000: 2085 nxgep->rx_default_block_size = 0x8000; 2086 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 2087 break; 2088 } 2089 2090 #ifndef USE_RX_BIG_BUF 2091 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 2092 #else 2093 nxgep->rx_default_block_size = 0x2000; 2094 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2095 #endif 2096 /* 2097 * Get the system DMA burst size. 2098 */ 2099 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2100 DDI_DMA_DONTWAIT, 0, 2101 &nxgep->dmasparehandle); 2102 if (ddi_status != DDI_SUCCESS) { 2103 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2104 "ddi_dma_alloc_handle: failed " 2105 " status 0x%x", ddi_status)); 2106 goto nxge_get_soft_properties_exit; 2107 } 2108 2109 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2110 (caddr_t)nxgep->dmasparehandle, 2111 sizeof (nxgep->dmasparehandle), 2112 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2113 DDI_DMA_DONTWAIT, 0, 2114 &cookie, &count); 2115 if (ddi_status != DDI_DMA_MAPPED) { 2116 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2117 "Binding spare handle to find system" 2118 " burstsize failed.")); 2119 ddi_status = DDI_FAILURE; 2120 goto nxge_get_soft_properties_fail1; 2121 } 2122 2123 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2124 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2125 2126 nxge_get_soft_properties_fail1: 2127 ddi_dma_free_handle(&nxgep->dmasparehandle); 2128 2129 nxge_get_soft_properties_exit: 2130 2131 if (ddi_status != DDI_SUCCESS) 2132 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2133 2134 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2135 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2136 return (status); 2137 } 2138 2139 static nxge_status_t 2140 nxge_alloc_mem_pool(p_nxge_t nxgep) 2141 { 2142 nxge_status_t status = NXGE_OK; 2143 2144 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2145 2146 status = nxge_alloc_rx_mem_pool(nxgep); 2147 if (status != NXGE_OK) { 2148 return (NXGE_ERROR); 2149 } 2150 2151 status = nxge_alloc_tx_mem_pool(nxgep); 2152 if (status != NXGE_OK) { 2153 nxge_free_rx_mem_pool(nxgep); 2154 return (NXGE_ERROR); 2155 } 2156 2157 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2158 return (NXGE_OK); 2159 } 2160 2161 static void 2162 nxge_free_mem_pool(p_nxge_t nxgep) 2163 { 2164 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2165 2166 nxge_free_rx_mem_pool(nxgep); 2167 
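	/* Release the transmit pools the same way. */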
nxge_free_tx_mem_pool(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}

nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
{
	uint32_t rdc_max;
	p_nxge_dma_pt_cfg_t p_all_cfgp;
	p_nxge_hw_pt_cfg_t p_cfgp;
	p_nxge_dma_pool_t dma_poolp;
	p_nxge_dma_common_t *dma_buf_p;
	p_nxge_dma_pool_t dma_cntl_poolp;
	p_nxge_dma_common_t *dma_cntl_p;
	uint32_t *num_chunks;	/* per dma */
	nxge_status_t status = NXGE_OK;

	uint32_t nxge_port_rbr_size;
	uint32_t nxge_port_rbr_spare_size;
	uint32_t nxge_port_rcr_size;
	uint32_t rx_cntl_alloc_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	rdc_max = NXGE_MAX_RDCS;

	/*
	 * Allocate memory for the common DMA data structures.
	 */
	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);

	dma_cntl_poolp = (p_nxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(
	    sizeof (uint32_t) * rdc_max, KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with
	 * the default block size.
	 * The RBR block count is rounded up to a multiple of the
	 * post batch count (16).
	 */
	nxge_port_rbr_size = p_all_cfgp->rbr_size;
	nxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!nxge_port_rbr_size) {
		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
	}
	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	p_all_cfgp->rbr_size = nxge_port_rbr_size;
	nxge_port_rbr_spare_size = nxge_rbr_spare_size;

	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
	}
	if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
		    "nxge_alloc_rx_mem_pool: RBR size too high %d, "
		    "set to default %d",
		    nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
		nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
	}
	if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
		    "nxge_alloc_rx_mem_pool: RCR too high %d, "
		    "set to default %d",
		    nxge_port_rcr_size, RCR_DEFAULT_MAX));
		nxge_port_rcr_size = RCR_DEFAULT_MAX;
	}

	/*
	 * The N2/NIU constrains the descriptor allocations: data buffers
	 * must come from contiguous memory (contig_mem_alloc), which is
	 * capped at 4M, while the little-endian control buffers must be
	 * allocated with the ddi/dki memory allocation functions.
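	 *
	 * Illustrative numbers only: with the default 4 KB receive
	 * block size, an RBR of 512 blocks needs 512 * 4 KB = 2 MB of
	 * contiguous buffer memory, which fits under the 4 MB
	 * contig_mem_alloc() ceiling; 2048 blocks (8 MB) would not,
	 * and the sun4v code below would clamp the ring instead.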
2255 */ 2256 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2257 if (nxgep->niu_type == N2_NIU) { 2258 nxge_port_rbr_spare_size = 0; 2259 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2260 (!ISP2(nxge_port_rbr_size))) { 2261 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2262 } 2263 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2264 (!ISP2(nxge_port_rcr_size))) { 2265 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2266 } 2267 } 2268 #endif 2269 2270 /* 2271 * Addresses of receive block ring, receive completion ring and the 2272 * mailbox must be all cache-aligned (64 bytes). 2273 */ 2274 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2275 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2276 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2277 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2278 2279 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2280 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2281 "nxge_port_rcr_size = %d " 2282 "rx_cntl_alloc_size = %d", 2283 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2284 nxge_port_rcr_size, 2285 rx_cntl_alloc_size)); 2286 2287 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2288 if (nxgep->niu_type == N2_NIU) { 2289 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2290 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2291 2292 if (!ISP2(rx_buf_alloc_size)) { 2293 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2294 "==> nxge_alloc_rx_mem_pool: " 2295 " must be power of 2")); 2296 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2297 goto nxge_alloc_rx_mem_pool_exit; 2298 } 2299 2300 if (rx_buf_alloc_size > (1 << 22)) { 2301 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2302 "==> nxge_alloc_rx_mem_pool: " 2303 " limit size to 4M")); 2304 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2305 goto nxge_alloc_rx_mem_pool_exit; 2306 } 2307 2308 if (rx_cntl_alloc_size < 0x2000) { 2309 rx_cntl_alloc_size = 0x2000; 2310 } 2311 } 2312 #endif 2313 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2314 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2315 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2316 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2317 2318 dma_poolp->ndmas = p_cfgp->max_rdcs; 2319 dma_poolp->num_chunks = num_chunks; 2320 dma_poolp->buf_allocated = B_TRUE; 2321 nxgep->rx_buf_pool_p = dma_poolp; 2322 dma_poolp->dma_buf_pool_p = dma_buf_p; 2323 2324 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2325 dma_cntl_poolp->buf_allocated = B_TRUE; 2326 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2327 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2328 2329 /* Allocate the receive rings, too. */ 2330 nxgep->rx_rbr_rings = 2331 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2332 nxgep->rx_rbr_rings->rbr_rings = 2333 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2334 nxgep->rx_rcr_rings = 2335 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2336 nxgep->rx_rcr_rings->rcr_rings = 2337 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2338 nxgep->rx_mbox_areas_p = 2339 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2340 nxgep->rx_mbox_areas_p->rxmbox_areas = 2341 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2342 2343 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2344 p_cfgp->max_rdcs; 2345 2346 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2347 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2348 2349 nxge_alloc_rx_mem_pool_exit: 2350 return (status); 2351 } 2352 2353 /* 2354 * nxge_alloc_rxb 2355 * 2356 * Allocate buffers for an RDC. 
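 *	(An RDC, or receive DMA channel, needs three allocations: the
 *	receive buffer blocks, the RBR/RCR descriptor rings, and the
 *	mailbox; the sizes used below were computed earlier by
 *	nxge_alloc_rx_mem_pool().)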
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map into our kernel space.
 *
 * Notes:
 *
 * NPI function calls:
 *
 * NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 *
 * Taking apart:
 *
 * Open questions:
 *
 */
nxge_status_t
nxge_alloc_rxb(
	p_nxge_t nxgep,
	int channel)
{
	size_t rx_buf_alloc_size;
	nxge_status_t status = NXGE_OK;

	nxge_dma_common_t **data;
	nxge_dma_common_t **control;
	uint32_t *num_chunks;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb"));

	/*
	 * Allocate memory for the receive buffers and descriptor rings.
	 * Replace these allocation functions with the interface functions
	 * provided by the partition manager if/when they are available.
	 */

	/*
	 * Allocate memory for the receive buffer blocks.
	 */
	rx_buf_alloc_size = (nxgep->rx_default_block_size *
	    (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));

	data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
	num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];

	if ((status = nxge_alloc_rx_buf_dma(
	    nxgep, channel, data, rx_buf_alloc_size,
	    nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): "
	    "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));

	/*
	 * Allocate memory for descriptor rings and mailbox.
	 */
	control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];

	if ((status = nxge_alloc_rx_cntl_dma(
	    nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size))
	    != NXGE_OK) {
		nxge_free_rx_cntl_dma(nxgep, *control);
		(*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE;
		nxge_free_rx_buf_dma(nxgep, *data, *num_chunks);
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rxb: status 0x%08x", status));

	return (status);
}

void
nxge_free_rxb(
	p_nxge_t nxgep,
	int channel)
{
	nxge_dma_common_t *data;
	nxge_dma_common_t *control;
	uint32_t num_chunks;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb"));

	data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
	num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
	nxge_free_rx_buf_dma(nxgep, data, num_chunks);

	nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0;
	nxgep->rx_buf_pool_p->num_chunks[channel] = 0;

	control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
	nxge_free_rx_cntl_dma(nxgep, control);

	nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0;

	KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
	KMEM_FREE(control, sizeof (nxge_dma_common_t));

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb"));
}

static void
nxge_free_rx_mem_pool(p_nxge_t nxgep)
{
	int rdc_max = NXGE_MAX_RDCS;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));

	if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_rx_mem_pool "
		    "(null rx buf pool or buf not allocated)"));
		return;
	}
	if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_rx_mem_pool 
" 2480 "(null rx cntl buf pool or cntl buf not allocated")); 2481 return; 2482 } 2483 2484 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2485 sizeof (p_nxge_dma_common_t) * rdc_max); 2486 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2487 2488 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2489 sizeof (uint32_t) * rdc_max); 2490 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2491 sizeof (p_nxge_dma_common_t) * rdc_max); 2492 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2493 2494 nxgep->rx_buf_pool_p = 0; 2495 nxgep->rx_cntl_pool_p = 0; 2496 2497 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2498 sizeof (p_rx_rbr_ring_t) * rdc_max); 2499 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2500 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2501 sizeof (p_rx_rcr_ring_t) * rdc_max); 2502 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2503 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2504 sizeof (p_rx_mbox_t) * rdc_max); 2505 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2506 2507 nxgep->rx_rbr_rings = 0; 2508 nxgep->rx_rcr_rings = 0; 2509 nxgep->rx_mbox_areas_p = 0; 2510 2511 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2512 } 2513 2514 2515 static nxge_status_t 2516 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2517 p_nxge_dma_common_t *dmap, 2518 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2519 { 2520 p_nxge_dma_common_t rx_dmap; 2521 nxge_status_t status = NXGE_OK; 2522 size_t total_alloc_size; 2523 size_t allocated = 0; 2524 int i, size_index, array_size; 2525 boolean_t use_kmem_alloc = B_FALSE; 2526 2527 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2528 2529 rx_dmap = (p_nxge_dma_common_t) 2530 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2531 KM_SLEEP); 2532 2533 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2534 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2535 dma_channel, alloc_size, block_size, dmap)); 2536 2537 total_alloc_size = alloc_size; 2538 2539 #if defined(RX_USE_RECLAIM_POST) 2540 total_alloc_size = alloc_size + alloc_size/4; 2541 #endif 2542 2543 i = 0; 2544 size_index = 0; 2545 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2546 while ((size_index < array_size) && 2547 (alloc_sizes[size_index] < alloc_size)) 2548 size_index++; 2549 if (size_index >= array_size) { 2550 size_index = array_size - 1; 2551 } 2552 2553 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2554 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2555 use_kmem_alloc = B_TRUE; 2556 #if defined(__i386) || defined(__amd64) 2557 size_index = 0; 2558 #endif 2559 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2560 "==> nxge_alloc_rx_buf_dma: " 2561 "Neptune use kmem_alloc() - size_index %d", 2562 size_index)); 2563 } 2564 2565 while ((allocated < total_alloc_size) && 2566 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2567 rx_dmap[i].dma_chunk_index = i; 2568 rx_dmap[i].block_size = block_size; 2569 rx_dmap[i].alength = alloc_sizes[size_index]; 2570 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2571 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2572 rx_dmap[i].dma_channel = dma_channel; 2573 rx_dmap[i].contig_alloc_type = B_FALSE; 2574 rx_dmap[i].kmem_alloc_type = B_FALSE; 2575 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2576 2577 /* 2578 * N2/NIU: data buffers must be contiguous as the driver 2579 * needs to call Hypervisor api to set up 2580 * logical pages. 
2581 */ 2582 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2583 rx_dmap[i].contig_alloc_type = B_TRUE; 2584 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2585 } else if (use_kmem_alloc) { 2586 /* For Neptune, use kmem_alloc */ 2587 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2588 "==> nxge_alloc_rx_buf_dma: " 2589 "Neptune use kmem_alloc()")); 2590 rx_dmap[i].kmem_alloc_type = B_TRUE; 2591 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2592 } 2593 2594 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2595 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2596 "i %d nblocks %d alength %d", 2597 dma_channel, i, &rx_dmap[i], block_size, 2598 i, rx_dmap[i].nblocks, 2599 rx_dmap[i].alength)); 2600 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2601 &nxge_rx_dma_attr, 2602 rx_dmap[i].alength, 2603 &nxge_dev_buf_dma_acc_attr, 2604 DDI_DMA_READ | DDI_DMA_STREAMING, 2605 (p_nxge_dma_common_t)(&rx_dmap[i])); 2606 if (status != NXGE_OK) { 2607 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2608 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2609 "dma %d size_index %d size requested %d", 2610 dma_channel, 2611 size_index, 2612 rx_dmap[i].alength)); 2613 size_index--; 2614 } else { 2615 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2616 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2617 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2618 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2619 "buf_alloc_state %d alloc_type %d", 2620 dma_channel, 2621 &rx_dmap[i], 2622 rx_dmap[i].kaddrp, 2623 rx_dmap[i].alength, 2624 rx_dmap[i].buf_alloc_state, 2625 rx_dmap[i].buf_alloc_type)); 2626 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2627 " alloc_rx_buf_dma allocated rdc %d " 2628 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2629 dma_channel, i, rx_dmap[i].alength, 2630 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2631 rx_dmap[i].kaddrp)); 2632 i++; 2633 allocated += alloc_sizes[size_index]; 2634 } 2635 } 2636 2637 if (allocated < total_alloc_size) { 2638 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2639 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2640 "allocated 0x%x requested 0x%x", 2641 dma_channel, 2642 allocated, total_alloc_size)); 2643 status = NXGE_ERROR; 2644 goto nxge_alloc_rx_mem_fail1; 2645 } 2646 2647 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2648 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2649 "allocated 0x%x requested 0x%x", 2650 dma_channel, 2651 allocated, total_alloc_size)); 2652 2653 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2654 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2655 dma_channel, i)); 2656 *num_chunks = i; 2657 *dmap = rx_dmap; 2658 2659 goto nxge_alloc_rx_mem_exit; 2660 2661 nxge_alloc_rx_mem_fail1: 2662 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2663 2664 nxge_alloc_rx_mem_exit: 2665 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2666 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2667 2668 return (status); 2669 } 2670 2671 /*ARGSUSED*/ 2672 static void 2673 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2674 uint32_t num_chunks) 2675 { 2676 int i; 2677 2678 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2679 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2680 2681 if (dmap == 0) 2682 return; 2683 2684 for (i = 0; i < num_chunks; i++) { 2685 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2686 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2687 i, dmap)); 2688 nxge_dma_free_rx_data_buf(dmap++); 2689 } 2690 2691 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2692 } 2693 2694 /*ARGSUSED*/ 2695 static nxge_status_t 2696 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2697 p_nxge_dma_common_t *dmap, size_t 
    size)
{
	p_nxge_dma_common_t rx_dmap;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));

	rx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);

	rx_dmap->contig_alloc_type = B_FALSE;
	rx_dmap->kmem_alloc_type = B_FALSE;

	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
	    &nxge_desc_dma_attr,
	    size,
	    &nxge_dev_desc_dma_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    rx_dmap);
	if (status != NXGE_OK) {
		goto nxge_alloc_rx_cntl_dma_fail1;
	}

	*dmap = rx_dmap;
	goto nxge_alloc_rx_cntl_dma_exit;

nxge_alloc_rx_cntl_dma_fail1:
	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));

nxge_alloc_rx_cntl_dma_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));

	return (status);
}

/*ARGSUSED*/
static void
nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
{
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));

	if (dmap == 0)
		return;

	nxge_dma_mem_free(dmap);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
}

typedef struct {
	size_t tx_size;
	size_t cr_size;
	size_t threshhold;
} nxge_tdc_sizes_t;

static
nxge_status_t
nxge_tdc_sizes(
	nxge_t *nxgep,
	nxge_tdc_sizes_t *sizes)
{
	uint32_t threshhold;	/* The bcopy() threshhold */
	size_t tx_size;		/* Transmit buffer size */
	size_t cr_size;		/* Completion ring size */

	/*
	 * Assume that each DMA channel will be configured with the
	 * default transmit buffer size for copying transmit data.
	 * (If a packet is bigger than this, it will not be copied.)
	 */
	if (nxgep->niu_type == N2_NIU) {
		threshhold = TX_BCOPY_SIZE;
	} else {
		threshhold = nxge_bcopy_thresh;
	}
	tx_size = nxge_tx_ring_size * threshhold;

	cr_size = nxge_tx_ring_size * sizeof (tx_desc_t);
	cr_size += sizeof (txdma_mailbox_t);

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		if (!ISP2(tx_size)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_tdc_sizes: Tx size"
			    " must be power of 2"));
			return (NXGE_ERROR);
		}

		if (tx_size > (1 << 22)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_tdc_sizes: Tx size"
			    " limited to 4M"));
			return (NXGE_ERROR);
		}

		if (cr_size < 0x2000)
			cr_size = 0x2000;
	}
#endif

	sizes->threshhold = threshhold;
	sizes->tx_size = tx_size;
	sizes->cr_size = cr_size;

	return (NXGE_OK);
}
/*
 * nxge_alloc_txb
 *
 * 	Allocate buffers for a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map into our kernel space.
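 *
 *	(A TDC, or transmit DMA channel, is sized by nxge_tdc_sizes()
 *	above: tx_size bytes of bcopy() staging buffers plus cr_size
 *	bytes for the descriptor ring and its mailbox.)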
 *
 * Notes:
 *
 * NPI function calls:
 *
 * NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 *
 * Taking apart:
 *
 * Open questions:
 *
 */
nxge_status_t
nxge_alloc_txb(
	p_nxge_t nxgep,
	int channel)
{
	nxge_dma_common_t **dma_buf_p;
	nxge_dma_common_t **dma_cntl_p;
	uint32_t *num_chunks;
	nxge_status_t status = NXGE_OK;

	nxge_tdc_sizes_t sizes;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb"));

	if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK)
		return (NXGE_ERROR);

	/*
	 * Allocate memory for transmit buffers and descriptor rings.
	 * Replace these allocation functions with the interface functions
	 * provided by the partition manager if/when they are available.
	 */
	dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
	num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel];

	dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];

	/*
	 * Allocate memory for the transmit buffer pool.
	 */
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "sizes: tx: %ld, cr:%ld, th:%ld",
	    sizes.tx_size, sizes.cr_size, sizes.threshhold));

	*num_chunks = 0;
	status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p,
	    sizes.tx_size, sizes.threshhold, num_chunks);
	if (status != NXGE_OK) {
		cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!");
		return (status);
	}

	/*
	 * Allocate memory for descriptor rings and mailbox.
	 */
	status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p,
	    sizes.cr_size);
	if (status != NXGE_OK) {
		nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks);
		cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!");
		return (status);
	}

	return (NXGE_OK);
}

void
nxge_free_txb(
	p_nxge_t nxgep,
	int channel)
{
	nxge_dma_common_t *data;
	nxge_dma_common_t *control;
	uint32_t num_chunks;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb"));

	data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
	num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
	nxge_free_tx_buf_dma(nxgep, data, num_chunks);

	nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0;
	nxgep->tx_buf_pool_p->num_chunks[channel] = 0;

	control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
	nxge_free_tx_cntl_dma(nxgep, control);

	nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0;

	KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
	KMEM_FREE(control, sizeof (nxge_dma_common_t));

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb"));
}

/*
 * nxge_alloc_tx_mem_pool
 *
 * This function allocates all of the per-port TDC control data structures.
 * The per-channel (TDC) data structures are allocated when needed.
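 * (The per-channel allocations are performed later by nxge_alloc_txb(),
 * when a channel is actually set up.)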
2923 * 2924 * Arguments: 2925 * nxgep 2926 * 2927 * Notes: 2928 * 2929 * Context: 2930 * Any domain 2931 */ 2932 nxge_status_t 2933 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2934 { 2935 nxge_hw_pt_cfg_t *p_cfgp; 2936 nxge_dma_pool_t *dma_poolp; 2937 nxge_dma_common_t **dma_buf_p; 2938 nxge_dma_pool_t *dma_cntl_poolp; 2939 nxge_dma_common_t **dma_cntl_p; 2940 uint32_t *num_chunks; /* per dma */ 2941 int tdc_max; 2942 2943 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2944 2945 p_cfgp = &nxgep->pt_config.hw_config; 2946 tdc_max = NXGE_MAX_TDCS; 2947 2948 /* 2949 * Allocate memory for each transmit DMA channel. 2950 */ 2951 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2952 KM_SLEEP); 2953 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2954 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2955 2956 dma_cntl_poolp = (p_nxge_dma_pool_t) 2957 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2958 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2959 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2960 2961 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2962 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2963 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2964 "set to default %d", 2965 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2966 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2967 } 2968 2969 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2970 /* 2971 * N2/NIU has limitation on the descriptor sizes (contiguous 2972 * memory allocation on data buffers to 4M (contig_mem_alloc) 2973 * and little endian for control buffers (must use the ddi/dki mem alloc 2974 * function). The transmit ring is limited to 8K (includes the 2975 * mailbox). 2976 */ 2977 if (nxgep->niu_type == N2_NIU) { 2978 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2979 (!ISP2(nxge_tx_ring_size))) { 2980 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2981 } 2982 } 2983 #endif 2984 2985 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2986 2987 num_chunks = (uint32_t *)KMEM_ZALLOC( 2988 sizeof (uint32_t) * tdc_max, KM_SLEEP); 2989 2990 dma_poolp->ndmas = p_cfgp->tdc.owned; 2991 dma_poolp->num_chunks = num_chunks; 2992 dma_poolp->dma_buf_pool_p = dma_buf_p; 2993 nxgep->tx_buf_pool_p = dma_poolp; 2994 2995 dma_poolp->buf_allocated = B_TRUE; 2996 2997 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 2998 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2999 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 3000 3001 dma_cntl_poolp->buf_allocated = B_TRUE; 3002 3003 nxgep->tx_rings = 3004 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 3005 nxgep->tx_rings->rings = 3006 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 3007 nxgep->tx_mbox_areas_p = 3008 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 3009 nxgep->tx_mbox_areas_p->txmbox_areas_p = 3010 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 3011 3012 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 3013 3014 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3015 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 3016 tdc_max, dma_poolp->ndmas)); 3017 3018 return (NXGE_OK); 3019 } 3020 3021 nxge_status_t 3022 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 3023 p_nxge_dma_common_t *dmap, size_t alloc_size, 3024 size_t block_size, uint32_t *num_chunks) 3025 { 3026 p_nxge_dma_common_t tx_dmap; 3027 nxge_status_t status = NXGE_OK; 3028 size_t total_alloc_size; 3029 size_t allocated = 0; 3030 int i, size_index, array_size; 3031 3032 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 3033 3034 tx_dmap = (p_nxge_dma_common_t) 3035 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * 
NXGE_DMA_BLOCK, 3036 KM_SLEEP); 3037 3038 total_alloc_size = alloc_size; 3039 i = 0; 3040 size_index = 0; 3041 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3042 while ((size_index < array_size) && 3043 (alloc_sizes[size_index] < alloc_size)) 3044 size_index++; 3045 if (size_index >= array_size) { 3046 size_index = array_size - 1; 3047 } 3048 3049 while ((allocated < total_alloc_size) && 3050 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3051 3052 tx_dmap[i].dma_chunk_index = i; 3053 tx_dmap[i].block_size = block_size; 3054 tx_dmap[i].alength = alloc_sizes[size_index]; 3055 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3056 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3057 tx_dmap[i].dma_channel = dma_channel; 3058 tx_dmap[i].contig_alloc_type = B_FALSE; 3059 tx_dmap[i].kmem_alloc_type = B_FALSE; 3060 3061 /* 3062 * N2/NIU: data buffers must be contiguous as the driver 3063 * needs to call Hypervisor api to set up 3064 * logical pages. 3065 */ 3066 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3067 tx_dmap[i].contig_alloc_type = B_TRUE; 3068 } 3069 3070 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3071 &nxge_tx_dma_attr, 3072 tx_dmap[i].alength, 3073 &nxge_dev_buf_dma_acc_attr, 3074 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3075 (p_nxge_dma_common_t)(&tx_dmap[i])); 3076 if (status != NXGE_OK) { 3077 size_index--; 3078 } else { 3079 i++; 3080 allocated += alloc_sizes[size_index]; 3081 } 3082 } 3083 3084 if (allocated < total_alloc_size) { 3085 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3086 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 3087 "allocated 0x%x requested 0x%x", 3088 dma_channel, 3089 allocated, total_alloc_size)); 3090 status = NXGE_ERROR; 3091 goto nxge_alloc_tx_mem_fail1; 3092 } 3093 3094 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3095 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3096 "allocated 0x%x requested 0x%x", 3097 dma_channel, 3098 allocated, total_alloc_size)); 3099 3100 *num_chunks = i; 3101 *dmap = tx_dmap; 3102 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3103 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3104 *dmap, i)); 3105 goto nxge_alloc_tx_mem_exit; 3106 3107 nxge_alloc_tx_mem_fail1: 3108 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3109 3110 nxge_alloc_tx_mem_exit: 3111 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3112 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3113 3114 return (status); 3115 } 3116 3117 /*ARGSUSED*/ 3118 static void 3119 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3120 uint32_t num_chunks) 3121 { 3122 int i; 3123 3124 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3125 3126 if (dmap == 0) 3127 return; 3128 3129 for (i = 0; i < num_chunks; i++) { 3130 nxge_dma_mem_free(dmap++); 3131 } 3132 3133 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3134 } 3135 3136 /*ARGSUSED*/ 3137 nxge_status_t 3138 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3139 p_nxge_dma_common_t *dmap, size_t size) 3140 { 3141 p_nxge_dma_common_t tx_dmap; 3142 nxge_status_t status = NXGE_OK; 3143 3144 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3145 tx_dmap = (p_nxge_dma_common_t) 3146 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3147 3148 tx_dmap->contig_alloc_type = B_FALSE; 3149 tx_dmap->kmem_alloc_type = B_FALSE; 3150 3151 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3152 &nxge_desc_dma_attr, 3153 size, 3154 &nxge_dev_desc_dma_acc_attr, 3155 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3156 tx_dmap); 3157 if (status != NXGE_OK) { 3158 
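		/*
		 * DMA memory allocation failed; tx_dmap is released
		 * at the fail label below.
		 */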
goto nxge_alloc_tx_cntl_dma_fail1; 3159 } 3160 3161 *dmap = tx_dmap; 3162 goto nxge_alloc_tx_cntl_dma_exit; 3163 3164 nxge_alloc_tx_cntl_dma_fail1: 3165 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3166 3167 nxge_alloc_tx_cntl_dma_exit: 3168 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3169 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3170 3171 return (status); 3172 } 3173 3174 /*ARGSUSED*/ 3175 static void 3176 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3177 { 3178 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3179 3180 if (dmap == 0) 3181 return; 3182 3183 nxge_dma_mem_free(dmap); 3184 3185 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3186 } 3187 3188 /* 3189 * nxge_free_tx_mem_pool 3190 * 3191 * This function frees all of the per-port TDC control data structures. 3192 * The per-channel (TDC) data structures are freed when the channel 3193 * is stopped. 3194 * 3195 * Arguments: 3196 * nxgep 3197 * 3198 * Notes: 3199 * 3200 * Context: 3201 * Any domain 3202 */ 3203 static void 3204 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3205 { 3206 int tdc_max = NXGE_MAX_TDCS; 3207 3208 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3209 3210 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3211 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3212 "<== nxge_free_tx_mem_pool " 3213 "(null tx buf pool or buf not allocated")); 3214 return; 3215 } 3216 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3217 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3218 "<== nxge_free_tx_mem_pool " 3219 "(null tx cntl buf pool or cntl buf not allocated")); 3220 return; 3221 } 3222 3223 /* 1. Free the mailboxes. */ 3224 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3225 sizeof (p_tx_mbox_t) * tdc_max); 3226 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3227 3228 nxgep->tx_mbox_areas_p = 0; 3229 3230 /* 2. Free the transmit ring arrays. */ 3231 KMEM_FREE(nxgep->tx_rings->rings, 3232 sizeof (p_tx_ring_t) * tdc_max); 3233 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3234 3235 nxgep->tx_rings = 0; 3236 3237 /* 3. Free the completion ring data structures. */ 3238 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3239 sizeof (p_nxge_dma_common_t) * tdc_max); 3240 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3241 3242 nxgep->tx_cntl_pool_p = 0; 3243 3244 /* 4. Free the data ring data structures. */ 3245 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3246 sizeof (uint32_t) * tdc_max); 3247 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3248 sizeof (p_nxge_dma_common_t) * tdc_max); 3249 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3250 3251 nxgep->tx_buf_pool_p = 0; 3252 3253 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3254 } 3255 3256 /*ARGSUSED*/ 3257 static nxge_status_t 3258 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3259 struct ddi_dma_attr *dma_attrp, 3260 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3261 p_nxge_dma_common_t dma_p) 3262 { 3263 caddr_t kaddrp; 3264 int ddi_status = DDI_SUCCESS; 3265 boolean_t contig_alloc_type; 3266 boolean_t kmem_alloc_type; 3267 3268 contig_alloc_type = dma_p->contig_alloc_type; 3269 3270 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3271 /* 3272 * contig_alloc_type for contiguous memory only allowed 3273 * for N2/NIU. 
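	 * (contig_mem_alloc() is provided by sun4v only, so a request
	 * for contiguous memory on any other platform is rejected here.)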
3274 */ 3275 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3276 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3277 dma_p->contig_alloc_type)); 3278 return (NXGE_ERROR | NXGE_DDI_FAILED); 3279 } 3280 3281 dma_p->dma_handle = NULL; 3282 dma_p->acc_handle = NULL; 3283 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3284 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3285 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3286 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3287 if (ddi_status != DDI_SUCCESS) { 3288 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3289 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3290 return (NXGE_ERROR | NXGE_DDI_FAILED); 3291 } 3292 3293 kmem_alloc_type = dma_p->kmem_alloc_type; 3294 3295 switch (contig_alloc_type) { 3296 case B_FALSE: 3297 switch (kmem_alloc_type) { 3298 case B_FALSE: 3299 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3300 length, 3301 acc_attr_p, 3302 xfer_flags, 3303 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3304 &dma_p->acc_handle); 3305 if (ddi_status != DDI_SUCCESS) { 3306 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3307 "nxge_dma_mem_alloc: " 3308 "ddi_dma_mem_alloc failed")); 3309 ddi_dma_free_handle(&dma_p->dma_handle); 3310 dma_p->dma_handle = NULL; 3311 return (NXGE_ERROR | NXGE_DDI_FAILED); 3312 } 3313 if (dma_p->alength < length) { 3314 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3315 "nxge_dma_mem_alloc:di_dma_mem_alloc " 3316 "< length.")); 3317 ddi_dma_mem_free(&dma_p->acc_handle); 3318 ddi_dma_free_handle(&dma_p->dma_handle); 3319 dma_p->acc_handle = NULL; 3320 dma_p->dma_handle = NULL; 3321 return (NXGE_ERROR); 3322 } 3323 3324 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3325 NULL, 3326 kaddrp, dma_p->alength, xfer_flags, 3327 DDI_DMA_DONTWAIT, 3328 0, &dma_p->dma_cookie, &dma_p->ncookies); 3329 if (ddi_status != DDI_DMA_MAPPED) { 3330 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3331 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3332 "failed " 3333 "(staus 0x%x ncookies %d.)", ddi_status, 3334 dma_p->ncookies)); 3335 if (dma_p->acc_handle) { 3336 ddi_dma_mem_free(&dma_p->acc_handle); 3337 dma_p->acc_handle = NULL; 3338 } 3339 ddi_dma_free_handle(&dma_p->dma_handle); 3340 dma_p->dma_handle = NULL; 3341 return (NXGE_ERROR | NXGE_DDI_FAILED); 3342 } 3343 3344 if (dma_p->ncookies != 1) { 3345 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3346 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3347 "> 1 cookie" 3348 "(staus 0x%x ncookies %d.)", ddi_status, 3349 dma_p->ncookies)); 3350 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3351 if (dma_p->acc_handle) { 3352 ddi_dma_mem_free(&dma_p->acc_handle); 3353 dma_p->acc_handle = NULL; 3354 } 3355 ddi_dma_free_handle(&dma_p->dma_handle); 3356 dma_p->dma_handle = NULL; 3357 dma_p->acc_handle = NULL; 3358 return (NXGE_ERROR); 3359 } 3360 break; 3361 3362 case B_TRUE: 3363 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3364 if (kaddrp == NULL) { 3365 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3366 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3367 "kmem alloc failed")); 3368 return (NXGE_ERROR); 3369 } 3370 3371 dma_p->alength = length; 3372 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3373 NULL, kaddrp, dma_p->alength, xfer_flags, 3374 DDI_DMA_DONTWAIT, 0, 3375 &dma_p->dma_cookie, &dma_p->ncookies); 3376 if (ddi_status != DDI_DMA_MAPPED) { 3377 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3378 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3379 "(kmem_alloc) failed kaddrp $%p length %d " 3380 "(staus 0x%x (%d) ncookies %d.)", 3381 kaddrp, length, 3382 ddi_status, ddi_status, dma_p->ncookies)); 3383 KMEM_FREE(kaddrp, length); 3384 
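				/*
				 * Clear the stale handles so that a later
				 * nxge_dma_mem_free() will not touch them.
				 */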
dma_p->acc_handle = NULL; 3385 ddi_dma_free_handle(&dma_p->dma_handle); 3386 dma_p->dma_handle = NULL; 3387 dma_p->kaddrp = NULL; 3388 return (NXGE_ERROR | NXGE_DDI_FAILED); 3389 } 3390 3391 if (dma_p->ncookies != 1) { 3392 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3393 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3394 "(kmem_alloc) > 1 cookie" 3395 "(staus 0x%x ncookies %d.)", ddi_status, 3396 dma_p->ncookies)); 3397 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3398 KMEM_FREE(kaddrp, length); 3399 ddi_dma_free_handle(&dma_p->dma_handle); 3400 dma_p->dma_handle = NULL; 3401 dma_p->acc_handle = NULL; 3402 dma_p->kaddrp = NULL; 3403 return (NXGE_ERROR); 3404 } 3405 3406 dma_p->kaddrp = kaddrp; 3407 3408 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3409 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3410 "kaddr $%p alength %d", 3411 dma_p, 3412 kaddrp, 3413 dma_p->alength)); 3414 break; 3415 } 3416 break; 3417 3418 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3419 case B_TRUE: 3420 kaddrp = (caddr_t)contig_mem_alloc(length); 3421 if (kaddrp == NULL) { 3422 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3423 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3424 ddi_dma_free_handle(&dma_p->dma_handle); 3425 return (NXGE_ERROR | NXGE_DDI_FAILED); 3426 } 3427 3428 dma_p->alength = length; 3429 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3430 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3431 &dma_p->dma_cookie, &dma_p->ncookies); 3432 if (ddi_status != DDI_DMA_MAPPED) { 3433 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3434 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 3435 "(status 0x%x ncookies %d.)", ddi_status, 3436 dma_p->ncookies)); 3437 3438 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3439 "==> nxge_dma_mem_alloc: (not mapped)" 3440 "length %lu (0x%x) " 3441 "free contig kaddrp $%p " 3442 "va_to_pa $%p", 3443 length, length, 3444 kaddrp, 3445 va_to_pa(kaddrp))); 3446 3447 3448 contig_mem_free((void *)kaddrp, length); 3449 ddi_dma_free_handle(&dma_p->dma_handle); 3450 3451 dma_p->dma_handle = NULL; 3452 dma_p->acc_handle = NULL; 3453 dma_p->alength = NULL; 3454 dma_p->kaddrp = NULL; 3455 3456 return (NXGE_ERROR | NXGE_DDI_FAILED); 3457 } 3458 3459 if (dma_p->ncookies != 1 || 3460 (dma_p->dma_cookie.dmac_laddress == NULL)) { 3461 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3462 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 3463 "cookie or " 3464 "dmac_laddress is NULL $%p size %d " 3465 " (status 0x%x ncookies %d.)", 3466 ddi_status, 3467 dma_p->dma_cookie.dmac_laddress, 3468 dma_p->dma_cookie.dmac_size, 3469 dma_p->ncookies)); 3470 3471 contig_mem_free((void *)kaddrp, length); 3472 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3473 ddi_dma_free_handle(&dma_p->dma_handle); 3474 3475 dma_p->alength = 0; 3476 dma_p->dma_handle = NULL; 3477 dma_p->acc_handle = NULL; 3478 dma_p->kaddrp = NULL; 3479 3480 return (NXGE_ERROR | NXGE_DDI_FAILED); 3481 } 3482 break; 3483 3484 #else 3485 case B_TRUE: 3486 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3487 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 3488 return (NXGE_ERROR | NXGE_DDI_FAILED); 3489 #endif 3490 } 3491 3492 dma_p->kaddrp = kaddrp; 3493 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3494 dma_p->alength - RXBUF_64B_ALIGNED; 3495 #if defined(__i386) 3496 dma_p->ioaddr_pp = 3497 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 3498 #else 3499 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3500 #endif 3501 dma_p->last_ioaddr_pp = 3502 #if defined(__i386) 3503 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress + 3504 #else 3505 
(unsigned char *)dma_p->dma_cookie.dmac_laddress + 3506 #endif 3507 dma_p->alength - RXBUF_64B_ALIGNED; 3508 3509 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 3510 3511 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3512 dma_p->orig_ioaddr_pp = 3513 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3514 dma_p->orig_alength = length; 3515 dma_p->orig_kaddrp = kaddrp; 3516 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 3517 #endif 3518 3519 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 3520 "dma buffer allocated: dma_p $%p " 3521 "return dmac_ladress from cookie $%p cookie dmac_size %d " 3522 "dma_p->ioaddr_p $%p " 3523 "dma_p->orig_ioaddr_p $%p " 3524 "orig_vatopa $%p " 3525 "alength %d (0x%x) " 3526 "kaddrp $%p " 3527 "length %d (0x%x)", 3528 dma_p, 3529 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 3530 dma_p->ioaddr_pp, 3531 dma_p->orig_ioaddr_pp, 3532 dma_p->orig_vatopa, 3533 dma_p->alength, dma_p->alength, 3534 kaddrp, 3535 length, length)); 3536 3537 return (NXGE_OK); 3538 } 3539 3540 static void 3541 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 3542 { 3543 if (dma_p->dma_handle != NULL) { 3544 if (dma_p->ncookies) { 3545 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3546 dma_p->ncookies = 0; 3547 } 3548 ddi_dma_free_handle(&dma_p->dma_handle); 3549 dma_p->dma_handle = NULL; 3550 } 3551 3552 if (dma_p->acc_handle != NULL) { 3553 ddi_dma_mem_free(&dma_p->acc_handle); 3554 dma_p->acc_handle = NULL; 3555 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3556 } 3557 3558 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3559 if (dma_p->contig_alloc_type && 3560 dma_p->orig_kaddrp && dma_p->orig_alength) { 3561 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3562 "kaddrp $%p (orig_kaddrp $%p)" 3563 "mem type %d ", 3564 "orig_alength %d " 3565 "alength 0x%x (%d)", 3566 dma_p->kaddrp, 3567 dma_p->orig_kaddrp, 3568 dma_p->contig_alloc_type, 3569 dma_p->orig_alength, 3570 dma_p->alength, dma_p->alength)); 3571 3572 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 3573 dma_p->orig_alength = NULL; 3574 dma_p->orig_kaddrp = NULL; 3575 dma_p->contig_alloc_type = B_FALSE; 3576 } 3577 #endif 3578 dma_p->kaddrp = NULL; 3579 dma_p->alength = NULL; 3580 } 3581 3582 static void 3583 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p) 3584 { 3585 uint64_t kaddr; 3586 uint32_t buf_size; 3587 3588 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf")); 3589 3590 if (dma_p->dma_handle != NULL) { 3591 if (dma_p->ncookies) { 3592 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3593 dma_p->ncookies = 0; 3594 } 3595 ddi_dma_free_handle(&dma_p->dma_handle); 3596 dma_p->dma_handle = NULL; 3597 } 3598 3599 if (dma_p->acc_handle != NULL) { 3600 ddi_dma_mem_free(&dma_p->acc_handle); 3601 dma_p->acc_handle = NULL; 3602 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3603 } 3604 3605 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3606 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d", 3607 dma_p, 3608 dma_p->buf_alloc_state)); 3609 3610 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) { 3611 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3612 "<== nxge_dma_free_rx_data_buf: " 3613 "outstanding data buffers")); 3614 return; 3615 } 3616 3617 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3618 if (dma_p->contig_alloc_type && 3619 dma_p->orig_kaddrp && dma_p->orig_alength) { 3620 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: " 3621 "kaddrp $%p (orig_kaddrp $%p)" 3622 "mem type %d ", 3623 "orig_alength %d " 3624 "alength 0x%x (%d)", 3625 dma_p->kaddrp, 3626 dma_p->orig_kaddrp, 
		    dma_p->contig_alloc_type,
		    dma_p->orig_alength,
		    dma_p->alength, dma_p->alength));

		kaddr = (uint64_t)dma_p->orig_kaddrp;
		buf_size = dma_p->orig_alength;
		nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
		dma_p->orig_alength = 0;
		dma_p->orig_kaddrp = NULL;
		dma_p->contig_alloc_type = B_FALSE;
		dma_p->kaddrp = NULL;
		dma_p->alength = 0;
		return;
	}
#endif

	if (dma_p->kmem_alloc_type) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "nxge_dma_free_rx_data_buf: free kmem "
		    "kaddrp $%p (orig_kaddrp $%p)"
		    "alloc type %d "
		    "orig_alength %d "
		    "alength 0x%x (%d)",
		    dma_p->kaddrp,
		    dma_p->orig_kaddrp,
		    dma_p->kmem_alloc_type,
		    dma_p->orig_alength,
		    dma_p->alength, dma_p->alength));
#if defined(__i386)
		kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
#else
		kaddr = (uint64_t)dma_p->kaddrp;
#endif
		buf_size = dma_p->orig_alength;
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "nxge_dma_free_rx_data_buf: free dmap $%p "
		    "kaddr $%p buf_size %d",
		    dma_p,
		    kaddr, buf_size));
		nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
		dma_p->alength = 0;
		dma_p->orig_alength = 0;
		dma_p->kaddrp = NULL;
		dma_p->kmem_alloc_type = B_FALSE;
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
}

/*
 * nxge_m_start() -- start transmitting and receiving.
 *
 * This function is called by the MAC layer when the first
 * stream is opened, to prepare the hardware for transmitting
 * and receiving packets.
 */
static int
nxge_m_start(void *arg)
{
	p_nxge_t nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));

	if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	MUTEX_ENTER(nxgep->genlock);
	if (nxge_init(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_start: initialization failed"));
		MUTEX_EXIT(nxgep->genlock);
		return (EIO);
	}

	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
		goto nxge_m_start_exit;
	/*
	 * Start the timer that checks for system errors and tx hangs.
	 */
	if (!isLDOMguest(nxgep))
		nxgep->nxge_timerid = nxge_start_timer(nxgep,
		    nxge_check_hw_state, NXGE_CHECK_TIMER);
#if defined(sun4v)
	else
		nxge_hio_start_timer(nxgep);
#endif

	nxgep->link_notify = B_TRUE;

	nxgep->nxge_mac_state = NXGE_MAC_STARTED;

nxge_m_start_exit:
	MUTEX_EXIT(nxgep->genlock);
	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
	return (0);
}

static boolean_t
nxge_check_groups_stopped(p_nxge_t nxgep)
{
	int i;

	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
		if (nxgep->rx_hio_groups[i].started)
			return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * nxge_m_stop(): stop transmitting and receiving.
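 *
 * All RDC groups must already have been stopped, as checked by
 * nxge_check_groups_stopped() above; otherwise this function logs
 * a warning and returns without stopping the hardware.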
3741 */ 3742 static void 3743 nxge_m_stop(void *arg) 3744 { 3745 p_nxge_t nxgep = (p_nxge_t)arg; 3746 boolean_t groups_stopped; 3747 3748 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3749 3750 groups_stopped = nxge_check_groups_stopped(nxgep); 3751 #ifdef later 3752 ASSERT(groups_stopped == B_FALSE); 3753 #endif 3754 3755 if (!groups_stopped) { 3756 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n", 3757 nxgep->instance); 3758 return; 3759 } 3760 3761 MUTEX_ENTER(nxgep->genlock); 3762 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3763 3764 if (nxgep->nxge_timerid) { 3765 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3766 nxgep->nxge_timerid = 0; 3767 } 3768 3769 nxge_uninit(nxgep); 3770 3771 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3772 3773 MUTEX_EXIT(nxgep->genlock); 3774 3775 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3776 } 3777 3778 static int 3779 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3780 { 3781 p_nxge_t nxgep = (p_nxge_t)arg; 3782 struct ether_addr addrp; 3783 3784 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3785 "==> nxge_m_multicst: add %d", add)); 3786 3787 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3788 if (add) { 3789 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3790 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3791 "<== nxge_m_multicst: add multicast failed")); 3792 return (EINVAL); 3793 } 3794 } else { 3795 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3796 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3797 "<== nxge_m_multicst: del multicast failed")); 3798 return (EINVAL); 3799 } 3800 } 3801 3802 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3803 3804 return (0); 3805 } 3806 3807 static int 3808 nxge_m_promisc(void *arg, boolean_t on) 3809 { 3810 p_nxge_t nxgep = (p_nxge_t)arg; 3811 3812 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3813 "==> nxge_m_promisc: on %d", on)); 3814 3815 if (nxge_set_promisc(nxgep, on)) { 3816 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3817 "<== nxge_m_promisc: set promisc failed")); 3818 return (EINVAL); 3819 } 3820 3821 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3822 "<== nxge_m_promisc: on %d", on)); 3823 3824 return (0); 3825 } 3826 3827 static void 3828 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3829 { 3830 p_nxge_t nxgep = (p_nxge_t)arg; 3831 struct iocblk *iocp; 3832 boolean_t need_privilege; 3833 int err; 3834 int cmd; 3835 3836 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3837 3838 iocp = (struct iocblk *)mp->b_rptr; 3839 iocp->ioc_error = 0; 3840 need_privilege = B_TRUE; 3841 cmd = iocp->ioc_cmd; 3842 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3843 switch (cmd) { 3844 default: 3845 miocnak(wq, mp, 0, EINVAL); 3846 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3847 return; 3848 3849 case LB_GET_INFO_SIZE: 3850 case LB_GET_INFO: 3851 case LB_GET_MODE: 3852 need_privilege = B_FALSE; 3853 break; 3854 case LB_SET_MODE: 3855 break; 3856 3857 3858 case NXGE_GET_MII: 3859 case NXGE_PUT_MII: 3860 case NXGE_GET64: 3861 case NXGE_PUT64: 3862 case NXGE_GET_TX_RING_SZ: 3863 case NXGE_GET_TX_DESC: 3864 case NXGE_TX_SIDE_RESET: 3865 case NXGE_RX_SIDE_RESET: 3866 case NXGE_GLOBAL_RESET: 3867 case NXGE_RESET_MAC: 3868 case NXGE_TX_REGS_DUMP: 3869 case NXGE_RX_REGS_DUMP: 3870 case NXGE_INT_REGS_DUMP: 3871 case NXGE_VIR_INT_REGS_DUMP: 3872 case NXGE_PUT_TCAM: 3873 case NXGE_GET_TCAM: 3874 case NXGE_RTRACE: 3875 case NXGE_RDUMP: 3876 3877 need_privilege = B_FALSE; 3878 break; 3879 case NXGE_INJECT_ERR: 3880 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3881 nxge_err_inject(nxgep, wq, mp); 3882 
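		/*
		 * NXGE_INJECT_ERR is handled entirely by nxge_err_inject()
		 * above; it is not dispatched again in the switch below.
		 */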
break; 3883 } 3884 3885 if (need_privilege) { 3886 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3887 if (err != 0) { 3888 miocnak(wq, mp, 0, err); 3889 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3890 "<== nxge_m_ioctl: no priv")); 3891 return; 3892 } 3893 } 3894 3895 switch (cmd) { 3896 3897 case LB_GET_MODE: 3898 case LB_SET_MODE: 3899 case LB_GET_INFO_SIZE: 3900 case LB_GET_INFO: 3901 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3902 break; 3903 3904 case NXGE_GET_MII: 3905 case NXGE_PUT_MII: 3906 case NXGE_PUT_TCAM: 3907 case NXGE_GET_TCAM: 3908 case NXGE_GET64: 3909 case NXGE_PUT64: 3910 case NXGE_GET_TX_RING_SZ: 3911 case NXGE_GET_TX_DESC: 3912 case NXGE_TX_SIDE_RESET: 3913 case NXGE_RX_SIDE_RESET: 3914 case NXGE_GLOBAL_RESET: 3915 case NXGE_RESET_MAC: 3916 case NXGE_TX_REGS_DUMP: 3917 case NXGE_RX_REGS_DUMP: 3918 case NXGE_INT_REGS_DUMP: 3919 case NXGE_VIR_INT_REGS_DUMP: 3920 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3921 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3922 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3923 break; 3924 } 3925 3926 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3927 } 3928 3929 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3930 3931 void 3932 nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory) 3933 { 3934 p_nxge_mmac_stats_t mmac_stats; 3935 int i; 3936 nxge_mmac_t *mmac_info; 3937 3938 mmac_info = &nxgep->nxge_mmac_info; 3939 3940 mmac_stats = &nxgep->statsp->mmac_stats; 3941 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 3942 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 3943 3944 for (i = 0; i < ETHERADDRL; i++) { 3945 if (factory) { 3946 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3947 = mmac_info->factory_mac_pool[slot][ 3948 (ETHERADDRL-1) - i]; 3949 } else { 3950 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3951 = mmac_info->mac_pool[slot].addr[ 3952 (ETHERADDRL - 1) - i]; 3953 } 3954 } 3955 } 3956 3957 /* 3958 * nxge_altmac_set() -- Set an alternate MAC address 3959 */ 3960 static int 3961 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot, 3962 int rdctbl, boolean_t usetbl) 3963 { 3964 uint8_t addrn; 3965 uint8_t portn; 3966 npi_mac_addr_t altmac; 3967 hostinfo_t mac_rdc; 3968 p_nxge_class_pt_cfg_t clscfgp; 3969 3970 3971 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 3972 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 3973 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 3974 3975 portn = nxgep->mac.portnum; 3976 addrn = (uint8_t)slot - 1; 3977 3978 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, 3979 nxgep->function_num, addrn, &altmac) != NPI_SUCCESS) 3980 return (EIO); 3981 3982 /* 3983 * Set the rdc table number for the host info entry 3984 * for this mac address slot. 3985 */ 3986 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 3987 mac_rdc.value = 0; 3988 if (usetbl) 3989 mac_rdc.bits.w0.rdc_tbl_num = rdctbl; 3990 else 3991 mac_rdc.bits.w0.rdc_tbl_num = 3992 clscfgp->mac_host_info[addrn].rdctbl; 3993 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 3994 3995 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 3996 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 3997 return (EIO); 3998 } 3999 4000 /* 4001 * Enable comparison with the alternate MAC address. 
 * While the first alternate addr is enabled by bit 1 of register
 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
 * accordingly before calling npi_mac_altaddr_entry.
 */
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (npi_mac_altaddr_enable(nxgep->npi_handle,
	    nxgep->function_num, addrn) != NPI_SUCCESS) {
		return (EIO);
	}

	return (0);
}

/*
 * nxge_m_mmac_add_g() - find an unused address slot, set the address
 * value to the one specified, enable the port to start filtering on
 * the new MAC address. Returns 0 on success.
 */
int
nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
    boolean_t usetbl)
{
	p_nxge_t nxgep = arg;
	int slot;
	nxge_mmac_t *mmac_info;
	int err;
	nxge_status_t status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (mmac_info->naddrfree == 0) {
		mutex_exit(nxgep->genlock);
		return (ENOSPC);
	}

	/*
	 * Search for the first available slot. Because naddrfree
	 * is not zero, we are guaranteed to find one.
	 * Each of the first two ports of Neptune has 16 alternate
	 * MAC slots but only the first 7 (of 15) slots have assigned factory
	 * MAC addresses. We first search among the slots without bundled
	 * factory MACs. If we fail to find one in that range, then we
	 * search the slots with bundled factory MACs. A factory MAC
	 * will be wasted while the slot is used with a user MAC address.
	 * But the slot could be used by a factory MAC again after calling
	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
	 */
	for (slot = 0; slot <= mmac_info->num_mmac; slot++) {
		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
			break;
	}

	ASSERT(slot <= mmac_info->num_mmac);

	if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl,
	    usetbl)) != 0) {
		mutex_exit(nxgep->genlock);
		return (err);
	}

	bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
	mmac_info->naddrfree--;
	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);

	mutex_exit(nxgep->genlock);
	return (0);
}

/*
 * Remove the specified mac address and update the hardware so that it
 * no longer filters the mac address.
 */
int
nxge_m_mmac_remove(void *arg, int slot)
{
	p_nxge_t nxgep = arg;
	nxge_mmac_t *mmac_info;
	uint8_t addrn;
	uint8_t portn;
	int err = 0;
	nxge_status_t status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized if _start() has
	 * not been called.
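	 * (The same lazy initialization is done in nxge_m_mmac_add_g()
	 * above.)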
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}

	portn = nxgep->mac.portnum;
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
		    == NPI_SUCCESS) {
			mmac_info->naddrfree++;
			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
			/*
			 * Regardless of whether the MAC we just stopped
			 * filtering is a user addr or a factory addr, we
			 * must set the MMAC_VENDOR_ADDR flag if this slot
			 * has an associated factory MAC to indicate that
			 * a factory MAC is available.
			 */
			if (slot <= mmac_info->num_factory_mmac) {
				mmac_info->mac_pool[slot].flags
				    |= MMAC_VENDOR_ADDR;
			}
			/*
			 * Clear mac_pool[slot].addr so that kstat shows 0
			 * alternate MAC address if the slot is not used.
			 * (But nxge_m_mmac_get returns the factory MAC even
			 * when the slot is not used!)
			 */
			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
		} else {
			err = EIO;
		}
	} else {
		err = EINVAL;
	}

	mutex_exit(nxgep->genlock);
	return (err);
}

/*
 * The callback to query all the factory addresses. naddr must be the same as
 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and
 * mcm_addr is the space allocated to keep all the addresses, whose size is
 * naddr * MAXMACADDRLEN.
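 * Note that only ETHERADDRL bytes of each MAXMACADDRLEN-sized entry are
 * filled in by the bcopy() below, and that factory_mac_pool[] is indexed
 * from 1, hence the "i + 1" in the copy loop.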
4170 */ 4171 static void 4172 nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr) 4173 { 4174 nxge_t *nxgep = arg; 4175 nxge_mmac_t *mmac_info; 4176 int i; 4177 4178 mutex_enter(nxgep->genlock); 4179 4180 mmac_info = &nxgep->nxge_mmac_info; 4181 ASSERT(naddr == mmac_info->num_factory_mmac); 4182 4183 for (i = 0; i < naddr; i++) { 4184 bcopy(mmac_info->factory_mac_pool[i + 1], 4185 addr + i * MAXMACADDRLEN, ETHERADDRL); 4186 } 4187 4188 mutex_exit(nxgep->genlock); 4189 } 4190 4191 4192 static boolean_t 4193 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4194 { 4195 nxge_t *nxgep = arg; 4196 uint32_t *txflags = cap_data; 4197 4198 switch (cap) { 4199 case MAC_CAPAB_HCKSUM: 4200 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4201 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload)); 4202 if (nxge_cksum_offload <= 1) { 4203 *txflags = HCKSUM_INET_PARTIAL; 4204 } 4205 break; 4206 4207 case MAC_CAPAB_MULTIFACTADDR: { 4208 mac_capab_multifactaddr_t *mfacp = cap_data; 4209 4210 mutex_enter(nxgep->genlock); 4211 mfacp->mcm_naddr = nxgep->nxge_mmac_info.num_factory_mmac; 4212 mfacp->mcm_getaddr = nxge_m_getfactaddr; 4213 mutex_exit(nxgep->genlock); 4214 break; 4215 } 4216 4217 case MAC_CAPAB_LSO: { 4218 mac_capab_lso_t *cap_lso = cap_data; 4219 4220 if (nxgep->soft_lso_enable) { 4221 if (nxge_cksum_offload <= 1) { 4222 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4223 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 4224 nxge_lso_max = NXGE_LSO_MAXLEN; 4225 } 4226 cap_lso->lso_basic_tcp_ipv4.lso_max = 4227 nxge_lso_max; 4228 } 4229 break; 4230 } else { 4231 return (B_FALSE); 4232 } 4233 } 4234 4235 case MAC_CAPAB_RINGS: { 4236 mac_capab_rings_t *cap_rings = cap_data; 4237 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 4238 4239 mutex_enter(nxgep->genlock); 4240 if (cap_rings->mr_type == MAC_RING_TYPE_RX) { 4241 cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC; 4242 cap_rings->mr_rnum = p_cfgp->max_rdcs; 4243 cap_rings->mr_rget = nxge_fill_ring; 4244 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids; 4245 cap_rings->mr_gget = nxge_hio_group_get; 4246 cap_rings->mr_gaddring = nxge_group_add_ring; 4247 cap_rings->mr_gremring = nxge_group_rem_ring; 4248 4249 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4250 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]", 4251 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids)); 4252 } else { 4253 cap_rings->mr_group_type = MAC_GROUP_TYPE_DYNAMIC; 4254 cap_rings->mr_rnum = p_cfgp->tdc.count; 4255 cap_rings->mr_rget = nxge_fill_ring; 4256 if (isLDOMservice(nxgep)) { 4257 /* share capable */ 4258 /* Do not report the default ring: hence -1 */ 4259 cap_rings->mr_gnum = 4260 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1; 4261 } else { 4262 cap_rings->mr_gnum = 0; 4263 } 4264 4265 cap_rings->mr_gget = nxge_hio_group_get; 4266 cap_rings->mr_gaddring = nxge_group_add_ring; 4267 cap_rings->mr_gremring = nxge_group_rem_ring; 4268 4269 NXGE_DEBUG_MSG((nxgep, TX_CTL, 4270 "==> nxge_m_getcapab: tx rings # of rings %d", 4271 p_cfgp->tdc.count)); 4272 } 4273 mutex_exit(nxgep->genlock); 4274 break; 4275 } 4276 4277 #if defined(sun4v) 4278 case MAC_CAPAB_SHARES: { 4279 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4280 4281 /* 4282 * Only the service domain driver responds to 4283 * this capability request. 
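	 * A guest domain returns B_FALSE below, so the MAC layer will
	 * not attempt to allocate shares there.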
4284 */ 4285 mutex_enter(nxgep->genlock); 4286 if (isLDOMservice(nxgep)) { 4287 mshares->ms_snum = 3; 4288 mshares->ms_handle = (void *)nxgep; 4289 mshares->ms_salloc = nxge_hio_share_alloc; 4290 mshares->ms_sfree = nxge_hio_share_free; 4291 mshares->ms_sadd = nxge_hio_share_add_group; 4292 mshares->ms_sremove = nxge_hio_share_rem_group; 4293 mshares->ms_squery = nxge_hio_share_query; 4294 mshares->ms_sbind = nxge_hio_share_bind; 4295 mshares->ms_sunbind = nxge_hio_share_unbind; 4296 mutex_exit(nxgep->genlock); 4297 } else { 4298 mutex_exit(nxgep->genlock); 4299 return (B_FALSE); 4300 } 4301 break; 4302 } 4303 #endif 4304 default: 4305 return (B_FALSE); 4306 } 4307 return (B_TRUE); 4308 } 4309 4310 static boolean_t 4311 nxge_param_locked(mac_prop_id_t pr_num) 4312 { 4313 /* 4314 * All adv_* parameters are locked (read-only) while 4315 * the device is in any sort of loopback mode ... 4316 */ 4317 switch (pr_num) { 4318 case MAC_PROP_ADV_1000FDX_CAP: 4319 case MAC_PROP_EN_1000FDX_CAP: 4320 case MAC_PROP_ADV_1000HDX_CAP: 4321 case MAC_PROP_EN_1000HDX_CAP: 4322 case MAC_PROP_ADV_100FDX_CAP: 4323 case MAC_PROP_EN_100FDX_CAP: 4324 case MAC_PROP_ADV_100HDX_CAP: 4325 case MAC_PROP_EN_100HDX_CAP: 4326 case MAC_PROP_ADV_10FDX_CAP: 4327 case MAC_PROP_EN_10FDX_CAP: 4328 case MAC_PROP_ADV_10HDX_CAP: 4329 case MAC_PROP_EN_10HDX_CAP: 4330 case MAC_PROP_AUTONEG: 4331 case MAC_PROP_FLOWCTRL: 4332 return (B_TRUE); 4333 } 4334 return (B_FALSE); 4335 } 4336 4337 /* 4338 * callback functions for set/get of properties 4339 */ 4340 static int 4341 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4342 uint_t pr_valsize, const void *pr_val) 4343 { 4344 nxge_t *nxgep = barg; 4345 p_nxge_param_t param_arr; 4346 p_nxge_stats_t statsp; 4347 int err = 0; 4348 uint8_t val; 4349 uint32_t cur_mtu, new_mtu, old_framesize; 4350 link_flowctrl_t fl; 4351 4352 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4353 param_arr = nxgep->param_arr; 4354 statsp = nxgep->statsp; 4355 mutex_enter(nxgep->genlock); 4356 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4357 nxge_param_locked(pr_num)) { 4358 /* 4359 * All adv_* parameters are locked (read-only) 4360 * while the device is in any sort of loopback mode. 
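	 * (nxge_param_locked() above enumerates exactly which properties
	 * are frozen.)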
4361 */ 4362 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4363 "==> nxge_m_setprop: loopback mode: read only")); 4364 mutex_exit(nxgep->genlock); 4365 return (EBUSY); 4366 } 4367 4368 val = *(uint8_t *)pr_val; 4369 switch (pr_num) { 4370 case MAC_PROP_EN_1000FDX_CAP: 4371 nxgep->param_en_1000fdx = val; 4372 param_arr[param_anar_1000fdx].value = val; 4373 4374 goto reprogram; 4375 4376 case MAC_PROP_EN_100FDX_CAP: 4377 nxgep->param_en_100fdx = val; 4378 param_arr[param_anar_100fdx].value = val; 4379 4380 goto reprogram; 4381 4382 case MAC_PROP_EN_10FDX_CAP: 4383 nxgep->param_en_10fdx = val; 4384 param_arr[param_anar_10fdx].value = val; 4385 4386 goto reprogram; 4387 4388 case MAC_PROP_EN_1000HDX_CAP: 4389 case MAC_PROP_EN_100HDX_CAP: 4390 case MAC_PROP_EN_10HDX_CAP: 4391 case MAC_PROP_ADV_1000FDX_CAP: 4392 case MAC_PROP_ADV_1000HDX_CAP: 4393 case MAC_PROP_ADV_100FDX_CAP: 4394 case MAC_PROP_ADV_100HDX_CAP: 4395 case MAC_PROP_ADV_10FDX_CAP: 4396 case MAC_PROP_ADV_10HDX_CAP: 4397 case MAC_PROP_STATUS: 4398 case MAC_PROP_SPEED: 4399 case MAC_PROP_DUPLEX: 4400 err = EINVAL; /* cannot set read-only properties */ 4401 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4402 "==> nxge_m_setprop: read only property %d", 4403 pr_num)); 4404 break; 4405 4406 case MAC_PROP_AUTONEG: 4407 param_arr[param_autoneg].value = val; 4408 4409 goto reprogram; 4410 4411 case MAC_PROP_MTU: 4412 cur_mtu = nxgep->mac.default_mtu; 4413 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4414 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4415 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4416 new_mtu, nxgep->mac.is_jumbo)); 4417 4418 if (new_mtu == cur_mtu) { 4419 err = 0; 4420 break; 4421 } 4422 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4423 err = EBUSY; 4424 break; 4425 } 4426 if (new_mtu < NXGE_DEFAULT_MTU || 4427 new_mtu > NXGE_MAXIMUM_MTU) { 4428 err = EINVAL; 4429 break; 4430 } 4431 4432 if ((new_mtu > NXGE_DEFAULT_MTU) && 4433 !nxgep->mac.is_jumbo) { 4434 err = EINVAL; 4435 break; 4436 } 4437 4438 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4439 nxgep->mac.maxframesize = (uint16_t) 4440 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4441 if (nxge_mac_set_framesize(nxgep)) { 4442 nxgep->mac.maxframesize = 4443 (uint16_t)old_framesize; 4444 err = EINVAL; 4445 break; 4446 } 4447 4448 err = mac_maxsdu_update(nxgep->mach, new_mtu); 4449 if (err) { 4450 nxgep->mac.maxframesize = 4451 (uint16_t)old_framesize; 4452 err = EINVAL; 4453 break; 4454 } 4455 4456 nxgep->mac.default_mtu = new_mtu; 4457 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4458 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4459 new_mtu, nxgep->mac.maxframesize)); 4460 break; 4461 4462 case MAC_PROP_FLOWCTRL: 4463 bcopy(pr_val, &fl, sizeof (fl)); 4464 switch (fl) { 4465 default: 4466 err = EINVAL; 4467 break; 4468 4469 case LINK_FLOWCTRL_NONE: 4470 param_arr[param_anar_pause].value = 0; 4471 break; 4472 4473 case LINK_FLOWCTRL_RX: 4474 param_arr[param_anar_pause].value = 1; 4475 break; 4476 4477 case LINK_FLOWCTRL_TX: 4478 case LINK_FLOWCTRL_BI: 4479 err = EINVAL; 4480 break; 4481 } 4482 4483 reprogram: 4484 if (err == 0) { 4485 if (!nxge_param_link_update(nxgep)) { 4486 err = EINVAL; 4487 } 4488 } 4489 break; 4490 case MAC_PROP_PRIVATE: 4491 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4492 "==> nxge_m_setprop: private property")); 4493 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 4494 pr_val); 4495 break; 4496 4497 default: 4498 err = ENOTSUP; 4499 break; 4500 } 4501 4502 mutex_exit(nxgep->genlock); 4503 4504 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4505 "<== nxge_m_setprop (return %d)", err)); 4506 return (err); 4507 } 
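/*
 * Worked example for the MAC_PROP_MTU case above (a sketch; the exact
 * value of NXGE_EHEADER_VLAN_CRC -- Ethernet header plus VLAN tag plus
 * CRC -- comes from the driver headers). A command such as
 *
 *	dladm set-linkprop -p mtu=9000 nxge0
 *
 * arrives here as new_mtu = 9000, so the hardware frame size is
 * programmed to 9000 + NXGE_EHEADER_VLAN_CRC. If either
 * nxge_mac_set_framesize() or mac_maxsdu_update() fails, the old
 * frame size is restored and EINVAL is returned.
 */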
4508 4509 static int 4510 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4511 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 4512 { 4513 nxge_t *nxgep = barg; 4514 p_nxge_param_t param_arr = nxgep->param_arr; 4515 p_nxge_stats_t statsp = nxgep->statsp; 4516 int err = 0; 4517 link_flowctrl_t fl; 4518 uint64_t tmp = 0; 4519 link_state_t ls; 4520 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4521 4522 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4523 "==> nxge_m_getprop: pr_num %d", pr_num)); 4524 4525 if (pr_valsize == 0) 4526 return (EINVAL); 4527 4528 *perm = MAC_PROP_PERM_RW; 4529 4530 if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) { 4531 err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4532 return (err); 4533 } 4534 4535 bzero(pr_val, pr_valsize); 4536 switch (pr_num) { 4537 case MAC_PROP_DUPLEX: 4538 *perm = MAC_PROP_PERM_READ; 4539 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4540 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4541 "==> nxge_m_getprop: duplex mode %d", 4542 *(uint8_t *)pr_val)); 4543 break; 4544 4545 case MAC_PROP_SPEED: 4546 if (pr_valsize < sizeof (uint64_t)) 4547 return (EINVAL); 4548 *perm = MAC_PROP_PERM_READ; 4549 tmp = statsp->mac_stats.link_speed * 1000000ull; 4550 bcopy(&tmp, pr_val, sizeof (tmp)); 4551 break; 4552 4553 case MAC_PROP_STATUS: 4554 if (pr_valsize < sizeof (link_state_t)) 4555 return (EINVAL); 4556 *perm = MAC_PROP_PERM_READ; 4557 if (!statsp->mac_stats.link_up) 4558 ls = LINK_STATE_DOWN; 4559 else 4560 ls = LINK_STATE_UP; 4561 bcopy(&ls, pr_val, sizeof (ls)); 4562 break; 4563 4564 case MAC_PROP_AUTONEG: 4565 *(uint8_t *)pr_val = 4566 param_arr[param_autoneg].value; 4567 break; 4568 4569 case MAC_PROP_FLOWCTRL: 4570 if (pr_valsize < sizeof (link_flowctrl_t)) 4571 return (EINVAL); 4572 4573 fl = LINK_FLOWCTRL_NONE; 4574 if (param_arr[param_anar_pause].value) { 4575 fl = LINK_FLOWCTRL_RX; 4576 } 4577 bcopy(&fl, pr_val, sizeof (fl)); 4578 break; 4579 4580 case MAC_PROP_ADV_1000FDX_CAP: 4581 *perm = MAC_PROP_PERM_READ; 4582 *(uint8_t *)pr_val = 4583 param_arr[param_anar_1000fdx].value; 4584 break; 4585 4586 case MAC_PROP_EN_1000FDX_CAP: 4587 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4588 break; 4589 4590 case MAC_PROP_ADV_100FDX_CAP: 4591 *perm = MAC_PROP_PERM_READ; 4592 *(uint8_t *)pr_val = 4593 param_arr[param_anar_100fdx].value; 4594 break; 4595 4596 case MAC_PROP_EN_100FDX_CAP: 4597 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4598 break; 4599 4600 case MAC_PROP_ADV_10FDX_CAP: 4601 *perm = MAC_PROP_PERM_READ; 4602 *(uint8_t *)pr_val = 4603 param_arr[param_anar_10fdx].value; 4604 break; 4605 4606 case MAC_PROP_EN_10FDX_CAP: 4607 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4608 break; 4609 4610 case MAC_PROP_EN_1000HDX_CAP: 4611 case MAC_PROP_EN_100HDX_CAP: 4612 case MAC_PROP_EN_10HDX_CAP: 4613 case MAC_PROP_ADV_1000HDX_CAP: 4614 case MAC_PROP_ADV_100HDX_CAP: 4615 case MAC_PROP_ADV_10HDX_CAP: 4616 err = ENOTSUP; 4617 break; 4618 4619 case MAC_PROP_PRIVATE: 4620 err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4621 pr_valsize, pr_val, perm); 4622 break; 4623 default: 4624 err = EINVAL; 4625 break; 4626 } 4627 4628 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4629 4630 return (err); 4631 } 4632 4633 /* ARGSUSED */ 4634 static int 4635 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4636 const void *pr_val) 4637 { 4638 p_nxge_param_t param_arr = nxgep->param_arr; 4639 int err = 0; 4640 long result; 4641 4642 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4643 "==> 
nxge_set_priv_prop: name %s", pr_name)); 4644 4645 if (strcmp(pr_name, "_accept_jumbo") == 0) { 4646 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4647 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4648 "<== nxge_set_priv_prop: name %s " 4649 "pr_val %s result %d " 4650 "param %d is_jumbo %d", 4651 pr_name, pr_val, result, 4652 param_arr[param_accept_jumbo].value, 4653 nxgep->mac.is_jumbo)); 4654 4655 if (result > 1 || result < 0) { 4656 err = EINVAL; 4657 } else { 4658 if (nxgep->mac.is_jumbo == 4659 (uint32_t)result) { 4660 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4661 "no change (%d %d)", 4662 nxgep->mac.is_jumbo, 4663 result)); 4664 return (0); 4665 } 4666 } 4667 4668 param_arr[param_accept_jumbo].value = result; 4669 nxgep->mac.is_jumbo = B_FALSE; 4670 if (result) { 4671 nxgep->mac.is_jumbo = B_TRUE; 4672 } 4673 4674 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4675 "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d", 4676 pr_name, result, nxgep->mac.is_jumbo)); 4677 4678 return (err); 4679 } 4680 4681 /* Blanking */ 4682 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4683 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4684 (char *)pr_val, 4685 (caddr_t)¶m_arr[param_rxdma_intr_time]); 4686 if (err) { 4687 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4688 "<== nxge_set_priv_prop: " 4689 "unable to set (%s)", pr_name)); 4690 err = EINVAL; 4691 } else { 4692 err = 0; 4693 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4694 "<== nxge_set_priv_prop: " 4695 "set (%s)", pr_name)); 4696 } 4697 4698 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4699 "<== nxge_set_priv_prop: name %s (value %d)", 4700 pr_name, result)); 4701 4702 return (err); 4703 } 4704 4705 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4706 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4707 (char *)pr_val, 4708 (caddr_t)¶m_arr[param_rxdma_intr_pkts]); 4709 if (err) { 4710 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4711 "<== nxge_set_priv_prop: " 4712 "unable to set (%s)", pr_name)); 4713 err = EINVAL; 4714 } else { 4715 err = 0; 4716 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4717 "<== nxge_set_priv_prop: " 4718 "set (%s)", pr_name)); 4719 } 4720 4721 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4722 "<== nxge_set_priv_prop: name %s (value %d)", 4723 pr_name, result)); 4724 4725 return (err); 4726 } 4727 4728 /* Classification */ 4729 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4730 if (pr_val == NULL) { 4731 err = EINVAL; 4732 return (err); 4733 } 4734 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4735 4736 err = nxge_param_set_ip_opt(nxgep, NULL, 4737 NULL, (char *)pr_val, 4738 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 4739 4740 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4741 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4742 pr_name, result)); 4743 4744 return (err); 4745 } 4746 4747 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 4748 if (pr_val == NULL) { 4749 err = EINVAL; 4750 return (err); 4751 } 4752 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4753 4754 err = nxge_param_set_ip_opt(nxgep, NULL, 4755 NULL, (char *)pr_val, 4756 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 4757 4758 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4759 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4760 pr_name, result)); 4761 4762 return (err); 4763 } 4764 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 4765 if (pr_val == NULL) { 4766 err = EINVAL; 4767 return (err); 4768 } 4769 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4770 4771 err = nxge_param_set_ip_opt(nxgep, NULL, 4772 NULL, (char *)pr_val, 4773 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 4774 4775 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4776 "<== 
nxge_set_priv_prop: name %s (value 0x%x)", 4777 pr_name, result)); 4778 4779 return (err); 4780 } 4781 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 4782 if (pr_val == NULL) { 4783 err = EINVAL; 4784 return (err); 4785 } 4786 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4787 4788 err = nxge_param_set_ip_opt(nxgep, NULL, 4789 NULL, (char *)pr_val, 4790 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 4791 4792 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4793 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4794 pr_name, result)); 4795 4796 return (err); 4797 } 4798 4799 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 4800 if (pr_val == NULL) { 4801 err = EINVAL; 4802 return (err); 4803 } 4804 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4805 4806 err = nxge_param_set_ip_opt(nxgep, NULL, 4807 NULL, (char *)pr_val, 4808 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 4809 4810 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4811 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4812 pr_name, result)); 4813 4814 return (err); 4815 } 4816 4817 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 4818 if (pr_val == NULL) { 4819 err = EINVAL; 4820 return (err); 4821 } 4822 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4823 4824 err = nxge_param_set_ip_opt(nxgep, NULL, 4825 NULL, (char *)pr_val, 4826 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 4827 4828 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4829 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4830 pr_name, result)); 4831 4832 return (err); 4833 } 4834 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 4835 if (pr_val == NULL) { 4836 err = EINVAL; 4837 return (err); 4838 } 4839 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4840 4841 err = nxge_param_set_ip_opt(nxgep, NULL, 4842 NULL, (char *)pr_val, 4843 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 4844 4845 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4846 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4847 pr_name, result)); 4848 4849 return (err); 4850 } 4851 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 4852 if (pr_val == NULL) { 4853 err = EINVAL; 4854 return (err); 4855 } 4856 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4857 4858 err = nxge_param_set_ip_opt(nxgep, NULL, 4859 NULL, (char *)pr_val, 4860 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 4861 4862 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4863 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4864 pr_name, result)); 4865 4866 return (err); 4867 } 4868 4869 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 4870 if (pr_val == NULL) { 4871 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4872 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 4873 err = EINVAL; 4874 return (err); 4875 } 4876 4877 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4878 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4879 "<== nxge_set_priv_prop: name %s " 4880 "(lso %d pr_val %s value %d)", 4881 pr_name, nxgep->soft_lso_enable, pr_val, result)); 4882 4883 if (result > 1 || result < 0) { 4884 err = EINVAL; 4885 } else { 4886 if (nxgep->soft_lso_enable == (uint32_t)result) { 4887 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4888 "no change (%d %d)", 4889 nxgep->soft_lso_enable, result)); 4890 return (0); 4891 } 4892 } 4893 4894 nxgep->soft_lso_enable = (int)result; 4895 4896 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4897 "<== nxge_set_priv_prop: name %s (value %d)", 4898 pr_name, result)); 4899 4900 return (err); 4901 } 4902 /* 4903 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the 4904 * following code to be executed. 
	 */
	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_anar_10gfdx]);
		return (err);
	}
	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_anar_pause]);
		return (err);
	}

	return (EINVAL);
}

static int
nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val, uint_t *perm)
{
	p_nxge_param_t param_arr = nxgep->param_arr;
	char valstr[MAXNAMELEN];
	int err = EINVAL;
	uint_t strsize;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "==> nxge_get_priv_prop: property %s", pr_name));

	/* function number */
	if (strcmp(pr_name, "_function_number") == 0) {
		if (is_default)
			return (ENOTSUP);
		*perm = MAC_PROP_PERM_READ;
		(void) snprintf(valstr, sizeof (valstr), "%d",
		    nxgep->function_num);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %d valstr %s)",
		    pr_name, nxgep->function_num, valstr));

		err = 0;
		goto done;
	}

	/* Neptune firmware version */
	if (strcmp(pr_name, "_fw_version") == 0) {
		if (is_default)
			return (ENOTSUP);
		*perm = MAC_PROP_PERM_READ;
		(void) snprintf(valstr, sizeof (valstr), "%s",
		    nxgep->vpd_info.ver);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %s valstr %s)",
		    pr_name, nxgep->vpd_info.ver, valstr));

		err = 0;
		goto done;
	}

	/* port PHY mode */
	if (strcmp(pr_name, "_port_mode") == 0) {
		if (is_default)
			return (ENOTSUP);
		*perm = MAC_PROP_PERM_READ;
		switch (nxgep->mac.portmode) {
		case PORT_1G_COPPER:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G copper %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_FIBER:
			(void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_COPPER:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G copper %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_FIBER:
			(void) snprintf(valstr, sizeof (valstr), "10G fiber %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_SERDES:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G serdes %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_SERDES:
			(void) snprintf(valstr, sizeof (valstr), "1G serdes %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_TN1010:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G TN1010 copper %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_TN1010:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G TN1010 copper %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_RGMII_FIBER:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
5015 "[hot swappable]" : ""); 5016 break; 5017 case PORT_HSP_MODE: 5018 (void) snprintf(valstr, sizeof (valstr), 5019 "phy not present[hot swappable]"); 5020 break; 5021 default: 5022 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5023 nxgep->hot_swappable_phy ? 5024 "[hot swappable]" : ""); 5025 break; 5026 } 5027 5028 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5029 "==> nxge_get_priv_prop: name %s (value %s)", 5030 pr_name, valstr)); 5031 5032 err = 0; 5033 goto done; 5034 } 5035 5036 /* Hot swappable PHY */ 5037 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5038 if (is_default) 5039 return (ENOTSUP); 5040 *perm = MAC_PROP_PERM_READ; 5041 (void) snprintf(valstr, sizeof (valstr), "%s", 5042 nxgep->hot_swappable_phy ? 5043 "yes" : "no"); 5044 5045 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5046 "==> nxge_get_priv_prop: name %s " 5047 "(value %d valstr %s)", 5048 pr_name, nxgep->hot_swappable_phy, valstr)); 5049 5050 err = 0; 5051 goto done; 5052 } 5053 5054 5055 /* accept jumbo */ 5056 if (strcmp(pr_name, "_accept_jumbo") == 0) { 5057 if (is_default) 5058 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5059 else 5060 (void) snprintf(valstr, sizeof (valstr), 5061 "%d", nxgep->mac.is_jumbo); 5062 err = 0; 5063 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5064 "==> nxge_get_priv_prop: name %s (value %d (%d, %d))", 5065 pr_name, 5066 (uint32_t)param_arr[param_accept_jumbo].value, 5067 nxgep->mac.is_jumbo, 5068 nxge_jumbo_enable)); 5069 5070 goto done; 5071 } 5072 5073 /* Receive Interrupt Blanking Parameters */ 5074 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5075 err = 0; 5076 if (is_default) { 5077 (void) snprintf(valstr, sizeof (valstr), 5078 "%d", RXDMA_RCR_TO_DEFAULT); 5079 goto done; 5080 } 5081 5082 (void) snprintf(valstr, sizeof (valstr), "%d", 5083 nxgep->intr_timeout); 5084 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5085 "==> nxge_get_priv_prop: name %s (value %d)", 5086 pr_name, 5087 (uint32_t)nxgep->intr_timeout)); 5088 goto done; 5089 } 5090 5091 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5092 err = 0; 5093 if (is_default) { 5094 (void) snprintf(valstr, sizeof (valstr), 5095 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5096 goto done; 5097 } 5098 (void) snprintf(valstr, sizeof (valstr), "%d", 5099 nxgep->intr_threshold); 5100 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5101 "==> nxge_get_priv_prop: name %s (value %d)", 5102 pr_name, (uint32_t)nxgep->intr_threshold)); 5103 5104 goto done; 5105 } 5106 5107 /* Classification and Load Distribution Configuration */ 5108 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5109 if (is_default) { 5110 (void) snprintf(valstr, sizeof (valstr), "%x", 5111 NXGE_CLASS_FLOW_GEN_SERVER); 5112 err = 0; 5113 goto done; 5114 } 5115 err = nxge_dld_get_ip_opt(nxgep, 5116 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5117 5118 (void) snprintf(valstr, sizeof (valstr), "%x", 5119 (int)param_arr[param_class_opt_ipv4_tcp].value); 5120 5121 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5122 "==> nxge_get_priv_prop: %s", valstr)); 5123 goto done; 5124 } 5125 5126 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5127 if (is_default) { 5128 (void) snprintf(valstr, sizeof (valstr), "%x", 5129 NXGE_CLASS_FLOW_GEN_SERVER); 5130 err = 0; 5131 goto done; 5132 } 5133 err = nxge_dld_get_ip_opt(nxgep, 5134 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5135 5136 (void) snprintf(valstr, sizeof (valstr), "%x", 5137 (int)param_arr[param_class_opt_ipv4_udp].value); 5138 5139 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5140 "==> nxge_get_priv_prop: %s", valstr)); 5141 goto done; 5142 } 5143 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5144 
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_ah].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_sctp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_tcp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_udp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_ah].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_sctp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	/* Software LSO */
	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
			err = 0;
			goto done;
		}
		(void) snprintf(valstr, sizeof (valstr),
		    "%d", nxgep->soft_lso_enable);
		err = 0;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d)",
		    pr_name, nxgep->soft_lso_enable));

		goto done;
	}
	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
		err = 0;
if (is_default || 5270 nxgep->param_arr[param_anar_10gfdx].value != 0) { 5271 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5272 goto done; 5273 } else { 5274 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5275 goto done; 5276 } 5277 } 5278 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5279 err = 0; 5280 if (is_default || 5281 nxgep->param_arr[param_anar_pause].value != 0) { 5282 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5283 goto done; 5284 } else { 5285 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5286 goto done; 5287 } 5288 } 5289 5290 done: 5291 if (err == 0) { 5292 strsize = (uint_t)strlen(valstr); 5293 if (pr_valsize < strsize) { 5294 err = ENOBUFS; 5295 } else { 5296 (void) strlcpy(pr_val, valstr, pr_valsize); 5297 } 5298 } 5299 5300 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5301 "<== nxge_get_priv_prop: return %d", err)); 5302 return (err); 5303 } 5304 5305 /* 5306 * Module loading and removing entry points. 5307 */ 5308 5309 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach, 5310 nodev, NULL, D_MP, NULL, nxge_quiesce); 5311 5312 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5313 5314 /* 5315 * Module linkage information for the kernel. 5316 */ 5317 static struct modldrv nxge_modldrv = { 5318 &mod_driverops, 5319 NXGE_DESC_VER, 5320 &nxge_dev_ops 5321 }; 5322 5323 static struct modlinkage modlinkage = { 5324 MODREV_1, (void *) &nxge_modldrv, NULL 5325 }; 5326 5327 int 5328 _init(void) 5329 { 5330 int status; 5331 5332 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5333 mac_init_ops(&nxge_dev_ops, "nxge"); 5334 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5335 if (status != 0) { 5336 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5337 "failed to init device soft state")); 5338 goto _init_exit; 5339 } 5340 status = mod_install(&modlinkage); 5341 if (status != 0) { 5342 ddi_soft_state_fini(&nxge_list); 5343 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5344 goto _init_exit; 5345 } 5346 5347 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5348 5349 _init_exit: 5350 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 5351 5352 return (status); 5353 } 5354 5355 int 5356 _fini(void) 5357 { 5358 int status; 5359 5360 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5361 5362 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5363 5364 if (nxge_mblks_pending) 5365 return (EBUSY); 5366 5367 status = mod_remove(&modlinkage); 5368 if (status != DDI_SUCCESS) { 5369 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5370 "Module removal failed 0x%08x", 5371 status)); 5372 goto _fini_exit; 5373 } 5374 5375 mac_fini_ops(&nxge_dev_ops); 5376 5377 ddi_soft_state_fini(&nxge_list); 5378 5379 MUTEX_DESTROY(&nxge_common_lock); 5380 _fini_exit: 5381 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 5382 5383 return (status); 5384 } 5385 5386 int 5387 _info(struct modinfo *modinfop) 5388 { 5389 int status; 5390 5391 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5392 status = mod_info(&modlinkage, modinfop); 5393 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5394 5395 return (status); 5396 } 5397 5398 /*ARGSUSED*/ 5399 static int 5400 nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5401 { 5402 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5403 p_nxge_t nxgep = rhp->nxgep; 5404 uint32_t channel; 5405 p_tx_ring_t ring; 5406 5407 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5408 ring = nxgep->tx_rings->rings[channel]; 5409 5410 MUTEX_ENTER(&ring->lock); 5411 
	ring->tx_ring_handle = rhp->ring_handle;
	MUTEX_EXIT(&ring->lock);

	return (0);
}

static void
nxge_tx_ring_stop(mac_ring_driver_t rdriver)
{
	p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t nxgep = rhp->nxgep;
	uint32_t channel;
	p_tx_ring_t ring;

	channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
	ring = nxgep->tx_rings->rings[channel];

	MUTEX_ENTER(&ring->lock);
	ring->tx_ring_handle = (mac_ring_handle_t)NULL;
	MUTEX_EXIT(&ring->lock);
}

static int
nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
{
	p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t nxgep = rhp->nxgep;
	uint32_t channel;
	p_rx_rcr_ring_t ring;
	int i;

	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
	ring = nxgep->rx_rcr_rings->rcr_rings[channel];

	MUTEX_ENTER(&ring->lock);

	if (nxgep->rx_channel_started[channel] == B_TRUE) {
		MUTEX_EXIT(&ring->lock);
		return (0);
	}

	/* set rcr_ring */
	for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
		if ((nxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
		    (nxgep->ldgvp->ldvp[i].channel == channel)) {
			ring->ldvp = &nxgep->ldgvp->ldvp[i];
			ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp;
		}
	}

	nxgep->rx_channel_started[channel] = B_TRUE;
	ring->rcr_mac_handle = rhp->ring_handle;
	ring->rcr_gen_num = mr_gen_num;
	MUTEX_EXIT(&ring->lock);

	return (0);
}

static void
nxge_rx_ring_stop(mac_ring_driver_t rdriver)
{
	p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t nxgep = rhp->nxgep;
	uint32_t channel;
	p_rx_rcr_ring_t ring;

	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
	ring = nxgep->rx_rcr_rings->rcr_rings[channel];

	MUTEX_ENTER(&ring->lock);
	nxgep->rx_channel_started[channel] = B_FALSE;
	ring->rcr_mac_handle = NULL;
	MUTEX_EXIT(&ring->lock);
}

/*
 * Callback function for the MAC layer to register all rings.
 */
static void
nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_fill_ring 0x%x index %d", rtype, index));

	switch (rtype) {
	case MAC_RING_TYPE_TX: {
		p_nxge_ring_handle_t rhandlep;

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d",
		    rtype, index, p_cfgp->tdc.count));

		ASSERT((index >= 0) && (index < p_cfgp->tdc.count));
		rhandlep = &nxgep->tx_ring_handles[index];
		rhandlep->nxgep = nxgep;
		rhandlep->index = index;
		rhandlep->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rhandlep;
		infop->mri_start = nxge_tx_ring_start;
		infop->mri_stop = nxge_tx_ring_stop;
		infop->mri_tx = nxge_tx_ring_send;

		break;
	}
	case MAC_RING_TYPE_RX: {
		p_nxge_ring_handle_t rhandlep;
		int nxge_rindex;
		mac_intr_t nxge_mac_intr;

		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d",
		    rtype, index, p_cfgp->max_rdcs));

		/*
		 * 'index' is the ring index within the group.
		 * Find the ring index in the nxge instance.
		 */
		nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index);

		ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs));
		rhandlep = &nxgep->rx_ring_handles[nxge_rindex];
		rhandlep->nxgep = nxgep;
		rhandlep->index = nxge_rindex;
		rhandlep->ring_handle = rh;

		/*
		 * Entry point to enable interrupt (disable poll) and
		 * disable interrupt (enable poll).
		 */
		nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep;
		nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll;
		nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll;
		infop->mri_driver = (mac_ring_driver_t)rhandlep;
		infop->mri_start = nxge_rx_ring_start;
		infop->mri_stop = nxge_rx_ring_stop;
		infop->mri_intr = nxge_mac_intr; /* ??? */
		infop->mri_poll = nxge_rx_poll;

		break;
	}
	default:
		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x",
	    rtype));
}

static void
nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
    mac_ring_type_t type)
{
	nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
	nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
	nxge_t *nxge;
	nxge_grp_t *grp;
	nxge_rdc_grp_t *rdc_grp;
	uint16_t channel;	/* device-wise ring id */
	int dev_gindex;
	int rv;

	nxge = rgroup->nxgep;

	switch (type) {
	case MAC_RING_TYPE_TX:
		/*
		 * nxge_grp_dc_add takes a channel number which is a
		 * "device" ring ID.
		 */
		channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;

		/*
		 * Remove the ring from the default group
		 */
		if (rgroup->gindex != 0) {
			(void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
		}

		/*
		 * nxge->tx_set.group[] is an array of groups indexed by
		 * a "port" group ID.
		 */
		grp = nxge->tx_set.group[rgroup->gindex];
		rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
		if (rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_group_add_ring: nxge_grp_dc_add failed"));
		}
		break;

	case MAC_RING_TYPE_RX:
		/*
		 * nxge->rx_set.group[] is an array of groups indexed by
		 * a "port" group ID.
		 */
		grp = nxge->rx_set.group[rgroup->gindex];

		dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
		    rgroup->gindex;
		rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];

		/*
		 * nxge_grp_dc_add takes a channel number which is a
		 * "device" ring ID.
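		 * For example, with a (hypothetical) start_rdc of 8 and a
		 * ring index of 2, the channel passed to nxge_grp_dc_add()
		 * below is 10.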
5620 */ 5621 channel = nxge->pt_config.hw_config.start_rdc + rhandle->index; 5622 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel); 5623 if (rv != 0) { 5624 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5625 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5626 } 5627 5628 rdc_grp->map |= (1 << channel); 5629 rdc_grp->max_rdcs++; 5630 5631 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl); 5632 break; 5633 } 5634 } 5635 5636 static void 5637 nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5638 mac_ring_type_t type) 5639 { 5640 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5641 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5642 nxge_t *nxge; 5643 uint16_t channel; /* device-wise ring id */ 5644 nxge_rdc_grp_t *rdc_grp; 5645 int dev_gindex; 5646 5647 nxge = rgroup->nxgep; 5648 5649 switch (type) { 5650 case MAC_RING_TYPE_TX: 5651 dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid + 5652 rgroup->gindex; 5653 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5654 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5655 5656 /* 5657 * Add the ring back to the default group 5658 */ 5659 if (rgroup->gindex != 0) { 5660 nxge_grp_t *grp; 5661 grp = nxge->tx_set.group[0]; 5662 (void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5663 } 5664 break; 5665 5666 case MAC_RING_TYPE_RX: 5667 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5668 rgroup->gindex; 5669 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5670 channel = rdc_grp->start_rdc + rhandle->index; 5671 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 5672 5673 rdc_grp->map &= ~(1 << channel); 5674 rdc_grp->max_rdcs--; 5675 5676 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl); 5677 break; 5678 } 5679 } 5680 5681 5682 /*ARGSUSED*/ 5683 static nxge_status_t 5684 nxge_add_intrs(p_nxge_t nxgep) 5685 { 5686 5687 int intr_types; 5688 int type = 0; 5689 int ddi_status = DDI_SUCCESS; 5690 nxge_status_t status = NXGE_OK; 5691 5692 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 5693 5694 nxgep->nxge_intr_type.intr_registered = B_FALSE; 5695 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 5696 nxgep->nxge_intr_type.msi_intx_cnt = 0; 5697 nxgep->nxge_intr_type.intr_added = 0; 5698 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 5699 nxgep->nxge_intr_type.intr_type = 0; 5700 5701 if (nxgep->niu_type == N2_NIU) { 5702 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5703 } else if (nxge_msi_enable) { 5704 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5705 } 5706 5707 /* Get the supported interrupt types */ 5708 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 5709 != DDI_SUCCESS) { 5710 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 5711 "ddi_intr_get_supported_types failed: status 0x%08x", 5712 ddi_status)); 5713 return (NXGE_ERROR | NXGE_DDI_FAILED); 5714 } 5715 nxgep->nxge_intr_type.intr_types = intr_types; 5716 5717 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5718 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5719 5720 /* 5721 * Solaris MSIX is not supported yet. use MSI for now. 
 * nxge_msi_enable (1):
 *	1 - MSI		2 - MSI-X	others - FIXED
 */
	switch (nxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "use fixed (intx emulation) type %08x",
		    type));
		break;

	case 2:
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
		break;

	case 1:
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
	}

	nxgep->nxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    nxgep->nxge_intr_type.niu_msi_enable) {
		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_add_intrs: "
			    " nxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "interrupts registered : type %d", type));
			nxgep->nxge_intr_type.intr_registered = B_TRUE;

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "\nAdded advanced nxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!nxgep->nxge_intr_type.intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
		    "failed to register interrupts"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
	return (status);
}

static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)
{
	int		intr_type;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intr_type = intrp->intr_type;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		return (nxge_add_intrs_adv_type(nxgep, intr_type));

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));

	default:
		return (NXGE_ERROR);
	}
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail, nrequest;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = nxge_create_msi_property(nxgep);
		if (nrequest < navail) {
			navail = nrequest;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI must be power of 2 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ?
    behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
        DDI_INTR_ALLOC_NORMAL);
    intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
    intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
    ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
        navail, &nactual, behavior);
    if (ddi_status != DDI_SUCCESS || nactual == 0) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_alloc() failed: %d",
            ddi_status));
        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
        (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_get_pri() failed: %d",
            ddi_status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    nrequired = 0;
    switch (nxgep->niu_type) {
    default:
        status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
        break;

    case N2_NIU:
        status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
        break;
    }

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_add_intrs_adv_type: nxge_ldgv_init "
            "failed: 0x%x", status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (status);
    }

    ldgp = nxgep->ldgvp->ldgp;
    for (x = 0; x < nrequired; x++, ldgp++) {
        ldgp->vector = (uint8_t)x;
        ldgp->intdata = SID_DATA(ldgp->func, x);
        arg1 = ldgp->ldvp;
        arg2 = nxgep;
        if (ldgp->nldvs == 1) {
            inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
            NXGE_DEBUG_MSG((nxgep, INT_CTL,
                "nxge_add_intrs_adv_type: "
                "arg1 0x%x arg2 0x%x: "
                "1-1 int handler (entry %d intdata 0x%x)\n",
                arg1, arg2,
                x, ldgp->intdata));
        } else if (ldgp->nldvs > 1) {
            inthandler = (uint_t *)ldgp->sys_intr_handler;
            NXGE_DEBUG_MSG((nxgep, INT_CTL,
                "nxge_add_intrs_adv_type: "
                "arg1 0x%x arg2 0x%x: "
                "nldevs %d int handler "
                "(entry %d intdata 0x%x)\n",
                arg1, arg2,
                ldgp->nldvs, x, ldgp->intdata));
        }

        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
            "htable 0x%llx", x, intrp->htable[x]));

        if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
            (ddi_intr_handler_t *)inthandler, arg1, arg2))
            != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_add_intrs_adv_type: failed #%d "
                "status 0x%x", x, ddi_status));
            for (y = 0; y < intrp->intr_added; y++) {
                (void) ddi_intr_remove_handler(
                    intrp->htable[y]);
            }
            /* Free already allocated interrupts */
            for (y = 0; y < nactual; y++) {
                (void) ddi_intr_free(intrp->htable[y]);
            }
            kmem_free(intrp->htable, intrp->intr_size);

            (void) nxge_ldgv_uninit(nxgep);

            return (NXGE_ERROR | NXGE_DDI_FAILED);
        }
        intrp->intr_added++;
    }

    intrp->msi_intx_cnt = nactual;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
        navail, nactual,
        intrp->msi_intx_cnt,
        intrp->intr_added));

    (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

    (void) nxge_intr_ldgv_init(nxgep);

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

    return (status);
}
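
/*
 * Fixed (INTx) counterpart of nxge_add_intrs_adv_type() above.  The
 * flow is the same: query the supported counts, allocate handles,
 * initialize the logical device groups, and add a handler per group.
 * It differs in that it skips the MSI-X limit override and MSI
 * power-of-2 rounding, and programs the SID only for non-N2/NIU
 * devices.
 */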

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
    dev_info_t	*dip = nxgep->dip;
    p_nxge_ldg_t	ldgp;
    p_nxge_intr_t	intrp;
    uint_t		*inthandler;
    void		*arg1, *arg2;
    int		behavior;
    int		nintrs, navail;
    int		nactual, nrequired;
    int		inum = 0;
    int		x, y;
    int		ddi_status = DDI_SUCCESS;
    nxge_status_t	status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
    intrp->start_inum = 0;

    ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
    if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "ddi_intr_get_nintrs() failed, status: 0x%x, "
            "nintrs: %d", ddi_status, nintrs));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
    if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "ddi_intr_get_navail() failed, status: 0x%x, "
            "navail: %d", ddi_status, navail));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL,
        "ddi_intr_get_navail() returned: nintrs %d, navail %d",
        nintrs, navail));

    behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
        DDI_INTR_ALLOC_NORMAL);
    intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
    intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
    ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
        navail, &nactual, behavior);
    if (ddi_status != DDI_SUCCESS || nactual == 0) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_alloc() failed: %d",
            ddi_status));
        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
        (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_get_pri() failed: %d",
            ddi_status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    nrequired = 0;
    switch (nxgep->niu_type) {
    default:
        status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
        break;

    case N2_NIU:
        status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
        break;
    }

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
            "failed: 0x%x", status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (status);
    }

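    /*
     * Bind each logical device group (LDG) to an interrupt vector
     * and register a handler: the device's own handler when the
     * group holds a single logical device (nldvs == 1), or the
     * shared system handler when it holds several.
     */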
"1-1 int handler(%d) ldg %d ldv %d " 6132 "arg1 $%p arg2 $%p\n", 6133 x, ldgp->ldg, ldgp->ldvp->ldv, 6134 arg1, arg2)); 6135 } else if (ldgp->nldvs > 1) { 6136 inthandler = (uint_t *)ldgp->sys_intr_handler; 6137 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6138 "nxge_add_intrs_adv_type_fix: " 6139 "shared ldv %d int handler(%d) ldv %d ldg %d" 6140 "arg1 0x%016llx arg2 0x%016llx\n", 6141 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6142 arg1, arg2)); 6143 } 6144 6145 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6146 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6147 != DDI_SUCCESS) { 6148 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6149 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6150 "status 0x%x", x, ddi_status)); 6151 for (y = 0; y < intrp->intr_added; y++) { 6152 (void) ddi_intr_remove_handler( 6153 intrp->htable[y]); 6154 } 6155 for (y = 0; y < nactual; y++) { 6156 (void) ddi_intr_free(intrp->htable[y]); 6157 } 6158 /* Free already allocated intr */ 6159 kmem_free(intrp->htable, intrp->intr_size); 6160 6161 (void) nxge_ldgv_uninit(nxgep); 6162 6163 return (NXGE_ERROR | NXGE_DDI_FAILED); 6164 } 6165 intrp->intr_added++; 6166 } 6167 6168 intrp->msi_intx_cnt = nactual; 6169 6170 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6171 6172 status = nxge_intr_ldgv_init(nxgep); 6173 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6174 6175 return (status); 6176 } 6177 6178 static void 6179 nxge_remove_intrs(p_nxge_t nxgep) 6180 { 6181 int i, inum; 6182 p_nxge_intr_t intrp; 6183 6184 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6185 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6186 if (!intrp->intr_registered) { 6187 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6188 "<== nxge_remove_intrs: interrupts not registered")); 6189 return; 6190 } 6191 6192 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 6193 6194 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6195 (void) ddi_intr_block_disable(intrp->htable, 6196 intrp->intr_added); 6197 } else { 6198 for (i = 0; i < intrp->intr_added; i++) { 6199 (void) ddi_intr_disable(intrp->htable[i]); 6200 } 6201 } 6202 6203 for (inum = 0; inum < intrp->intr_added; inum++) { 6204 if (intrp->htable[inum]) { 6205 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6206 } 6207 } 6208 6209 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6210 if (intrp->htable[inum]) { 6211 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6212 "nxge_remove_intrs: ddi_intr_free inum %d " 6213 "msi_intx_cnt %d intr_added %d", 6214 inum, 6215 intrp->msi_intx_cnt, 6216 intrp->intr_added)); 6217 6218 (void) ddi_intr_free(intrp->htable[inum]); 6219 } 6220 } 6221 6222 kmem_free(intrp->htable, intrp->intr_size); 6223 intrp->intr_registered = B_FALSE; 6224 intrp->intr_enabled = B_FALSE; 6225 intrp->msi_intx_cnt = 0; 6226 intrp->intr_added = 0; 6227 6228 (void) nxge_ldgv_uninit(nxgep); 6229 6230 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6231 "#msix-request"); 6232 6233 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6234 } 6235 6236 /*ARGSUSED*/ 6237 static void 6238 nxge_intrs_enable(p_nxge_t nxgep) 6239 { 6240 p_nxge_intr_t intrp; 6241 int i; 6242 int status; 6243 6244 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6245 6246 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6247 6248 if (!intrp->intr_registered) { 6249 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6250 "interrupts are not registered")); 6251 return; 6252 } 6253 6254 if (intrp->intr_enabled) { 6255 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6256 "<== 

/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
    p_nxge_intr_t	intrp;
    int		i;
    int		status;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

    if (!intrp->intr_registered) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
            "interrupts are not registered"));
        return;
    }

    if (intrp->intr_enabled) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "<== nxge_intrs_enable: already enabled"));
        return;
    }

    if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
        status = ddi_intr_block_enable(intrp->htable,
            intrp->intr_added);
        NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
            "block enable - status 0x%x total inums #%d\n",
            status, intrp->intr_added));
    } else {
        for (i = 0; i < intrp->intr_added; i++) {
            status = ddi_intr_enable(intrp->htable[i]);
            NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
                "ddi_intr_enable:enable - status 0x%x "
                "total inums %d enable inum #%d\n",
                status, intrp->intr_added, i));
            if (status == DDI_SUCCESS) {
                intrp->intr_enabled = B_TRUE;
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
    p_nxge_intr_t	intrp;
    int		i;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

    if (!intrp->intr_registered) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
            "interrupts are not registered"));
        return;
    }

    if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
        (void) ddi_intr_block_disable(intrp->htable,
            intrp->intr_added);
    } else {
        for (i = 0; i < intrp->intr_added; i++) {
            (void) ddi_intr_disable(intrp->htable[i]);
        }
    }

    intrp->intr_enabled = B_FALSE;
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
    mac_register_t	*macp;
    int		status;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

    if ((macp = mac_alloc(MAC_VERSION)) == NULL)
        return (NXGE_ERROR);

    macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    macp->m_driver = nxgep;
    macp->m_dip = nxgep->dip;
    macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
    macp->m_callbacks = &nxge_m_callbacks;
    macp->m_min_sdu = 0;
    nxgep->mac.default_mtu = nxgep->mac.maxframesize -
        NXGE_EHEADER_VLAN_CRC;
    macp->m_max_sdu = nxgep->mac.default_mtu;
    macp->m_margin = VLAN_TAGSZ;
    macp->m_priv_props = nxge_priv_props;
    macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS;
    macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1 | MAC_VIRT_SERIALIZE;

    NXGE_DEBUG_MSG((nxgep, MAC_CTL,
        "==> nxge_mac_register: instance %d "
        "max_sdu %d margin %d maxframe %d (header %d)",
        nxgep->instance,
        macp->m_max_sdu, macp->m_margin,
        nxgep->mac.maxframesize,
        NXGE_EHEADER_VLAN_CRC));

    status = mac_register(macp, &nxgep->mach);
    mac_free(macp);

    if (status != 0) {
        cmn_err(CE_WARN,
            "!nxge_mac_register failed (status %d instance %d)",
            status, nxgep->instance);
        return (NXGE_ERROR);
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
        "(instance %d)", nxgep->instance));

    return (NXGE_OK);
}
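
/*
 * Error-injection entry point used by diagnostic ioctls: the message
 * block carries an err_inject_t that names the hardware block, the
 * error identifier and, for the DMA blocks, the channel to inject on.
 */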

void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
    ssize_t		size;
    mblk_t		*nmp;
    uint8_t		blk_id;
    uint8_t		chan;
    uint32_t	err_id;
    err_inject_t	*eip;

    NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

    size = 1024;
    nmp = mp->b_cont;
    eip = (err_inject_t *)nmp->b_rptr;
    blk_id = eip->blk_id;
    err_id = eip->err_id;
    chan = eip->chan;
    cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
    cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
    cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
    switch (blk_id) {
    case MAC_BLK_ID:
        break;
    case TXMAC_BLK_ID:
        break;
    case RXMAC_BLK_ID:
        break;
    case MIF_BLK_ID:
        break;
    case IPP_BLK_ID:
        nxge_ipp_inject_err(nxgep, err_id);
        break;
    case TXC_BLK_ID:
        nxge_txc_inject_err(nxgep, err_id);
        break;
    case TXDMA_BLK_ID:
        nxge_txdma_inject_err(nxgep, err_id, chan);
        break;
    case RXDMA_BLK_ID:
        nxge_rxdma_inject_err(nxgep, err_id, chan);
        break;
    case ZCP_BLK_ID:
        nxge_zcp_inject_err(nxgep, err_id);
        break;
    case ESPC_BLK_ID:
        break;
    case FFLP_BLK_ID:
        break;
    case PHY_BLK_ID:
        break;
    case ETHER_SERDES_BLK_ID:
        break;
    case PCIE_SERDES_BLK_ID:
        break;
    case VIR_BLK_ID:
        break;
    }

    nmp->b_wptr = nmp->b_rptr + size;
    NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

    miocack(wq, mp, (int)size, 0);
}
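
/*
 * All functions (ports) of one Neptune device share a single
 * nxge_hw_list_t entry, keyed by the parent devinfo node.  The first
 * function to attach creates the entry, initializes the shared locks
 * and scans the ports/PHYs; later functions simply join it.
 */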

static int
nxge_init_common_dev(p_nxge_t nxgep)
{
    p_nxge_hw_list_t	hw_p;
    dev_info_t		*p_dip;

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

    p_dip = nxgep->p_dip;
    MUTEX_ENTER(&nxge_common_lock);
    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
        "==> nxge_init_common_dev: func # %d",
        nxgep->function_num));
    /*
     * Loop through the existing per-Neptune hardware list.
     */
    for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_init_common_device: func # %d "
            "hw_p $%p parent dip $%p",
            nxgep->function_num,
            hw_p,
            p_dip));
        if (hw_p->parent_devp == p_dip) {
            nxgep->nxge_hw_p = hw_p;
            hw_p->ndevs++;
            hw_p->nxge_p[nxgep->function_num] = nxgep;
            NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                "==> nxge_init_common_device: func # %d "
                "hw_p $%p parent dip $%p "
                "ndevs %d (found)",
                nxgep->function_num,
                hw_p,
                p_dip,
                hw_p->ndevs));
            break;
        }
    }

    if (hw_p == NULL) {

        char **prop_val;
        uint_t prop_len;
        int i;

        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_init_common_device: func # %d "
            "parent dip $%p (new)",
            nxgep->function_num,
            p_dip));
        hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
        hw_p->parent_devp = p_dip;
        hw_p->magic = NXGE_NEPTUNE_MAGIC;
        nxgep->nxge_hw_p = hw_p;
        hw_p->ndevs++;
        hw_p->nxge_p[nxgep->function_num] = nxgep;
        hw_p->next = nxge_hw_list;
        if (nxgep->niu_type == N2_NIU) {
            hw_p->niu_type = N2_NIU;
            hw_p->platform_type = P_NEPTUNE_NIU;
        } else {
            hw_p->niu_type = NIU_TYPE_NONE;
            hw_p->platform_type = P_NEPTUNE_NONE;
        }

        MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

        nxge_hw_list = hw_p;

        if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
            "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
            for (i = 0; i < prop_len; i++) {
                if ((strcmp((caddr_t)prop_val[i],
                    NXGE_ROCK_COMPATIBLE) == 0)) {
                    hw_p->platform_type = P_NEPTUNE_ROCK;
                    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                        "ROCK hw_p->platform_type %d",
                        hw_p->platform_type));
                    break;
                }
                NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                    "nxge_init_common_dev: read compatible"
                    " property[%d] val[%s]",
                    i, (caddr_t)prop_val[i]));
            }
            /*
             * Free the property only when the lookup succeeded;
             * prop_val is not initialized otherwise.
             */
            ddi_prop_free(prop_val);
        }

        (void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
    }

    MUTEX_EXIT(&nxge_common_lock);

    nxgep->platform_type = hw_p->platform_type;
    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d",
        nxgep->platform_type));
    if (nxgep->niu_type != N2_NIU) {
        nxgep->niu_type = hw_p->niu_type;
    }

    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
        "==> nxge_init_common_device (nxge_hw_list) $%p",
        nxge_hw_list));
    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

    return (NXGE_OK);
}
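
/*
 * Detach this instance from the shared-hardware list: release the
 * per-instance RDC table and groups, and free the list entry itself
 * (destroying its locks) only when the last function detaches.
 */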

static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
    p_nxge_hw_list_t	hw_p, h_hw_p;
    p_nxge_dma_pt_cfg_t	p_dma_cfgp;
    p_nxge_hw_pt_cfg_t	p_cfgp;
    dev_info_t		*p_dip;

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
    if (nxgep->nxge_hw_p == NULL) {
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "<== nxge_uninit_common_device (no common)"));
        return;
    }

    MUTEX_ENTER(&nxge_common_lock);
    h_hw_p = nxge_hw_list;
    for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
        p_dip = hw_p->parent_devp;
        if (nxgep->nxge_hw_p == hw_p &&
            p_dip == nxgep->p_dip &&
            nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
            hw_p->magic == NXGE_NEPTUNE_MAGIC) {

            NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                "==> nxge_uninit_common_device: func # %d "
                "hw_p $%p parent dip $%p "
                "ndevs %d (found)",
                nxgep->function_num,
                hw_p,
                p_dip,
                hw_p->ndevs));

            /*
             * Release the RDC table, a shared resource
             * of the nxge hardware.  The RDC table was
             * assigned to this instance of nxge in
             * nxge_use_cfg_dma_config().
             */
            if (!isLDOMguest(nxgep)) {
                p_dma_cfgp =
                    (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
                p_cfgp =
                    (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
                (void) nxge_fzc_rdc_tbl_unbind(nxgep,
                    p_cfgp->def_mac_rxdma_grpid);

                /* Clean up any outstanding groups. */
                nxge_grp_cleanup(nxgep);
            }

            if (hw_p->ndevs) {
                hw_p->ndevs--;
            }
            hw_p->nxge_p[nxgep->function_num] = NULL;
            if (!hw_p->ndevs) {
                MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
                MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
                MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
                MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
                NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                    "==> nxge_uninit_common_device: "
                    "func # %d "
                    "hw_p $%p parent dip $%p "
                    "ndevs %d (last)",
                    nxgep->function_num,
                    hw_p,
                    p_dip,
                    hw_p->ndevs));

                nxge_hio_uninit(nxgep);

                if (hw_p == nxge_hw_list) {
                    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                        "==> nxge_uninit_common_device: "
                        "remove head func # %d "
                        "hw_p $%p parent dip $%p "
                        "ndevs %d (head)",
                        nxgep->function_num,
                        hw_p,
                        p_dip,
                        hw_p->ndevs));
                    nxge_hw_list = hw_p->next;
                } else {
                    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                        "==> nxge_uninit_common_device: "
                        "remove middle func # %d "
                        "hw_p $%p parent dip $%p "
                        "ndevs %d (middle)",
                        nxgep->function_num,
                        hw_p,
                        p_dip,
                        hw_p->ndevs));
                    h_hw_p->next = hw_p->next;
                }

                nxgep->nxge_hw_p = NULL;
                KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
            }
            break;
        } else {
            h_hw_p = hw_p;
        }
    }

    MUTEX_EXIT(&nxge_common_lock);
    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
        "==> nxge_uninit_common_device (nxge_hw_list) $%p",
        nxge_hw_list));

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}

/*
 * Determine the number of ports from the niu_type or the platform type.
 * Returns the number of ports, or zero on failure.
 */
int
nxge_get_nports(p_nxge_t nxgep)
{
    int	nports = 0;

    switch (nxgep->niu_type) {
    case N2_NIU:
    case NEPTUNE_2_10GF:
        nports = 2;
        break;
    case NEPTUNE_4_1GC:
    case NEPTUNE_2_10GF_2_1GC:
    case NEPTUNE_1_10GF_3_1GC:
    case NEPTUNE_1_1GC_1_10GF_2_1GC:
    case NEPTUNE_2_10GF_2_1GRF:
        nports = 4;
        break;
    default:
        switch (nxgep->platform_type) {
        case P_NEPTUNE_NIU:
        case P_NEPTUNE_ATLAS_2PORT:
            nports = 2;
            break;
        case P_NEPTUNE_ATLAS_4PORT:
        case P_NEPTUNE_MARAMBA_P0:
        case P_NEPTUNE_MARAMBA_P1:
        case P_NEPTUNE_ROCK:
        case P_NEPTUNE_ALONSO:
            nports = 4;
            break;
        default:
            break;
        }
        break;
    }

    return (nports);
}
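
/*
 * Illustrative note (not part of the original source): the number of
 * MSI-X vectors requested per port can be tuned from /etc/system via
 * the nxge_msix_10g_intrs and nxge_msix_1g_intrs variables referenced
 * below, e.g.
 *
 *	set nxge:nxge_msix_10g_intrs = 4
 *
 * A value of 0, or one above NXGE_MSIX_MAX_ALLOWED, falls back to the
 * built-in defaults.
 */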

/*
 * The following two functions support the
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
    int	nmsi;
    extern	int ncpus;

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_create_msi_property"));

    switch (nxgep->mac.portmode) {
    case PORT_10G_COPPER:
    case PORT_10G_FIBER:
    case PORT_10G_TN1010:
        (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
            DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
        /*
         * The maximum number of MSI-X vectors requested is 8.
         * If there are fewer than 8 CPUs, request one MSI-X
         * vector per CPU (the default behavior).
         */
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_create_msi_property (10G): "
            "nxge_msix_10g_intrs %d",
            nxge_msix_10g_intrs));
        if ((nxge_msix_10g_intrs == 0) ||
            (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
            nmsi = NXGE_MSIX_REQUEST_10G;
            NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                "==> nxge_create_msi_property (10G): reset to 8"));
        } else {
            nmsi = nxge_msix_10g_intrs;
        }

        /*
         * If the number of vectors requested is 8 (the default),
         * still cap the request at the number of CPUs.
         */
        if ((nmsi == NXGE_MSIX_REQUEST_10G) &&
            (ncpus < nmsi)) {
            NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                "==> nxge_create_msi_property (10G): "
                "reset to ncpus"));
            nmsi = ncpus;
        }
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_create_msi_property (10G): exists 0x%x "
            "(nmsi %d)",
            ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
            DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
        break;

    default:
        (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
            DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_create_msi_property (1G): "
            "nxge_msix_1g_intrs %d",
            nxge_msix_1g_intrs));
        if ((nxge_msix_1g_intrs == 0) ||
            (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
            nmsi = NXGE_MSIX_REQUEST_1G;
            NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                "==> nxge_create_msi_property (1G): reset to 2"));
        } else {
            nmsi = nxge_msix_1g_intrs;
        }
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_create_msi_property (1G): exists 0x%x "
            "(nmsi %d)",
            ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
            DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
        break;
    }

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_create_msi_property"));
    return (nmsi);
}

/* ARGSUSED */
static int
nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
    void *pr_val)
{
    int		err = 0;
    link_flowctrl_t	fl;

    switch (pr_num) {
    case MAC_PROP_AUTONEG:
        *(uint8_t *)pr_val = 1;
        break;
    case MAC_PROP_FLOWCTRL:
        if (pr_valsize < sizeof (link_flowctrl_t))
            return (EINVAL);
        fl = LINK_FLOWCTRL_RX;
        bcopy(&fl, pr_val, sizeof (fl));
        break;
    case MAC_PROP_ADV_1000FDX_CAP:
    case MAC_PROP_EN_1000FDX_CAP:
        *(uint8_t *)pr_val = 1;
        break;
    case MAC_PROP_ADV_100FDX_CAP:
    case MAC_PROP_EN_100FDX_CAP:
        *(uint8_t *)pr_val = 1;
        break;
    default:
        err = ENOTSUP;
        break;
    }
    return (err);
}

/*
 * The following is a software workaround for the Neptune hardware's
 * interrupt bugs: the hardware may generate spurious interrupts after
 * an interrupt handler has been removed.
 */
#define	NXGE_PCI_PORT_LOGIC_OFFSET	0x98
#define	NXGE_PIM_RESET			(1ULL << 29)
#define	NXGE_GLU_RESET			(1ULL << 30)
#define	NXGE_NIU_RESET			(1ULL << 31)
#define	NXGE_PCI_RESET_ALL		(NXGE_PIM_RESET |	\
					NXGE_GLU_RESET |	\
					NXGE_NIU_RESET)

#define	NXGE_WAIT_QUITE_TIME		200000
#define	NXGE_WAIT_QUITE_RETRY		40
#define	NXGE_PCI_RESET_WAIT		1000000 /* one second */

static void
nxge_niu_peu_reset(p_nxge_t nxgep)
{
    uint32_t	rvalue;
    p_nxge_hw_list_t hw_p;
    p_nxge_t	fnxgep;
    int		i, j;

    NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
    if ((hw_p = nxgep->nxge_hw_p) == NULL) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_niu_peu_reset: NULL hardware pointer"));
        return;
    }

    NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
        "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
        hw_p->flags, nxgep->nxge_link_poll_timerid,
        nxgep->nxge_timerid));

    MUTEX_ENTER(&hw_p->nxge_cfg_lock);
    /*
     * Make sure the other instances on the same hardware have
     * stopped sending PIOs and are quiescent.
     */
    for (i = 0; i < NXGE_MAX_PORTS; i++) {
        fnxgep = hw_p->nxge_p[i];
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_niu_peu_reset: checking entry %d "
            "nxgep $%p", i, fnxgep));
#ifdef	NXGE_DEBUG
        if (fnxgep) {
            NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_niu_peu_reset: entry %d (function %d) "
                "link timer id %d hw timer id %d",
                i, fnxgep->function_num,
                fnxgep->nxge_link_poll_timerid,
                fnxgep->nxge_timerid));
        }
#endif
        if (fnxgep && fnxgep != nxgep &&
            (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
            NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_niu_peu_reset: checking $%p "
                "(function %d) timer ids",
                fnxgep, fnxgep->function_num));
            for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
                NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
                    "==> nxge_niu_peu_reset: waiting"));
                NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
                if (!fnxgep->nxge_timerid &&
                    !fnxgep->nxge_link_poll_timerid) {
                    break;
                }
            }
            NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
            if (fnxgep->nxge_timerid ||
                fnxgep->nxge_link_poll_timerid) {
                MUTEX_EXIT(&hw_p->nxge_cfg_lock);
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "<== nxge_niu_peu_reset: cannot reset "
                    "hardware (devices are still in use)"));
                return;
            }
        }
    }

    if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
        hw_p->flags |= COMMON_RESET_NIU_PCI;
        rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
            NXGE_PCI_PORT_LOGIC_OFFSET);
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_niu_peu_reset: read offset 0x%x (%d) "
            "(data 0x%x)",
            NXGE_PCI_PORT_LOGIC_OFFSET,
            NXGE_PCI_PORT_LOGIC_OFFSET,
            rvalue));

        rvalue |= NXGE_PCI_RESET_ALL;
        pci_config_put32(nxgep->dev_regs->nxge_pciregh,
            NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
            rvalue));

        NXGE_DELAY(NXGE_PCI_RESET_WAIT);
    }

    MUTEX_EXIT(&hw_p->nxge_cfg_lock);
    NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
}
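
/*
 * Read-modify-write of the PCI-E replay timer: the nxge_replay_timeout
 * value is shifted into place with PCI_REPLAY_TIMEOUT_SHIFT and OR'd
 * into the config register at PCI_REPLAY_TIMEOUT_CFG_OFFSET.
 */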

static void
nxge_set_pci_replay_timeout(p_nxge_t nxgep)
{
    p_dev_regs_t	dev_regs;
    uint32_t	value;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));

    if (!nxge_set_replay_timer) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "==> nxge_set_pci_replay_timeout: will not change "
            "the timeout"));
        return;
    }

    dev_regs = nxgep->dev_regs;
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
        dev_regs, dev_regs->nxge_pciregh));

    if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
            "no PCI handle",
            dev_regs));
        return;
    }
    value = (pci_config_get32(dev_regs->nxge_pciregh,
        PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
        (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
        "(timeout value to set 0x%x at offset 0x%x) value 0x%x",
        pci_config_get32(dev_regs->nxge_pciregh,
        PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
        PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));

    pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
        value);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
        pci_config_get32(dev_regs->nxge_pciregh,
        PCI_REPLAY_TIMEOUT_CFG_OFFSET)));

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled.  Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
nxge_quiesce(dev_info_t *dip)
{
    int instance = ddi_get_instance(dip);
    p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);

    if (nxgep == NULL)
        return (DDI_FAILURE);

    /* Turn off debugging */
    nxge_debug_level = NO_DEBUG;
    nxgep->nxge_debug_level = NO_DEBUG;
    npi_debug_level = NO_DEBUG;

    /*
     * Stop the link monitor only when linkchkmode is interrupt based.
     */
    if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
        (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
    }

    (void) nxge_intr_hw_disable(nxgep);

    /*
     * Reset the receive MAC side.
     */
    (void) nxge_rx_mac_disable(nxgep);

    /* Disable and soft reset the IPP */
    if (!isLDOMguest(nxgep))
        (void) nxge_ipp_disable(nxgep);

    /*
     * Reset the transmit/receive DMA side.
     */
    (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
    (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

    /*
     * Reset the transmit MAC side.
     */
    (void) nxge_tx_mac_disable(nxgep);

    return (DDI_SUCCESS);
}