/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_hio.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use OBP published props */
uint32_t nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
/*
 * PSARC/2007/453 MSI-X interrupt limit override
 */
uint32_t nxge_msi_enable = 2;

/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed. If this flag is enabled, the
 * driver will reset the hardware when devices are being detached.
 */
uint32_t nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware checksum bugs that affect
 * packet transmission and reception:
 *
 * Usage of nxge_cksum_offload:
 *
 *  (1) nxge_cksum_offload = 0 (default):
 *	- transmitted packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: driver will compute the software checksum
 *	       based on the partial checksum computed
 *	       by the IP layer.
 *	- received packets:
 *	  TCP: marks packet checksum flags based on hardware result.
 *	  UDP: will not mark checksum flags.
 *
 *  (2) nxge_cksum_offload = 1:
 *	- transmitted packets:
 *	  TCP/UDP: uses the hardware checksum feature.
 *	- received packets:
 *	  TCP/UDP: marks packet checksum flags based on hardware result.
 *
 *  (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksums for both TCP and UDP will be computed
 *	  by the stack.
 *	- Software LSO is not allowed in this case.
 *
 *  (4) nxge_cksum_offload > 2:
 *	- Treated as if it were set to 2
 *	  (the stack will compute the checksum).
 *
 *  (5) If the hardware bug is fixed, this workaround
 *	needs to be updated accordingly to reflect
 *	the new hardware revision.
 */
uint32_t nxge_cksum_offload = 0;
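/*
 * Illustrative example (not part of the driver): like the other global
 * tunables below, this workaround flag would normally be set from
 * /etc/system and takes effect after a reboot, e.g.
 *
 *	set nxge:nxge_cksum_offload = 1
 */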
/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/* Maximum LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_buf_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to the tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if defined(__sparc)
uint32_t nxge_use_kmem_alloc = 1;
#elif defined(__i386)
uint32_t nxge_use_kmem_alloc = 0;
#else
uint32_t nxge_use_kmem_alloc = 1;
#endif

rtrace_t npi_rtracebuf;

/*
 * The hardware sometimes fails to allow enough time for the link
 * partner to send an acknowledgement for packets that the hardware
 * sent to it. In those instances the hardware resends the packets
 * earlier than it should. This behavior has caused some switches to
 * acknowledge the wrong packets, triggering a fatal error. This
 * software workaround sets the replay timer to a value suggested
 * by the hardware team.
 *
 * PCI config space replay timer register:
 * the replay timeout value below (0xc) is written to bits 18:14.
 */
#define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
#define	PCI_REPLAY_TIMEOUT_SHIFT	14

uint32_t nxge_set_replay_timer = 1;
uint32_t nxge_replay_timeout = 0xc;
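/*
 * A minimal sketch (illustration only, assuming a 5-bit field at bits
 * 18:14) of how the timeout value is merged into the config register;
 * the real update is performed by nxge_set_pci_replay_timeout(),
 * defined later in this file. PCI_REPLAY_TIMEOUT_MASK is a
 * hypothetical name for the field mask:
 *
 *	uint32_t val;
 *
 *	val = pci_config_get32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	val &= ~(PCI_REPLAY_TIMEOUT_MASK << PCI_REPLAY_TIMEOUT_SHIFT);
 *	val |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, val);
 */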
/*
 * The transmit serialization can cause the driver to sleep longer
 * than it should before calling the transmit function. The
 * performance group suggested a tunable to cap the maximum wait
 * time when needed; the default is set to 1 tick.
 */
uint32_t nxge_tx_serial_maxsleep = 1;

#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);
static int nxge_quiesce(dev_info_t *);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_remove(void *arg, int slot);
extern void nxge_grp_cleanup(p_nxge_t nxge);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
	char *, caddr_t);
#if defined(sun4v)
extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep);
extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm);
#endif

/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	int slot, int rdctbl, boolean_t usetbl);
void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
	boolean_t factory);

static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
	uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
	uint_t, uint_t, void *, uint_t *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
	const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
	void *, uint_t *);
static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);
static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
	mac_ring_info_t *, mac_ring_handle_t);
static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
	mac_ring_type_t);
static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
	mac_ring_type_t);

static void nxge_niu_peu_reset(p_nxge_t nxgep);
static void nxge_set_pci_replay_timeout(nxge_t *);

mac_priv_prop_t nxge_priv_props[] = {
	{"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW},
	{"_function_number", MAC_PROP_PERM_READ},
	{"_fw_version", MAC_PROP_PERM_READ},
	{"_port_mode", MAC_PROP_PERM_READ},
	{"_hot_swap_phy", MAC_PROP_PERM_READ},
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
	{"_soft_lso_enable", MAC_PROP_PERM_RW}
};

#define	NXGE_MAX_PRIV_PROPS	\
	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	NULL,
	NULL,
	nxge_m_ioctl,
	nxge_m_getcapab,
	NULL,
	NULL,
	nxge_m_setprop,
	nxge_m_getprop
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);
/*
 * For latency-sensitive applications, PAE and the customers requested
 * tunables that allow the user to ask for a larger number of MSI-X
 * interrupts in order to spread the interrupts among multiple channels.
 * The DDI framework limits the maximum number of MSI-X resources that
 * may be allocated to 8 (ddi_msix_alloc_limit). If more than 8 are
 * requested, ddi_msix_alloc_limit must be raised accordingly. The
 * default number of MSI-X interrupts is 8 for a 10G link and 2 for a
 * 1G link.
 */
#define	NXGE_MSIX_MAX_ALLOWED	32
uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
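/*
 * Illustrative example (not part of the driver): requesting 16 MSI-X
 * interrupts on a 10G port requires raising the DDI allocation limit
 * as well, e.g. in /etc/system:
 *
 *	set ddi_msix_alloc_limit = 16
 *	set nxge:nxge_msix_10g_intrs = 16
 */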
/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;
void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;
nxge_os_mutex_t nxgedebuglock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t, ddi_device_acc_attr_t *,
	ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};
ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes[] = {0x2000};
#else
size_t alloc_sizes[] = {0x1000, 0x2000, 0x4000, 0x8000,
    0x10000, 0x20000, 0x40000, 0x80000,
    0x100000, 0x200000, 0x400000, 0x800000,
    0x1000000, 0x2000000, 0x4000000};
#endif

/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);

static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_rx_dma_attr);

	/*
	 * Create & initialize the per-Neptune data structure
	 * (even if we're a guest).
	 */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	/*
	 * Software workaround: set the replay timer.
	 */
	if (nxgep->niu_type != N2_NIU) {
		nxge_set_pci_replay_timeout(nxgep);
	}

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail4;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
		 * internally, and the remaining 2 ports use BMAC (1G "Big"
		 * MAC). The two types of MACs have different characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);

		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * The address_hi (the first int in the reg property)
		 * contains the config handle, but bits 28-31, which are
		 * OBP-specific info, must be masked off.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
		ddi_prop_free(regp);
	}

	/*
	 * Set the defaults for the MTU size.
	 */
	nxge_hw_id_init(nxgep);

	if (isLDOMguest(nxgep)) {
		uchar_t *prop_val;
		uint_t prop_len;
		uint32_t max_frame_size;

		extern void nxge_get_logical_props(p_nxge_t);

		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
		nxgep->mac.portmode = PORT_LOGICAL;
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
		    "phy-type", "virtual transceiver");

		nxgep->nports = 1;
		nxgep->board_ver = 0;	/* XXX What? */

		/*
		 * local-mac-address property gives us info on which
		 * specific MAC address the Hybrid resource is associated
		 * with.
		 */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "local-mac-address", &prop_val,
		    &prop_len) != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		if (prop_len != ETHERADDRL) {
			ddi_prop_free(prop_val);
			goto nxge_attach_fail5;
		}
		ether_copy(prop_val, nxgep->hio_mac_addr);
		ddi_prop_free(prop_val);
		nxge_get_logical_props(nxgep);

		/*
		 * Enable Jumbo property based on the "max-frame-size"
		 * property value.
		 */
		max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
		    nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "max-frame-size", NXGE_MTU_DEFAULT_MAX);
		if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
		    (max_frame_size <= TX_JUMBO_MTU)) {
			nxgep->mac.is_jumbo = B_TRUE;
			nxgep->mac.maxframesize = (uint16_t)max_frame_size;
			nxgep->mac.default_mtu = nxgep->mac.maxframesize -
			    NXGE_EHEADER_VLAN_CRC;
		}
	} else {
		status = nxge_xcvr_find(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
			    " Couldn't determine card type"
			    " .... exit "));
			goto nxge_attach_fail5;
		}

		status = nxge_get_config_properties(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "get_hw create failed"));
			goto nxge_attach_fail;
		}
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

	if (!isLDOMguest(nxgep))
		nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		/* Find our VR & channel sets. */
		status = nxge_hio_vr_add(nxgep);
		if (status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_hio_vr_add failed"));
			(void) hsvc_unregister(&nxgep->niu_hsvc);
			nxgep->niu_hsvc_available = B_FALSE;
			goto nxge_attach_fail;
		}
		goto nxge_attach_exit;
	}
#endif

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}

	/* If a guest, register with vio_net instead. */
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	/* nxge_link_monitor calls xcvr.check_link recursively */
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int status = DDI_SUCCESS;
	int instance;
	p_nxge_t nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	/*
	 * If this flag is set, it affects the Neptune hardware only.
	 */
	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
		nxge_niu_peu_reset(nxgep);
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
int
nxge_hsvc_register(nxge_t *nxgep)
{
	nxge_status_t status;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			return (DDI_FAILURE);
		}
		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}

	return (DDI_SUCCESS);
}
#endif

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	p_dev_regs_t dev_regs;
	char buf[MAXPATHLEN + 1];
	char *devname;
#ifdef NXGE_DEBUG
	char *sysname;
#endif
	off_t regsize;
	nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));
	/*
	 * The driver is running on an N2-NIU system if devname is
	 * something like "/niu@80/network@0".
	 */
	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int *prop_val;
		uint_t prop_len;
		uint8_t func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				return (NXGE_OK);
			}
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMPORTANT: workaround for a bit-swapping bug in HW
		 * that ends up setting no-snoop = yes, resulting in
		 * DMA not being synched properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = pci_config_get16(dev_regs->nxge_pciregh,
		    pci_offset);
		pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}
"nxge_map_reg: hardware addr 0x%0llx " 1342 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh)); 1343 1344 goto nxge_map_regs_exit; 1345 nxge_map_regs_fail3: 1346 if (dev_regs->nxge_msix_regh) { 1347 ddi_regs_map_free(&dev_regs->nxge_msix_regh); 1348 } 1349 if (dev_regs->nxge_vir_regh) { 1350 ddi_regs_map_free(&dev_regs->nxge_regh); 1351 } 1352 nxge_map_regs_fail2: 1353 if (dev_regs->nxge_regh) { 1354 ddi_regs_map_free(&dev_regs->nxge_regh); 1355 } 1356 nxge_map_regs_fail1: 1357 if (dev_regs->nxge_pciregh) { 1358 ddi_regs_map_free(&dev_regs->nxge_pciregh); 1359 } 1360 nxge_map_regs_fail0: 1361 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory")); 1362 kmem_free(dev_regs, sizeof (dev_regs_t)); 1363 1364 nxge_map_regs_exit: 1365 if (ddi_status != DDI_SUCCESS) 1366 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1367 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs")); 1368 return (status); 1369 } 1370 1371 static void 1372 nxge_unmap_regs(p_nxge_t nxgep) 1373 { 1374 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs")); 1375 1376 if (isLDOMguest(nxgep)) { 1377 nxge_guest_regs_map_free(nxgep); 1378 return; 1379 } 1380 1381 if (nxgep->dev_regs) { 1382 if (nxgep->dev_regs->nxge_pciregh) { 1383 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1384 "==> nxge_unmap_regs: bus")); 1385 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh); 1386 nxgep->dev_regs->nxge_pciregh = NULL; 1387 } 1388 if (nxgep->dev_regs->nxge_regh) { 1389 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1390 "==> nxge_unmap_regs: device registers")); 1391 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1392 nxgep->dev_regs->nxge_regh = NULL; 1393 } 1394 if (nxgep->dev_regs->nxge_msix_regh) { 1395 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1396 "==> nxge_unmap_regs: device interrupts")); 1397 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); 1398 nxgep->dev_regs->nxge_msix_regh = NULL; 1399 } 1400 if (nxgep->dev_regs->nxge_vir_regh) { 1401 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1402 "==> nxge_unmap_regs: vio region")); 1403 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); 1404 nxgep->dev_regs->nxge_vir_regh = NULL; 1405 } 1406 if (nxgep->dev_regs->nxge_vir2_regh) { 1407 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1408 "==> nxge_unmap_regs: vio2 region")); 1409 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); 1410 nxgep->dev_regs->nxge_vir2_regh = NULL; 1411 } 1412 1413 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); 1414 nxgep->dev_regs = NULL; 1415 } 1416 1417 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); 1418 } 1419 1420 static nxge_status_t 1421 nxge_setup_mutexes(p_nxge_t nxgep) 1422 { 1423 int ddi_status = DDI_SUCCESS; 1424 nxge_status_t status = NXGE_OK; 1425 nxge_classify_t *classify_ptr; 1426 int partition; 1427 1428 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); 1429 1430 /* 1431 * Get the interrupt cookie so the mutexes can be 1432 * Initialized. 1433 */ 1434 if (isLDOMguest(nxgep)) { 1435 nxgep->interrupt_cookie = 0; 1436 } else { 1437 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, 1438 &nxgep->interrupt_cookie); 1439 1440 if (ddi_status != DDI_SUCCESS) { 1441 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1442 "<== nxge_setup_mutexes: failed 0x%x", 1443 ddi_status)); 1444 goto nxge_setup_mutexes_exit; 1445 } 1446 } 1447 1448 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL); 1449 MUTEX_INIT(&nxgep->poll_lock, NULL, 1450 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1451 1452 /* 1453 * Initialize mutexes for this device. 
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context
	 * because an FFLP operation can take a very long time to
	 * complete, and hence is not suitable to invoke from
	 * interrupt handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 * enable the port, configure the dma channel bitmap,
		 * configure the max burst size).
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Enable the interrupts for DDI.
	 */
	nxge_intrs_enable(nxgep);

	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}


timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}

void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Reset the receive MAC side.
		 */
		(void) nxge_rx_mac_disable(nxgep);

		/*
		 * Drain the IPP.
		 */
		(void) nxge_ipp_drain(nxgep);
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	/*
	 * Start the timer if the reset flag is not set.
	 * If the reset flag is set, the link monitor
	 * will not be started, in order to stop further bus
	 * activity coming from this interface. The driver
	 * will start the monitor function if the interface
	 * is initialized again later.
	 */
	if (!nxge_peu_reset_enable) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t regdata;
	int i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t reg;
	uint64_t buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
	reg = buf[0];

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}
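/*
 * Illustrative note (an assumption based only on the code above): these
 * two helpers implement a debug register peek/poke path. The first
 * 8 bytes of the mblk carry the register offset; for nxge_put64() the
 * next 8 bytes carry the value to write, while nxge_get64() overwrites
 * the offset in place with the value read back.
 */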
/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	/* In case a developer has changed nxge_debug_level. */
	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) {
		nxgep->nxge_debug_level = nxge_debug_level;
	}

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);
	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
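/*
 * Usage sketch (illustrative): nxge_dump_packet() renders a buffer as
 * colon-separated hex with leading zeros suppressed; for example the
 * four bytes {0x00, 0x1b, 0x2c, 0x3d} yield "0:1b:2c:3d". The result
 * lives in a static buffer, so it is overwritten by the next call.
 */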
#ifdef NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char *dev_ptr;
	ddi_acc_handle_t pci_config_handle;
	uint32_t regval;
	int i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first 0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(cfg_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}
}
#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

	nxge_intrs_disable(nxgep);
	nxge_destroy_dev(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

	nxgep->suspended = DDI_RESUME;
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);
	(void) nxge_tx_mac_enable(nxgep);
	nxge_intrs_enable(nxgep);
	nxgep->suspended = 0;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_resume status = 0x%x", status));
	return (status);
}

static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
	    nxgep->mac.portnum));

	status = nxge_link_init(nxgep);

	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "port%d Bad register acc handle", nxgep->mac.portnum));
		status = NXGE_ERROR;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_setup_dev status "
		    "(xcvr init 0x%08x)", status));
		goto nxge_setup_dev_exit;
	}

nxge_setup_dev_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_dev port %d status = 0x%08x",
	    nxgep->mac.portnum, status));

	return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	(void) nxge_hw_stop(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}

static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	uint_t count;
	ddi_dma_cookie_t cookie;
	uint_t iommu_pagesize;
	nxge_status_t status = NXGE_OK;

	NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
	if (nxgep->niu_type != N2_NIU) {
		iommu_pagesize = dvma_pagesize(nxgep->dip);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
		    " default_block_size %d iommu_pagesize %d",
		    nxgep->sys_page_sz,
		    ddi_ptob(nxgep->dip, (ulong_t)1),
		    nxgep->rx_default_block_size,
		    iommu_pagesize));

		if (iommu_pagesize != 0) {
			if (nxgep->sys_page_sz == iommu_pagesize) {
				if (iommu_pagesize > 0x4000)
					nxgep->sys_page_sz = 0x4000;
			} else {
				if (nxgep->sys_page_sz > iommu_pagesize)
iommu_pagesize) 2042 nxgep->sys_page_sz = iommu_pagesize; 2043 } 2044 } 2045 } 2046 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2047 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2048 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2049 "default_block_size %d page mask %d", 2050 nxgep->sys_page_sz, 2051 ddi_ptob(nxgep->dip, (ulong_t)1), 2052 nxgep->rx_default_block_size, 2053 nxgep->sys_page_mask)); 2054 2055 2056 switch (nxgep->sys_page_sz) { 2057 default: 2058 nxgep->sys_page_sz = 0x1000; 2059 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2060 nxgep->rx_default_block_size = 0x1000; 2061 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2062 break; 2063 case 0x1000: 2064 nxgep->rx_default_block_size = 0x1000; 2065 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2066 break; 2067 case 0x2000: 2068 nxgep->rx_default_block_size = 0x2000; 2069 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2070 break; 2071 case 0x4000: 2072 nxgep->rx_default_block_size = 0x4000; 2073 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 2074 break; 2075 case 0x8000: 2076 nxgep->rx_default_block_size = 0x8000; 2077 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 2078 break; 2079 } 2080 2081 #ifndef USE_RX_BIG_BUF 2082 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 2083 #else 2084 nxgep->rx_default_block_size = 0x2000; 2085 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2086 #endif 2087 /* 2088 * Get the system DMA burst size. 2089 */ 2090 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2091 DDI_DMA_DONTWAIT, 0, 2092 &nxgep->dmasparehandle); 2093 if (ddi_status != DDI_SUCCESS) { 2094 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2095 "ddi_dma_alloc_handle: failed " 2096 " status 0x%x", ddi_status)); 2097 goto nxge_get_soft_properties_exit; 2098 } 2099 2100 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2101 (caddr_t)nxgep->dmasparehandle, 2102 sizeof (nxgep->dmasparehandle), 2103 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2104 DDI_DMA_DONTWAIT, 0, 2105 &cookie, &count); 2106 if (ddi_status != DDI_DMA_MAPPED) { 2107 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2108 "Binding spare handle to find system" 2109 " burstsize failed.")); 2110 ddi_status = DDI_FAILURE; 2111 goto nxge_get_soft_properties_fail1; 2112 } 2113 2114 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2115 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2116 2117 nxge_get_soft_properties_fail1: 2118 ddi_dma_free_handle(&nxgep->dmasparehandle); 2119 2120 nxge_get_soft_properties_exit: 2121 2122 if (ddi_status != DDI_SUCCESS) 2123 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2124 2125 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2126 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2127 return (status); 2128 } 2129 2130 static nxge_status_t 2131 nxge_alloc_mem_pool(p_nxge_t nxgep) 2132 { 2133 nxge_status_t status = NXGE_OK; 2134 2135 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2136 2137 status = nxge_alloc_rx_mem_pool(nxgep); 2138 if (status != NXGE_OK) { 2139 return (NXGE_ERROR); 2140 } 2141 2142 status = nxge_alloc_tx_mem_pool(nxgep); 2143 if (status != NXGE_OK) { 2144 nxge_free_rx_mem_pool(nxgep); 2145 return (NXGE_ERROR); 2146 } 2147 2148 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2149 return (NXGE_OK); 2150 } 2151 2152 static void 2153 nxge_free_mem_pool(p_nxge_t nxgep) 2154 { 2155 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2156 2157 nxge_free_rx_mem_pool(nxgep); 2158 nxge_free_tx_mem_pool(nxgep); 2159 2160 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 2161 } 2162 2163 nxge_status_t 2164 
nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 2165 { 2166 uint32_t rdc_max; 2167 p_nxge_dma_pt_cfg_t p_all_cfgp; 2168 p_nxge_hw_pt_cfg_t p_cfgp; 2169 p_nxge_dma_pool_t dma_poolp; 2170 p_nxge_dma_common_t *dma_buf_p; 2171 p_nxge_dma_pool_t dma_cntl_poolp; 2172 p_nxge_dma_common_t *dma_cntl_p; 2173 uint32_t *num_chunks; /* per dma */ 2174 nxge_status_t status = NXGE_OK; 2175 2176 uint32_t nxge_port_rbr_size; 2177 uint32_t nxge_port_rbr_spare_size; 2178 uint32_t nxge_port_rcr_size; 2179 uint32_t rx_cntl_alloc_size; 2180 2181 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 2182 2183 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2184 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2185 rdc_max = NXGE_MAX_RDCS; 2186 2187 /* 2188 * Allocate memory for the common DMA data structures. 2189 */ 2190 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2191 KM_SLEEP); 2192 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2193 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2194 2195 dma_cntl_poolp = (p_nxge_dma_pool_t) 2196 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2197 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2198 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2199 2200 num_chunks = (uint32_t *)KMEM_ZALLOC( 2201 sizeof (uint32_t) * rdc_max, KM_SLEEP); 2202 2203 /* 2204 * Assume that each DMA channel will be configured with 2205 * the default block size. 2206 * rbr block counts are modulo the batch count (16). 2207 */ 2208 nxge_port_rbr_size = p_all_cfgp->rbr_size; 2209 nxge_port_rcr_size = p_all_cfgp->rcr_size; 2210 2211 if (!nxge_port_rbr_size) { 2212 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 2213 } 2214 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 2215 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2216 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 2217 } 2218 2219 p_all_cfgp->rbr_size = nxge_port_rbr_size; 2220 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 2221 2222 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 2223 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2224 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 2225 } 2226 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 2227 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2228 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 2229 "set to default %d", 2230 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 2231 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 2232 } 2233 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 2234 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2235 "nxge_alloc_rx_mem_pool: RCR too high %d, " 2236 "set to default %d", 2237 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 2238 nxge_port_rcr_size = RCR_DEFAULT_MAX; 2239 } 2240 2241 /* 2242 * N2/NIU has limitation on the descriptor sizes (contiguous 2243 * memory allocation on data buffers to 4M (contig_mem_alloc) 2244 * and little endian for control buffers (must use the ddi/dki mem alloc 2245 * function). 2246 */ 2247 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2248 if (nxgep->niu_type == N2_NIU) { 2249 nxge_port_rbr_spare_size = 0; 2250 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2251 (!ISP2(nxge_port_rbr_size))) { 2252 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2253 } 2254 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2255 (!ISP2(nxge_port_rcr_size))) { 2256 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2257 } 2258 } 2259 #endif 2260 2261 /* 2262 * Addresses of receive block ring, receive completion ring and the 2263 * mailbox must be all cache-aligned (64 bytes). 
2264 */ 2265 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2266 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2267 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2268 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2269 2270 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2271 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2272 "nxge_port_rcr_size = %d " 2273 "rx_cntl_alloc_size = %d", 2274 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2275 nxge_port_rcr_size, 2276 rx_cntl_alloc_size)); 2277 2278 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2279 if (nxgep->niu_type == N2_NIU) { 2280 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2281 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2282 2283 if (!ISP2(rx_buf_alloc_size)) { 2284 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2285 "==> nxge_alloc_rx_mem_pool: " 2286 " must be power of 2")); 2287 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2288 goto nxge_alloc_rx_mem_pool_exit; 2289 } 2290 2291 if (rx_buf_alloc_size > (1 << 22)) { 2292 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2293 "==> nxge_alloc_rx_mem_pool: " 2294 " limit size to 4M")); 2295 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2296 goto nxge_alloc_rx_mem_pool_exit; 2297 } 2298 2299 if (rx_cntl_alloc_size < 0x2000) { 2300 rx_cntl_alloc_size = 0x2000; 2301 } 2302 } 2303 #endif 2304 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2305 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2306 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2307 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2308 2309 dma_poolp->ndmas = p_cfgp->max_rdcs; 2310 dma_poolp->num_chunks = num_chunks; 2311 dma_poolp->buf_allocated = B_TRUE; 2312 nxgep->rx_buf_pool_p = dma_poolp; 2313 dma_poolp->dma_buf_pool_p = dma_buf_p; 2314 2315 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2316 dma_cntl_poolp->buf_allocated = B_TRUE; 2317 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2318 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2319 2320 /* Allocate the receive rings, too. */ 2321 nxgep->rx_rbr_rings = 2322 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2323 nxgep->rx_rbr_rings->rbr_rings = 2324 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2325 nxgep->rx_rcr_rings = 2326 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2327 nxgep->rx_rcr_rings->rcr_rings = 2328 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2329 nxgep->rx_mbox_areas_p = 2330 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2331 nxgep->rx_mbox_areas_p->rxmbox_areas = 2332 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2333 2334 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2335 p_cfgp->max_rdcs; 2336 2337 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2338 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2339 2340 nxge_alloc_rx_mem_pool_exit: 2341 return (status); 2342 } 2343 2344 /* 2345 * nxge_alloc_rxb 2346 * 2347 * Allocate buffers for an RDC. 2348 * 2349 * Arguments: 2350 * nxgep 2351 * channel The channel to map into our kernel space. 
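*
* A rough worked example of the sizing done in nxge_alloc_rxb() below
* (the numbers are illustrative assumptions, not a shipped
* configuration): with rx_default_block_size = 0x1000 (4 KB),
* nxge_port_rbr_size = 4096 and no spare blocks, rx_buf_alloc_size
* = 4 KB * 4096 = 16 MB for the channel, which
* nxge_alloc_rx_buf_dma() then satisfies in up to NXGE_DMA_BLOCK
* chunks drawn from the alloc_sizes[] table.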
2352 * 2353 * Notes: 2354 * 2355 * NPI function calls: 2356 * 2357 * NXGE function calls: 2358 * 2359 * Registers accessed: 2360 * 2361 * Context: 2362 * 2363 * Taking apart: 2364 * 2365 * Open questions: 2366 * 2367 */ 2368 nxge_status_t 2369 nxge_alloc_rxb( 2370 p_nxge_t nxgep, 2371 int channel) 2372 { 2373 size_t rx_buf_alloc_size; 2374 nxge_status_t status = NXGE_OK; 2375 2376 nxge_dma_common_t **data; 2377 nxge_dma_common_t **control; 2378 uint32_t *num_chunks; 2379 2380 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2381 2382 /* 2383 * Allocate memory for the receive buffers and descriptor rings. 2384 * Replace these allocation functions with the interface functions 2385 * provided by the partition manager if/when they are available. 2386 */ 2387 2388 /* 2389 * Allocate memory for the receive buffer blocks. 2390 */ 2391 rx_buf_alloc_size = (nxgep->rx_default_block_size * 2392 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); 2393 2394 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2395 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; 2396 2397 if ((status = nxge_alloc_rx_buf_dma( 2398 nxgep, channel, data, rx_buf_alloc_size, 2399 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { 2400 return (status); 2401 } 2402 2403 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " 2404 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); 2405 2406 /* 2407 * Allocate memory for descriptor rings and mailbox. 2408 */ 2409 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2410 2411 if ((status = nxge_alloc_rx_cntl_dma( 2412 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 2413 != NXGE_OK) { 2414 nxge_free_rx_cntl_dma(nxgep, *control); 2415 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 2416 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 2417 return (status); 2418 } 2419 2420 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2421 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2422 2423 return (status); 2424 } 2425 2426 void 2427 nxge_free_rxb( 2428 p_nxge_t nxgep, 2429 int channel) 2430 { 2431 nxge_dma_common_t *data; 2432 nxge_dma_common_t *control; 2433 uint32_t num_chunks; 2434 2435 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2436 2437 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2438 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 2439 nxge_free_rx_buf_dma(nxgep, data, num_chunks); 2440 2441 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2442 nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 2443 2444 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2445 nxge_free_rx_cntl_dma(nxgep, control); 2446 2447 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2448 2449 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2450 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2451 2452 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_alloc_rbb")); 2453 } 2454 2455 static void 2456 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2457 { 2458 int rdc_max = NXGE_MAX_RDCS; 2459 2460 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2461 2462 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 2463 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2464 "<== nxge_free_rx_mem_pool " 2465 "(null rx buf pool or buf not allocated")); 2466 return; 2467 } 2468 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 2469 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2470 "<== nxge_free_rx_mem_pool " 2471 "(null rx cntl buf pool or cntl buf not allocated")); 2472 return; 2473 } 2474 2475 
KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2476 sizeof (p_nxge_dma_common_t) * rdc_max); 2477 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2478 2479 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2480 sizeof (uint32_t) * rdc_max); 2481 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2482 sizeof (p_nxge_dma_common_t) * rdc_max); 2483 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2484 2485 nxgep->rx_buf_pool_p = 0; 2486 nxgep->rx_cntl_pool_p = 0; 2487 2488 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2489 sizeof (p_rx_rbr_ring_t) * rdc_max); 2490 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2491 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2492 sizeof (p_rx_rcr_ring_t) * rdc_max); 2493 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2494 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2495 sizeof (p_rx_mbox_t) * rdc_max); 2496 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2497 2498 nxgep->rx_rbr_rings = 0; 2499 nxgep->rx_rcr_rings = 0; 2500 nxgep->rx_mbox_areas_p = 0; 2501 2502 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2503 } 2504 2505 2506 static nxge_status_t 2507 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2508 p_nxge_dma_common_t *dmap, 2509 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2510 { 2511 p_nxge_dma_common_t rx_dmap; 2512 nxge_status_t status = NXGE_OK; 2513 size_t total_alloc_size; 2514 size_t allocated = 0; 2515 int i, size_index, array_size; 2516 boolean_t use_kmem_alloc = B_FALSE; 2517 2518 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2519 2520 rx_dmap = (p_nxge_dma_common_t) 2521 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2522 KM_SLEEP); 2523 2524 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2525 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2526 dma_channel, alloc_size, block_size, dmap)); 2527 2528 total_alloc_size = alloc_size; 2529 2530 #if defined(RX_USE_RECLAIM_POST) 2531 total_alloc_size = alloc_size + alloc_size/4; 2532 #endif 2533 2534 i = 0; 2535 size_index = 0; 2536 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2537 while ((size_index < array_size) && 2538 (alloc_sizes[size_index] < alloc_size)) 2539 size_index++; 2540 if (size_index >= array_size) { 2541 size_index = array_size - 1; 2542 } 2543 2544 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2545 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2546 use_kmem_alloc = B_TRUE; 2547 #if defined(__i386) || defined(__amd64) 2548 size_index = 0; 2549 #endif 2550 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2551 "==> nxge_alloc_rx_buf_dma: " 2552 "Neptune use kmem_alloc() - size_index %d", 2553 size_index)); 2554 } 2555 2556 while ((allocated < total_alloc_size) && 2557 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2558 rx_dmap[i].dma_chunk_index = i; 2559 rx_dmap[i].block_size = block_size; 2560 rx_dmap[i].alength = alloc_sizes[size_index]; 2561 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2562 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2563 rx_dmap[i].dma_channel = dma_channel; 2564 rx_dmap[i].contig_alloc_type = B_FALSE; 2565 rx_dmap[i].kmem_alloc_type = B_FALSE; 2566 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2567 2568 /* 2569 * N2/NIU: data buffers must be contiguous as the driver 2570 * needs to call Hypervisor api to set up 2571 * logical pages. 
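* Because contig_mem_alloc() must return a single physically
* contiguous region, nxge_alloc_rx_mem_pool() has already clamped
* the N2/NIU request to a power of 2 of at most 4 MB (1 << 22)
* before this point; see the NIU_LP_WORKAROUND checks above.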
2572 */ 2573 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2574 rx_dmap[i].contig_alloc_type = B_TRUE; 2575 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2576 } else if (use_kmem_alloc) { 2577 /* For Neptune, use kmem_alloc */ 2578 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2579 "==> nxge_alloc_rx_buf_dma: " 2580 "Neptune use kmem_alloc()")); 2581 rx_dmap[i].kmem_alloc_type = B_TRUE; 2582 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2583 } 2584 2585 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2586 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2587 "i %d nblocks %d alength %d", 2588 dma_channel, i, &rx_dmap[i], block_size, 2589 i, rx_dmap[i].nblocks, 2590 rx_dmap[i].alength)); 2591 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2592 &nxge_rx_dma_attr, 2593 rx_dmap[i].alength, 2594 &nxge_dev_buf_dma_acc_attr, 2595 DDI_DMA_READ | DDI_DMA_STREAMING, 2596 (p_nxge_dma_common_t)(&rx_dmap[i])); 2597 if (status != NXGE_OK) { 2598 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2599 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2600 "dma %d size_index %d size requested %d", 2601 dma_channel, 2602 size_index, 2603 rx_dmap[i].alength)); 2604 size_index--; 2605 } else { 2606 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2607 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2608 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2609 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2610 "buf_alloc_state %d alloc_type %d", 2611 dma_channel, 2612 &rx_dmap[i], 2613 rx_dmap[i].kaddrp, 2614 rx_dmap[i].alength, 2615 rx_dmap[i].buf_alloc_state, 2616 rx_dmap[i].buf_alloc_type)); 2617 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2618 " alloc_rx_buf_dma allocated rdc %d " 2619 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2620 dma_channel, i, rx_dmap[i].alength, 2621 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2622 rx_dmap[i].kaddrp)); 2623 i++; 2624 allocated += alloc_sizes[size_index]; 2625 } 2626 } 2627 2628 if (allocated < total_alloc_size) { 2629 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2630 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2631 "allocated 0x%x requested 0x%x", 2632 dma_channel, 2633 allocated, total_alloc_size)); 2634 status = NXGE_ERROR; 2635 goto nxge_alloc_rx_mem_fail1; 2636 } 2637 2638 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2639 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2640 "allocated 0x%x requested 0x%x", 2641 dma_channel, 2642 allocated, total_alloc_size)); 2643 2644 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2645 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2646 dma_channel, i)); 2647 *num_chunks = i; 2648 *dmap = rx_dmap; 2649 2650 goto nxge_alloc_rx_mem_exit; 2651 2652 nxge_alloc_rx_mem_fail1: 2653 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2654 2655 nxge_alloc_rx_mem_exit: 2656 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2657 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2658 2659 return (status); 2660 } 2661 2662 /*ARGSUSED*/ 2663 static void 2664 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2665 uint32_t num_chunks) 2666 { 2667 int i; 2668 2669 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2670 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2671 2672 if (dmap == 0) 2673 return; 2674 2675 for (i = 0; i < num_chunks; i++) { 2676 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2677 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2678 i, dmap)); 2679 nxge_dma_free_rx_data_buf(dmap++); 2680 } 2681 2682 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma")); 2683 } 2684 2685 /*ARGSUSED*/ 2686 static nxge_status_t 2687 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2688 p_nxge_dma_common_t *dmap, size_t
size) 2689 { 2690 p_nxge_dma_common_t rx_dmap; 2691 nxge_status_t status = NXGE_OK; 2692 2693 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2694 2695 rx_dmap = (p_nxge_dma_common_t) 2696 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2697 2698 rx_dmap->contig_alloc_type = B_FALSE; 2699 rx_dmap->kmem_alloc_type = B_FALSE; 2700 2701 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2702 &nxge_desc_dma_attr, 2703 size, 2704 &nxge_dev_desc_dma_acc_attr, 2705 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2706 rx_dmap); 2707 if (status != NXGE_OK) { 2708 goto nxge_alloc_rx_cntl_dma_fail1; 2709 } 2710 2711 *dmap = rx_dmap; 2712 goto nxge_alloc_rx_cntl_dma_exit; 2713 2714 nxge_alloc_rx_cntl_dma_fail1: 2715 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2716 2717 nxge_alloc_rx_cntl_dma_exit: 2718 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2719 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2720 2721 return (status); 2722 } 2723 2724 /*ARGSUSED*/ 2725 static void 2726 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2727 { 2728 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2729 2730 if (dmap == 0) 2731 return; 2732 2733 nxge_dma_mem_free(dmap); 2734 2735 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2736 } 2737 2738 typedef struct { 2739 size_t tx_size; 2740 size_t cr_size; 2741 size_t threshhold; 2742 } nxge_tdc_sizes_t; 2743 2744 static 2745 nxge_status_t 2746 nxge_tdc_sizes( 2747 nxge_t *nxgep, 2748 nxge_tdc_sizes_t *sizes) 2749 { 2750 uint32_t threshhold; /* The bcopy() threshold */ 2751 size_t tx_size; /* Transmit buffer size */ 2752 size_t cr_size; /* Completion ring size */ 2753 2754 /* 2755 * Assume that each DMA channel will be configured with the 2756 * default transmit buffer size for copying transmit data. 2757 * (If a packet is bigger than this, it will not be copied.) 2758 */ 2759 if (nxgep->niu_type == N2_NIU) { 2760 threshhold = TX_BCOPY_SIZE; 2761 } else { 2762 threshhold = nxge_bcopy_thresh; 2763 } 2764 tx_size = nxge_tx_ring_size * threshhold; 2765 2766 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 2767 cr_size += sizeof (txdma_mailbox_t); 2768 2769 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2770 if (nxgep->niu_type == N2_NIU) { 2771 if (!ISP2(tx_size)) { 2772 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2773 "==> nxge_tdc_sizes: Tx size" 2774 " must be power of 2")); 2775 return (NXGE_ERROR); 2776 } 2777 2778 if (tx_size > (1 << 22)) { 2779 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2780 "==> nxge_tdc_sizes: Tx size" 2781 " limited to 4M")); 2782 return (NXGE_ERROR); 2783 } 2784 2785 if (cr_size < 0x2000) 2786 cr_size = 0x2000; 2787 } 2788 #endif 2789 2790 sizes->threshhold = threshhold; 2791 sizes->tx_size = tx_size; 2792 sizes->cr_size = cr_size; 2793 2794 return (NXGE_OK); 2795 } 2796 /* 2797 * nxge_alloc_txb 2798 * 2799 * Allocate buffers for a TDC. 2800 * 2801 * Arguments: 2802 * nxgep 2803 * channel The channel to map into our kernel space.
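*
* A back-of-the-envelope example of what nxge_tdc_sizes() computes
* for this routine (illustrative values, assuming a 1024-entry ring
* and a 2 KB bcopy threshold rather than any shipped default):
* tx_size = 1024 * 2 KB = 2 MB of pre-mapped copy buffers, and
* cr_size = 1024 * sizeof (tx_desc_t) + sizeof (txdma_mailbox_t),
* rounded up to at least 0x2000 bytes on N2/NIU.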
2804 * 2805 * Notes: 2806 * 2807 * NPI function calls: 2808 * 2809 * NXGE function calls: 2810 * 2811 * Registers accessed: 2812 * 2813 * Context: 2814 * 2815 * Taking apart: 2816 * 2817 * Open questions: 2818 * 2819 */ 2820 nxge_status_t 2821 nxge_alloc_txb( 2822 p_nxge_t nxgep, 2823 int channel) 2824 { 2825 nxge_dma_common_t **dma_buf_p; 2826 nxge_dma_common_t **dma_cntl_p; 2827 uint32_t *num_chunks; 2828 nxge_status_t status = NXGE_OK; 2829 2830 nxge_tdc_sizes_t sizes; 2831 2832 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tbb")); 2833 2834 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 2835 return (NXGE_ERROR); 2836 2837 /* 2838 * Allocate memory for transmit buffers and descriptor rings. 2839 * Replace these allocation functions with the interface functions 2840 * provided by the partition manager Real Soon Now. 2841 */ 2842 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2843 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 2844 2845 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2846 2847 /* 2848 * Allocate memory for transmit buffers and descriptor rings. 2849 * Replace allocation functions with interface functions provided 2850 * by the partition manager when it is available. 2851 * 2852 * Allocate memory for the transmit buffer pool. 2853 */ 2854 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2855 "sizes: tx: %ld, cr:%ld, th:%ld", 2856 sizes.tx_size, sizes.cr_size, sizes.threshhold)); 2857 2858 *num_chunks = 0; 2859 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 2860 sizes.tx_size, sizes.threshhold, num_chunks); 2861 if (status != NXGE_OK) { 2862 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 2863 return (status); 2864 } 2865 2866 /* 2867 * Allocate memory for descriptor rings and mailbox. 2868 */ 2869 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 2870 sizes.cr_size); 2871 if (status != NXGE_OK) { 2872 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 2873 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 2874 return (status); 2875 } 2876 2877 return (NXGE_OK); 2878 } 2879 2880 void 2881 nxge_free_txb( 2882 p_nxge_t nxgep, 2883 int channel) 2884 { 2885 nxge_dma_common_t *data; 2886 nxge_dma_common_t *control; 2887 uint32_t num_chunks; 2888 2889 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 2890 2891 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2892 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2893 nxge_free_tx_buf_dma(nxgep, data, num_chunks); 2894 2895 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2896 nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 2897 2898 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2899 nxge_free_tx_cntl_dma(nxgep, control); 2900 2901 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2902 2903 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2904 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2905 2906 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 2907 } 2908 2909 /* 2910 * nxge_alloc_tx_mem_pool 2911 * 2912 * This function allocates all of the per-port TDC control data structures. 2913 * The per-channel (TDC) data structures are allocated when needed. 
2914 * 2915 * Arguments: 2916 * nxgep 2917 * 2918 * Notes: 2919 * 2920 * Context: 2921 * Any domain 2922 */ 2923 nxge_status_t 2924 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2925 { 2926 nxge_hw_pt_cfg_t *p_cfgp; 2927 nxge_dma_pool_t *dma_poolp; 2928 nxge_dma_common_t **dma_buf_p; 2929 nxge_dma_pool_t *dma_cntl_poolp; 2930 nxge_dma_common_t **dma_cntl_p; 2931 uint32_t *num_chunks; /* per dma */ 2932 int tdc_max; 2933 2934 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2935 2936 p_cfgp = &nxgep->pt_config.hw_config; 2937 tdc_max = NXGE_MAX_TDCS; 2938 2939 /* 2940 * Allocate memory for each transmit DMA channel. 2941 */ 2942 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2943 KM_SLEEP); 2944 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2945 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2946 2947 dma_cntl_poolp = (p_nxge_dma_pool_t) 2948 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2949 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2950 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2951 2952 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2953 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2954 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2955 "set to default %d", 2956 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2957 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2958 } 2959 2960 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2961 /* 2962 * N2/NIU has limitation on the descriptor sizes (contiguous 2963 * memory allocation on data buffers to 4M (contig_mem_alloc) 2964 * and little endian for control buffers (must use the ddi/dki mem alloc 2965 * function). The transmit ring is limited to 8K (includes the 2966 * mailbox). 2967 */ 2968 if (nxgep->niu_type == N2_NIU) { 2969 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2970 (!ISP2(nxge_tx_ring_size))) { 2971 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2972 } 2973 } 2974 #endif 2975 2976 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2977 2978 num_chunks = (uint32_t *)KMEM_ZALLOC( 2979 sizeof (uint32_t) * tdc_max, KM_SLEEP); 2980 2981 dma_poolp->ndmas = p_cfgp->tdc.owned; 2982 dma_poolp->num_chunks = num_chunks; 2983 dma_poolp->dma_buf_pool_p = dma_buf_p; 2984 nxgep->tx_buf_pool_p = dma_poolp; 2985 2986 dma_poolp->buf_allocated = B_TRUE; 2987 2988 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 2989 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2990 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2991 2992 dma_cntl_poolp->buf_allocated = B_TRUE; 2993 2994 nxgep->tx_rings = 2995 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 2996 nxgep->tx_rings->rings = 2997 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 2998 nxgep->tx_mbox_areas_p = 2999 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 3000 nxgep->tx_mbox_areas_p->txmbox_areas_p = 3001 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 3002 3003 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 3004 3005 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3006 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 3007 tdc_max, dma_poolp->ndmas)); 3008 3009 return (NXGE_OK); 3010 } 3011 3012 nxge_status_t 3013 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 3014 p_nxge_dma_common_t *dmap, size_t alloc_size, 3015 size_t block_size, uint32_t *num_chunks) 3016 { 3017 p_nxge_dma_common_t tx_dmap; 3018 nxge_status_t status = NXGE_OK; 3019 size_t total_alloc_size; 3020 size_t allocated = 0; 3021 int i, size_index, array_size; 3022 3023 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 3024 3025 tx_dmap = (p_nxge_dma_common_t) 3026 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * 
NXGE_DMA_BLOCK, 3027 KM_SLEEP); 3028 3029 total_alloc_size = alloc_size; 3030 i = 0; 3031 size_index = 0; 3032 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3033 while ((size_index < array_size) && 3034 (alloc_sizes[size_index] < alloc_size)) 3035 size_index++; 3036 if (size_index >= array_size) { 3037 size_index = array_size - 1; 3038 } 3039 3040 while ((allocated < total_alloc_size) && 3041 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3042 3043 tx_dmap[i].dma_chunk_index = i; 3044 tx_dmap[i].block_size = block_size; 3045 tx_dmap[i].alength = alloc_sizes[size_index]; 3046 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3047 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3048 tx_dmap[i].dma_channel = dma_channel; 3049 tx_dmap[i].contig_alloc_type = B_FALSE; 3050 tx_dmap[i].kmem_alloc_type = B_FALSE; 3051 3052 /* 3053 * N2/NIU: data buffers must be contiguous as the driver 3054 * needs to call Hypervisor api to set up 3055 * logical pages. 3056 */ 3057 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3058 tx_dmap[i].contig_alloc_type = B_TRUE; 3059 } 3060 3061 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3062 &nxge_tx_dma_attr, 3063 tx_dmap[i].alength, 3064 &nxge_dev_buf_dma_acc_attr, 3065 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3066 (p_nxge_dma_common_t)(&tx_dmap[i])); 3067 if (status != NXGE_OK) { 3068 size_index--; 3069 } else { 3070 i++; 3071 allocated += alloc_sizes[size_index]; 3072 } 3073 } 3074 3075 if (allocated < total_alloc_size) { 3076 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3077 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 3078 "allocated 0x%x requested 0x%x", 3079 dma_channel, 3080 allocated, total_alloc_size)); 3081 status = NXGE_ERROR; 3082 goto nxge_alloc_tx_mem_fail1; 3083 } 3084 3085 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3086 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3087 "allocated 0x%x requested 0x%x", 3088 dma_channel, 3089 allocated, total_alloc_size)); 3090 3091 *num_chunks = i; 3092 *dmap = tx_dmap; 3093 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3094 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3095 *dmap, i)); 3096 goto nxge_alloc_tx_mem_exit; 3097 3098 nxge_alloc_tx_mem_fail1: 3099 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3100 3101 nxge_alloc_tx_mem_exit: 3102 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3103 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3104 3105 return (status); 3106 } 3107 3108 /*ARGSUSED*/ 3109 static void 3110 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3111 uint32_t num_chunks) 3112 { 3113 int i; 3114 3115 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3116 3117 if (dmap == 0) 3118 return; 3119 3120 for (i = 0; i < num_chunks; i++) { 3121 nxge_dma_mem_free(dmap++); 3122 } 3123 3124 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3125 } 3126 3127 /*ARGSUSED*/ 3128 nxge_status_t 3129 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3130 p_nxge_dma_common_t *dmap, size_t size) 3131 { 3132 p_nxge_dma_common_t tx_dmap; 3133 nxge_status_t status = NXGE_OK; 3134 3135 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3136 tx_dmap = (p_nxge_dma_common_t) 3137 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3138 3139 tx_dmap->contig_alloc_type = B_FALSE; 3140 tx_dmap->kmem_alloc_type = B_FALSE; 3141 3142 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3143 &nxge_desc_dma_attr, 3144 size, 3145 &nxge_dev_desc_dma_acc_attr, 3146 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3147 tx_dmap); 3148 if (status != NXGE_OK) { 3149 
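/* Control-block DMA allocation failed: free the tx_dmap shell below. */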
goto nxge_alloc_tx_cntl_dma_fail1; 3150 } 3151 3152 *dmap = tx_dmap; 3153 goto nxge_alloc_tx_cntl_dma_exit; 3154 3155 nxge_alloc_tx_cntl_dma_fail1: 3156 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3157 3158 nxge_alloc_tx_cntl_dma_exit: 3159 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3160 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3161 3162 return (status); 3163 } 3164 3165 /*ARGSUSED*/ 3166 static void 3167 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3168 { 3169 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3170 3171 if (dmap == 0) 3172 return; 3173 3174 nxge_dma_mem_free(dmap); 3175 3176 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3177 } 3178 3179 /* 3180 * nxge_free_tx_mem_pool 3181 * 3182 * This function frees all of the per-port TDC control data structures. 3183 * The per-channel (TDC) data structures are freed when the channel 3184 * is stopped. 3185 * 3186 * Arguments: 3187 * nxgep 3188 * 3189 * Notes: 3190 * 3191 * Context: 3192 * Any domain 3193 */ 3194 static void 3195 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3196 { 3197 int tdc_max = NXGE_MAX_TDCS; 3198 3199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3200 3201 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3202 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3203 "<== nxge_free_tx_mem_pool " 3204 "(null tx buf pool or buf not allocated)")); 3205 return; 3206 } 3207 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3208 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3209 "<== nxge_free_tx_mem_pool " 3210 "(null tx cntl buf pool or cntl buf not allocated)")); 3211 return; 3212 } 3213 3214 /* 1. Free the mailboxes. */ 3215 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3216 sizeof (p_tx_mbox_t) * tdc_max); 3217 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3218 3219 nxgep->tx_mbox_areas_p = 0; 3220 3221 /* 2. Free the transmit ring arrays. */ 3222 KMEM_FREE(nxgep->tx_rings->rings, 3223 sizeof (p_tx_ring_t) * tdc_max); 3224 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3225 3226 nxgep->tx_rings = 0; 3227 3228 /* 3. Free the completion ring data structures. */ 3229 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3230 sizeof (p_nxge_dma_common_t) * tdc_max); 3231 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3232 3233 nxgep->tx_cntl_pool_p = 0; 3234 3235 /* 4. Free the data ring data structures. */ 3236 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3237 sizeof (uint32_t) * tdc_max); 3238 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3239 sizeof (p_nxge_dma_common_t) * tdc_max); 3240 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3241 3242 nxgep->tx_buf_pool_p = 0; 3243 3244 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3245 } 3246 3247 /*ARGSUSED*/ 3248 static nxge_status_t 3249 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3250 struct ddi_dma_attr *dma_attrp, 3251 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3252 p_nxge_dma_common_t dma_p) 3253 { 3254 caddr_t kaddrp; 3255 int ddi_status = DDI_SUCCESS; 3256 boolean_t contig_alloc_type; 3257 boolean_t kmem_alloc_type; 3258 3259 contig_alloc_type = dma_p->contig_alloc_type; 3260 3261 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3262 /* 3263 * contig_alloc_type for contiguous memory only allowed 3264 * for N2/NIU.
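*
* A sketch of the decision made below, keyed off contig_alloc_type
* and kmem_alloc_type:
*
*	contig	kmem	allocator
*	-----	----	---------
*	FALSE	FALSE	ddi_dma_mem_alloc()  (DDI-managed memory)
*	FALSE	TRUE	kmem_alloc()	     (Neptune rx data buffers)
*	TRUE	-	contig_mem_alloc()   (N2/NIU, sun4v only)
*
* In every case the buffer must bind to exactly one DMA cookie;
* otherwise the allocation is unwound and an error returned.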
3265 */ 3266 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3267 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3268 dma_p->contig_alloc_type)); 3269 return (NXGE_ERROR | NXGE_DDI_FAILED); 3270 } 3271 3272 dma_p->dma_handle = NULL; 3273 dma_p->acc_handle = NULL; 3274 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3275 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3276 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3277 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3278 if (ddi_status != DDI_SUCCESS) { 3279 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3280 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3281 return (NXGE_ERROR | NXGE_DDI_FAILED); 3282 } 3283 3284 kmem_alloc_type = dma_p->kmem_alloc_type; 3285 3286 switch (contig_alloc_type) { 3287 case B_FALSE: 3288 switch (kmem_alloc_type) { 3289 case B_FALSE: 3290 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3291 length, 3292 acc_attr_p, 3293 xfer_flags, 3294 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3295 &dma_p->acc_handle); 3296 if (ddi_status != DDI_SUCCESS) { 3297 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3298 "nxge_dma_mem_alloc: " 3299 "ddi_dma_mem_alloc failed")); 3300 ddi_dma_free_handle(&dma_p->dma_handle); 3301 dma_p->dma_handle = NULL; 3302 return (NXGE_ERROR | NXGE_DDI_FAILED); 3303 } 3304 if (dma_p->alength < length) { 3305 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3306 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3307 "< length.")); 3308 ddi_dma_mem_free(&dma_p->acc_handle); 3309 ddi_dma_free_handle(&dma_p->dma_handle); 3310 dma_p->acc_handle = NULL; 3311 dma_p->dma_handle = NULL; 3312 return (NXGE_ERROR); 3313 } 3314 3315 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3316 NULL, 3317 kaddrp, dma_p->alength, xfer_flags, 3318 DDI_DMA_DONTWAIT, 3319 0, &dma_p->dma_cookie, &dma_p->ncookies); 3320 if (ddi_status != DDI_DMA_MAPPED) { 3321 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3322 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3323 "failed " 3324 "(status 0x%x ncookies %d.)", ddi_status, 3325 dma_p->ncookies)); 3326 if (dma_p->acc_handle) { 3327 ddi_dma_mem_free(&dma_p->acc_handle); 3328 dma_p->acc_handle = NULL; 3329 } 3330 ddi_dma_free_handle(&dma_p->dma_handle); 3331 dma_p->dma_handle = NULL; 3332 return (NXGE_ERROR | NXGE_DDI_FAILED); 3333 } 3334 3335 if (dma_p->ncookies != 1) { 3336 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3337 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3338 "> 1 cookie" 3339 "(status 0x%x ncookies %d.)", ddi_status, 3340 dma_p->ncookies)); 3341 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3342 if (dma_p->acc_handle) { 3343 ddi_dma_mem_free(&dma_p->acc_handle); 3344 dma_p->acc_handle = NULL; 3345 } 3346 ddi_dma_free_handle(&dma_p->dma_handle); 3347 dma_p->dma_handle = NULL; 3348 dma_p->acc_handle = NULL; 3349 return (NXGE_ERROR); 3350 } 3351 break; 3352 3353 case B_TRUE: 3354 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3355 if (kaddrp == NULL) { 3356 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3357 "nxge_dma_mem_alloc: " 3358 "kmem_alloc failed")); ddi_dma_free_handle(&dma_p->dma_handle); dma_p->dma_handle = NULL; 3359 return (NXGE_ERROR); 3360 } 3361 3362 dma_p->alength = length; 3363 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3364 NULL, kaddrp, dma_p->alength, xfer_flags, 3365 DDI_DMA_DONTWAIT, 0, 3366 &dma_p->dma_cookie, &dma_p->ncookies); 3367 if (ddi_status != DDI_DMA_MAPPED) { 3368 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3369 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3370 "(kmem_alloc) failed kaddrp $%p length %d " 3371 "(status 0x%x (%d) ncookies %d.)", 3372 kaddrp, length, 3373 ddi_status, ddi_status, dma_p->ncookies)); 3374 KMEM_FREE(kaddrp, length); 3375
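/*
* Bind failed: the kmem buffer is already freed above; the handles
* and pointers are cleared below so the caller sees a fully unwound
* state.
*/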
dma_p->acc_handle = NULL; 3376 ddi_dma_free_handle(&dma_p->dma_handle); 3377 dma_p->dma_handle = NULL; 3378 dma_p->kaddrp = NULL; 3379 return (NXGE_ERROR | NXGE_DDI_FAILED); 3380 } 3381 3382 if (dma_p->ncookies != 1) { 3383 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3384 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3385 "(kmem_alloc) > 1 cookie" 3386 "(status 0x%x ncookies %d.)", ddi_status, 3387 dma_p->ncookies)); 3388 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3389 KMEM_FREE(kaddrp, length); 3390 ddi_dma_free_handle(&dma_p->dma_handle); 3391 dma_p->dma_handle = NULL; 3392 dma_p->acc_handle = NULL; 3393 dma_p->kaddrp = NULL; 3394 return (NXGE_ERROR); 3395 } 3396 3397 dma_p->kaddrp = kaddrp; 3398 3399 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3400 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3401 "kaddr $%p alength %d", 3402 dma_p, 3403 kaddrp, 3404 dma_p->alength)); 3405 break; 3406 } 3407 break; 3408 3409 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3410 case B_TRUE: 3411 kaddrp = (caddr_t)contig_mem_alloc(length); 3412 if (kaddrp == NULL) { 3413 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3414 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3415 ddi_dma_free_handle(&dma_p->dma_handle); 3416 return (NXGE_ERROR | NXGE_DDI_FAILED); 3417 } 3418 3419 dma_p->alength = length; 3420 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3421 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3422 &dma_p->dma_cookie, &dma_p->ncookies); 3423 if (ddi_status != DDI_DMA_MAPPED) { 3424 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3425 "nxge_dma_mem_alloc:ddi_dma_addr_bind failed " 3426 "(status 0x%x ncookies %d.)", ddi_status, 3427 dma_p->ncookies)); 3428 3429 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3430 "==> nxge_dma_mem_alloc: (not mapped)" 3431 "length %lu (0x%x) " 3432 "free contig kaddrp $%p " 3433 "va_to_pa $%p", 3434 length, length, 3435 kaddrp, 3436 va_to_pa(kaddrp))); 3437 3438 3439 contig_mem_free((void *)kaddrp, length); 3440 ddi_dma_free_handle(&dma_p->dma_handle); 3441 3442 dma_p->dma_handle = NULL; 3443 dma_p->acc_handle = NULL; 3444 dma_p->alength = 0; 3445 dma_p->kaddrp = NULL; 3446 3447 return (NXGE_ERROR | NXGE_DDI_FAILED); 3448 } 3449 3450 if (dma_p->ncookies != 1 || 3451 (dma_p->dma_cookie.dmac_laddress == 0)) { 3452 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3453 "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 " 3454 "cookie or " 3455 "dmac_laddress is NULL $%p size %d " 3456 " (status 0x%x ncookies %d.)", 3457 dma_p->dma_cookie.dmac_laddress, 3458 dma_p->dma_cookie.dmac_size, 3459 ddi_status, 3460 dma_p->ncookies)); 3461 3462 contig_mem_free((void *)kaddrp, length); 3463 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3464 ddi_dma_free_handle(&dma_p->dma_handle); 3465 3466 dma_p->alength = 0; 3467 dma_p->dma_handle = NULL; 3468 dma_p->acc_handle = NULL; 3469 dma_p->kaddrp = NULL; 3470 3471 return (NXGE_ERROR | NXGE_DDI_FAILED); 3472 } 3473 break; 3474 3475 #else 3476 case B_TRUE: 3477 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3478 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 3479 return (NXGE_ERROR | NXGE_DDI_FAILED); 3480 #endif 3481 } 3482 3483 dma_p->kaddrp = kaddrp; 3484 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3485 dma_p->alength - RXBUF_64B_ALIGNED; 3486 #if defined(__i386) 3487 dma_p->ioaddr_pp = 3488 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 3489 #else 3490 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3491 #endif 3492 dma_p->last_ioaddr_pp = 3493 #if defined(__i386) 3494 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress + 3495 #else 3496
(unsigned char *)dma_p->dma_cookie.dmac_laddress + 3497 #endif 3498 dma_p->alength - RXBUF_64B_ALIGNED; 3499 3500 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 3501 3502 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3503 dma_p->orig_ioaddr_pp = 3504 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3505 dma_p->orig_alength = length; 3506 dma_p->orig_kaddrp = kaddrp; 3507 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 3508 #endif 3509 3510 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 3511 "dma buffer allocated: dma_p $%p " 3512 "return dmac_laddress from cookie $%p cookie dmac_size %d " 3513 "dma_p->ioaddr_p $%p " 3514 "dma_p->orig_ioaddr_p $%p " 3515 "orig_vatopa $%p " 3516 "alength %d (0x%x) " 3517 "kaddrp $%p " 3518 "length %d (0x%x)", 3519 dma_p, 3520 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 3521 dma_p->ioaddr_pp, 3522 dma_p->orig_ioaddr_pp, 3523 dma_p->orig_vatopa, 3524 dma_p->alength, dma_p->alength, 3525 kaddrp, 3526 length, length)); 3527 3528 return (NXGE_OK); 3529 } 3530 3531 static void 3532 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 3533 { 3534 if (dma_p->dma_handle != NULL) { 3535 if (dma_p->ncookies) { 3536 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3537 dma_p->ncookies = 0; 3538 } 3539 ddi_dma_free_handle(&dma_p->dma_handle); 3540 dma_p->dma_handle = NULL; 3541 } 3542 3543 if (dma_p->acc_handle != NULL) { 3544 ddi_dma_mem_free(&dma_p->acc_handle); 3545 dma_p->acc_handle = NULL; 3546 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3547 } 3548 3549 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3550 if (dma_p->contig_alloc_type && 3551 dma_p->orig_kaddrp && dma_p->orig_alength) { 3552 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3553 "kaddrp $%p (orig_kaddrp $%p) " 3554 "mem type %d " 3555 "orig_alength %d " 3556 "alength 0x%x (%d)", 3557 dma_p->kaddrp, 3558 dma_p->orig_kaddrp, 3559 dma_p->contig_alloc_type, 3560 dma_p->orig_alength, 3561 dma_p->alength, dma_p->alength)); 3562 3563 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 3564 dma_p->orig_alength = 0; 3565 dma_p->orig_kaddrp = NULL; 3566 dma_p->contig_alloc_type = B_FALSE; 3567 } 3568 #endif 3569 dma_p->kaddrp = NULL; 3570 dma_p->alength = 0; 3571 } 3572 3573 static void 3574 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p) 3575 { 3576 uint64_t kaddr; 3577 uint32_t buf_size; 3578 3579 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf")); 3580 3581 if (dma_p->dma_handle != NULL) { 3582 if (dma_p->ncookies) { 3583 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3584 dma_p->ncookies = 0; 3585 } 3586 ddi_dma_free_handle(&dma_p->dma_handle); 3587 dma_p->dma_handle = NULL; 3588 } 3589 3590 if (dma_p->acc_handle != NULL) { 3591 ddi_dma_mem_free(&dma_p->acc_handle); 3592 dma_p->acc_handle = NULL; 3593 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3594 } 3595 3596 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3597 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d", 3598 dma_p, 3599 dma_p->buf_alloc_state)); 3600 3601 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) { 3602 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3603 "<== nxge_dma_free_rx_data_buf: " 3604 "outstanding data buffers")); 3605 return; 3606 } 3607 3608 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3609 if (dma_p->contig_alloc_type && 3610 dma_p->orig_kaddrp && dma_p->orig_alength) { 3611 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: " 3612 "kaddrp $%p (orig_kaddrp $%p) " 3613 "mem type %d " 3614 "orig_alength %d " 3615 "alength 0x%x (%d)", 3616 dma_p->kaddrp, 3617 dma_p->orig_kaddrp,
3618 dma_p->contig_alloc_type, 3619 dma_p->orig_alength, 3620 dma_p->alength, dma_p->alength)); 3621 3622 kaddr = (uint64_t)dma_p->orig_kaddrp; 3623 buf_size = dma_p->orig_alength; 3624 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size); 3625 dma_p->orig_alength = 0; 3626 dma_p->orig_kaddrp = NULL; 3627 dma_p->contig_alloc_type = B_FALSE; 3628 dma_p->kaddrp = NULL; 3629 dma_p->alength = 0; 3630 return; 3631 } 3632 #endif 3633 3634 if (dma_p->kmem_alloc_type) { 3635 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3636 "nxge_dma_free_rx_data_buf: free kmem " 3637 "kaddrp $%p (orig_kaddrp $%p) " 3638 "alloc type %d " 3639 "orig_alength %d " 3640 "alength 0x%x (%d)", 3641 dma_p->kaddrp, 3642 dma_p->orig_kaddrp, 3643 dma_p->kmem_alloc_type, 3644 dma_p->orig_alength, 3645 dma_p->alength, dma_p->alength)); 3646 #if defined(__i386) 3647 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp; 3648 #else 3649 kaddr = (uint64_t)dma_p->kaddrp; 3650 #endif 3651 buf_size = dma_p->orig_alength; 3652 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3653 "nxge_dma_free_rx_data_buf: free dmap $%p " 3654 "kaddr $%p buf_size %d", 3655 dma_p, 3656 kaddr, buf_size)); 3657 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size); 3658 dma_p->alength = 0; 3659 dma_p->orig_alength = 0; 3660 dma_p->kaddrp = NULL; 3661 dma_p->kmem_alloc_type = B_FALSE; 3662 } 3663 3664 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf")); 3665 } 3666 3667 /* 3668 * nxge_m_start() -- start transmitting and receiving. 3669 * 3670 * This function is called by the MAC layer when the first 3671 * stream is opened to prepare the hardware for sending 3672 * and receiving packets. 3673 */ 3674 static int 3675 nxge_m_start(void *arg) 3676 { 3677 p_nxge_t nxgep = (p_nxge_t)arg; 3678 3679 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 3680 3681 /* 3682 * Are we already started? 3683 */ 3684 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 3685 return (0); 3686 } 3687 3688 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) { 3689 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 3690 } 3691 3692 /* 3693 * Make sure RX MAC is disabled while we initialize. 3694 */ 3695 if (!isLDOMguest(nxgep)) { 3696 (void) nxge_rx_mac_disable(nxgep); 3697 } 3698 3699 /* 3700 * Grab the global lock. 3701 */ 3702 MUTEX_ENTER(nxgep->genlock); 3703 3704 /* 3705 * Initialize the driver and hardware. 3706 */ 3707 if (nxge_init(nxgep) != NXGE_OK) { 3708 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3709 "<== nxge_m_start: initialization failed")); 3710 MUTEX_EXIT(nxgep->genlock); 3711 return (EIO); 3712 } 3713 3714 /* 3715 * Start the timer that checks for system errors and tx hangs. 3716 */ 3717 if (!isLDOMguest(nxgep)) 3718 nxgep->nxge_timerid = nxge_start_timer(nxgep, 3719 nxge_check_hw_state, NXGE_CHECK_TIMER); 3720 #if defined(sun4v) 3721 else 3722 nxge_hio_start_timer(nxgep); 3723 #endif 3724 3725 nxgep->link_notify = B_TRUE; 3726 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 3727 3728 /* 3729 * Let the global lock go, since we are initialized. 3730 */ 3731 MUTEX_EXIT(nxgep->genlock); 3732 3733 /* 3734 * Let the MAC start receiving packets, now that 3735 * we are initialized. 3736 */ 3737 if (!isLDOMguest(nxgep)) { 3738 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 3739 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3740 "<== nxge_m_start: enable of RX mac failed")); 3741 return (EIO); 3742 } 3743 3744 /* 3745 * Enable hardware interrupts. 3746 */ 3747 nxge_intr_hw_enable(nxgep); 3748 } 3749 #if defined(sun4v) 3750 else { 3751 /* 3752 * In guest domain we enable RDCs and their interrupts as 3753 * the last step.
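* (Presumably so that no receive traffic can arrive before the
* rings set up by nxge_init() above are ready: nxge_hio_rdc_enable()
* runs first, and only then are the interrupts armed with
* nxge_hio_rdc_intr_arm(B_TRUE).)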
3754 */ 3755 if (nxge_hio_rdc_enable(nxgep) != NXGE_OK) { 3756 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3757 "<== nxge_m_start: enable of RDCs failed")); 3758 return (EIO); 3759 } 3760 3761 if (nxge_hio_rdc_intr_arm(nxgep, B_TRUE) != NXGE_OK) { 3762 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3763 "<== nxge_m_start: intrs enable for RDCs failed")); 3764 return (EIO); 3765 } 3766 } 3767 #endif 3768 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3769 return (0); 3770 } 3771 3772 static boolean_t 3773 nxge_check_groups_stopped(p_nxge_t nxgep) 3774 { 3775 int i; 3776 3777 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 3778 if (nxgep->rx_hio_groups[i].started) 3779 return (B_FALSE); 3780 } 3781 3782 return (B_TRUE); 3783 } 3784 3785 /* 3786 * nxge_m_stop(): stop transmitting and receiving. 3787 */ 3788 static void 3789 nxge_m_stop(void *arg) 3790 { 3791 p_nxge_t nxgep = (p_nxge_t)arg; 3792 boolean_t groups_stopped; 3793 3794 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3795 3796 /* 3797 * Are the groups stopped? 3798 */ 3799 groups_stopped = nxge_check_groups_stopped(nxgep); 3800 ASSERT(groups_stopped == B_TRUE); 3801 if (!groups_stopped) { 3802 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n", 3803 nxgep->instance); 3804 return; 3805 } 3806 3807 if (!isLDOMguest(nxgep)) { 3808 /* 3809 * Disable the RX mac. 3810 */ 3811 (void) nxge_rx_mac_disable(nxgep); 3812 3813 /* 3814 * Wait for the IPP to drain. 3815 */ 3816 (void) nxge_ipp_drain(nxgep); 3817 3818 /* 3819 * Disable hardware interrupts. 3820 */ 3821 nxge_intr_hw_disable(nxgep); 3822 } 3823 #if defined(sun4v) 3824 else { 3825 (void) nxge_hio_rdc_intr_arm(nxgep, B_FALSE); 3826 } 3827 #endif 3828 3829 /* 3830 * Grab the global lock. 3831 */ 3832 MUTEX_ENTER(nxgep->genlock); 3833 3834 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3835 if (nxgep->nxge_timerid) { 3836 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3837 nxgep->nxge_timerid = 0; 3838 } 3839 3840 /* 3841 * Clean up. 3842 */ 3843 nxge_uninit(nxgep); 3844 3845 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3846 3847 /* 3848 * Let go of the global lock. 
3849 */ 3850 MUTEX_EXIT(nxgep->genlock); 3851 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3852 } 3853 3854 static int 3855 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3856 { 3857 p_nxge_t nxgep = (p_nxge_t)arg; 3858 struct ether_addr addrp; 3859 3860 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3861 "==> nxge_m_multicst: add %d", add)); 3862 3863 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3864 if (add) { 3865 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3866 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3867 "<== nxge_m_multicst: add multicast failed")); 3868 return (EINVAL); 3869 } 3870 } else { 3871 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3872 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3873 "<== nxge_m_multicst: del multicast failed")); 3874 return (EINVAL); 3875 } 3876 } 3877 3878 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3879 3880 return (0); 3881 } 3882 3883 static int 3884 nxge_m_promisc(void *arg, boolean_t on) 3885 { 3886 p_nxge_t nxgep = (p_nxge_t)arg; 3887 3888 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3889 "==> nxge_m_promisc: on %d", on)); 3890 3891 if (nxge_set_promisc(nxgep, on)) { 3892 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3893 "<== nxge_m_promisc: set promisc failed")); 3894 return (EINVAL); 3895 } 3896 3897 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3898 "<== nxge_m_promisc: on %d", on)); 3899 3900 return (0); 3901 } 3902 3903 static void 3904 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3905 { 3906 p_nxge_t nxgep = (p_nxge_t)arg; 3907 struct iocblk *iocp; 3908 boolean_t need_privilege; 3909 int err; 3910 int cmd; 3911 3912 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3913 3914 iocp = (struct iocblk *)mp->b_rptr; 3915 iocp->ioc_error = 0; 3916 need_privilege = B_TRUE; 3917 cmd = iocp->ioc_cmd; 3918 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3919 switch (cmd) { 3920 default: 3921 miocnak(wq, mp, 0, EINVAL); 3922 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3923 return; 3924 3925 case LB_GET_INFO_SIZE: 3926 case LB_GET_INFO: 3927 case LB_GET_MODE: 3928 need_privilege = B_FALSE; 3929 break; 3930 case LB_SET_MODE: 3931 break; 3932 3933 3934 case NXGE_GET_MII: 3935 case NXGE_PUT_MII: 3936 case NXGE_GET64: 3937 case NXGE_PUT64: 3938 case NXGE_GET_TX_RING_SZ: 3939 case NXGE_GET_TX_DESC: 3940 case NXGE_TX_SIDE_RESET: 3941 case NXGE_RX_SIDE_RESET: 3942 case NXGE_GLOBAL_RESET: 3943 case NXGE_RESET_MAC: 3944 case NXGE_TX_REGS_DUMP: 3945 case NXGE_RX_REGS_DUMP: 3946 case NXGE_INT_REGS_DUMP: 3947 case NXGE_VIR_INT_REGS_DUMP: 3948 case NXGE_PUT_TCAM: 3949 case NXGE_GET_TCAM: 3950 case NXGE_RTRACE: 3951 case NXGE_RDUMP: 3952 3953 need_privilege = B_FALSE; 3954 break; 3955 case NXGE_INJECT_ERR: 3956 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3957 nxge_err_inject(nxgep, wq, mp); 3958 break; 3959 } 3960 3961 if (need_privilege) { 3962 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3963 if (err != 0) { 3964 miocnak(wq, mp, 0, err); 3965 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3966 "<== nxge_m_ioctl: no priv")); 3967 return; 3968 } 3969 } 3970 3971 switch (cmd) { 3972 3973 case LB_GET_MODE: 3974 case LB_SET_MODE: 3975 case LB_GET_INFO_SIZE: 3976 case LB_GET_INFO: 3977 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3978 break; 3979 3980 case NXGE_GET_MII: 3981 case NXGE_PUT_MII: 3982 case NXGE_PUT_TCAM: 3983 case NXGE_GET_TCAM: 3984 case NXGE_GET64: 3985 case NXGE_PUT64: 3986 case NXGE_GET_TX_RING_SZ: 3987 case NXGE_GET_TX_DESC: 3988 case NXGE_TX_SIDE_RESET: 3989 case NXGE_RX_SIDE_RESET: 3990 case NXGE_GLOBAL_RESET: 3991 case 
NXGE_RESET_MAC: 3992 case NXGE_TX_REGS_DUMP: 3993 case NXGE_RX_REGS_DUMP: 3994 case NXGE_INT_REGS_DUMP: 3995 case NXGE_VIR_INT_REGS_DUMP: 3996 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3997 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3998 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3999 break; 4000 } 4001 4002 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 4003 } 4004 4005 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 4006 4007 void 4008 nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory) 4009 { 4010 p_nxge_mmac_stats_t mmac_stats; 4011 int i; 4012 nxge_mmac_t *mmac_info; 4013 4014 mmac_info = &nxgep->nxge_mmac_info; 4015 4016 mmac_stats = &nxgep->statsp->mmac_stats; 4017 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 4018 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 4019 4020 for (i = 0; i < ETHERADDRL; i++) { 4021 if (factory) { 4022 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4023 = mmac_info->factory_mac_pool[slot][ 4024 (ETHERADDRL-1) - i]; 4025 } else { 4026 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4027 = mmac_info->mac_pool[slot].addr[ 4028 (ETHERADDRL - 1) - i]; 4029 } 4030 } 4031 } 4032 4033 /* 4034 * nxge_altmac_set() -- Set an alternate MAC address 4035 */ 4036 static int 4037 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot, 4038 int rdctbl, boolean_t usetbl) 4039 { 4040 uint8_t addrn; 4041 uint8_t portn; 4042 npi_mac_addr_t altmac; 4043 hostinfo_t mac_rdc; 4044 p_nxge_class_pt_cfg_t clscfgp; 4045 4046 4047 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 4048 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 4049 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 4050 4051 portn = nxgep->mac.portnum; 4052 addrn = (uint8_t)slot - 1; 4053 4054 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, 4055 nxgep->function_num, addrn, &altmac) != NPI_SUCCESS) 4056 return (EIO); 4057 4058 /* 4059 * Set the rdc table number for the host info entry 4060 * for this mac address slot. 4061 */ 4062 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 4063 mac_rdc.value = 0; 4064 if (usetbl) 4065 mac_rdc.bits.w0.rdc_tbl_num = rdctbl; 4066 else 4067 mac_rdc.bits.w0.rdc_tbl_num = 4068 clscfgp->mac_host_info[addrn].rdctbl; 4069 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 4070 4071 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 4072 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 4073 return (EIO); 4074 } 4075 4076 /* 4077 * Enable comparison with the alternate MAC address. 4078 * The first alternate addr is enabled by bit 1 of register 4079 * BMAC_ALTAD_CMPEN, but by bit 0 of register 4080 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 4081 * accordingly before calling npi_mac_altaddr_enable. 4082 */ 4083 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4084 addrn = (uint8_t)slot - 1; 4085 else 4086 addrn = (uint8_t)slot; 4087 4088 if (npi_mac_altaddr_enable(nxgep->npi_handle, 4089 nxgep->function_num, addrn) != NPI_SUCCESS) { 4090 return (EIO); 4091 } 4092 4093 return (0); 4094 } 4095 4096 /* 4097 * nxge_m_mmac_add_g() - find an unused address slot, set the address 4098 * value to the one specified, enable the port to start filtering on 4099 * the new MAC address. Returns 0 on success.
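 *
 * As a worked illustration (hypothetical address, not taken from this
 * source): feeding 00:14:4f:a8:39:01 to nxge_altmac_set() above packs
 * two octets per 16-bit register word, most significant octets first:
 *	altmac.w2 = 0x0014, altmac.w1 = 0x4fa8, altmac.w0 = 0x3901.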
4100 */ 4101 int 4102 nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl, 4103 boolean_t usetbl) 4104 { 4105 p_nxge_t nxgep = arg; 4106 int slot; 4107 nxge_mmac_t *mmac_info; 4108 int err; 4109 nxge_status_t status; 4110 4111 mutex_enter(nxgep->genlock); 4112 4113 /* 4114 * Make sure that nxge is initialized in case _start() has 4115 * not been called. 4116 */ 4117 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4118 status = nxge_init(nxgep); 4119 if (status != NXGE_OK) { 4120 mutex_exit(nxgep->genlock); 4121 return (ENXIO); 4122 } 4123 } 4124 4125 mmac_info = &nxgep->nxge_mmac_info; 4126 if (mmac_info->naddrfree == 0) { 4127 mutex_exit(nxgep->genlock); 4128 return (ENOSPC); 4129 } 4130 4131 /* 4132 * Search for the first available slot. Because naddrfree 4133 * is not zero, we are guaranteed to find one. 4134 * Each of the first two ports of Neptune has 16 alternate 4135 * MAC slots but only the first 7 (of 15) slots have assigned factory 4136 * MAC addresses. We first search among the slots without bundled 4137 * factory MACs. If we fail to find one in that range, then we 4138 * search the slots with bundled factory MACs. A factory MAC 4139 * will be wasted while the slot is used with a user MAC address. 4140 * But the slot could be used by factory MAC again after calling 4141 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 4142 */ 4143 for (slot = 0; slot <= mmac_info->num_mmac; slot++) { 4144 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4145 break; 4146 } 4147 4148 ASSERT(slot <= mmac_info->num_mmac); 4149 4150 if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl, 4151 usetbl)) != 0) { 4152 mutex_exit(nxgep->genlock); 4153 return (err); 4154 } 4155 4156 bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 4157 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 4158 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4159 mmac_info->naddrfree--; 4160 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4161 4162 mutex_exit(nxgep->genlock); 4163 return (0); 4164 } 4165 4166 /* 4167 * Remove the specified mac address and update the HW not to filter 4168 * the mac address anymore. 4169 */ 4170 int 4171 nxge_m_mmac_remove(void *arg, int slot) 4172 { 4173 p_nxge_t nxgep = arg; 4174 nxge_mmac_t *mmac_info; 4175 uint8_t addrn; 4176 uint8_t portn; 4177 int err = 0; 4178 nxge_status_t status; 4179 4180 mutex_enter(nxgep->genlock); 4181 4182 /* 4183 * Make sure that nxge is initialized in case _start() has 4184 * not been called. 4185 */ 4186 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4187 status = nxge_init(nxgep); 4188 if (status != NXGE_OK) { 4189 mutex_exit(nxgep->genlock); 4190 return (ENXIO); 4191 } 4192 } 4193 4194 mmac_info = &nxgep->nxge_mmac_info; 4195 if (slot < 1 || slot > mmac_info->num_mmac) { 4196 mutex_exit(nxgep->genlock); 4197 return (EINVAL); 4198 } 4199 4200 portn = nxgep->mac.portnum; 4201 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4202 addrn = (uint8_t)slot - 1; 4203 else 4204 addrn = (uint8_t)slot; 4205 4206 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4207 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 4208 == NPI_SUCCESS) { 4209 mmac_info->naddrfree++; 4210 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 4211 /* 4212 * Regardless of whether the MAC we just stopped filtering 4213 * is a user addr or a factory addr, we must set 4214 * the MMAC_VENDOR_ADDR flag if this slot has an 4215 * associated factory MAC to indicate that a factory 4216 * MAC is available.
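 * For example (an illustrative scenario, not from this source): after
 * a user address is removed from, say, slot 3 on a port whose first
 * seven slots carry factory MACs, slot 3 is re-marked MMAC_VENDOR_ADDR
 * so a later nxge_m_mmac_reserve() can hand its factory address back out.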
4217 */ 4218 if (slot <= mmac_info->num_factory_mmac) { 4219 mmac_info->mac_pool[slot].flags 4220 |= MMAC_VENDOR_ADDR; 4221 } 4222 /* 4223 * Clear mac_pool[slot].addr so that kstat shows 0 4224 * alternate MAC address if the slot is not used. 4225 * (But nxge_m_mmac_get returns the factory MAC even 4226 * when the slot is not used!) 4227 */ 4228 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 4229 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4230 } else { 4231 err = EIO; 4232 } 4233 } else { 4234 err = EINVAL; 4235 } 4236 4237 mutex_exit(nxgep->genlock); 4238 return (err); 4239 } 4240 4241 /* 4242 * The callback to query all the factory addresses. naddr must be the same as 4243 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and 4244 * mcm_addr is the space allocated for keeping all the addresses, whose size is 4245 * naddr * MAXMACADDRLEN. 4246 */ 4247 static void 4248 nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr) 4249 { 4250 nxge_t *nxgep = arg; 4251 nxge_mmac_t *mmac_info; 4252 int i; 4253 4254 mutex_enter(nxgep->genlock); 4255 4256 mmac_info = &nxgep->nxge_mmac_info; 4257 ASSERT(naddr == mmac_info->num_factory_mmac); 4258 4259 for (i = 0; i < naddr; i++) { 4260 bcopy(mmac_info->factory_mac_pool[i + 1], 4261 addr + i * MAXMACADDRLEN, ETHERADDRL); 4262 } 4263 4264 mutex_exit(nxgep->genlock); 4265 } 4266 4267 4268 static boolean_t 4269 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4270 { 4271 nxge_t *nxgep = arg; 4272 uint32_t *txflags = cap_data; 4273 4274 switch (cap) { 4275 case MAC_CAPAB_HCKSUM: 4276 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4277 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload)); 4278 if (nxge_cksum_offload <= 1) { 4279 *txflags = HCKSUM_INET_PARTIAL; 4280 } 4281 break; 4282 4283 case MAC_CAPAB_MULTIFACTADDR: { 4284 mac_capab_multifactaddr_t *mfacp = cap_data; 4285 4286 if (!isLDOMguest(nxgep)) { 4287 mutex_enter(nxgep->genlock); 4288 mfacp->mcm_naddr = 4289 nxgep->nxge_mmac_info.num_factory_mmac; 4290 mfacp->mcm_getaddr = nxge_m_getfactaddr; 4291 mutex_exit(nxgep->genlock); 4292 } 4293 break; 4294 } 4295 4296 case MAC_CAPAB_LSO: { 4297 mac_capab_lso_t *cap_lso = cap_data; 4298 4299 if (nxgep->soft_lso_enable) { 4300 if (nxge_cksum_offload <= 1) { 4301 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4302 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 4303 nxge_lso_max = NXGE_LSO_MAXLEN; 4304 } 4305 cap_lso->lso_basic_tcp_ipv4.lso_max = 4306 nxge_lso_max; 4307 } 4308 break; 4309 } else { 4310 return (B_FALSE); 4311 } 4312 } 4313 4314 case MAC_CAPAB_RINGS: { 4315 mac_capab_rings_t *cap_rings = cap_data; 4316 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 4317 4318 mutex_enter(nxgep->genlock); 4319 if (cap_rings->mr_type == MAC_RING_TYPE_RX) { 4320 if (isLDOMguest(nxgep)) { 4321 cap_rings->mr_group_type = 4322 MAC_GROUP_TYPE_STATIC; 4323 cap_rings->mr_rnum = 4324 NXGE_HIO_SHARE_MAX_CHANNELS; 4325 cap_rings->mr_rget = nxge_fill_ring; 4326 cap_rings->mr_gnum = 1; 4327 cap_rings->mr_gget = nxge_hio_group_get; 4328 cap_rings->mr_gaddring = NULL; 4329 cap_rings->mr_gremring = NULL; 4330 } else { 4331 /* 4332 * Service Domain.
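 *
 * Here the RX groups are advertised as MAC_GROUP_TYPE_DYNAMIC,
 * so the MAC layer may move rings in and out of a group at
 * runtime through the mr_gaddring/mr_gremring entry points
 * (nxge_group_add_ring and nxge_group_rem_ring below); a guest
 * domain, by contrast, only gets a static set of
 * NXGE_HIO_SHARE_MAX_CHANNELS rings.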
4333 */ 4334 cap_rings->mr_group_type = 4335 MAC_GROUP_TYPE_DYNAMIC; 4336 cap_rings->mr_rnum = p_cfgp->max_rdcs; 4337 cap_rings->mr_rget = nxge_fill_ring; 4338 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids; 4339 cap_rings->mr_gget = nxge_hio_group_get; 4340 cap_rings->mr_gaddring = nxge_group_add_ring; 4341 cap_rings->mr_gremring = nxge_group_rem_ring; 4342 } 4343 4344 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4345 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]", 4346 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids)); 4347 } else { 4348 /* 4349 * TX Rings. 4350 */ 4351 if (isLDOMguest(nxgep)) { 4352 cap_rings->mr_group_type = 4353 MAC_GROUP_TYPE_STATIC; 4354 cap_rings->mr_rnum = 4355 NXGE_HIO_SHARE_MAX_CHANNELS; 4356 cap_rings->mr_rget = nxge_fill_ring; 4357 cap_rings->mr_gnum = 0; 4358 cap_rings->mr_gget = NULL; 4359 cap_rings->mr_gaddring = NULL; 4360 cap_rings->mr_gremring = NULL; 4361 } else { 4362 /* 4363 * Service Domain. 4364 */ 4365 cap_rings->mr_group_type = 4366 MAC_GROUP_TYPE_DYNAMIC; 4367 cap_rings->mr_rnum = p_cfgp->tdc.count; 4368 cap_rings->mr_rget = nxge_fill_ring; 4369 4370 /* 4371 * Share capable. 4372 * 4373 * Do not report the default group: hence -1 4374 */ 4375 cap_rings->mr_gnum = 4376 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1; 4377 cap_rings->mr_gget = nxge_hio_group_get; 4378 cap_rings->mr_gaddring = nxge_group_add_ring; 4379 cap_rings->mr_gremring = nxge_group_rem_ring; 4380 } 4381 4382 NXGE_DEBUG_MSG((nxgep, TX_CTL, 4383 "==> nxge_m_getcapab: tx rings # of rings %d", 4384 p_cfgp->tdc.count)); 4385 } 4386 mutex_exit(nxgep->genlock); 4387 break; 4388 } 4389 4390 #if defined(sun4v) 4391 case MAC_CAPAB_SHARES: { 4392 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4393 4394 /* 4395 * Only the service domain driver responds to 4396 * this capability request. 4397 */ 4398 mutex_enter(nxgep->genlock); 4399 if (isLDOMservice(nxgep)) { 4400 mshares->ms_snum = 3; 4401 mshares->ms_handle = (void *)nxgep; 4402 mshares->ms_salloc = nxge_hio_share_alloc; 4403 mshares->ms_sfree = nxge_hio_share_free; 4404 mshares->ms_sadd = nxge_hio_share_add_group; 4405 mshares->ms_sremove = nxge_hio_share_rem_group; 4406 mshares->ms_squery = nxge_hio_share_query; 4407 mshares->ms_sbind = nxge_hio_share_bind; 4408 mshares->ms_sunbind = nxge_hio_share_unbind; 4409 mutex_exit(nxgep->genlock); 4410 } else { 4411 mutex_exit(nxgep->genlock); 4412 return (B_FALSE); 4413 } 4414 break; 4415 } 4416 #endif 4417 default: 4418 return (B_FALSE); 4419 } 4420 return (B_TRUE); 4421 } 4422 4423 static boolean_t 4424 nxge_param_locked(mac_prop_id_t pr_num) 4425 { 4426 /* 4427 * All adv_* parameters are locked (read-only) while 4428 * the device is in any sort of loopback mode ... 
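 * nxge_m_setprop() checks this predicate and fails such writes
 * with EBUSY until the loopback mode is cleared.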
4429 */ 4430 switch (pr_num) { 4431 case MAC_PROP_ADV_1000FDX_CAP: 4432 case MAC_PROP_EN_1000FDX_CAP: 4433 case MAC_PROP_ADV_1000HDX_CAP: 4434 case MAC_PROP_EN_1000HDX_CAP: 4435 case MAC_PROP_ADV_100FDX_CAP: 4436 case MAC_PROP_EN_100FDX_CAP: 4437 case MAC_PROP_ADV_100HDX_CAP: 4438 case MAC_PROP_EN_100HDX_CAP: 4439 case MAC_PROP_ADV_10FDX_CAP: 4440 case MAC_PROP_EN_10FDX_CAP: 4441 case MAC_PROP_ADV_10HDX_CAP: 4442 case MAC_PROP_EN_10HDX_CAP: 4443 case MAC_PROP_AUTONEG: 4444 case MAC_PROP_FLOWCTRL: 4445 return (B_TRUE); 4446 } 4447 return (B_FALSE); 4448 } 4449 4450 /* 4451 * callback functions for set/get of properties 4452 */ 4453 static int 4454 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4455 uint_t pr_valsize, const void *pr_val) 4456 { 4457 nxge_t *nxgep = barg; 4458 p_nxge_param_t param_arr; 4459 p_nxge_stats_t statsp; 4460 int err = 0; 4461 uint8_t val; 4462 uint32_t cur_mtu, new_mtu, old_framesize; 4463 link_flowctrl_t fl; 4464 4465 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4466 param_arr = nxgep->param_arr; 4467 statsp = nxgep->statsp; 4468 mutex_enter(nxgep->genlock); 4469 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4470 nxge_param_locked(pr_num)) { 4471 /* 4472 * All adv_* parameters are locked (read-only) 4473 * while the device is in any sort of loopback mode. 4474 */ 4475 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4476 "==> nxge_m_setprop: loopback mode: read only")); 4477 mutex_exit(nxgep->genlock); 4478 return (EBUSY); 4479 } 4480 4481 val = *(uint8_t *)pr_val; 4482 switch (pr_num) { 4483 case MAC_PROP_EN_1000FDX_CAP: 4484 nxgep->param_en_1000fdx = val; 4485 param_arr[param_anar_1000fdx].value = val; 4486 4487 goto reprogram; 4488 4489 case MAC_PROP_EN_100FDX_CAP: 4490 nxgep->param_en_100fdx = val; 4491 param_arr[param_anar_100fdx].value = val; 4492 4493 goto reprogram; 4494 4495 case MAC_PROP_EN_10FDX_CAP: 4496 nxgep->param_en_10fdx = val; 4497 param_arr[param_anar_10fdx].value = val; 4498 4499 goto reprogram; 4500 4501 case MAC_PROP_EN_1000HDX_CAP: 4502 case MAC_PROP_EN_100HDX_CAP: 4503 case MAC_PROP_EN_10HDX_CAP: 4504 case MAC_PROP_ADV_1000FDX_CAP: 4505 case MAC_PROP_ADV_1000HDX_CAP: 4506 case MAC_PROP_ADV_100FDX_CAP: 4507 case MAC_PROP_ADV_100HDX_CAP: 4508 case MAC_PROP_ADV_10FDX_CAP: 4509 case MAC_PROP_ADV_10HDX_CAP: 4510 case MAC_PROP_STATUS: 4511 case MAC_PROP_SPEED: 4512 case MAC_PROP_DUPLEX: 4513 err = EINVAL; /* cannot set read-only properties */ 4514 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4515 "==> nxge_m_setprop: read only property %d", 4516 pr_num)); 4517 break; 4518 4519 case MAC_PROP_AUTONEG: 4520 param_arr[param_autoneg].value = val; 4521 4522 goto reprogram; 4523 4524 case MAC_PROP_MTU: 4525 cur_mtu = nxgep->mac.default_mtu; 4526 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4527 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4528 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4529 new_mtu, nxgep->mac.is_jumbo)); 4530 4531 if (new_mtu == cur_mtu) { 4532 err = 0; 4533 break; 4534 } 4535 4536 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4537 err = EBUSY; 4538 break; 4539 } 4540 4541 if ((new_mtu < NXGE_DEFAULT_MTU) || 4542 (new_mtu > NXGE_MAXIMUM_MTU)) { 4543 err = EINVAL; 4544 break; 4545 } 4546 4547 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4548 nxgep->mac.maxframesize = (uint16_t) 4549 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4550 if (nxge_mac_set_framesize(nxgep)) { 4551 nxgep->mac.maxframesize = 4552 (uint16_t)old_framesize; 4553 err = EINVAL; 4554 break; 4555 } 4556 4557 err = mac_maxsdu_update(nxgep->mach, new_mtu); 4558 if (err) 
{ 4559 nxgep->mac.maxframesize = 4560 (uint16_t)old_framesize; 4561 err = EINVAL; 4562 break; 4563 } 4564 4565 nxgep->mac.default_mtu = new_mtu; 4566 if (new_mtu > NXGE_DEFAULT_MTU) 4567 nxgep->mac.is_jumbo = B_TRUE; 4568 else 4569 nxgep->mac.is_jumbo = B_FALSE; 4570 4571 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4572 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4573 new_mtu, nxgep->mac.maxframesize)); 4574 break; 4575 4576 case MAC_PROP_FLOWCTRL: 4577 bcopy(pr_val, &fl, sizeof (fl)); 4578 switch (fl) { 4579 default: 4580 err = EINVAL; 4581 break; 4582 4583 case LINK_FLOWCTRL_NONE: 4584 param_arr[param_anar_pause].value = 0; 4585 break; 4586 4587 case LINK_FLOWCTRL_RX: 4588 param_arr[param_anar_pause].value = 1; 4589 break; 4590 4591 case LINK_FLOWCTRL_TX: 4592 case LINK_FLOWCTRL_BI: 4593 err = EINVAL; 4594 break; 4595 } 4596 4597 reprogram: 4598 if (err == 0) { 4599 if (!nxge_param_link_update(nxgep)) { 4600 err = EINVAL; 4601 } 4602 } 4603 break; 4604 case MAC_PROP_PRIVATE: 4605 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4606 "==> nxge_m_setprop: private property")); 4607 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 4608 pr_val); 4609 break; 4610 4611 default: 4612 err = ENOTSUP; 4613 break; 4614 } 4615 4616 mutex_exit(nxgep->genlock); 4617 4618 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4619 "<== nxge_m_setprop (return %d)", err)); 4620 return (err); 4621 } 4622 4623 static int 4624 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4625 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 4626 { 4627 nxge_t *nxgep = barg; 4628 p_nxge_param_t param_arr = nxgep->param_arr; 4629 p_nxge_stats_t statsp = nxgep->statsp; 4630 int err = 0; 4631 link_flowctrl_t fl; 4632 uint64_t tmp = 0; 4633 link_state_t ls; 4634 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4635 4636 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4637 "==> nxge_m_getprop: pr_num %d", pr_num)); 4638 4639 if (pr_valsize == 0) 4640 return (EINVAL); 4641 4642 *perm = MAC_PROP_PERM_RW; 4643 4644 if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) { 4645 err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4646 return (err); 4647 } 4648 4649 bzero(pr_val, pr_valsize); 4650 switch (pr_num) { 4651 case MAC_PROP_DUPLEX: 4652 *perm = MAC_PROP_PERM_READ; 4653 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4654 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4655 "==> nxge_m_getprop: duplex mode %d", 4656 *(uint8_t *)pr_val)); 4657 break; 4658 4659 case MAC_PROP_SPEED: 4660 if (pr_valsize < sizeof (uint64_t)) 4661 return (EINVAL); 4662 *perm = MAC_PROP_PERM_READ; 4663 tmp = statsp->mac_stats.link_speed * 1000000ull; 4664 bcopy(&tmp, pr_val, sizeof (tmp)); 4665 break; 4666 4667 case MAC_PROP_STATUS: 4668 if (pr_valsize < sizeof (link_state_t)) 4669 return (EINVAL); 4670 *perm = MAC_PROP_PERM_READ; 4671 if (!statsp->mac_stats.link_up) 4672 ls = LINK_STATE_DOWN; 4673 else 4674 ls = LINK_STATE_UP; 4675 bcopy(&ls, pr_val, sizeof (ls)); 4676 break; 4677 4678 case MAC_PROP_AUTONEG: 4679 *(uint8_t *)pr_val = 4680 param_arr[param_autoneg].value; 4681 break; 4682 4683 case MAC_PROP_FLOWCTRL: 4684 if (pr_valsize < sizeof (link_flowctrl_t)) 4685 return (EINVAL); 4686 4687 fl = LINK_FLOWCTRL_NONE; 4688 if (param_arr[param_anar_pause].value) { 4689 fl = LINK_FLOWCTRL_RX; 4690 } 4691 bcopy(&fl, pr_val, sizeof (fl)); 4692 break; 4693 4694 case MAC_PROP_ADV_1000FDX_CAP: 4695 *perm = MAC_PROP_PERM_READ; 4696 *(uint8_t *)pr_val = 4697 param_arr[param_anar_1000fdx].value; 4698 break; 4699 4700 case MAC_PROP_EN_1000FDX_CAP: 4701 *(uint8_t *)pr_val = 
nxgep->param_en_1000fdx; 4702 break; 4703 4704 case MAC_PROP_ADV_100FDX_CAP: 4705 *perm = MAC_PROP_PERM_READ; 4706 *(uint8_t *)pr_val = 4707 param_arr[param_anar_100fdx].value; 4708 break; 4709 4710 case MAC_PROP_EN_100FDX_CAP: 4711 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4712 break; 4713 4714 case MAC_PROP_ADV_10FDX_CAP: 4715 *perm = MAC_PROP_PERM_READ; 4716 *(uint8_t *)pr_val = 4717 param_arr[param_anar_10fdx].value; 4718 break; 4719 4720 case MAC_PROP_EN_10FDX_CAP: 4721 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4722 break; 4723 4724 case MAC_PROP_EN_1000HDX_CAP: 4725 case MAC_PROP_EN_100HDX_CAP: 4726 case MAC_PROP_EN_10HDX_CAP: 4727 case MAC_PROP_ADV_1000HDX_CAP: 4728 case MAC_PROP_ADV_100HDX_CAP: 4729 case MAC_PROP_ADV_10HDX_CAP: 4730 err = ENOTSUP; 4731 break; 4732 4733 case MAC_PROP_PRIVATE: 4734 err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4735 pr_valsize, pr_val, perm); 4736 break; 4737 4738 case MAC_PROP_MTU: { 4739 mac_propval_range_t range; 4740 4741 if (!(pr_flags & MAC_PROP_POSSIBLE)) 4742 return (ENOTSUP); 4743 if (pr_valsize < sizeof (mac_propval_range_t)) 4744 return (EINVAL); 4745 range.mpr_count = 1; 4746 range.mpr_type = MAC_PROPVAL_UINT32; 4747 range.range_uint32[0].mpur_min = NXGE_DEFAULT_MTU; 4748 range.range_uint32[0].mpur_max = NXGE_MAXIMUM_MTU; 4749 4750 bcopy(&range, pr_val, sizeof (range)); 4751 break; 4752 } 4753 default: 4754 err = EINVAL; 4755 break; 4756 } 4757 4758 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4759 4760 return (err); 4761 } 4762 4763 /* ARGSUSED */ 4764 static int 4765 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4766 const void *pr_val) 4767 { 4768 p_nxge_param_t param_arr = nxgep->param_arr; 4769 int err = 0; 4770 long result; 4771 4772 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4773 "==> nxge_set_priv_prop: name %s", pr_name)); 4774 4775 /* Blanking */ 4776 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4777 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4778 (char *)pr_val, 4779 (caddr_t)&param_arr[param_rxdma_intr_time]); 4780 if (err) { 4781 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4782 "<== nxge_set_priv_prop: " 4783 "unable to set (%s)", pr_name)); 4784 err = EINVAL; 4785 } else { 4786 err = 0; 4787 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4788 "<== nxge_set_priv_prop: " 4789 "set (%s)", pr_name)); 4790 } 4791 4792 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4793 "<== nxge_set_priv_prop: name %s (value %s)", 4794 pr_name, (char *)pr_val)); 4795 4796 return (err); 4797 } 4798 4799 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4800 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4801 (char *)pr_val, 4802 (caddr_t)&param_arr[param_rxdma_intr_pkts]); 4803 if (err) { 4804 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4805 "<== nxge_set_priv_prop: " 4806 "unable to set (%s)", pr_name)); 4807 err = EINVAL; 4808 } else { 4809 err = 0; 4810 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4811 "<== nxge_set_priv_prop: " 4812 "set (%s)", pr_name)); 4813 } 4814 4815 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4816 "<== nxge_set_priv_prop: name %s (value %s)", 4817 pr_name, (char *)pr_val)); 4818 4819 return (err); 4820 } 4821 4822 /* Classification */ 4823 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4824 if (pr_val == NULL) { 4825 err = EINVAL; 4826 return (err); 4827 } 4828 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4829 4830 err = nxge_param_set_ip_opt(nxgep, NULL, 4831 NULL, (char *)pr_val, 4832 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]); 4833 4834 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4835 "<== nxge_set_priv_prop: name %s (value 
0x%x)", 4836 pr_name, result)); 4837 4838 return (err); 4839 } 4840 4841 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 4842 if (pr_val == NULL) { 4843 err = EINVAL; 4844 return (err); 4845 } 4846 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4847 4848 err = nxge_param_set_ip_opt(nxgep, NULL, 4849 NULL, (char *)pr_val, 4850 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 4851 4852 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4853 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4854 pr_name, result)); 4855 4856 return (err); 4857 } 4858 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 4859 if (pr_val == NULL) { 4860 err = EINVAL; 4861 return (err); 4862 } 4863 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4864 4865 err = nxge_param_set_ip_opt(nxgep, NULL, 4866 NULL, (char *)pr_val, 4867 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 4868 4869 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4870 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4871 pr_name, result)); 4872 4873 return (err); 4874 } 4875 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 4876 if (pr_val == NULL) { 4877 err = EINVAL; 4878 return (err); 4879 } 4880 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4881 4882 err = nxge_param_set_ip_opt(nxgep, NULL, 4883 NULL, (char *)pr_val, 4884 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 4885 4886 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4887 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4888 pr_name, result)); 4889 4890 return (err); 4891 } 4892 4893 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 4894 if (pr_val == NULL) { 4895 err = EINVAL; 4896 return (err); 4897 } 4898 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4899 4900 err = nxge_param_set_ip_opt(nxgep, NULL, 4901 NULL, (char *)pr_val, 4902 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 4903 4904 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4905 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4906 pr_name, result)); 4907 4908 return (err); 4909 } 4910 4911 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 4912 if (pr_val == NULL) { 4913 err = EINVAL; 4914 return (err); 4915 } 4916 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4917 4918 err = nxge_param_set_ip_opt(nxgep, NULL, 4919 NULL, (char *)pr_val, 4920 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 4921 4922 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4923 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4924 pr_name, result)); 4925 4926 return (err); 4927 } 4928 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 4929 if (pr_val == NULL) { 4930 err = EINVAL; 4931 return (err); 4932 } 4933 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4934 4935 err = nxge_param_set_ip_opt(nxgep, NULL, 4936 NULL, (char *)pr_val, 4937 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 4938 4939 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4940 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4941 pr_name, result)); 4942 4943 return (err); 4944 } 4945 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 4946 if (pr_val == NULL) { 4947 err = EINVAL; 4948 return (err); 4949 } 4950 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4951 4952 err = nxge_param_set_ip_opt(nxgep, NULL, 4953 NULL, (char *)pr_val, 4954 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 4955 4956 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4957 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4958 pr_name, result)); 4959 4960 return (err); 4961 } 4962 4963 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 4964 if (pr_val == NULL) { 4965 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4966 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 4967 err = EINVAL; 4968 return (err); 
4969 } 4970 4971 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4972 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4973 "<== nxge_set_priv_prop: name %s " 4974 "(lso %d pr_val %s value %d)", 4975 pr_name, nxgep->soft_lso_enable, pr_val, result)); 4976 4977 if (result > 1 || result < 0) { 4978 err = EINVAL; 4979 } else { 4980 if (nxgep->soft_lso_enable == (uint32_t)result) { 4981 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4982 "no change (%d %d)", 4983 nxgep->soft_lso_enable, result)); 4984 return (0); 4985 } 4986 } 4987 4988 if (err == 0) nxgep->soft_lso_enable = (int)result; 4989 4990 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4991 "<== nxge_set_priv_prop: name %s (value %d)", 4992 pr_name, result)); 4993 4994 return (err); 4995 } 4996 /* 4997 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the 4998 * following code to be executed. 4999 */ 5000 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5001 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5002 (caddr_t)&param_arr[param_anar_10gfdx]); 5003 return (err); 5004 } 5005 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5006 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5007 (caddr_t)&param_arr[param_anar_pause]); 5008 return (err); 5009 } 5010 5011 return (EINVAL); 5012 } 5013 5014 static int 5015 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags, 5016 uint_t pr_valsize, void *pr_val, uint_t *perm) 5017 { 5018 p_nxge_param_t param_arr = nxgep->param_arr; 5019 char valstr[MAXNAMELEN]; 5020 int err = EINVAL; 5021 uint_t strsize; 5022 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 5023 5024 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5025 "==> nxge_get_priv_prop: property %s", pr_name)); 5026 5027 /* function number */ 5028 if (strcmp(pr_name, "_function_number") == 0) { 5029 if (is_default) 5030 return (ENOTSUP); 5031 *perm = MAC_PROP_PERM_READ; 5032 (void) snprintf(valstr, sizeof (valstr), "%d", 5033 nxgep->function_num); 5034 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5035 "==> nxge_get_priv_prop: name %s " 5036 "(value %d valstr %s)", 5037 pr_name, nxgep->function_num, valstr)); 5038 5039 err = 0; 5040 goto done; 5041 } 5042 5043 /* Neptune firmware version */ 5044 if (strcmp(pr_name, "_fw_version") == 0) { 5045 if (is_default) 5046 return (ENOTSUP); 5047 *perm = MAC_PROP_PERM_READ; 5048 (void) snprintf(valstr, sizeof (valstr), "%s", 5049 nxgep->vpd_info.ver); 5050 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5051 "==> nxge_get_priv_prop: name %s " 5052 "(value %s valstr %s)", 5053 pr_name, nxgep->vpd_info.ver, valstr)); 5054 5055 err = 0; 5056 goto done; 5057 } 5058 5059 /* port PHY mode */ 5060 if (strcmp(pr_name, "_port_mode") == 0) { 5061 if (is_default) 5062 return (ENOTSUP); 5063 *perm = MAC_PROP_PERM_READ; 5064 switch (nxgep->mac.portmode) { 5065 case PORT_1G_COPPER: 5066 (void) snprintf(valstr, sizeof (valstr), "1G copper %s", 5067 nxgep->hot_swappable_phy ? 5068 "[hot swappable]" : ""); 5069 break; 5070 case PORT_1G_FIBER: 5071 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s", 5072 nxgep->hot_swappable_phy ? 5073 "[hot swappable]" : ""); 5074 break; 5075 case PORT_10G_COPPER: 5076 (void) snprintf(valstr, sizeof (valstr), 5077 "10G copper %s", 5078 nxgep->hot_swappable_phy ? 5079 "[hot swappable]" : ""); 5080 break; 5081 case PORT_10G_FIBER: 5082 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s", 5083 nxgep->hot_swappable_phy ? 5084 "[hot swappable]" : ""); 5085 break; 5086 case PORT_10G_SERDES: 5087 (void) snprintf(valstr, sizeof (valstr), 5088 "10G serdes %s", nxgep->hot_swappable_phy ? 
5089 "[hot swappable]" : ""); 5090 break; 5091 case PORT_1G_SERDES: 5092 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s", 5093 nxgep->hot_swappable_phy ? 5094 "[hot swappable]" : ""); 5095 break; 5096 case PORT_1G_TN1010: 5097 (void) snprintf(valstr, sizeof (valstr), 5098 "1G TN1010 copper %s", nxgep->hot_swappable_phy ? 5099 "[hot swappable]" : ""); 5100 break; 5101 case PORT_10G_TN1010: 5102 (void) snprintf(valstr, sizeof (valstr), 5103 "10G TN1010 copper %s", nxgep->hot_swappable_phy ? 5104 "[hot swappable]" : ""); 5105 break; 5106 case PORT_1G_RGMII_FIBER: 5107 (void) snprintf(valstr, sizeof (valstr), 5108 "1G rgmii fiber %s", nxgep->hot_swappable_phy ? 5109 "[hot swappable]" : ""); 5110 break; 5111 case PORT_HSP_MODE: 5112 (void) snprintf(valstr, sizeof (valstr), 5113 "phy not present[hot swappable]"); 5114 break; 5115 default: 5116 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5117 nxgep->hot_swappable_phy ? 5118 "[hot swappable]" : ""); 5119 break; 5120 } 5121 5122 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5123 "==> nxge_get_priv_prop: name %s (value %s)", 5124 pr_name, valstr)); 5125 5126 err = 0; 5127 goto done; 5128 } 5129 5130 /* Hot swappable PHY */ 5131 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5132 if (is_default) 5133 return (ENOTSUP); 5134 *perm = MAC_PROP_PERM_READ; 5135 (void) snprintf(valstr, sizeof (valstr), "%s", 5136 nxgep->hot_swappable_phy ? 5137 "yes" : "no"); 5138 5139 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5140 "==> nxge_get_priv_prop: name %s " 5141 "(value %d valstr %s)", 5142 pr_name, nxgep->hot_swappable_phy, valstr)); 5143 5144 err = 0; 5145 goto done; 5146 } 5147 5148 5149 /* Receive Interrupt Blanking Parameters */ 5150 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5151 err = 0; 5152 if (is_default) { 5153 (void) snprintf(valstr, sizeof (valstr), 5154 "%d", RXDMA_RCR_TO_DEFAULT); 5155 goto done; 5156 } 5157 5158 (void) snprintf(valstr, sizeof (valstr), "%d", 5159 nxgep->intr_timeout); 5160 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5161 "==> nxge_get_priv_prop: name %s (value %d)", 5162 pr_name, 5163 (uint32_t)nxgep->intr_timeout)); 5164 goto done; 5165 } 5166 5167 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5168 err = 0; 5169 if (is_default) { 5170 (void) snprintf(valstr, sizeof (valstr), 5171 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5172 goto done; 5173 } 5174 (void) snprintf(valstr, sizeof (valstr), "%d", 5175 nxgep->intr_threshold); 5176 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5177 "==> nxge_get_priv_prop: name %s (value %d)", 5178 pr_name, (uint32_t)nxgep->intr_threshold)); 5179 5180 goto done; 5181 } 5182 5183 /* Classification and Load Distribution Configuration */ 5184 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5185 if (is_default) { 5186 (void) snprintf(valstr, sizeof (valstr), "%x", 5187 NXGE_CLASS_FLOW_GEN_SERVER); 5188 err = 0; 5189 goto done; 5190 } 5191 err = nxge_dld_get_ip_opt(nxgep, 5192 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5193 5194 (void) snprintf(valstr, sizeof (valstr), "%x", 5195 (int)param_arr[param_class_opt_ipv4_tcp].value); 5196 5197 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5198 "==> nxge_get_priv_prop: %s", valstr)); 5199 goto done; 5200 } 5201 5202 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5203 if (is_default) { 5204 (void) snprintf(valstr, sizeof (valstr), "%x", 5205 NXGE_CLASS_FLOW_GEN_SERVER); 5206 err = 0; 5207 goto done; 5208 } 5209 err = nxge_dld_get_ip_opt(nxgep, 5210 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5211 5212 (void) snprintf(valstr, sizeof (valstr), "%x", 5213 (int)param_arr[param_class_opt_ipv4_udp].value); 
5214 5215 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5216 "==> nxge_get_priv_prop: %s", valstr)); 5217 goto done; 5218 } 5219 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5220 if (is_default) { 5221 (void) snprintf(valstr, sizeof (valstr), "%x", 5222 NXGE_CLASS_FLOW_GEN_SERVER); 5223 err = 0; 5224 goto done; 5225 } 5226 err = nxge_dld_get_ip_opt(nxgep, 5227 (caddr_t)&param_arr[param_class_opt_ipv4_ah]); 5228 5229 (void) snprintf(valstr, sizeof (valstr), "%x", 5230 (int)param_arr[param_class_opt_ipv4_ah].value); 5231 5232 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5233 "==> nxge_get_priv_prop: %s", valstr)); 5234 goto done; 5235 } 5236 5237 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5238 if (is_default) { 5239 (void) snprintf(valstr, sizeof (valstr), "%x", 5240 NXGE_CLASS_FLOW_GEN_SERVER); 5241 err = 0; 5242 goto done; 5243 } 5244 err = nxge_dld_get_ip_opt(nxgep, 5245 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]); 5246 5247 (void) snprintf(valstr, sizeof (valstr), "%x", 5248 (int)param_arr[param_class_opt_ipv4_sctp].value); 5249 5250 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5251 "==> nxge_get_priv_prop: %s", valstr)); 5252 goto done; 5253 } 5254 5255 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5256 if (is_default) { 5257 (void) snprintf(valstr, sizeof (valstr), "%x", 5258 NXGE_CLASS_FLOW_GEN_SERVER); 5259 err = 0; 5260 goto done; 5261 } 5262 err = nxge_dld_get_ip_opt(nxgep, 5263 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]); 5264 5265 (void) snprintf(valstr, sizeof (valstr), "%x", 5266 (int)param_arr[param_class_opt_ipv6_tcp].value); 5267 5268 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5269 "==> nxge_get_priv_prop: %s", valstr)); 5270 goto done; 5271 } 5272 5273 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5274 if (is_default) { 5275 (void) snprintf(valstr, sizeof (valstr), "%x", 5276 NXGE_CLASS_FLOW_GEN_SERVER); 5277 err = 0; 5278 goto done; 5279 } 5280 err = nxge_dld_get_ip_opt(nxgep, 5281 (caddr_t)&param_arr[param_class_opt_ipv6_udp]); 5282 5283 (void) snprintf(valstr, sizeof (valstr), "%x", 5284 (int)param_arr[param_class_opt_ipv6_udp].value); 5285 5286 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5287 "==> nxge_get_priv_prop: %s", valstr)); 5288 goto done; 5289 } 5290 5291 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5292 if (is_default) { 5293 (void) snprintf(valstr, sizeof (valstr), "%x", 5294 NXGE_CLASS_FLOW_GEN_SERVER); 5295 err = 0; 5296 goto done; 5297 } 5298 err = nxge_dld_get_ip_opt(nxgep, 5299 (caddr_t)&param_arr[param_class_opt_ipv6_ah]); 5300 5301 (void) snprintf(valstr, sizeof (valstr), "%x", 5302 (int)param_arr[param_class_opt_ipv6_ah].value); 5303 5304 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5305 "==> nxge_get_priv_prop: %s", valstr)); 5306 goto done; 5307 } 5308 5309 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5310 if (is_default) { 5311 (void) snprintf(valstr, sizeof (valstr), "%x", 5312 NXGE_CLASS_FLOW_GEN_SERVER); 5313 err = 0; 5314 goto done; 5315 } 5316 err = nxge_dld_get_ip_opt(nxgep, 5317 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]); 5318 5319 (void) snprintf(valstr, sizeof (valstr), "%x", 5320 (int)param_arr[param_class_opt_ipv6_sctp].value); 5321 5322 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5323 "==> nxge_get_priv_prop: %s", valstr)); 5324 goto done; 5325 } 5326 5327 /* Software LSO */ 5328 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5329 if (is_default) { 5330 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5331 err = 0; 5332 goto done; 5333 } 5334 (void) snprintf(valstr, sizeof (valstr), 5335 "%d", nxgep->soft_lso_enable); 5336 err = 0; 5337 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5338 "==> 
nxge_get_priv_prop: name %s (value %d)", 5339 pr_name, nxgep->soft_lso_enable)); 5340 5341 goto done; 5342 } 5343 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5344 err = 0; 5345 if (is_default || 5346 nxgep->param_arr[param_anar_10gfdx].value != 0) { 5347 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5348 goto done; 5349 } else { 5350 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5351 goto done; 5352 } 5353 } 5354 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5355 err = 0; 5356 if (is_default || 5357 nxgep->param_arr[param_anar_pause].value != 0) { 5358 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5359 goto done; 5360 } else { 5361 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5362 goto done; 5363 } 5364 } 5365 5366 done: 5367 if (err == 0) { 5368 strsize = (uint_t)strlen(valstr); 5369 if (pr_valsize < strsize) { 5370 err = ENOBUFS; 5371 } else { 5372 (void) strlcpy(pr_val, valstr, pr_valsize); 5373 } 5374 } 5375 5376 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5377 "<== nxge_get_priv_prop: return %d", err)); 5378 return (err); 5379 } 5380 5381 /* 5382 * Module loading and removing entry points. 5383 */ 5384 5385 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach, 5386 nodev, NULL, D_MP, NULL, nxge_quiesce); 5387 5388 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5389 5390 /* 5391 * Module linkage information for the kernel. 5392 */ 5393 static struct modldrv nxge_modldrv = { 5394 &mod_driverops, 5395 NXGE_DESC_VER, 5396 &nxge_dev_ops 5397 }; 5398 5399 static struct modlinkage modlinkage = { 5400 MODREV_1, (void *) &nxge_modldrv, NULL 5401 }; 5402 5403 int 5404 _init(void) 5405 { 5406 int status; 5407 5408 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 5409 5410 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5411 5412 mac_init_ops(&nxge_dev_ops, "nxge"); 5413 5414 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5415 if (status != 0) { 5416 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5417 "failed to init device soft state")); 5418 goto _init_exit; 5419 } 5420 5421 status = mod_install(&modlinkage); 5422 if (status != 0) { 5423 ddi_soft_state_fini(&nxge_list); 5424 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5425 goto _init_exit; 5426 } 5427 5428 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5429 5430 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5431 return (status); 5432 5433 _init_exit: 5434 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5435 MUTEX_DESTROY(&nxgedebuglock); 5436 return (status); 5437 } 5438 5439 int 5440 _fini(void) 5441 { 5442 int status; 5443 5444 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5445 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5446 5447 if (nxge_mblks_pending) 5448 return (EBUSY); 5449 5450 status = mod_remove(&modlinkage); 5451 if (status != DDI_SUCCESS) { 5452 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5453 "Module removal failed 0x%08x", 5454 status)); 5455 goto _fini_exit; 5456 } 5457 5458 mac_fini_ops(&nxge_dev_ops); 5459 5460 ddi_soft_state_fini(&nxge_list); 5461 5462 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5463 5464 MUTEX_DESTROY(&nxge_common_lock); 5465 MUTEX_DESTROY(&nxgedebuglock); 5466 return (status); 5467 5468 _fini_exit: 5469 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5470 return (status); 5471 } 5472 5473 int 5474 _info(struct modinfo *modinfop) 5475 { 5476 int status; 5477 5478 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5479 status = mod_info(&modlinkage, 
modinfop); 5480 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5481 5482 return (status); 5483 } 5484 5485 /*ARGSUSED*/ 5486 static int 5487 nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5488 { 5489 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5490 p_nxge_t nxgep = rhp->nxgep; 5491 uint32_t channel; 5492 p_tx_ring_t ring; 5493 5494 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5495 ring = nxgep->tx_rings->rings[channel]; 5496 5497 MUTEX_ENTER(&ring->lock); 5498 ring->tx_ring_handle = rhp->ring_handle; 5499 MUTEX_EXIT(&ring->lock); 5500 5501 return (0); 5502 } 5503 5504 static void 5505 nxge_tx_ring_stop(mac_ring_driver_t rdriver) 5506 { 5507 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5508 p_nxge_t nxgep = rhp->nxgep; 5509 uint32_t channel; 5510 p_tx_ring_t ring; 5511 5512 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index; 5513 ring = nxgep->tx_rings->rings[channel]; 5514 5515 MUTEX_ENTER(&ring->lock); 5516 ring->tx_ring_handle = (mac_ring_handle_t)NULL; 5517 MUTEX_EXIT(&ring->lock); 5518 } 5519 5520 static int 5521 nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num) 5522 { 5523 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5524 p_nxge_t nxgep = rhp->nxgep; 5525 uint32_t channel; 5526 p_rx_rcr_ring_t ring; 5527 int i; 5528 5529 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index; 5530 ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 5531 5532 MUTEX_ENTER(&ring->lock); 5533 5534 if (nxgep->rx_channel_started[channel] == B_TRUE) { 5535 MUTEX_EXIT(&ring->lock); 5536 return (0); 5537 } 5538 5539 /* set rcr_ring */ 5540 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) { 5541 if ((nxgep->ldgvp->ldvp[i].is_rxdma == 1) && 5542 (nxgep->ldgvp->ldvp[i].channel == channel)) { 5543 ring->ldvp = &nxgep->ldgvp->ldvp[i]; 5544 ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp; 5545 } 5546 } 5547 5548 nxgep->rx_channel_started[channel] = B_TRUE; 5549 ring->rcr_mac_handle = rhp->ring_handle; 5550 ring->rcr_gen_num = mr_gen_num; 5551 MUTEX_EXIT(&ring->lock); 5552 5553 return (0); 5554 } 5555 5556 static void 5557 nxge_rx_ring_stop(mac_ring_driver_t rdriver) 5558 { 5559 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver; 5560 p_nxge_t nxgep = rhp->nxgep; 5561 uint32_t channel; 5562 p_rx_rcr_ring_t ring; 5563 5564 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index; 5565 ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 5566 5567 MUTEX_ENTER(&ring->lock); 5568 nxgep->rx_channel_started[channel] = B_FALSE; 5569 ring->rcr_mac_handle = NULL; 5570 MUTEX_EXIT(&ring->lock); 5571 } 5572 5573 /* 5574 * Callback function for MAC layer to register all rings. 
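 *
 * For a TX ring the driver fills in the mri_start/mri_stop/mri_tx
 * entry points; for an RX ring it additionally wires up mri_intr
 * (the nxge_disable_poll/nxge_enable_poll toggles) and mri_poll,
 * which lets the MAC layer switch a ring between interrupt-driven
 * and polled receive.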
5575 */ 5576 static void 5577 nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index, 5578 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh) 5579 { 5580 p_nxge_t nxgep = (p_nxge_t)arg; 5581 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 5582 5583 NXGE_DEBUG_MSG((nxgep, TX_CTL, 5584 "==> nxge_fill_ring 0x%x index %d", rtype, index)); 5585 5586 switch (rtype) { 5587 case MAC_RING_TYPE_TX: { 5588 p_nxge_ring_handle_t rhandlep; 5589 5590 NXGE_DEBUG_MSG((nxgep, TX_CTL, 5591 "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d", 5592 rtype, index, p_cfgp->tdc.count)); 5593 5594 ASSERT((index >= 0) && (index < p_cfgp->tdc.count)); 5595 rhandlep = &nxgep->tx_ring_handles[index]; 5596 rhandlep->nxgep = nxgep; 5597 rhandlep->index = index; 5598 rhandlep->ring_handle = rh; 5599 5600 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5601 infop->mri_start = nxge_tx_ring_start; 5602 infop->mri_stop = nxge_tx_ring_stop; 5603 infop->mri_tx = nxge_tx_ring_send; 5604 5605 break; 5606 } 5607 case MAC_RING_TYPE_RX: { 5608 p_nxge_ring_handle_t rhandlep; 5609 int nxge_rindex; 5610 mac_intr_t nxge_mac_intr; 5611 5612 NXGE_DEBUG_MSG((nxgep, RX_CTL, 5613 "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d", 5614 rtype, index, p_cfgp->max_rdcs)); 5615 5616 /* 5617 * 'index' is the ring index within the group. 5618 * Find the ring index in the nxge instance. 5619 */ 5620 nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index); 5621 5622 ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs)); 5623 rhandlep = &nxgep->rx_ring_handles[nxge_rindex]; 5624 rhandlep->nxgep = nxgep; 5625 rhandlep->index = nxge_rindex; 5626 rhandlep->ring_handle = rh; 5627 5628 /* 5629 * Entrypoint to enable interrupt (disable poll) and 5630 * disable interrupt (enable poll). 5631 */ 5632 nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep; 5633 nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll; 5634 nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll; 5635 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5636 infop->mri_start = nxge_rx_ring_start; 5637 infop->mri_stop = nxge_rx_ring_stop; 5638 infop->mri_intr = nxge_mac_intr; /* ??? */ 5639 infop->mri_poll = nxge_rx_poll; 5640 5641 break; 5642 } 5643 default: 5644 break; 5645 } 5646 5647 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x", 5648 rtype)); 5649 } 5650 5651 static void 5652 nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5653 mac_ring_type_t type) 5654 { 5655 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5656 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5657 nxge_t *nxge; 5658 nxge_grp_t *grp; 5659 nxge_rdc_grp_t *rdc_grp; 5660 uint16_t channel; /* device-wide ring id */ 5661 int dev_gindex; 5662 int rv; 5663 5664 nxge = rgroup->nxgep; 5665 5666 switch (type) { 5667 case MAC_RING_TYPE_TX: 5668 /* 5669 * nxge_grp_dc_add takes a channel number which is a 5670 * "device" ring ID. 5671 */ 5672 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5673 5674 /* 5675 * Remove the ring from the default group 5676 */ 5677 if (rgroup->gindex != 0) { 5678 (void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5679 } 5680 5681 /* 5682 * nxge->tx_set.group[] is an array of groups indexed by 5683 * a "port" group ID. 
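 * The RX case below converts such a port-relative group ID into a
 * device-wide group index by adding the partition's base group ID
 * (dev_gindex = def_mac_rxdma_grpid + rgroup->gindex).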
5684 */ 5685 grp = nxge->tx_set.group[rgroup->gindex]; 5686 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5687 if (rv != 0) { 5688 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5689 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5690 } 5691 break; 5692 5693 case MAC_RING_TYPE_RX: 5694 /* 5695 * nxge->rx_set.group[] is an array of groups indexed by 5696 * a "port" group ID. 5697 */ 5698 grp = nxge->rx_set.group[rgroup->gindex]; 5699 5700 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5701 rgroup->gindex; 5702 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5703 5704 /* 5705 * nxge_grp_dc_add takes a channel number which is a 5706 * "device" ring ID. 5707 */ 5708 channel = nxge->pt_config.hw_config.start_rdc + rhandle->index; 5709 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel); 5710 if (rv != 0) { 5711 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 5712 "nxge_group_add_ring: nxge_grp_dc_add failed")); 5713 } 5714 5715 rdc_grp->map |= (1 << channel); 5716 rdc_grp->max_rdcs++; 5717 5718 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl); 5719 break; 5720 } 5721 } 5722 5723 static void 5724 nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5725 mac_ring_type_t type) 5726 { 5727 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5728 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5729 nxge_t *nxge; 5730 uint16_t channel; /* device-wide ring id */ 5731 nxge_rdc_grp_t *rdc_grp; 5732 int dev_gindex; 5733 5734 nxge = rgroup->nxgep; 5735 5736 switch (type) { 5737 case MAC_RING_TYPE_TX: 5738 dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid + 5739 rgroup->gindex; 5740 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5741 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5742 5743 /* 5744 * Add the ring back to the default group 5745 */ 5746 if (rgroup->gindex != 0) { 5747 nxge_grp_t *grp; 5748 grp = nxge->tx_set.group[0]; 5749 (void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel); 5750 } 5751 break; 5752 5753 case MAC_RING_TYPE_RX: 5754 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid + 5755 rgroup->gindex; 5756 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex]; 5757 channel = rdc_grp->start_rdc + rhandle->index; 5758 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 5759 5760 rdc_grp->map &= ~(1 << channel); 5761 rdc_grp->max_rdcs--; 5762 5763 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl); 5764 break; 5765 } 5766 } 5767 5768 5769 /*ARGSUSED*/ 5770 static nxge_status_t 5771 nxge_add_intrs(p_nxge_t nxgep) 5772 { 5773 5774 int intr_types; 5775 int type = 0; 5776 int ddi_status = DDI_SUCCESS; 5777 nxge_status_t status = NXGE_OK; 5778 5779 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 5780 5781 nxgep->nxge_intr_type.intr_registered = B_FALSE; 5782 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 5783 nxgep->nxge_intr_type.msi_intx_cnt = 0; 5784 nxgep->nxge_intr_type.intr_added = 0; 5785 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 5786 nxgep->nxge_intr_type.intr_type = 0; 5787 5788 if (nxgep->niu_type == N2_NIU) { 5789 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5790 } else if (nxge_msi_enable) { 5791 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5792 } 5793 5794 /* Get the supported interrupt types */ 5795 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 5796 != DDI_SUCCESS) { 5797 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 5798 "ddi_intr_get_supported_types failed: status 0x%08x", 5799 ddi_status)); 5800 return (NXGE_ERROR | NXGE_DDI_FAILED); 5801 } 5802 
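/*
 * Cache the bitmask of supported types (DDI_INTR_TYPE_FIXED,
 * DDI_INTR_TYPE_MSI, DDI_INTR_TYPE_MSIX) for the selection logic below.
 */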
nxgep->nxge_intr_type.intr_types = intr_types; 5803 5804 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5805 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5806 5807 /* 5808 * Solaris MSI-X is not supported yet. Use MSI for now. 5809 * nxge_msi_enable (1): 5810 * 1 - MSI 2 - MSI-X others - FIXED 5811 */ 5812 switch (nxge_msi_enable) { 5813 default: 5814 type = DDI_INTR_TYPE_FIXED; 5815 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5816 "use fixed (intx emulation) type %08x", 5817 type)); 5818 break; 5819 5820 case 2: 5821 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5822 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5823 if (intr_types & DDI_INTR_TYPE_MSIX) { 5824 type = DDI_INTR_TYPE_MSIX; 5825 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5826 "ddi_intr_get_supported_types: MSIX 0x%08x", 5827 type)); 5828 } else if (intr_types & DDI_INTR_TYPE_MSI) { 5829 type = DDI_INTR_TYPE_MSI; 5830 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5831 "ddi_intr_get_supported_types: MSI 0x%08x", 5832 type)); 5833 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5834 type = DDI_INTR_TYPE_FIXED; 5835 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5836 "ddi_intr_get_supported_types: FIXED 0x%08x", 5837 type)); 5838 } 5839 break; 5840 5841 case 1: 5842 if (intr_types & DDI_INTR_TYPE_MSI) { 5843 type = DDI_INTR_TYPE_MSI; 5844 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5845 "ddi_intr_get_supported_types: MSI 0x%08x", 5846 type)); 5847 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 5848 type = DDI_INTR_TYPE_MSIX; 5849 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5850 "ddi_intr_get_supported_types: MSIX 0x%08x", 5851 type)); 5852 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5853 type = DDI_INTR_TYPE_FIXED; 5854 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5855 "ddi_intr_get_supported_types: FIXED 0x%08x", 5856 type)); 5857 } 5858 } 5859 5860 nxgep->nxge_intr_type.intr_type = type; 5861 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 5862 type == DDI_INTR_TYPE_FIXED) && 5863 nxgep->nxge_intr_type.niu_msi_enable) { 5864 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 5865 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5866 " nxge_add_intrs: " 5867 " nxge_add_intrs_adv failed: status 0x%08x", 5868 status)); 5869 return (status); 5870 } else { 5871 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5872 "interrupts registered : type %d", type)); 5873 nxgep->nxge_intr_type.intr_registered = B_TRUE; 5874 5875 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5876 "\nAdded advanced nxge add_intr_adv " 5877 "intr type 0x%x\n", type)); 5878 5879 return (status); 5880 } 5881 } 5882 5883 if (!nxgep->nxge_intr_type.intr_registered) { 5884 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 5885 "failed to register interrupts")); 5886 return (NXGE_ERROR | NXGE_DDI_FAILED); 5887 } 5888 5889 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 5890 return (status); 5891 } 5892 5893 static nxge_status_t 5894 nxge_add_intrs_adv(p_nxge_t nxgep) 5895 { 5896 int intr_type; 5897 p_nxge_intr_t intrp; 5898 5899 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 5900 5901 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5902 intr_type = intrp->intr_type; 5903 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 5904 intr_type)); 5905 5906 switch (intr_type) { 5907 case DDI_INTR_TYPE_MSI: /* 0x2 */ 5908 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 5909 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 5910 5911 
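/* Legacy fixed (INTx) interrupts take the simpler setup path below. */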
case DDI_INTR_TYPE_FIXED: /* 0x1 */ 5912 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 5913 5914 default: 5915 return (NXGE_ERROR); 5916 } 5917 } 5918 5919 5920 /*ARGSUSED*/ 5921 static nxge_status_t 5922 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 5923 { 5924 dev_info_t *dip = nxgep->dip; 5925 p_nxge_ldg_t ldgp; 5926 p_nxge_intr_t intrp; 5927 uint_t *inthandler; 5928 void *arg1, *arg2; 5929 int behavior; 5930 int nintrs, navail, nrequest; 5931 int nactual, nrequired; 5932 int inum = 0; 5933 int x, y; 5934 int ddi_status = DDI_SUCCESS; 5935 nxge_status_t status = NXGE_OK; 5936 5937 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 5938 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5939 intrp->start_inum = 0; 5940 5941 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 5942 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 5943 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5944 "ddi_intr_get_nintrs() failed, status: 0x%x, " 5945 "nintrs: %d", ddi_status, nintrs)); 5946 return (NXGE_ERROR | NXGE_DDI_FAILED); 5947 } 5948 5949 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 5950 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 5951 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5952 "ddi_intr_get_navail() failed, status: 0x%x, " 5953 "navail: %d", ddi_status, navail)); 5954 return (NXGE_ERROR | NXGE_DDI_FAILED); 5955 } 5956 5957 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5958 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 5959 nintrs, navail)); 5960 5961 /* PSARC/2007/453 MSI-X interrupt limit override */ 5962 if (int_type == DDI_INTR_TYPE_MSIX) { 5963 nrequest = nxge_create_msi_property(nxgep); 5964 if (nrequest < navail) { 5965 navail = nrequest; 5966 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5967 "nxge_add_intrs_adv_type: nintrs %d " 5968 "navail %d (nrequest %d)", 5969 nintrs, navail, nrequest)); 5970 } 5971 } 5972 5973 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 5974 /* The MSI count must be a power of 2; round navail down (16 max) */ 5975 if ((navail & 16) == 16) { 5976 navail = 16; 5977 } else if ((navail & 8) == 8) { 5978 navail = 8; 5979 } else if ((navail & 4) == 4) { 5980 navail = 4; 5981 } else if ((navail & 2) == 2) { 5982 navail = 2; 5983 } else { 5984 navail = 1; 5985 } 5986 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5987 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 5988 "navail %d", nintrs, navail)); 5989 } 5990 5991 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? 
DDI_INTR_ALLOC_STRICT : 5992 DDI_INTR_ALLOC_NORMAL); 5993 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 5994 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 5995 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 5996 navail, &nactual, behavior); 5997 if (ddi_status != DDI_SUCCESS || nactual == 0) { 5998 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5999 " ddi_intr_alloc() failed: %d", 6000 ddi_status)); 6001 kmem_free(intrp->htable, intrp->intr_size); 6002 return (NXGE_ERROR | NXGE_DDI_FAILED); 6003 } 6004 6005 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 6006 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 6007 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6008 " ddi_intr_get_pri() failed: %d", 6009 ddi_status)); 6010 /* Free already allocated interrupts */ 6011 for (y = 0; y < nactual; y++) { 6012 (void) ddi_intr_free(intrp->htable[y]); 6013 } 6014 6015 kmem_free(intrp->htable, intrp->intr_size); 6016 return (NXGE_ERROR | NXGE_DDI_FAILED); 6017 } 6018 6019 nrequired = 0; 6020 switch (nxgep->niu_type) { 6021 default: 6022 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 6023 break; 6024 6025 case N2_NIU: 6026 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 6027 break; 6028 } 6029 6030 if (status != NXGE_OK) { 6031 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6032 "nxge_add_intrs_adv_type: nxge_ldgv_init " 6033 "failed: 0x%x", status)); 6034 /* Free already allocated interrupts */ 6035 for (y = 0; y < nactual; y++) { 6036 (void) ddi_intr_free(intrp->htable[y]); 6037 } 6038 6039 kmem_free(intrp->htable, intrp->intr_size); 6040 return (status); 6041 } 6042 6043 ldgp = nxgep->ldgvp->ldgp; 6044 for (x = 0; x < nrequired; x++, ldgp++) { 6045 ldgp->vector = (uint8_t)x; 6046 ldgp->intdata = SID_DATA(ldgp->func, x); 6047 arg1 = ldgp->ldvp; 6048 arg2 = nxgep; 6049 if (ldgp->nldvs == 1) { 6050 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 6051 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6052 "nxge_add_intrs_adv_type: " 6053 "arg1 0x%x arg2 0x%x: " 6054 "1-1 int handler (entry %d intdata 0x%x)\n", 6055 arg1, arg2, 6056 x, ldgp->intdata)); 6057 } else if (ldgp->nldvs > 1) { 6058 inthandler = (uint_t *)ldgp->sys_intr_handler; 6059 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6060 "nxge_add_intrs_adv_type: " 6061 "arg1 0x%x arg2 0x%x: " 6062 "nldvs %d int handler " 6063 "(entry %d intdata 0x%x)\n", 6064 arg1, arg2, 6065 ldgp->nldvs, x, ldgp->intdata)); 6066 } 6067 6068 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6069 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 6070 "htable 0x%llx", x, intrp->htable[x])); 6071 6072 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6073 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6074 != DDI_SUCCESS) { 6075 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6076 "==> nxge_add_intrs_adv_type: failed #%d " 6077 "status 0x%x", x, ddi_status)); 6078 for (y = 0; y < intrp->intr_added; y++) { 6079 (void) ddi_intr_remove_handler( 6080 intrp->htable[y]); 6081 } 6082 /* Free already allocated intr */ 6083 for (y = 0; y < nactual; y++) { 6084 (void) ddi_intr_free(intrp->htable[y]); 6085 } 6086 kmem_free(intrp->htable, intrp->intr_size); 6087 6088 (void) nxge_ldgv_uninit(nxgep); 6089 6090 return (NXGE_ERROR | NXGE_DDI_FAILED); 6091 } 6092 intrp->intr_added++; 6093 } 6094 6095 intrp->msi_intx_cnt = nactual; 6096 6097 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6098 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 6099 navail, nactual, 6100 intrp->msi_intx_cnt, 6101 intrp->intr_added)); 6102 6103 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6104 6105 (void) 

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t *dip = nxgep->dip;
	p_nxge_ldg_t ldgp;
	p_nxge_intr_t intrp;
	uint_t *inthandler;
	void *arg1, *arg2;
	int behavior;
	int nintrs, navail;
	int nactual, nrequired;
	int inum = 0;
	int x, y;
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		if (nxgep->niu_type != N2_NIU) {
			ldgp->intdata = SID_DATA(ldgp->func, x);
		}

		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
"1-1 int handler(%d) ldg %d ldv %d " 6219 "arg1 $%p arg2 $%p\n", 6220 x, ldgp->ldg, ldgp->ldvp->ldv, 6221 arg1, arg2)); 6222 } else if (ldgp->nldvs > 1) { 6223 inthandler = (uint_t *)ldgp->sys_intr_handler; 6224 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6225 "nxge_add_intrs_adv_type_fix: " 6226 "shared ldv %d int handler(%d) ldv %d ldg %d" 6227 "arg1 0x%016llx arg2 0x%016llx\n", 6228 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6229 arg1, arg2)); 6230 } 6231 6232 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6233 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6234 != DDI_SUCCESS) { 6235 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6236 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6237 "status 0x%x", x, ddi_status)); 6238 for (y = 0; y < intrp->intr_added; y++) { 6239 (void) ddi_intr_remove_handler( 6240 intrp->htable[y]); 6241 } 6242 for (y = 0; y < nactual; y++) { 6243 (void) ddi_intr_free(intrp->htable[y]); 6244 } 6245 /* Free already allocated intr */ 6246 kmem_free(intrp->htable, intrp->intr_size); 6247 6248 (void) nxge_ldgv_uninit(nxgep); 6249 6250 return (NXGE_ERROR | NXGE_DDI_FAILED); 6251 } 6252 intrp->intr_added++; 6253 } 6254 6255 intrp->msi_intx_cnt = nactual; 6256 6257 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6258 6259 status = nxge_intr_ldgv_init(nxgep); 6260 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6261 6262 return (status); 6263 } 6264 6265 static void 6266 nxge_remove_intrs(p_nxge_t nxgep) 6267 { 6268 int i, inum; 6269 p_nxge_intr_t intrp; 6270 6271 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6272 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6273 if (!intrp->intr_registered) { 6274 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6275 "<== nxge_remove_intrs: interrupts not registered")); 6276 return; 6277 } 6278 6279 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 6280 6281 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6282 (void) ddi_intr_block_disable(intrp->htable, 6283 intrp->intr_added); 6284 } else { 6285 for (i = 0; i < intrp->intr_added; i++) { 6286 (void) ddi_intr_disable(intrp->htable[i]); 6287 } 6288 } 6289 6290 for (inum = 0; inum < intrp->intr_added; inum++) { 6291 if (intrp->htable[inum]) { 6292 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6293 } 6294 } 6295 6296 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6297 if (intrp->htable[inum]) { 6298 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6299 "nxge_remove_intrs: ddi_intr_free inum %d " 6300 "msi_intx_cnt %d intr_added %d", 6301 inum, 6302 intrp->msi_intx_cnt, 6303 intrp->intr_added)); 6304 6305 (void) ddi_intr_free(intrp->htable[inum]); 6306 } 6307 } 6308 6309 kmem_free(intrp->htable, intrp->intr_size); 6310 intrp->intr_registered = B_FALSE; 6311 intrp->intr_enabled = B_FALSE; 6312 intrp->msi_intx_cnt = 0; 6313 intrp->intr_added = 0; 6314 6315 (void) nxge_ldgv_uninit(nxgep); 6316 6317 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6318 "#msix-request"); 6319 6320 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6321 } 6322 6323 /*ARGSUSED*/ 6324 static void 6325 nxge_intrs_enable(p_nxge_t nxgep) 6326 { 6327 p_nxge_intr_t intrp; 6328 int i; 6329 int status; 6330 6331 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6332 6333 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6334 6335 if (!intrp->intr_registered) { 6336 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6337 "interrupts are not registered")); 6338 return; 6339 } 6340 6341 if (intrp->intr_enabled) { 6342 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6343 "<== 
	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t intrp;
	int i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t *macp;
	int status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	if (!isLDOMguest(nxgep)) {
		macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	} else {
		macp->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
		macp->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
		(void) memset(macp->m_src_addr, 0xff, MAXMACADDRLEN);
	}
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
	    NXGE_EHEADER_VLAN_CRC;
	macp->m_max_sdu = nxgep->mac.default_mtu;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = nxge_priv_props;
	macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS;
	if (isLDOMguest(nxgep)) {
		macp->m_v12n = MAC_VIRT_LEVEL1 | MAC_VIRT_SERIALIZE;
	} else {
		macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1 |
		    MAC_VIRT_SERIALIZE;
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_mac_register: instance %d "
	    "max_sdu %d margin %d maxframe %d (header %d)",
	    nxgep->instance,
	    macp->m_max_sdu, macp->m_margin,
	    nxgep->mac.maxframesize,
	    NXGE_EHEADER_VLAN_CRC));

	status = mac_register(macp, &nxgep->mach);
	if (isLDOMguest(nxgep)) {
		KMEM_FREE(macp->m_src_addr, MAXMACADDRLEN);
		KMEM_FREE(macp->m_dst_addr, MAXMACADDRLEN);
	}
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

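	/*
	 * Note on the SDU math above: mac.maxframesize includes the
	 * Ethernet framing overhead, so the advertised MTU is the frame
	 * size minus NXGE_EHEADER_VLAN_CRC (assumed here to be the
	 * 14-byte Ethernet header plus 4-byte VLAN tag plus 4-byte FCS,
	 * 22 bytes total; e.g. a 1522-byte maxframesize yields the
	 * standard 1500-byte MTU).
	 */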
	return (NXGE_OK);
}

void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t size;
	mblk_t *nmp;
	uint8_t blk_id;
	uint8_t chan;
	uint32_t err_id;
	err_inject_t *eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;
	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
	switch (blk_id) {
	case MAC_BLK_ID:
		break;
	case TXMAC_BLK_ID:
		break;
	case RXMAC_BLK_ID:
		break;
	case MIF_BLK_ID:
		break;
	case IPP_BLK_ID:
		nxge_ipp_inject_err(nxgep, err_id);
		break;
	case TXC_BLK_ID:
		nxge_txc_inject_err(nxgep, err_id);
		break;
	case TXDMA_BLK_ID:
		nxge_txdma_inject_err(nxgep, err_id, chan);
		break;
	case RXDMA_BLK_ID:
		nxge_rxdma_inject_err(nxgep, err_id, chan);
		break;
	case ZCP_BLK_ID:
		nxge_zcp_inject_err(nxgep, err_id);
		break;
	case ESPC_BLK_ID:
		break;
	case FFLP_BLK_ID:
		break;
	case PHY_BLK_ID:
		break;
	case ETHER_SERDES_BLK_ID:
		break;
	case PCIE_SERDES_BLK_ID:
		break;
	case VIR_BLK_ID:
		break;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

	miocack(wq, mp, (int)size, 0);
}

static int
nxge_init_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t hw_p;
	dev_info_t *p_dip;

	ASSERT(nxgep != NULL);

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

	p_dip = nxgep->p_dip;
	MUTEX_ENTER(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_dev:func # %d",
	    nxgep->function_num));
	/*
	 * Loop through the existing per-Neptune hardware list.
	 */
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "hw_p $%p parent dip $%p",
		    nxgep->function_num,
		    hw_p,
		    p_dip));
		if (hw_p->parent_devp == p_dip) {
			nxgep->nxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->nxge_p[nxgep->function_num] = nxgep;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_init_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {

		char **prop_val;
		uint_t prop_len;
		int i;

		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num,
		    p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;
		if (nxgep->niu_type == N2_NIU) {
			hw_p->niu_type = N2_NIU;
			hw_p->platform_type = P_NEPTUNE_NIU;
		} else {
			hw_p->niu_type = NIU_TYPE_NONE;
			hw_p->platform_type = P_NEPTUNE_NONE;
		}

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;

		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
			for (i = 0; i < prop_len; i++) {
				if ((strcmp((caddr_t)prop_val[i],
				    NXGE_ROCK_COMPATIBLE) == 0)) {
					hw_p->platform_type = P_NEPTUNE_ROCK;
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "ROCK hw_p->platform_type %d",
					    hw_p->platform_type));
					break;
				}
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "nxge_init_common_dev: read compatible"
				    " property[%d] val[%s]",
				    i, (caddr_t)prop_val[i]));
			}

			/*
			 * Only free the property if the lookup succeeded;
			 * prop_val is not initialized otherwise.
			 */
			ddi_prop_free(prop_val);
		}

		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
	}

	MUTEX_EXIT(&nxge_common_lock);

	nxgep->platform_type = hw_p->platform_type;
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d",
	    nxgep->platform_type));
	if (nxgep->niu_type != N2_NIU) {
		nxgep->niu_type = hw_p->niu_type;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}

static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t hw_p, h_hw_p;
	p_nxge_dma_pt_cfg_t p_dma_cfgp;
	p_nxge_hw_pt_cfg_t p_cfgp;
	dev_info_t *p_dip;

	ASSERT(nxgep != NULL);

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	h_hw_p = nxge_hw_list;
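	/*
	 * h_hw_p trails one node behind hw_p in the walk below so that a
	 * matching middle node can be unlinked (h_hw_p->next = hw_p->next).
	 * The same trailing-pointer pattern in miniature (illustrative
	 * only, hypothetical types):
	 *
	 *	prev = head;
	 *	for (cur = head; cur != NULL; prev = cur, cur = cur->next) {
	 *		if (match(cur)) {
	 *			if (cur == head)
	 *				head = cur->next;
	 *			else
	 *				prev->next = cur->next;
	 *			break;
	 *		}
	 *	}
	 */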
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));

			/*
			 * Release the RDC table, a shared resource
			 * of the nxge hardware. The RDC table was
			 * assigned to this instance of nxge in
			 * nxge_use_cfg_dma_config().
			 */
			if (!isLDOMguest(nxgep)) {
				p_dma_cfgp =
				    (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
				p_cfgp =
				    (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
				(void) nxge_fzc_rdc_tbl_unbind(nxgep,
				    p_cfgp->def_mac_rxdma_grpid);

				/* Cleanup any outstanding groups. */
				nxge_grp_cleanup(nxgep);
			}

			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num,
				    hw_p,
				    p_dip,
				    hw_p->ndevs));

				nxge_hio_uninit(nxgep);

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				nxgep->nxge_hw_p = NULL;
				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}

/*
 * Determine the number of ports from the niu_type or the platform type.
 * Returns the number of ports, or zero on failure.
 */
int
nxge_get_nports(p_nxge_t nxgep)
{
	int nports = 0;

	switch (nxgep->niu_type) {
	case N2_NIU:
	case NEPTUNE_2_10GF:
		nports = 2;
		break;
	case NEPTUNE_4_1GC:
	case NEPTUNE_2_10GF_2_1GC:
	case NEPTUNE_1_10GF_3_1GC:
	case NEPTUNE_1_1GC_1_10GF_2_1GC:
	case NEPTUNE_2_10GF_2_1GRF:
		nports = 4;
		break;
	default:
		switch (nxgep->platform_type) {
		case P_NEPTUNE_NIU:
		case P_NEPTUNE_ATLAS_2PORT:
			nports = 2;
			break;
		case P_NEPTUNE_ATLAS_4PORT:
		case P_NEPTUNE_MARAMBA_P0:
		case P_NEPTUNE_MARAMBA_P1:
		case P_NEPTUNE_ROCK:
		case P_NEPTUNE_ALONSO:
			nports = 4;
			break;
		default:
			break;
		}
		break;
	}

	return (nports);
}

/*
 * The following two functions support the
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
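/*
 * The MSI-X request counts are tunable.  For example (illustrative
 * values, assuming the usual /etc/system mechanism for setting driver
 * globals; a reboot is required):
 *
 *	set nxge:nxge_msix_10g_intrs = 4
 *	set nxge:nxge_msix_1g_intrs = 2
 *
 * Values of 0 or above NXGE_MSIX_MAX_ALLOWED fall back to the
 * per-port defaults below.
 */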
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
	int nmsi;
	extern int ncpus;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));

	switch (nxgep->mac.portmode) {
	case PORT_10G_COPPER:
	case PORT_10G_FIBER:
	case PORT_10G_TN1010:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		/*
		 * The maximum number of MSI-X vectors requested will be
		 * 8.  If the number of CPUs is less than 8, the request
		 * is based on the number of CPUs (default).
		 */
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (10G): nxge_msix_10g_intrs %d",
		    nxge_msix_10g_intrs));
		if ((nxge_msix_10g_intrs == 0) ||
		    (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_10G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): reset to 8"));
		} else {
			nmsi = nxge_msix_10g_intrs;
		}

		/*
		 * If the number of interrupts requested is 8 (the
		 * default), the check against the number of CPUs is
		 * maintained.
		 */
		if ((nmsi == NXGE_MSIX_REQUEST_10G) &&
		    (ncpus < nmsi)) {
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): "
			    "reset to ncpus %d", ncpus));
			nmsi = ncpus;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;

	default:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d",
		    nxge_msix_1g_intrs));
		if ((nxge_msix_1g_intrs == 0) ||
		    (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_1G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (1G): reset to 2"));
		} else {
			nmsi = nxge_msix_1g_intrs;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
	return (nmsi);
}

/* ARGSUSED */
static int
nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
    void *pr_val)
{
	int err = 0;
	link_flowctrl_t fl;

	switch (pr_num) {
	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (link_flowctrl_t))
			return (EINVAL);
		fl = LINK_FLOWCTRL_RX;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	default:
		err = ENOTSUP;
		break;
	}
	return (err);
}


/*
 * The following is a software workaround for the Neptune hardware's
 * interrupt bug: the hardware may generate spurious interrupts after
 * an interrupt handler has been removed.
 */
#define	NXGE_PCI_PORT_LOGIC_OFFSET	0x98
#define	NXGE_PIM_RESET			(1ULL << 29)
#define	NXGE_GLU_RESET			(1ULL << 30)
#define	NXGE_NIU_RESET			(1ULL << 31)
#define	NXGE_PCI_RESET_ALL		(NXGE_PIM_RESET |	\
					NXGE_GLU_RESET |	\
					NXGE_NIU_RESET)

#define	NXGE_WAIT_QUITE_TIME		200000
#define	NXGE_WAIT_QUITE_RETRY		40
#define	NXGE_PCI_RESET_WAIT		1000000 /* one second */
static void
nxge_niu_peu_reset(p_nxge_t nxgep)
{
	uint32_t rvalue;
	p_nxge_hw_list_t hw_p;
	p_nxge_t fnxgep;
	int i, j;

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: NULL hardware pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
	    hw_p->flags, nxgep->nxge_link_poll_timerid,
	    nxgep->nxge_timerid));

	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
	/*
	 * Make sure the other instances on the same hardware have
	 * stopped sending PIOs and are in a quiescent state.
	 */
	for (i = 0; i < NXGE_MAX_PORTS; i++) {
		fnxgep = hw_p->nxge_p[i];
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: checking entry %d "
		    "nxgep $%p", i, fnxgep));
#ifdef NXGE_DEBUG
		if (fnxgep) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: entry %d (function %d) "
			    "link timer id %d hw timer id %d",
			    i, fnxgep->function_num,
			    fnxgep->nxge_link_poll_timerid,
			    fnxgep->nxge_timerid));
		}
#endif
		if (fnxgep && fnxgep != nxgep &&
		    (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: checking $%p "
			    "(function %d) timer ids",
			    fnxgep, fnxgep->function_num));
			for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
				NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
				    "==> nxge_niu_peu_reset: waiting"));
				NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
				if (!fnxgep->nxge_timerid &&
				    !fnxgep->nxge_link_poll_timerid) {
					break;
				}
			}
			NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
			if (fnxgep->nxge_timerid ||
			    fnxgep->nxge_link_poll_timerid) {
				MUTEX_EXIT(&hw_p->nxge_cfg_lock);
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "<== nxge_niu_peu_reset: cannot reset "
				    "hardware (devices are still in use)"));
				return;
			}
		}
	}

	if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
		hw_p->flags |= COMMON_RESET_NIU_PCI;
		rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: read offset 0x%x (%d) "
		    "(data 0x%x)",
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    rvalue));

		rvalue |= NXGE_PCI_RESET_ALL;
		pci_config_put32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
		    rvalue));

		NXGE_DELAY(NXGE_PCI_RESET_WAIT);
	}

	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
}

static void
nxge_set_pci_replay_timeout(p_nxge_t nxgep)
{
	p_dev_regs_t dev_regs;
	uint32_t value;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));

	if (!nxge_set_replay_timer) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: will not change "
		    "the timeout"));
		return;
	}

	dev_regs = nxgep->dev_regs;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
	    dev_regs, dev_regs->nxge_pciregh));

	if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
		    "no PCI handle",
		    dev_regs));
		return;
	}
	value = (pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
	    (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
	    "(timeout value to set 0x%x at offset 0x%x) value 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));

	pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
	    value);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET)));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled.  Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
nxge_quiesce(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);

	if (nxgep == NULL)
		return (DDI_FAILURE);

	/* Turn off debugging */
	nxge_debug_level = NO_DEBUG;
	nxgep->nxge_debug_level = NO_DEBUG;
	npi_debug_level = NO_DEBUG;

	/*
	 * Stop the link monitor only when linkchkmode is interrupt based.
	 */
	if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	}

	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	return (DDI_SUCCESS);
}