/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include	<sys/nxge/nxge_impl.h>
#include	<sys/nxge/nxge_hio.h>
#include	<sys/nxge/nxge_rxdma.h>
#include	<sys/pcie.h>

uint32_t	nxge_use_partition = 0;		/* debug partition flag */
uint32_t	nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
/*
 * PSARC/2007/453 MSI-X interrupt limit override
 */
uint32_t	nxge_msi_enable = 2;

/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed.  If this flag is enabled, the
 * driver will reset the hardware when devices are being detached.
 */
uint32_t	nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware
 * checksum bugs that affect packet transmission
 * and receive:
 *
 * Usage of nxge_cksum_offload:
 *
 *  (1) nxge_cksum_offload = 0 (default):
 *	- transmit packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: driver will compute the software checksum
 *	       based on the partial checksum computed
 *	       by the IP layer.
 *	- receive packets:
 *	  TCP: marks packets checksum flags based on hardware result.
 *	  UDP: will not mark checksum flags.
 *
 *  (2) nxge_cksum_offload = 1:
 *	- transmit packets:
 *	  TCP/UDP: uses the hardware checksum feature.
 *	- receive packets:
 *	  TCP/UDP: marks packet checksum flags based on hardware result.
 *
 *  (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksum for both TCP and UDP will be computed
 *	  by the stack.
 *	- The software LSO is not allowed in this case.
 *
 *  (4) nxge_cksum_offload > 2:
 *	- Will be treated as if it were set to 2
 *	  (stack will compute the checksum).
 *
 *  (5) If the hardware bug is fixed, this workaround
 *	needs to be updated accordingly to reflect
 *	the new hardware revision.
 */
uint32_t	nxge_cksum_offload = 0;
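/*
 * Administration example: like the other tunables in this file,
 * nxge_cksum_offload is normally set from /etc/system, e.g.
 *
 *	set nxge:nxge_cksum_offload = 1
 *
 * or patched in a running kernel with mdb(1), e.g.
 *
 *	echo 'nxge_cksum_offload/W 1' | mdb -kw
 */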
/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
uint32_t	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t	nxge_rbr_spare_size = 0;
uint32_t	nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t	nxge_no_msg = B_TRUE;		/* control message display */
uint32_t	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t	nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t	nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t	nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t	nxge_jumbo_mtu	= TX_JUMBO_MTU;
nxge_tx_mode_t	nxge_tx_scheme = NXGE_USE_SERIAL;

/* MAX LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t	nxge_lso_max = NXGE_LSO_MAXLEN;


/*
 * Add tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_bcopy_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if defined(_BIG_ENDIAN)
uint32_t	nxge_use_kmem_alloc = 1;
#else
uint32_t	nxge_use_kmem_alloc = 0;
#endif

rtrace_t npi_rtracebuf;

/*
 * The hardware sometimes fails to allow enough time for the link partner
 * to send an acknowledgement for packets that the hardware sent to it.
 * The hardware resends the packets earlier than it should in those
 * instances.  This behavior caused some switches to acknowledge the
 * wrong packets and triggered a fatal error.  This software workaround
 * sets the replay timer to a value suggested by the hardware team.
 *
 * PCI config space replay timer register:
 * the replay timeout value below (0xc) is written to bits 18:14.
 */
#define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
#define	PCI_REPLAY_TIMEOUT_SHIFT	14

uint32_t	nxge_set_replay_timer = 1;
uint32_t	nxge_replay_timeout = 0xc;
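/*
 * A minimal sketch of the read-modify-write implied by the defines
 * above; the driver's actual update is done by
 * nxge_set_pci_replay_timeout() (declared later in this file).
 * Compiled out by default.
 */
#ifdef	NXGE_REPLAY_TIMEOUT_EXAMPLE
static void
nxge_replay_timeout_example(ddi_acc_handle_t cfg_handle)
{
	uint32_t	value;

	value = pci_config_get32(cfg_handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
	/* Clear bits 18:14, then insert the suggested timeout value. */
	value &= ~(0x1f << PCI_REPLAY_TIMEOUT_SHIFT);
	value |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
	pci_config_put32(cfg_handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, value);
}
#endif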
/*
 * The transmit serialization logic can sometimes sleep longer than it
 * should before calling the driver's transmit function.  The
 * performance group suggested a tunable to cap the maximum wait time;
 * the default is 1 tick.
 */
uint32_t	nxge_tx_serial_maxsleep = 1;

#if	defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);
static int nxge_quiesce(dev_info_t *);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_remove(void *arg, int slot);
extern void nxge_grp_cleanup(p_nxge_t nxge);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef	NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *,
	size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *,
	size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
    char *, caddr_t);
#if defined(sun4v)
extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep);
extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm);
#endif

/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	int slot, int rdctbl, boolean_t usetbl);
void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
	boolean_t factory);

static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *, uint_t *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
    const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
    void *, uint_t *);
static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);
static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
    mac_ring_info_t *, mac_ring_handle_t);
static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);
static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
    mac_ring_type_t);

static void nxge_niu_peu_reset(p_nxge_t nxgep);
static void nxge_set_pci_replay_timeout(nxge_t *);

mac_priv_prop_t nxge_priv_props[] = {
	{"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW},
	{"_function_number", MAC_PROP_PERM_READ},
	{"_fw_version", MAC_PROP_PERM_READ},
	{"_port_mode", MAC_PROP_PERM_READ},
	{"_hot_swap_phy", MAC_PROP_PERM_READ},
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
	{"_soft_lso_enable", MAC_PROP_PERM_RW}
};

#define	NXGE_MAX_PRIV_PROPS	\
	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ 256

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	NULL,
	NULL,
	nxge_m_ioctl,
	nxge_m_getcapab,
	NULL,
	NULL,
	nxge_m_setprop,
	nxge_m_getprop
};

void
nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);
/*
 * For latency-sensitive applications, PAE and customers requested
 * tunables that let the user ask for a larger number of interrupts,
 * spreading the interrupt load among multiple channels.  The DDI
 * framework limits the maximum number of MSI-X resources to allocate
 * to 8 (ddi_msix_alloc_limit); if more than 8 are requested here,
 * ddi_msix_alloc_limit must be raised accordingly.  The default number
 * of MSI interrupts is 8 for a 10G link and 2 for a 1G link.
 */
#define	NXGE_MSIX_MAX_ALLOWED	32
uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
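/*
 * Example: to request 16 MSI-X interrupts on a 10G port, both the
 * driver tunable and the DDI allocation limit would be raised in
 * /etc/system:
 *
 *	set ddi_msix_alloc_limit = 16
 *	set nxge:nxge_msix_10g_intrs = 16
 */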
/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void		*nxge_list = NULL;
void		*nxge_hw_list = NULL;
nxge_os_mutex_t	nxge_common_lock;
nxge_os_mutex_t	nxgedebuglock;

extern uint64_t	npi_debug_level;

extern nxge_status_t	nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t	nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t	nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t	nxge_intr_ldgv_init(p_nxge_t);
extern void		nxge_fm_init(p_nxge_t,
	ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *,
	ddi_dma_attr_t *);
extern void		nxge_fm_fini(p_nxge_t);
extern npi_status_t	npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};
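/*
 * Note the differences among the DMA attribute structures above and
 * below: descriptor rings and receive buffers must each bind as a
 * single contiguous cookie (sgllen of 1), while a transmit packet may
 * bind with up to a 5-entry scatter/gather list; the receive
 * attributes additionally request DDI_DMA_RELAXED_ORDERING.
 */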
ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * dma chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes [] = {0x2000};
#else
size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
		0x10000, 0x20000, 0x40000, 0x80000,
		0x100000, 0x200000, 0x400000, 0x800000,
		0x1000000, 0x2000000, 0x4000000};
#endif

/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);

static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t	nxgep = NULL;
	int		instance;
	int		status = DDI_SUCCESS;
	uint8_t		portn;
	nxge_mmac_t	*mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}


	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;
	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
	    &nxge_dev_desc_dma_acc_attr,
	    &nxge_rx_dma_attr);

	/* Create & initialize the per-Neptune data structure */
	/* (even if we're a guest). */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	/*
	 * Software workaround: set the replay timer.
	 */
	if (nxgep->niu_type != N2_NIU) {
		nxge_set_pci_replay_timeout(nxgep);
	}

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail4;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
		 * internally; the remaining 2 ports use BMAC (1G "Big" MAC).
		 * The two types of MACs have different characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);

		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * The first integer (address_hi) of the reg property
		 * holds the config handle, but bits 28-31, which are
		 * OBP-specific information, must be masked off.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
		ddi_prop_free(regp);
	}
	/*
	 * Set the defaults for the MTU size.
	 */
	nxge_hw_id_init(nxgep);

	if (isLDOMguest(nxgep)) {
		uchar_t *prop_val;
		uint_t prop_len;
		uint32_t max_frame_size;

		extern void nxge_get_logical_props(p_nxge_t);

		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
		nxgep->mac.portmode = PORT_LOGICAL;
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
		    "phy-type", "virtual transceiver");

		nxgep->nports = 1;
		nxgep->board_ver = 0;	/* XXX What? */

		/*
		 * local-mac-address property gives us info on which
		 * specific MAC address the Hybrid resource is associated
		 * with.
		 */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "local-mac-address", &prop_val,
		    &prop_len) != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		if (prop_len != ETHERADDRL) {
			ddi_prop_free(prop_val);
			goto nxge_attach_fail5;
		}
		ether_copy(prop_val, nxgep->hio_mac_addr);
		ddi_prop_free(prop_val);
		nxge_get_logical_props(nxgep);

		/*
		 * Enable Jumbo property based on the "max-frame-size"
		 * property value.
		 */
		max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
		    nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "max-frame-size", NXGE_MTU_DEFAULT_MAX);
		if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
		    (max_frame_size <= TX_JUMBO_MTU)) {
			nxgep->mac.is_jumbo = B_TRUE;
			nxgep->mac.maxframesize = (uint16_t)max_frame_size;
			nxgep->mac.default_mtu = nxgep->mac.maxframesize -
			    NXGE_EHEADER_VLAN_CRC;
		}
	} else {
		status = nxge_xcvr_find(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
			    " Couldn't determine card type"
			    " .... exit "));
			goto nxge_attach_fail5;
		}

		status = nxge_get_config_properties(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "get_hw create failed"));
			goto nxge_attach_fail;
		}
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}


	if (!isLDOMguest(nxgep))
		nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		/* Find our VR & channel sets. */
		status = nxge_hio_vr_add(nxgep);
		if (status != NXGE_OK) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_hio_vr_add failed"));
			(void) hsvc_unregister(&nxgep->niu_hsvc);
			nxgep->niu_hsvc_available = B_FALSE;
		}
		goto nxge_attach_exit;
	}
#endif

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}
	/* If a guest, register with vio_net instead. */
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	/* nxge_link_monitor calls xcvr.check_link recursively */
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		status = DDI_SUCCESS;
	int		instance;
	p_nxge_t	nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}
	/*
	 * If this flag is set, it will affect the Neptune
	 * hardware only.
	 */
	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
		nxge_niu_peu_reset(nxgep);
	}

#if	defined(sun4v)
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if	defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
int
nxge_hsvc_register(nxge_t *nxgep)
{
	nxge_status_t status;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			return (DDI_FAILURE);
		}
		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}

	return (DDI_SUCCESS);
}
#endif
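/*
 * Register-set layout used by nxge_map_regs() below (derived from the
 * "reg" property): on Neptune (PCI-E), set 0 is PCI config space,
 * set 1 the device PIO registers, set 2 the MSI/MSI-X registers, and
 * set 3 the virtualization region.  On N2/NIU (FWARC 2006/556),
 * set 1 is the device PIO space and sets 2 and 3 are the two vio
 * regions.
 */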
static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;
	char		buf[MAXPATHLEN + 1];
	char		*devname;
#ifdef	NXGE_DEBUG
	char		*sysname;
#endif
	off_t		regsize;
	nxge_status_t	status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	/*
	 * The driver is running on an N2-NIU system if devname is something
	 * like "/niu@80/network@0"
	 */
	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) -1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int		*prop_val;
		uint_t		prop_len;
		uint8_t		func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				return (NXGE_OK);
			}
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMP IMP
		 * workaround for bit swapping bug in HW
		 * which ends up in no-snoop = yes
		 * resulting, in DMA not synched properly
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = pci_config_get16(dev_regs->nxge_pciregh,
		    pci_offset);
		pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));

	if (isLDOMguest(nxgep)) {
		nxge_guest_regs_map_free(nxgep);
		return;
	}

	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}
static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->interrupt_cookie = 0;
	} else {
		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
		    &nxgep->interrupt_cookie);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_setup_mutexes: failed 0x%x",
			    ddi_status));
			goto nxge_setup_mutexes_exit;
		}
	}

	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);

	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context
	 * because an FFLP operation can take a very long time to
	 * complete and hence is not suitable to invoke from interrupt
	 * handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}
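/*
 * nxge_init() below brings the device up in the following order: buffer
 * and descriptor pools, TXC, TX DMA channels, RX DMA channels, then (in
 * a service domain only) classification (TCAM/FCRAM), ZCP, IPP, the MAC
 * block, and finally interrupts.  nxge_uninit() further below tears the
 * device down in roughly the reverse order.
 */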
nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 *  enable the port, configure the dma channel bitmap,
		 *  configure the max burst size).
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Enable the interrupts for DDI.
	 */
	nxge_intrs_enable(nxgep);

	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}


timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}
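/*
 * Illustrative usage of the two timer helpers above (nxge_check_hw() is
 * a hypothetical callback, shown only for the calling convention):
 *
 *	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw, 1000);
 *	...
 *	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
 *	nxgep->nxge_timerid = 0;
 */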
void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Reset the receive MAC side.
		 */
		(void) nxge_rx_mac_disable(nxgep);

		/*
		 * Drain the IPP.
		 */
		(void) nxge_ipp_drain(nxgep);
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);


	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	/*
	 * Start the timer if the reset flag is not set.
	 * If the reset flag is set, the link monitor
	 * will not be started in order to stop further bus
	 * activity coming from this interface.
	 * The driver will start the monitor function
	 * if the interface is initialized again later.
	 */
	if (!nxge_peu_reset_enable) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t	reg;
	uint64_t	regdata;
	int		i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t	reg;
	uint64_t	buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
	reg = buf[0];

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}
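/*
 * Note on the two ioctl helpers above: the mblk passed to nxge_get64()
 * carries a 64-bit register offset in its first 8 bytes, which are
 * overwritten with the value read; nxge_put64() expects the offset in
 * the first 8 bytes followed by the 64-bit value to write.
 */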
/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) {
		/* In case a developer has changed nxge_debug_level. */
		if (nxgep->nxge_debug_level != nxge_debug_level)
			nxgep->nxge_debug_level = nxge_debug_level;
	}

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);

	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
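/*
 * Example: nxge_dump_packet() formats up to MAX_DUMP_SZ bytes as
 * colon-separated hex, which is convenient in debug traces, e.g.
 *
 *	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "pkt: %s",
 *	    nxge_dump_packet((char *)mp->b_rptr, 60)));
 */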
#ifdef	NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t	cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char		*dev_ptr;
	ddi_acc_handle_t pci_config_handle;
	uint32_t	regval;
	int		i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first 0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}
}

#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

	nxge_intrs_disable(nxgep);
	nxge_destroy_dev(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

	nxgep->suspended = DDI_RESUME;
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);
	(void) nxge_tx_mac_enable(nxgep);
	nxge_intrs_enable(nxgep);
	nxgep->suspended = 0;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_resume status = 0x%x", status));
	return (status);
}

static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
	    nxgep->mac.portnum));

	status = nxge_link_init(nxgep);

	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "port%d Bad register acc handle", nxgep->mac.portnum));
		status = NXGE_ERROR;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_setup_dev status "
		    "(xcvr init 0x%08x)", status));
		goto nxge_setup_dev_exit;
	}

nxge_setup_dev_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_dev port %d status = 0x%08x",
	    nxgep->mac.portnum, status));

	return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	(void) nxge_hw_stop(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}

static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
	int			ddi_status = DDI_SUCCESS;
	uint_t			count;
	ddi_dma_cookie_t	cookie;
	uint_t			iommu_pagesize;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
	if (nxgep->niu_type != N2_NIU) {
		iommu_pagesize = dvma_pagesize(nxgep->dip);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
		    " default_block_size %d iommu_pagesize %d",
		    nxgep->sys_page_sz,
		    ddi_ptob(nxgep->dip, (ulong_t)1),
		    nxgep->rx_default_block_size,
		    iommu_pagesize));

		if (iommu_pagesize != 0) {
			if (nxgep->sys_page_sz == iommu_pagesize) {
				if (iommu_pagesize > 0x4000)
					nxgep->sys_page_sz = 0x4000;
			} else {
				if (nxgep->sys_page_sz > iommu_pagesize)
					nxgep->sys_page_sz = iommu_pagesize;
			}
		}
	}
	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    nxgep->sys_page_sz,
	    ddi_ptob(nxgep->dip, (ulong_t)1),
	    nxgep->rx_default_block_size,
	    nxgep->sys_page_mask));


	switch (nxgep->sys_page_sz) {
	default:
		nxgep->sys_page_sz = 0x1000;
		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		nxgep->rx_default_block_size = 0x2000;
		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	case 0x4000:
		nxgep->rx_default_block_size = 0x4000;
		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
		break;
	case 0x8000:
		nxgep->rx_default_block_size = 0x8000;
		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
		break;
	}

#ifndef USE_RX_BIG_BUF
	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
	nxgep->rx_default_block_size = 0x2000;
	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
	/*
	 * Get the system DMA burst size.
	 */
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &nxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed "
		    " status 0x%x", ddi_status));
		goto nxge_get_soft_properties_exit;
	}

	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
	    (caddr_t)nxgep->dmasparehandle,
	    sizeof (nxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "Binding spare handle to find system"
		    " burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto nxge_get_soft_properties_fail1;
	}

	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

nxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_system_dma_pages status = 0x%08x", status));
	return (status);
}

static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));

	status = nxge_alloc_rx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		return (NXGE_ERROR);
	}

	status = nxge_alloc_tx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		nxge_free_rx_mem_pool(nxgep);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
	return (NXGE_OK);
}

static void
nxge_free_mem_pool(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));

	nxge_free_rx_mem_pool(nxgep);
	nxge_free_tx_mem_pool(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}
nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 2164 { 2165 uint32_t rdc_max; 2166 p_nxge_dma_pt_cfg_t p_all_cfgp; 2167 p_nxge_hw_pt_cfg_t p_cfgp; 2168 p_nxge_dma_pool_t dma_poolp; 2169 p_nxge_dma_common_t *dma_buf_p; 2170 p_nxge_dma_pool_t dma_cntl_poolp; 2171 p_nxge_dma_common_t *dma_cntl_p; 2172 uint32_t *num_chunks; /* per dma */ 2173 nxge_status_t status = NXGE_OK; 2174 2175 uint32_t nxge_port_rbr_size; 2176 uint32_t nxge_port_rbr_spare_size; 2177 uint32_t nxge_port_rcr_size; 2178 uint32_t rx_cntl_alloc_size; 2179 2180 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 2181 2182 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2183 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2184 rdc_max = NXGE_MAX_RDCS; 2185 2186 /* 2187 * Allocate memory for the common DMA data structures. 2188 */ 2189 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2190 KM_SLEEP); 2191 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2192 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2193 2194 dma_cntl_poolp = (p_nxge_dma_pool_t) 2195 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2196 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2197 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2198 2199 num_chunks = (uint32_t *)KMEM_ZALLOC( 2200 sizeof (uint32_t) * rdc_max, KM_SLEEP); 2201 2202 /* 2203 * Assume that each DMA channel will be configured with 2204 * the default block size. 2205 * RBR block counts are rounded up to a multiple of the batch count (16). 2206 */ 2207 nxge_port_rbr_size = p_all_cfgp->rbr_size; 2208 nxge_port_rcr_size = p_all_cfgp->rcr_size; 2209 2210 if (!nxge_port_rbr_size) { 2211 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 2212 } 2213 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 2214 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2215 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 2216 } 2217 2218 p_all_cfgp->rbr_size = nxge_port_rbr_size; 2219 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 2220 2221 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 2222 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2223 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 2224 } 2225 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 2226 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2227 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 2228 "set to default %d", 2229 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 2230 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 2231 } 2232 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 2233 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2234 "nxge_alloc_rx_mem_pool: RCR too high %d, " 2235 "set to default %d", 2236 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 2237 nxge_port_rcr_size = RCR_DEFAULT_MAX; 2238 } 2239 2240 /* 2241 * N2/NIU has limitations on the descriptor sizes: contiguous 2242 * memory allocation for data buffers is limited to 4M (contig_mem_alloc), 2243 * and the control buffers (little endian) must use the ddi/dki 2244 * memory allocation functions. 2245 */ 2246 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2247 if (nxgep->niu_type == N2_NIU) { 2248 nxge_port_rbr_spare_size = 0; 2249 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2250 (!ISP2(nxge_port_rbr_size))) { 2251 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2252 } 2253 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2254 (!ISP2(nxge_port_rcr_size))) { 2255 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2256 } 2257 } 2258 #endif 2259 2260 /* 2261 * Addresses of receive block ring, receive completion ring and the 2262 * mailbox must be all cache-aligned (64 bytes).
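* For example (hypothetical sizes, purely for illustration): an RBR of 4096 entries with no spare and an RCR of 8192 entries would make the computation below 4096 * sizeof (rx_desc_t) + 8192 * sizeof (rcr_entry_t) + sizeof (rxdma_mailbox_t) bytes; on N2/NIU the result is then raised to a minimum of 0x2000 further down.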
2263 */ 2264 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2265 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2266 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2267 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2268 2269 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2270 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2271 "nxge_port_rcr_size = %d " 2272 "rx_cntl_alloc_size = %d", 2273 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2274 nxge_port_rcr_size, 2275 rx_cntl_alloc_size)); 2276 2277 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2278 if (nxgep->niu_type == N2_NIU) { 2279 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2280 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2281 2282 if (!ISP2(rx_buf_alloc_size)) { 2283 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2284 "==> nxge_alloc_rx_mem_pool: " 2285 " must be power of 2")); 2286 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2287 goto nxge_alloc_rx_mem_pool_exit; 2288 } 2289 2290 if (rx_buf_alloc_size > (1 << 22)) { 2291 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2292 "==> nxge_alloc_rx_mem_pool: " 2293 " limit size to 4M")); 2294 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2295 goto nxge_alloc_rx_mem_pool_exit; 2296 } 2297 2298 if (rx_cntl_alloc_size < 0x2000) { 2299 rx_cntl_alloc_size = 0x2000; 2300 } 2301 } 2302 #endif 2303 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2304 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2305 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2306 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2307 2308 dma_poolp->ndmas = p_cfgp->max_rdcs; 2309 dma_poolp->num_chunks = num_chunks; 2310 dma_poolp->buf_allocated = B_TRUE; 2311 nxgep->rx_buf_pool_p = dma_poolp; 2312 dma_poolp->dma_buf_pool_p = dma_buf_p; 2313 2314 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2315 dma_cntl_poolp->buf_allocated = B_TRUE; 2316 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2317 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2318 2319 /* Allocate the receive rings, too. */ 2320 nxgep->rx_rbr_rings = 2321 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2322 nxgep->rx_rbr_rings->rbr_rings = 2323 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2324 nxgep->rx_rcr_rings = 2325 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2326 nxgep->rx_rcr_rings->rcr_rings = 2327 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2328 nxgep->rx_mbox_areas_p = 2329 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2330 nxgep->rx_mbox_areas_p->rxmbox_areas = 2331 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2332 2333 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2334 p_cfgp->max_rdcs; 2335 2336 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2337 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2338 2339 nxge_alloc_rx_mem_pool_exit: 2340 return (status); 2341 } 2342 2343 /* 2344 * nxge_alloc_rxb 2345 * 2346 * Allocate buffers for an RDC. 2347 * 2348 * Arguments: 2349 * nxgep 2350 * channel The channel to map into our kernel space. 
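* (The data buffers and the descriptor/mailbox control area are allocated separately, by nxge_alloc_rx_buf_dma() and nxge_alloc_rx_cntl_dma() respectively; see the function body below.)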
* 2352 * Notes: 2353 * 2354 * NPI function calls: 2355 * 2356 * NXGE function calls: 2357 * 2358 * Registers accessed: 2359 * 2360 * Context: 2361 * 2362 * Taking apart: 2363 * 2364 * Open questions: 2365 * 2366 */ 2367 nxge_status_t 2368 nxge_alloc_rxb( 2369 p_nxge_t nxgep, 2370 int channel) 2371 { 2372 size_t rx_buf_alloc_size; 2373 nxge_status_t status = NXGE_OK; 2374 2375 nxge_dma_common_t **data; 2376 nxge_dma_common_t **control; 2377 uint32_t *num_chunks; 2378 2379 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb")); 2380 2381 /* 2382 * Allocate memory for the receive buffers and descriptor rings. 2383 * Replace these allocation functions with the interface functions 2384 * provided by the partition manager if/when they are available. 2385 */ 2386 2387 /* 2388 * Allocate memory for the receive buffer blocks. 2389 */ 2390 rx_buf_alloc_size = (nxgep->rx_default_block_size * 2391 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); 2392 2393 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2394 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; 2395 2396 if ((status = nxge_alloc_rx_buf_dma( 2397 nxgep, channel, data, rx_buf_alloc_size, 2398 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { 2399 return (status); 2400 } 2401 2402 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " 2403 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); 2404 2405 /* 2406 * Allocate memory for descriptor rings and mailbox. 2407 */ 2408 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2409 2410 if ((status = nxge_alloc_rx_cntl_dma( 2411 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 2412 != NXGE_OK) { 2413 nxge_free_rx_cntl_dma(nxgep, *control); 2414 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 2415 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 2416 return (status); 2417 } 2418 2419 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2420 "<== nxge_alloc_rxb: status 0x%08x", status)); 2421 2422 return (status); 2423 } 2424 2425 void 2426 nxge_free_rxb( 2427 p_nxge_t nxgep, 2428 int channel) 2429 { 2430 nxge_dma_common_t *data; 2431 nxge_dma_common_t *control; 2432 uint32_t num_chunks; 2433 2434 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb")); 2435 2436 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2437 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 2438 nxge_free_rx_buf_dma(nxgep, data, num_chunks); 2439 2440 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2441 nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 2442 2443 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2444 nxge_free_rx_cntl_dma(nxgep, control); 2445 2446 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2447 2448 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2449 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2450 2451 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb")); 2452 } 2453 2454 static void 2455 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2456 { 2457 int rdc_max = NXGE_MAX_RDCS; 2458 2459 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2460 2461 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 2462 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2463 "<== nxge_free_rx_mem_pool " 2464 "(null rx buf pool or buf not allocated)")); 2465 return; 2466 } 2467 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 2468 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2469 "<== nxge_free_rx_mem_pool " 2470 "(null rx cntl buf pool or cntl buf not allocated)")); 2471 return; 2472 } 2473 2474
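/* Both pools are present: free the control pool arrays, then the buffer pool arrays, then the ring and mailbox arrays, undoing the allocations made in nxge_alloc_rx_mem_pool(). */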
KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2475 sizeof (p_nxge_dma_common_t) * rdc_max); 2476 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2477 2478 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2479 sizeof (uint32_t) * rdc_max); 2480 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2481 sizeof (p_nxge_dma_common_t) * rdc_max); 2482 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2483 2484 nxgep->rx_buf_pool_p = 0; 2485 nxgep->rx_cntl_pool_p = 0; 2486 2487 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2488 sizeof (p_rx_rbr_ring_t) * rdc_max); 2489 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2490 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2491 sizeof (p_rx_rcr_ring_t) * rdc_max); 2492 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2493 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2494 sizeof (p_rx_mbox_t) * rdc_max); 2495 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2496 2497 nxgep->rx_rbr_rings = 0; 2498 nxgep->rx_rcr_rings = 0; 2499 nxgep->rx_mbox_areas_p = 0; 2500 2501 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2502 } 2503 2504 2505 static nxge_status_t 2506 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2507 p_nxge_dma_common_t *dmap, 2508 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2509 { 2510 p_nxge_dma_common_t rx_dmap; 2511 nxge_status_t status = NXGE_OK; 2512 size_t total_alloc_size; 2513 size_t allocated = 0; 2514 int i, size_index, array_size; 2515 boolean_t use_kmem_alloc = B_FALSE; 2516 2517 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2518 2519 rx_dmap = (p_nxge_dma_common_t) 2520 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2521 KM_SLEEP); 2522 2523 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2524 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2525 dma_channel, alloc_size, block_size, dmap)); 2526 2527 total_alloc_size = alloc_size; 2528 2529 #if defined(RX_USE_RECLAIM_POST) 2530 total_alloc_size = alloc_size + alloc_size/4; 2531 #endif 2532 2533 i = 0; 2534 size_index = 0; 2535 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2536 while ((size_index < array_size) && 2537 (alloc_sizes[size_index] < alloc_size)) 2538 size_index++; 2539 if (size_index >= array_size) { 2540 size_index = array_size - 1; 2541 } 2542 2543 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2544 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2545 use_kmem_alloc = B_TRUE; 2546 #if defined(__i386) || defined(__amd64) 2547 size_index = 0; 2548 #endif 2549 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2550 "==> nxge_alloc_rx_buf_dma: " 2551 "Neptune use kmem_alloc() - size_index %d", 2552 size_index)); 2553 } 2554 2555 while ((allocated < total_alloc_size) && 2556 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2557 rx_dmap[i].dma_chunk_index = i; 2558 rx_dmap[i].block_size = block_size; 2559 rx_dmap[i].alength = alloc_sizes[size_index]; 2560 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2561 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2562 rx_dmap[i].dma_channel = dma_channel; 2563 rx_dmap[i].contig_alloc_type = B_FALSE; 2564 rx_dmap[i].kmem_alloc_type = B_FALSE; 2565 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2566 2567 /* 2568 * N2/NIU: data buffers must be contiguous as the driver 2569 * needs to call Hypervisor api to set up 2570 * logical pages. 
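* (contig_mem_alloc() returns physically contiguous memory suitable for the hypervisor logical-page setup, hence the NXGE_DMA_BLOCK == 1 test below.)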
2571 */ 2572 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2573 rx_dmap[i].contig_alloc_type = B_TRUE; 2574 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2575 } else if (use_kmem_alloc) { 2576 /* For Neptune, use kmem_alloc */ 2577 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2578 "==> nxge_alloc_rx_buf_dma: " 2579 "Neptune use kmem_alloc()")); 2580 rx_dmap[i].kmem_alloc_type = B_TRUE; 2581 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2582 } 2583 2584 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2585 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2586 "i %d nblocks %d alength %d", 2587 dma_channel, i, &rx_dmap[i], block_size, 2588 i, rx_dmap[i].nblocks, 2589 rx_dmap[i].alength)); 2590 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2591 &nxge_rx_dma_attr, 2592 rx_dmap[i].alength, 2593 &nxge_dev_buf_dma_acc_attr, 2594 DDI_DMA_READ | DDI_DMA_STREAMING, 2595 (p_nxge_dma_common_t)(&rx_dmap[i])); 2596 if (status != NXGE_OK) { 2597 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2598 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2599 "dma %d size_index %d size requested %d", 2600 dma_channel, 2601 size_index, 2602 rx_dmap[i].alength)); 2603 size_index--; 2604 } else { 2605 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2606 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2607 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2608 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2609 "buf_alloc_state %d alloc_type %d", 2610 dma_channel, 2611 &rx_dmap[i], 2612 rx_dmap[i].kaddrp, 2613 rx_dmap[i].alength, 2614 rx_dmap[i].buf_alloc_state, 2615 rx_dmap[i].buf_alloc_type)); 2616 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2617 " alloc_rx_buf_dma allocated rdc %d " 2618 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2619 dma_channel, i, rx_dmap[i].alength, 2620 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2621 rx_dmap[i].kaddrp)); 2622 i++; 2623 allocated += alloc_sizes[size_index]; 2624 } 2625 } 2626 2627 if (allocated < total_alloc_size) { 2628 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2629 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2630 "allocated 0x%x requested 0x%x", 2631 dma_channel, 2632 allocated, total_alloc_size)); 2633 status = NXGE_ERROR; 2634 goto nxge_alloc_rx_mem_fail1; 2635 } 2636 2637 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2638 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2639 "allocated 0x%x requested 0x%x", 2640 dma_channel, 2641 allocated, total_alloc_size)); 2642 2643 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2644 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2645 dma_channel, i)); 2646 *num_chunks = i; 2647 *dmap = rx_dmap; 2648 2649 goto nxge_alloc_rx_mem_exit; 2650 2651 nxge_alloc_rx_mem_fail1: 2652 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2653 2654 nxge_alloc_rx_mem_exit: 2655 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2656 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2657 2658 return (status); 2659 } 2660 2661 /*ARGSUSED*/ 2662 static void 2663 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2664 uint32_t num_chunks) 2665 { 2666 int i; 2667 2668 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2669 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2670 2671 if (dmap == 0) 2672 return; 2673 2674 for (i = 0; i < num_chunks; i++) { 2675 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2676 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2677 i, dmap)); 2678 nxge_dma_free_rx_data_buf(dmap++); 2679 } 2680 2681 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2682 } 2683 2684 /*ARGSUSED*/ 2685 static nxge_status_t 2686 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2687 p_nxge_dma_common_t *dmap, size_t 
size) 2688 { 2689 p_nxge_dma_common_t rx_dmap; 2690 nxge_status_t status = NXGE_OK; 2691 2692 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2693 2694 rx_dmap = (p_nxge_dma_common_t) 2695 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2696 2697 rx_dmap->contig_alloc_type = B_FALSE; 2698 rx_dmap->kmem_alloc_type = B_FALSE; 2699 2700 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2701 &nxge_desc_dma_attr, 2702 size, 2703 &nxge_dev_desc_dma_acc_attr, 2704 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2705 rx_dmap); 2706 if (status != NXGE_OK) { 2707 goto nxge_alloc_rx_cntl_dma_fail1; 2708 } 2709 2710 *dmap = rx_dmap; 2711 goto nxge_alloc_rx_cntl_dma_exit; 2712 2713 nxge_alloc_rx_cntl_dma_fail1: 2714 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2715 2716 nxge_alloc_rx_cntl_dma_exit: 2717 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2718 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2719 2720 return (status); 2721 } 2722 2723 /*ARGSUSED*/ 2724 static void 2725 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2726 { 2727 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2728 2729 if (dmap == 0) 2730 return; 2731 2732 nxge_dma_mem_free(dmap); 2733 2734 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2735 } 2736 2737 typedef struct { 2738 size_t tx_size; 2739 size_t cr_size; 2740 size_t threshhold; 2741 } nxge_tdc_sizes_t; 2742 2743 static 2744 nxge_status_t 2745 nxge_tdc_sizes( 2746 nxge_t *nxgep, 2747 nxge_tdc_sizes_t *sizes) 2748 { 2749 uint32_t threshhold; /* The bcopy() threshold */ 2750 size_t tx_size; /* Transmit buffer size */ 2751 size_t cr_size; /* Completion ring size */ 2752 2753 /* 2754 * Assume that each DMA channel will be configured with the 2755 * default transmit buffer size for copying transmit data. 2756 * (If a packet is bigger than this, it will not be copied.) 2757 */ 2758 if (nxgep->niu_type == N2_NIU) { 2759 threshhold = TX_BCOPY_SIZE; 2760 } else { 2761 threshhold = nxge_bcopy_thresh; 2762 } 2763 tx_size = nxge_tx_ring_size * threshhold; 2764 2765 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 2766 cr_size += sizeof (txdma_mailbox_t); 2767 2768 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2769 if (nxgep->niu_type == N2_NIU) { 2770 if (!ISP2(tx_size)) { 2771 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2772 "==> nxge_tdc_sizes: Tx size" 2773 " must be power of 2")); 2774 return (NXGE_ERROR); 2775 } 2776 2777 if (tx_size > (1 << 22)) { 2778 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2779 "==> nxge_tdc_sizes: Tx size" 2780 " limited to 4M")); 2781 return (NXGE_ERROR); 2782 } 2783 2784 if (cr_size < 0x2000) 2785 cr_size = 0x2000; 2786 } 2787 #endif 2788 2789 sizes->threshhold = threshhold; 2790 sizes->tx_size = tx_size; 2791 sizes->cr_size = cr_size; 2792 2793 return (NXGE_OK); 2794 } 2795 /* 2796 * nxge_alloc_txb 2797 * 2798 * Allocate buffers for a TDC. 2799 * 2800 * Arguments: 2801 * nxgep 2802 * channel The channel to map into our kernel space.
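* (Buffer and completion-ring sizes are computed first by nxge_tdc_sizes() above; no allocation is attempted if that computation fails.)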
* 2804 * Notes: 2805 * 2806 * NPI function calls: 2807 * 2808 * NXGE function calls: 2809 * 2810 * Registers accessed: 2811 * 2812 * Context: 2813 * 2814 * Taking apart: 2815 * 2816 * Open questions: 2817 * 2818 */ 2819 nxge_status_t 2820 nxge_alloc_txb( 2821 p_nxge_t nxgep, 2822 int channel) 2823 { 2824 nxge_dma_common_t **dma_buf_p; 2825 nxge_dma_common_t **dma_cntl_p; 2826 uint32_t *num_chunks; 2827 nxge_status_t status = NXGE_OK; 2828 2829 nxge_tdc_sizes_t sizes; 2830 2831 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb")); 2832 2833 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 2834 return (NXGE_ERROR); 2835 2836 /* 2837 * Allocate memory for transmit buffers and descriptor rings. 2838 * Replace these allocation functions with the interface functions 2839 * provided by the partition manager Real Soon Now. 2840 */ 2841 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2842 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 2843 2844 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2845 2846 /* 2847 * Allocate memory for the transmit buffer pool. 2848 */ 2853 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2854 "sizes: tx: %ld, cr:%ld, th:%ld", 2855 sizes.tx_size, sizes.cr_size, sizes.threshhold)); 2856 2857 *num_chunks = 0; 2858 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 2859 sizes.tx_size, sizes.threshhold, num_chunks); 2860 if (status != NXGE_OK) { 2861 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 2862 return (status); 2863 } 2864 2865 /* 2866 * Allocate memory for descriptor rings and mailbox. 2867 */ 2868 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 2869 sizes.cr_size); 2870 if (status != NXGE_OK) { 2871 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 2872 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 2873 return (status); 2874 } 2875 2876 return (NXGE_OK); 2877 } 2878 2879 void 2880 nxge_free_txb( 2881 p_nxge_t nxgep, 2882 int channel) 2883 { 2884 nxge_dma_common_t *data; 2885 nxge_dma_common_t *control; 2886 uint32_t num_chunks; 2887 2888 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 2889 2890 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2891 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2892 nxge_free_tx_buf_dma(nxgep, data, num_chunks); 2893 2894 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2895 nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 2896 2897 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2898 nxge_free_tx_cntl_dma(nxgep, control); 2899 2900 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2901 2902 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2903 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2904 2905 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 2906 } 2907 2908 /* 2909 * nxge_alloc_tx_mem_pool 2910 * 2911 * This function allocates all of the per-port TDC control data structures. 2912 * The per-channel (TDC) data structures are allocated when needed.
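* (The per-channel allocation itself is done on demand by nxge_alloc_txb(), one TDC at a time.)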
* 2914 * Arguments: 2915 * nxgep 2916 * 2917 * Notes: 2918 * 2919 * Context: 2920 * Any domain 2921 */ 2922 nxge_status_t 2923 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2924 { 2925 nxge_hw_pt_cfg_t *p_cfgp; 2926 nxge_dma_pool_t *dma_poolp; 2927 nxge_dma_common_t **dma_buf_p; 2928 nxge_dma_pool_t *dma_cntl_poolp; 2929 nxge_dma_common_t **dma_cntl_p; 2930 uint32_t *num_chunks; /* per dma */ 2931 int tdc_max; 2932 2933 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2934 2935 p_cfgp = &nxgep->pt_config.hw_config; 2936 tdc_max = NXGE_MAX_TDCS; 2937 2938 /* 2939 * Allocate memory for each transmit DMA channel. 2940 */ 2941 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2942 KM_SLEEP); 2943 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2944 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2945 2946 dma_cntl_poolp = (p_nxge_dma_pool_t) 2947 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2948 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2949 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2950 2951 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2952 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2953 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2954 "set to default %d", 2955 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2956 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2957 } 2958 2959 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2960 /* 2961 * N2/NIU has limitations on the descriptor sizes: contiguous 2962 * memory allocation for data buffers is limited to 4M (contig_mem_alloc), 2963 * and the control buffers (little endian) must use the ddi/dki 2964 * memory allocation functions. The transmit ring is limited to 8K (includes the 2965 * mailbox). 2966 */ 2967 if (nxgep->niu_type == N2_NIU) { 2968 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2969 (!ISP2(nxge_tx_ring_size))) { 2970 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2971 } 2972 } 2973 #endif 2974 2975 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2976 2977 num_chunks = (uint32_t *)KMEM_ZALLOC( 2978 sizeof (uint32_t) * tdc_max, KM_SLEEP); 2979 2980 dma_poolp->ndmas = p_cfgp->tdc.owned; 2981 dma_poolp->num_chunks = num_chunks; 2982 dma_poolp->dma_buf_pool_p = dma_buf_p; 2983 nxgep->tx_buf_pool_p = dma_poolp; 2984 2985 dma_poolp->buf_allocated = B_TRUE; 2986 2987 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 2988 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2989 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2990 2991 dma_cntl_poolp->buf_allocated = B_TRUE; 2992 2993 nxgep->tx_rings = 2994 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 2995 nxgep->tx_rings->rings = 2996 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 2997 nxgep->tx_mbox_areas_p = 2998 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 2999 nxgep->tx_mbox_areas_p->txmbox_areas_p = 3000 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 3001 3002 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 3003 3004 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3005 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 3006 tdc_max, dma_poolp->ndmas)); 3007 3008 return (NXGE_OK); 3009 } 3010 3011 nxge_status_t 3012 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 3013 p_nxge_dma_common_t *dmap, size_t alloc_size, 3014 size_t block_size, uint32_t *num_chunks) 3015 { 3016 p_nxge_dma_common_t tx_dmap; 3017 nxge_status_t status = NXGE_OK; 3018 size_t total_alloc_size; 3019 size_t allocated = 0; 3020 int i, size_index, array_size; 3021 3022 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 3023 3024 tx_dmap = (p_nxge_dma_common_t) 3025 KMEM_ZALLOC(sizeof (nxge_dma_common_t) *
NXGE_DMA_BLOCK, 3026 KM_SLEEP); 3027 3028 total_alloc_size = alloc_size; 3029 i = 0; 3030 size_index = 0; 3031 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3032 while ((size_index < array_size) && 3033 (alloc_sizes[size_index] < alloc_size)) 3034 size_index++; 3035 if (size_index >= array_size) { 3036 size_index = array_size - 1; 3037 } 3038 3039 while ((allocated < total_alloc_size) && 3040 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3041 3042 tx_dmap[i].dma_chunk_index = i; 3043 tx_dmap[i].block_size = block_size; 3044 tx_dmap[i].alength = alloc_sizes[size_index]; 3045 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3046 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3047 tx_dmap[i].dma_channel = dma_channel; 3048 tx_dmap[i].contig_alloc_type = B_FALSE; 3049 tx_dmap[i].kmem_alloc_type = B_FALSE; 3050 3051 /* 3052 * N2/NIU: data buffers must be contiguous as the driver 3053 * needs to call Hypervisor api to set up 3054 * logical pages. 3055 */ 3056 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3057 tx_dmap[i].contig_alloc_type = B_TRUE; 3058 } 3059 3060 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3061 &nxge_tx_dma_attr, 3062 tx_dmap[i].alength, 3063 &nxge_dev_buf_dma_acc_attr, 3064 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3065 (p_nxge_dma_common_t)(&tx_dmap[i])); 3066 if (status != NXGE_OK) { 3067 size_index--; 3068 } else { 3069 i++; 3070 allocated += alloc_sizes[size_index]; 3071 } 3072 } 3073 3074 if (allocated < total_alloc_size) { 3075 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3076 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 3077 "allocated 0x%x requested 0x%x", 3078 dma_channel, 3079 allocated, total_alloc_size)); 3080 status = NXGE_ERROR; 3081 goto nxge_alloc_tx_mem_fail1; 3082 } 3083 3084 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3085 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3086 "allocated 0x%x requested 0x%x", 3087 dma_channel, 3088 allocated, total_alloc_size)); 3089 3090 *num_chunks = i; 3091 *dmap = tx_dmap; 3092 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3093 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3094 *dmap, i)); 3095 goto nxge_alloc_tx_mem_exit; 3096 3097 nxge_alloc_tx_mem_fail1: 3098 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3099 3100 nxge_alloc_tx_mem_exit: 3101 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3102 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3103 3104 return (status); 3105 } 3106 3107 /*ARGSUSED*/ 3108 static void 3109 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3110 uint32_t num_chunks) 3111 { 3112 int i; 3113 3114 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3115 3116 if (dmap == 0) 3117 return; 3118 3119 for (i = 0; i < num_chunks; i++) { 3120 nxge_dma_mem_free(dmap++); 3121 } 3122 3123 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3124 } 3125 3126 /*ARGSUSED*/ 3127 nxge_status_t 3128 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3129 p_nxge_dma_common_t *dmap, size_t size) 3130 { 3131 p_nxge_dma_common_t tx_dmap; 3132 nxge_status_t status = NXGE_OK; 3133 3134 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3135 tx_dmap = (p_nxge_dma_common_t) 3136 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3137 3138 tx_dmap->contig_alloc_type = B_FALSE; 3139 tx_dmap->kmem_alloc_type = B_FALSE; 3140 3141 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3142 &nxge_desc_dma_attr, 3143 size, 3144 &nxge_dev_desc_dma_acc_attr, 3145 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3146 tx_dmap); 3147 if (status != NXGE_OK) { 3148 
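/* DMA memory could not be allocated; free the unused descriptor structure below. */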
goto nxge_alloc_tx_cntl_dma_fail1; 3149 } 3150 3151 *dmap = tx_dmap; 3152 goto nxge_alloc_tx_cntl_dma_exit; 3153 3154 nxge_alloc_tx_cntl_dma_fail1: 3155 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3156 3157 nxge_alloc_tx_cntl_dma_exit: 3158 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3159 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3160 3161 return (status); 3162 } 3163 3164 /*ARGSUSED*/ 3165 static void 3166 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3167 { 3168 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3169 3170 if (dmap == 0) 3171 return; 3172 3173 nxge_dma_mem_free(dmap); 3174 3175 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3176 } 3177 3178 /* 3179 * nxge_free_tx_mem_pool 3180 * 3181 * This function frees all of the per-port TDC control data structures. 3182 * The per-channel (TDC) data structures are freed when the channel 3183 * is stopped. 3184 * 3185 * Arguments: 3186 * nxgep 3187 * 3188 * Notes: 3189 * 3190 * Context: 3191 * Any domain 3192 */ 3193 static void 3194 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3195 { 3196 int tdc_max = NXGE_MAX_TDCS; 3197 3198 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3199 3200 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3201 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3202 "<== nxge_free_tx_mem_pool " 3203 "(null tx buf pool or buf not allocated)")); 3204 return; 3205 } 3206 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3207 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3208 "<== nxge_free_tx_mem_pool " 3209 "(null tx cntl buf pool or cntl buf not allocated)")); 3210 return; 3211 } 3212 3213 /* 1. Free the mailboxes. */ 3214 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3215 sizeof (p_tx_mbox_t) * tdc_max); 3216 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3217 3218 nxgep->tx_mbox_areas_p = 0; 3219 3220 /* 2. Free the transmit ring arrays. */ 3221 KMEM_FREE(nxgep->tx_rings->rings, 3222 sizeof (p_tx_ring_t) * tdc_max); 3223 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3224 3225 nxgep->tx_rings = 0; 3226 3227 /* 3. Free the completion ring data structures. */ 3228 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3229 sizeof (p_nxge_dma_common_t) * tdc_max); 3230 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3231 3232 nxgep->tx_cntl_pool_p = 0; 3233 3234 /* 4. Free the data ring data structures. */ 3235 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3236 sizeof (uint32_t) * tdc_max); 3237 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3238 sizeof (p_nxge_dma_common_t) * tdc_max); 3239 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3240 3241 nxgep->tx_buf_pool_p = 0; 3242 3243 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3244 } 3245 3246 /*ARGSUSED*/ 3247 static nxge_status_t 3248 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3249 struct ddi_dma_attr *dma_attrp, 3250 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3251 p_nxge_dma_common_t dma_p) 3252 { 3253 caddr_t kaddrp; 3254 int ddi_status = DDI_SUCCESS; 3255 boolean_t contig_alloc_type; 3256 boolean_t kmem_alloc_type; 3257 3258 contig_alloc_type = dma_p->contig_alloc_type; 3259 3260 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3261 /* 3262 * contig_alloc_type for contiguous memory only allowed 3263 * for N2/NIU.
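* (contig_mem_alloc() exists only under sun4v; the non-sun4v branch of the switch below rejects this allocation type outright.)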
3264 */ 3265 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3266 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3267 dma_p->contig_alloc_type)); 3268 return (NXGE_ERROR | NXGE_DDI_FAILED); 3269 } 3270 3271 dma_p->dma_handle = NULL; 3272 dma_p->acc_handle = NULL; 3273 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3274 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3275 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3276 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3277 if (ddi_status != DDI_SUCCESS) { 3278 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3279 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3280 return (NXGE_ERROR | NXGE_DDI_FAILED); 3281 } 3282 3283 kmem_alloc_type = dma_p->kmem_alloc_type; 3284 3285 switch (contig_alloc_type) { 3286 case B_FALSE: 3287 switch (kmem_alloc_type) { 3288 case B_FALSE: 3289 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3290 length, 3291 acc_attr_p, 3292 xfer_flags, 3293 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3294 &dma_p->acc_handle); 3295 if (ddi_status != DDI_SUCCESS) { 3296 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3297 "nxge_dma_mem_alloc: " 3298 "ddi_dma_mem_alloc failed")); 3299 ddi_dma_free_handle(&dma_p->dma_handle); 3300 dma_p->dma_handle = NULL; 3301 return (NXGE_ERROR | NXGE_DDI_FAILED); 3302 } 3303 if (dma_p->alength < length) { 3304 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3305 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3306 "< length.")); 3307 ddi_dma_mem_free(&dma_p->acc_handle); 3308 ddi_dma_free_handle(&dma_p->dma_handle); 3309 dma_p->acc_handle = NULL; 3310 dma_p->dma_handle = NULL; 3311 return (NXGE_ERROR); 3312 } 3313 3314 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3315 NULL, 3316 kaddrp, dma_p->alength, xfer_flags, 3317 DDI_DMA_DONTWAIT, 3318 0, &dma_p->dma_cookie, &dma_p->ncookies); 3319 if (ddi_status != DDI_DMA_MAPPED) { 3320 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3321 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3322 "failed " 3323 "(status 0x%x ncookies %d.)", ddi_status, 3324 dma_p->ncookies)); 3325 if (dma_p->acc_handle) { 3326 ddi_dma_mem_free(&dma_p->acc_handle); 3327 dma_p->acc_handle = NULL; 3328 } 3329 ddi_dma_free_handle(&dma_p->dma_handle); 3330 dma_p->dma_handle = NULL; 3331 return (NXGE_ERROR | NXGE_DDI_FAILED); 3332 } 3333 3334 if (dma_p->ncookies != 1) { 3335 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3336 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3337 "> 1 cookie " 3338 "(status 0x%x ncookies %d.)", ddi_status, 3339 dma_p->ncookies)); 3340 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3341 if (dma_p->acc_handle) { 3342 ddi_dma_mem_free(&dma_p->acc_handle); 3343 dma_p->acc_handle = NULL; 3344 } 3345 ddi_dma_free_handle(&dma_p->dma_handle); 3346 dma_p->dma_handle = NULL; 3347 dma_p->acc_handle = NULL; 3348 return (NXGE_ERROR); 3349 } 3350 break; 3351 3352 case B_TRUE: 3353 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3354 if (kaddrp == NULL) { 3355 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3356 "nxge_dma_mem_alloc: " 3357 "kmem_alloc failed")); 3358 return (NXGE_ERROR); 3359 } 3360 3361 dma_p->alength = length; 3362 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3363 NULL, kaddrp, dma_p->alength, xfer_flags, 3364 DDI_DMA_DONTWAIT, 0, 3365 &dma_p->dma_cookie, &dma_p->ncookies); 3366 if (ddi_status != DDI_DMA_MAPPED) { 3367 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3368 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3369 "(kmem_alloc) failed kaddrp $%p length %d " 3370 "(status 0x%x (%d) ncookies %d.)", 3371 kaddrp, length, 3372 ddi_status, ddi_status, dma_p->ncookies)); 3373 KMEM_FREE(kaddrp, length); 3374
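/* The bind failed and the kmem buffer has already been freed; clear the remaining handle state before returning. */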
dma_p->acc_handle = NULL; 3375 ddi_dma_free_handle(&dma_p->dma_handle); 3376 dma_p->dma_handle = NULL; 3377 dma_p->kaddrp = NULL; 3378 return (NXGE_ERROR | NXGE_DDI_FAILED); 3379 } 3380 3381 if (dma_p->ncookies != 1) { 3382 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3383 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3384 "(kmem_alloc) > 1 cookie " 3385 "(status 0x%x ncookies %d.)", ddi_status, 3386 dma_p->ncookies)); 3387 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3388 KMEM_FREE(kaddrp, length); 3389 ddi_dma_free_handle(&dma_p->dma_handle); 3390 dma_p->dma_handle = NULL; 3391 dma_p->acc_handle = NULL; 3392 dma_p->kaddrp = NULL; 3393 return (NXGE_ERROR); 3394 } 3395 3396 dma_p->kaddrp = kaddrp; 3397 3398 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3399 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3400 "kaddr $%p alength %d", 3401 dma_p, 3402 kaddrp, 3403 dma_p->alength)); 3404 break; 3405 } 3406 break; 3407 3408 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3409 case B_TRUE: 3410 kaddrp = (caddr_t)contig_mem_alloc(length); 3411 if (kaddrp == NULL) { 3412 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3413 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3414 ddi_dma_free_handle(&dma_p->dma_handle); 3415 return (NXGE_ERROR | NXGE_DDI_FAILED); 3416 } 3417 3418 dma_p->alength = length; 3419 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3420 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3421 &dma_p->dma_cookie, &dma_p->ncookies); 3422 if (ddi_status != DDI_DMA_MAPPED) { 3423 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3424 "nxge_dma_mem_alloc:ddi_dma_addr_bind failed " 3425 "(status 0x%x ncookies %d.)", ddi_status, 3426 dma_p->ncookies)); 3427 3428 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3429 "==> nxge_dma_mem_alloc: (not mapped)" 3430 "length %lu (0x%x) " 3431 "free contig kaddrp $%p " 3432 "va_to_pa $%p", 3433 length, length, 3434 kaddrp, 3435 va_to_pa(kaddrp))); 3436 3437 3438 contig_mem_free((void *)kaddrp, length); 3439 ddi_dma_free_handle(&dma_p->dma_handle); 3440 3441 dma_p->dma_handle = NULL; 3442 dma_p->acc_handle = NULL; 3443 dma_p->alength = 0; 3444 dma_p->kaddrp = NULL; 3445 3446 return (NXGE_ERROR | NXGE_DDI_FAILED); 3447 } 3448 3449 if (dma_p->ncookies != 1 || 3450 (dma_p->dma_cookie.dmac_laddress == 0)) { 3451 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3452 "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 " 3453 "cookie or " 3454 "dmac_laddress is NULL $%p size %d " 3455 " (status 0x%x ncookies %d.)", 3456 dma_p->dma_cookie.dmac_laddress, 3457 dma_p->dma_cookie.dmac_size, 3458 ddi_status, 3459 dma_p->ncookies)); 3460 3461 contig_mem_free((void *)kaddrp, length); 3462 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3463 ddi_dma_free_handle(&dma_p->dma_handle); 3464 3465 dma_p->alength = 0; 3466 dma_p->dma_handle = NULL; 3467 dma_p->acc_handle = NULL; 3468 dma_p->kaddrp = NULL; 3469 3470 return (NXGE_ERROR | NXGE_DDI_FAILED); 3471 } 3472 break; 3473 3474 #else 3475 case B_TRUE: 3476 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3477 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 3478 return (NXGE_ERROR | NXGE_DDI_FAILED); 3479 #endif 3480 } 3481 3482 dma_p->kaddrp = kaddrp; 3483 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3484 dma_p->alength - RXBUF_64B_ALIGNED; 3485 #if defined(__i386) 3486 dma_p->ioaddr_pp = 3487 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 3488 #else 3489 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3490 #endif 3491 dma_p->last_ioaddr_pp = 3492 #if defined(__i386) 3493 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress + 3494 #else 3495
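/* Non-i386: the 64-bit cookie address is used as-is, without the truncating cast. */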
(unsigned char *)dma_p->dma_cookie.dmac_laddress + 3496 #endif 3497 dma_p->alength - RXBUF_64B_ALIGNED; 3498 3499 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 3500 3501 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3502 dma_p->orig_ioaddr_pp = 3503 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3504 dma_p->orig_alength = length; 3505 dma_p->orig_kaddrp = kaddrp; 3506 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 3507 #endif 3508 3509 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 3510 "dma buffer allocated: dma_p $%p " 3511 "return dmac_laddress from cookie $%p cookie dmac_size %d " 3512 "dma_p->ioaddr_p $%p " 3513 "dma_p->orig_ioaddr_p $%p " 3514 "orig_vatopa $%p " 3515 "alength %d (0x%x) " 3516 "kaddrp $%p " 3517 "length %d (0x%x)", 3518 dma_p, 3519 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 3520 dma_p->ioaddr_pp, 3521 dma_p->orig_ioaddr_pp, 3522 dma_p->orig_vatopa, 3523 dma_p->alength, dma_p->alength, 3524 kaddrp, 3525 length, length)); 3526 3527 return (NXGE_OK); 3528 } 3529 3530 static void 3531 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 3532 { 3533 if (dma_p->dma_handle != NULL) { 3534 if (dma_p->ncookies) { 3535 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3536 dma_p->ncookies = 0; 3537 } 3538 ddi_dma_free_handle(&dma_p->dma_handle); 3539 dma_p->dma_handle = NULL; 3540 } 3541 3542 if (dma_p->acc_handle != NULL) { 3543 ddi_dma_mem_free(&dma_p->acc_handle); 3544 dma_p->acc_handle = NULL; 3545 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3546 } 3547 3548 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3549 if (dma_p->contig_alloc_type && 3550 dma_p->orig_kaddrp && dma_p->orig_alength) { 3551 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3552 "kaddrp $%p (orig_kaddrp $%p)" 3553 "mem type %d " 3554 "orig_alength %d " 3555 "alength 0x%x (%d)", 3556 dma_p->kaddrp, 3557 dma_p->orig_kaddrp, 3558 dma_p->contig_alloc_type, 3559 dma_p->orig_alength, 3560 dma_p->alength, dma_p->alength)); 3561 3562 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 3563 dma_p->orig_alength = 0; 3564 dma_p->orig_kaddrp = NULL; 3565 dma_p->contig_alloc_type = B_FALSE; 3566 } 3567 #endif 3568 dma_p->kaddrp = NULL; 3569 dma_p->alength = 0; 3570 } 3571 3572 static void 3573 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p) 3574 { 3575 uint64_t kaddr; 3576 uint32_t buf_size; 3577 3578 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf")); 3579 3580 if (dma_p->dma_handle != NULL) { 3581 if (dma_p->ncookies) { 3582 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3583 dma_p->ncookies = 0; 3584 } 3585 ddi_dma_free_handle(&dma_p->dma_handle); 3586 dma_p->dma_handle = NULL; 3587 } 3588 3589 if (dma_p->acc_handle != NULL) { 3590 ddi_dma_mem_free(&dma_p->acc_handle); 3591 dma_p->acc_handle = NULL; 3592 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3593 } 3594 3595 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3596 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d", 3597 dma_p, 3598 dma_p->buf_alloc_state)); 3599 3600 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) { 3601 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3602 "<== nxge_dma_free_rx_data_buf: " 3603 "outstanding data buffers")); 3604 return; 3605 } 3606 3607 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3608 if (dma_p->contig_alloc_type && 3609 dma_p->orig_kaddrp && dma_p->orig_alength) { 3610 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: " 3611 "kaddrp $%p (orig_kaddrp $%p)" 3612 "mem type %d " 3613 "orig_alength %d " 3614 "alength 0x%x (%d)", 3615 dma_p->kaddrp, 3616 dma_p->orig_kaddrp,
3617 dma_p->contig_alloc_type, 3618 dma_p->orig_alength, 3619 dma_p->alength, dma_p->alength)); 3620 3621 kaddr = (uint64_t)dma_p->orig_kaddrp; 3622 buf_size = dma_p->orig_alength; 3623 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size); 3624 dma_p->orig_alength = 0; 3625 dma_p->orig_kaddrp = NULL; 3626 dma_p->contig_alloc_type = B_FALSE; 3627 dma_p->kaddrp = NULL; 3628 dma_p->alength = 0; 3629 return; 3630 } 3631 #endif 3632 3633 if (dma_p->kmem_alloc_type) { 3634 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3635 "nxge_dma_free_rx_data_buf: free kmem " 3636 "kaddrp $%p (orig_kaddrp $%p)" 3637 "alloc type %d " 3638 "orig_alength %d " 3639 "alength 0x%x (%d)", 3640 dma_p->kaddrp, 3641 dma_p->orig_kaddrp, 3642 dma_p->kmem_alloc_type, 3643 dma_p->orig_alength, 3644 dma_p->alength, dma_p->alength)); 3645 #if defined(__i386) 3646 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp; 3647 #else 3648 kaddr = (uint64_t)dma_p->kaddrp; 3649 #endif 3650 buf_size = dma_p->orig_alength; 3651 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3652 "nxge_dma_free_rx_data_buf: free dmap $%p " 3653 "kaddr $%p buf_size %d", 3654 dma_p, 3655 kaddr, buf_size)); 3656 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size); 3657 dma_p->alength = 0; 3658 dma_p->orig_alength = 0; 3659 dma_p->kaddrp = NULL; 3660 dma_p->kmem_alloc_type = B_FALSE; 3661 } 3662 3663 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf")); 3664 } 3665 3666 /* 3667 * nxge_m_start() -- start transmitting and receiving. 3668 * 3669 * This function is called by the MAC layer when the first 3670 * stream is opened, to prepare the hardware for sending 3671 * and receiving packets. 3672 */ 3673 static int 3674 nxge_m_start(void *arg) 3675 { 3676 p_nxge_t nxgep = (p_nxge_t)arg; 3677 3678 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 3679 3680 /* 3681 * Are we already started? 3682 */ 3683 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 3684 return (0); 3685 } 3686 3687 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) { 3688 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 3689 } 3690 3691 /* 3692 * Make sure RX MAC is disabled while we initialize. 3693 */ 3694 if (!isLDOMguest(nxgep)) { 3695 (void) nxge_rx_mac_disable(nxgep); 3696 } 3697 3698 /* 3699 * Grab the global lock. 3700 */ 3701 MUTEX_ENTER(nxgep->genlock); 3702 3703 /* 3704 * Initialize the driver and hardware. 3705 */ 3706 if (nxge_init(nxgep) != NXGE_OK) { 3707 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3708 "<== nxge_m_start: initialization failed")); 3709 MUTEX_EXIT(nxgep->genlock); 3710 return (EIO); 3711 } 3712 3713 /* 3714 * Start the timer that checks for system errors and tx hangs. 3715 */ 3716 if (!isLDOMguest(nxgep)) 3717 nxgep->nxge_timerid = nxge_start_timer(nxgep, 3718 nxge_check_hw_state, NXGE_CHECK_TIMER); 3719 #if defined(sun4v) 3720 else 3721 nxge_hio_start_timer(nxgep); 3722 #endif 3723 3724 nxgep->link_notify = B_TRUE; 3725 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 3726 3727 /* 3728 * Let the global lock go, since we are initialized. 3729 */ 3730 MUTEX_EXIT(nxgep->genlock); 3731 3732 /* 3733 * Let the MAC start receiving packets, now that 3734 * we are initialized. 3735 */ 3736 if (!isLDOMguest(nxgep)) { 3737 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 3738 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3739 "<== nxge_m_start: enable of RX mac failed")); 3740 return (EIO); 3741 } 3742 3743 /* 3744 * Enable hardware interrupts. 3745 */ 3746 nxge_intr_hw_enable(nxgep); 3747 } 3748 #if defined(sun4v) 3749 else { 3750 /* 3751 * In guest domain we enable RDCs and their interrupts as 3752 * the last step.
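* (A guest domain does not own the MAC, so the RX MAC and hardware-interrupt steps above are skipped; its shared RDCs are brought up only once the rest of the initialization has succeeded.)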
3753 */ 3754 if (nxge_hio_rdc_enable(nxgep) != NXGE_OK) { 3755 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3756 "<== nxge_m_start: enable of RDCs failed")); 3757 return (EIO); 3758 } 3759 3760 if (nxge_hio_rdc_intr_arm(nxgep, B_TRUE) != NXGE_OK) { 3761 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3762 "<== nxge_m_start: intrs enable for RDCs failed")); 3763 return (EIO); 3764 } 3765 } 3766 #endif 3767 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3768 return (0); 3769 } 3770 3771 static boolean_t 3772 nxge_check_groups_stopped(p_nxge_t nxgep) 3773 { 3774 int i; 3775 3776 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 3777 if (nxgep->rx_hio_groups[i].started) 3778 return (B_FALSE); 3779 } 3780 3781 return (B_TRUE); 3782 } 3783 3784 /* 3785 * nxge_m_stop(): stop transmitting and receiving. 3786 */ 3787 static void 3788 nxge_m_stop(void *arg) 3789 { 3790 p_nxge_t nxgep = (p_nxge_t)arg; 3791 boolean_t groups_stopped; 3792 3793 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3794 3795 /* 3796 * Are the groups stopped? 3797 */ 3798 groups_stopped = nxge_check_groups_stopped(nxgep); 3799 ASSERT(groups_stopped == B_TRUE); 3800 if (!groups_stopped) { 3801 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n", 3802 nxgep->instance); 3803 return; 3804 } 3805 3806 if (!isLDOMguest(nxgep)) { 3807 /* 3808 * Disable the RX mac. 3809 */ 3810 (void) nxge_rx_mac_disable(nxgep); 3811 3812 /* 3813 * Wait for the IPP to drain. 3814 */ 3815 (void) nxge_ipp_drain(nxgep); 3816 3817 /* 3818 * Disable hardware interrupts. 3819 */ 3820 nxge_intr_hw_disable(nxgep); 3821 } 3822 #if defined(sun4v) 3823 else { 3824 (void) nxge_hio_rdc_intr_arm(nxgep, B_FALSE); 3825 } 3826 #endif 3827 3828 /* 3829 * Grab the global lock. 3830 */ 3831 MUTEX_ENTER(nxgep->genlock); 3832 3833 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3834 if (nxgep->nxge_timerid) { 3835 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3836 nxgep->nxge_timerid = 0; 3837 } 3838 3839 /* 3840 * Clean up. 3841 */ 3842 nxge_uninit(nxgep); 3843 3844 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3845 3846 /* 3847 * Let go of the global lock. 
3848 */ 3849 MUTEX_EXIT(nxgep->genlock); 3850 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3851 } 3852 3853 static int 3854 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3855 { 3856 p_nxge_t nxgep = (p_nxge_t)arg; 3857 struct ether_addr addrp; 3858 3859 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3860 "==> nxge_m_multicst: add %d", add)); 3861 3862 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3863 if (add) { 3864 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3865 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3866 "<== nxge_m_multicst: add multicast failed")); 3867 return (EINVAL); 3868 } 3869 } else { 3870 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3871 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3872 "<== nxge_m_multicst: del multicast failed")); 3873 return (EINVAL); 3874 } 3875 } 3876 3877 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3878 3879 return (0); 3880 } 3881 3882 static int 3883 nxge_m_promisc(void *arg, boolean_t on) 3884 { 3885 p_nxge_t nxgep = (p_nxge_t)arg; 3886 3887 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3888 "==> nxge_m_promisc: on %d", on)); 3889 3890 if (nxge_set_promisc(nxgep, on)) { 3891 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3892 "<== nxge_m_promisc: set promisc failed")); 3893 return (EINVAL); 3894 } 3895 3896 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3897 "<== nxge_m_promisc: on %d", on)); 3898 3899 return (0); 3900 } 3901 3902 static void 3903 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3904 { 3905 p_nxge_t nxgep = (p_nxge_t)arg; 3906 struct iocblk *iocp; 3907 boolean_t need_privilege; 3908 int err; 3909 int cmd; 3910 3911 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3912 3913 iocp = (struct iocblk *)mp->b_rptr; 3914 iocp->ioc_error = 0; 3915 need_privilege = B_TRUE; 3916 cmd = iocp->ioc_cmd; 3917 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3918 switch (cmd) { 3919 default: 3920 miocnak(wq, mp, 0, EINVAL); 3921 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3922 return; 3923 3924 case LB_GET_INFO_SIZE: 3925 case LB_GET_INFO: 3926 case LB_GET_MODE: 3927 need_privilege = B_FALSE; 3928 break; 3929 case LB_SET_MODE: 3930 break; 3931 3932 3933 case NXGE_GET_MII: 3934 case NXGE_PUT_MII: 3935 case NXGE_GET64: 3936 case NXGE_PUT64: 3937 case NXGE_GET_TX_RING_SZ: 3938 case NXGE_GET_TX_DESC: 3939 case NXGE_TX_SIDE_RESET: 3940 case NXGE_RX_SIDE_RESET: 3941 case NXGE_GLOBAL_RESET: 3942 case NXGE_RESET_MAC: 3943 case NXGE_TX_REGS_DUMP: 3944 case NXGE_RX_REGS_DUMP: 3945 case NXGE_INT_REGS_DUMP: 3946 case NXGE_VIR_INT_REGS_DUMP: 3947 case NXGE_PUT_TCAM: 3948 case NXGE_GET_TCAM: 3949 case NXGE_RTRACE: 3950 case NXGE_RDUMP: 3951 3952 need_privilege = B_FALSE; 3953 break; 3954 case NXGE_INJECT_ERR: 3955 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3956 nxge_err_inject(nxgep, wq, mp); 3957 break; 3958 } 3959 3960 if (need_privilege) { 3961 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3962 if (err != 0) { 3963 miocnak(wq, mp, 0, err); 3964 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3965 "<== nxge_m_ioctl: no priv")); 3966 return; 3967 } 3968 } 3969 3970 switch (cmd) { 3971 3972 case LB_GET_MODE: 3973 case LB_SET_MODE: 3974 case LB_GET_INFO_SIZE: 3975 case LB_GET_INFO: 3976 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3977 break; 3978 3979 case NXGE_GET_MII: 3980 case NXGE_PUT_MII: 3981 case NXGE_PUT_TCAM: 3982 case NXGE_GET_TCAM: 3983 case NXGE_GET64: 3984 case NXGE_PUT64: 3985 case NXGE_GET_TX_RING_SZ: 3986 case NXGE_GET_TX_DESC: 3987 case NXGE_TX_SIDE_RESET: 3988 case NXGE_RX_SIDE_RESET: 3989 case NXGE_GLOBAL_RESET: 3990 case 
NXGE_RESET_MAC:
	case NXGE_TX_REGS_DUMP:
	case NXGE_RX_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
	case NXGE_VIR_INT_REGS_DUMP:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_ioctl: cmd 0x%x", cmd));
		nxge_hw_ioctl(nxgep, wq, mp, iocp);
		break;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
}

extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);

void
nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory)
{
	p_nxge_mmac_stats_t mmac_stats;
	int i;
	nxge_mmac_t *mmac_info;

	mmac_info = &nxgep->nxge_mmac_info;

	mmac_stats = &nxgep->statsp->mmac_stats;
	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;

	for (i = 0; i < ETHERADDRL; i++) {
		if (factory) {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->factory_mac_pool[slot][
			    (ETHERADDRL-1) - i];
		} else {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->mac_pool[slot].addr[
			    (ETHERADDRL - 1) - i];
		}
	}
}

/*
 * nxge_altmac_set() -- Set an alternate MAC address
 */
static int
nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot,
	int rdctbl, boolean_t usetbl)
{
	uint8_t addrn;
	uint8_t portn;
	npi_mac_addr_t altmac;
	hostinfo_t mac_rdc;
	p_nxge_class_pt_cfg_t clscfgp;

	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);

	portn = nxgep->mac.portnum;
	addrn = (uint8_t)slot - 1;

	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET,
	    nxgep->function_num, addrn, &altmac) != NPI_SUCCESS)
		return (EIO);

	/*
	 * Set the rdc table number for the host info entry
	 * for this mac address slot.
	 */
	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
	mac_rdc.value = 0;
	if (usetbl)
		mac_rdc.bits.w0.rdc_tbl_num = rdctbl;
	else
		mac_rdc.bits.w0.rdc_tbl_num =
		    clscfgp->mac_host_info[addrn].rdctbl;
	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;

	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
		return (EIO);
	}

	/*
	 * Enable comparison with the alternate MAC address.
	 * While the first alternate addr is enabled by bit 1 of register
	 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
	 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
	 * accordingly before calling npi_mac_altaddr_enable.
	 */
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (npi_mac_altaddr_enable(nxgep->npi_handle,
	    nxgep->function_num, addrn) != NPI_SUCCESS) {
		return (EIO);
	}

	return (0);
}
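/*
 * Illustrative sketch (not part of the driver): how a 48-bit Ethernet
 * address maps onto the three 16-bit words of npi_mac_addr_t, exactly
 * as nxge_altmac_set() does above -- w2 holds the two most significant
 * octets, w0 the two least. The helper name and the guard macro
 * NXGE_EXAMPLE_SKETCH are hypothetical, so this block is never compiled.
 */
#ifdef NXGE_EXAMPLE_SKETCH
static void
nxge_example_pack_mac(const uint8_t *maddr, npi_mac_addr_t *altmac)
{
	/* Big-endian octet order: maddr[0] is the most significant. */
	altmac->w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
	altmac->w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
	altmac->w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
}
#endif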
/*
 * nxge_m_mmac_add_g() - find an unused address slot, set the address
 * value to the one specified, enable the port to start filtering on
 * the new MAC address. Returns 0 on success.
 */
int
nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
	boolean_t usetbl)
{
	p_nxge_t nxgep = arg;
	int slot;
	nxge_mmac_t *mmac_info;
	int err;
	nxge_status_t status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (mmac_info->naddrfree == 0) {
		mutex_exit(nxgep->genlock);
		return (ENOSPC);
	}

	/*
	 * Search for the first available slot. Because naddrfree
	 * is not zero, we are guaranteed to find one.
	 * Each of the first two ports of Neptune has 16 alternate
	 * MAC slots but only the first 7 (of 15) slots have assigned factory
	 * MAC addresses. We first search among the slots without bundled
	 * factory MACs. If we fail to find one in that range, then we
	 * search the slots with bundled factory MACs. A factory MAC
	 * will be wasted while the slot is used with a user MAC address.
	 * But the slot could be used by factory MAC again after calling
	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
	 */
	for (slot = 0; slot <= mmac_info->num_mmac; slot++) {
		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
			break;
	}

	ASSERT(slot <= mmac_info->num_mmac);

	if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl,
	    usetbl)) != 0) {
		mutex_exit(nxgep->genlock);
		return (err);
	}

	bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
	mmac_info->naddrfree--;
	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);

	mutex_exit(nxgep->genlock);
	return (0);
}
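/*
 * Illustrative sketch (not part of the driver): the slot-to-addrn
 * conversion used by nxge_altmac_set() above and nxge_m_mmac_remove()
 * below. XMAC ports number their alternate-address compare-enable bits
 * from 0, BMAC ports from 1, so slot 1 maps to addrn 0 on an XMAC port
 * and to addrn 1 otherwise. The helper name and the guard macro are
 * hypothetical, so this block is never compiled.
 */
#ifdef NXGE_EXAMPLE_SKETCH
static uint8_t
nxge_example_slot_to_addrn(uint8_t portn, int slot)
{
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		return ((uint8_t)slot - 1);	/* XMAC: bit 0 is slot 1 */
	return ((uint8_t)slot);			/* BMAC: bit 1 is slot 1 */
}
#endif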
/*
 * Remove the specified mac address and update the HW not to filter
 * the mac address anymore.
 */
int
nxge_m_mmac_remove(void *arg, int slot)
{
	p_nxge_t nxgep = arg;
	nxge_mmac_t *mmac_info;
	uint8_t addrn;
	uint8_t portn;
	int err = 0;
	nxge_status_t status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}

	portn = nxgep->mac.portnum;
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
		    == NPI_SUCCESS) {
			mmac_info->naddrfree++;
			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
			/*
			 * Regardless of whether the MAC we just stopped
			 * filtering is a user addr or a factory addr, we
			 * must set the MMAC_VENDOR_ADDR flag if this slot
			 * has an associated factory MAC to indicate that
			 * a factory MAC is available.
			 */
			if (slot <= mmac_info->num_factory_mmac) {
				mmac_info->mac_pool[slot].flags
				    |= MMAC_VENDOR_ADDR;
			}
			/*
			 * Clear mac_pool[slot].addr so that kstat shows 0
			 * alternate MAC address if the slot is not used.
			 * (But nxge_m_mmac_get returns the factory MAC even
			 * when the slot is not used!)
			 */
			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
		} else {
			err = EIO;
		}
	} else {
		err = EINVAL;
	}

	mutex_exit(nxgep->genlock);
	return (err);
}

/*
 * The callback to query all the factory addresses. naddr must be the same as
 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and
 * mcm_addr is the space allocated for keeping all the addresses, whose size
 * is naddr * MAXMACADDRLEN.
 */
static void
nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr)
{
	nxge_t *nxgep = arg;
	nxge_mmac_t *mmac_info;
	int i;

	mutex_enter(nxgep->genlock);

	mmac_info = &nxgep->nxge_mmac_info;
	ASSERT(naddr == mmac_info->num_factory_mmac);

	for (i = 0; i < naddr; i++) {
		bcopy(mmac_info->factory_mac_pool[i + 1],
		    addr + i * MAXMACADDRLEN, ETHERADDRL);
	}

	mutex_exit(nxgep->genlock);
}

static boolean_t
nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	nxge_t *nxgep = arg;
	uint32_t *txflags = cap_data;

	switch (cap) {
	case MAC_CAPAB_HCKSUM:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload));
		if (nxge_cksum_offload <= 1) {
			*txflags = HCKSUM_INET_PARTIAL;
		}
		break;

	case MAC_CAPAB_MULTIFACTADDR: {
		mac_capab_multifactaddr_t *mfacp = cap_data;

		if (!isLDOMguest(nxgep)) {
			mutex_enter(nxgep->genlock);
			mfacp->mcm_naddr =
			    nxgep->nxge_mmac_info.num_factory_mmac;
			mfacp->mcm_getaddr = nxge_m_getfactaddr;
			mutex_exit(nxgep->genlock);
		}
		break;
	}

	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *cap_lso = cap_data;

		if (nxgep->soft_lso_enable) {
			if (nxge_cksum_offload <= 1) {
				cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
				if (nxge_lso_max > NXGE_LSO_MAXLEN) {
					nxge_lso_max = NXGE_LSO_MAXLEN;
				}
				cap_lso->lso_basic_tcp_ipv4.lso_max =
				    nxge_lso_max;
			}
			break;
		} else {
			return (B_FALSE);
		}
	}

	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *cap_rings = cap_data;
		p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;

		mutex_enter(nxgep->genlock);
		if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
			if (isLDOMguest(nxgep)) {
				cap_rings->mr_group_type =
				    MAC_GROUP_TYPE_STATIC;
				cap_rings->mr_rnum =
				    NXGE_HIO_SHARE_MAX_CHANNELS;
				cap_rings->mr_rget = nxge_fill_ring;
				cap_rings->mr_gnum = 1;
				cap_rings->mr_gget = nxge_hio_group_get;
				cap_rings->mr_gaddring = NULL;
				cap_rings->mr_gremring = NULL;
			} else {
				/*
				 * Service Domain.
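				 * (Descriptive note, added for clarity: in
				 * the service domain the RX groups are
				 * dynamic, so rings can be moved between
				 * groups at runtime through the mr_gaddring
				 * and mr_gremring entry points filled in
				 * below.)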
4332 */ 4333 cap_rings->mr_group_type = 4334 MAC_GROUP_TYPE_DYNAMIC; 4335 cap_rings->mr_rnum = p_cfgp->max_rdcs; 4336 cap_rings->mr_rget = nxge_fill_ring; 4337 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids; 4338 cap_rings->mr_gget = nxge_hio_group_get; 4339 cap_rings->mr_gaddring = nxge_group_add_ring; 4340 cap_rings->mr_gremring = nxge_group_rem_ring; 4341 } 4342 4343 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4344 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]", 4345 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids)); 4346 } else { 4347 /* 4348 * TX Rings. 4349 */ 4350 if (isLDOMguest(nxgep)) { 4351 cap_rings->mr_group_type = 4352 MAC_GROUP_TYPE_STATIC; 4353 cap_rings->mr_rnum = 4354 NXGE_HIO_SHARE_MAX_CHANNELS; 4355 cap_rings->mr_rget = nxge_fill_ring; 4356 cap_rings->mr_gnum = 0; 4357 cap_rings->mr_gget = NULL; 4358 cap_rings->mr_gaddring = NULL; 4359 cap_rings->mr_gremring = NULL; 4360 } else { 4361 /* 4362 * Service Domain. 4363 */ 4364 cap_rings->mr_group_type = 4365 MAC_GROUP_TYPE_DYNAMIC; 4366 cap_rings->mr_rnum = p_cfgp->tdc.count; 4367 cap_rings->mr_rget = nxge_fill_ring; 4368 4369 /* 4370 * Share capable. 4371 * 4372 * Do not report the default group: hence -1 4373 */ 4374 cap_rings->mr_gnum = 4375 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1; 4376 cap_rings->mr_gget = nxge_hio_group_get; 4377 cap_rings->mr_gaddring = nxge_group_add_ring; 4378 cap_rings->mr_gremring = nxge_group_rem_ring; 4379 } 4380 4381 NXGE_DEBUG_MSG((nxgep, TX_CTL, 4382 "==> nxge_m_getcapab: tx rings # of rings %d", 4383 p_cfgp->tdc.count)); 4384 } 4385 mutex_exit(nxgep->genlock); 4386 break; 4387 } 4388 4389 #if defined(sun4v) 4390 case MAC_CAPAB_SHARES: { 4391 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4392 4393 /* 4394 * Only the service domain driver responds to 4395 * this capability request. 4396 */ 4397 mutex_enter(nxgep->genlock); 4398 if (isLDOMservice(nxgep)) { 4399 mshares->ms_snum = 3; 4400 mshares->ms_handle = (void *)nxgep; 4401 mshares->ms_salloc = nxge_hio_share_alloc; 4402 mshares->ms_sfree = nxge_hio_share_free; 4403 mshares->ms_sadd = nxge_hio_share_add_group; 4404 mshares->ms_sremove = nxge_hio_share_rem_group; 4405 mshares->ms_squery = nxge_hio_share_query; 4406 mshares->ms_sbind = nxge_hio_share_bind; 4407 mshares->ms_sunbind = nxge_hio_share_unbind; 4408 mutex_exit(nxgep->genlock); 4409 } else { 4410 mutex_exit(nxgep->genlock); 4411 return (B_FALSE); 4412 } 4413 break; 4414 } 4415 #endif 4416 default: 4417 return (B_FALSE); 4418 } 4419 return (B_TRUE); 4420 } 4421 4422 static boolean_t 4423 nxge_param_locked(mac_prop_id_t pr_num) 4424 { 4425 /* 4426 * All adv_* parameters are locked (read-only) while 4427 * the device is in any sort of loopback mode ... 
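	 *
	 * (Descriptive note, added for clarity: nxge_m_setprop() below
	 * checks this and returns EBUSY for any of these properties while
	 * lb_mode != nxge_lb_normal.)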
4428 */ 4429 switch (pr_num) { 4430 case MAC_PROP_ADV_1000FDX_CAP: 4431 case MAC_PROP_EN_1000FDX_CAP: 4432 case MAC_PROP_ADV_1000HDX_CAP: 4433 case MAC_PROP_EN_1000HDX_CAP: 4434 case MAC_PROP_ADV_100FDX_CAP: 4435 case MAC_PROP_EN_100FDX_CAP: 4436 case MAC_PROP_ADV_100HDX_CAP: 4437 case MAC_PROP_EN_100HDX_CAP: 4438 case MAC_PROP_ADV_10FDX_CAP: 4439 case MAC_PROP_EN_10FDX_CAP: 4440 case MAC_PROP_ADV_10HDX_CAP: 4441 case MAC_PROP_EN_10HDX_CAP: 4442 case MAC_PROP_AUTONEG: 4443 case MAC_PROP_FLOWCTRL: 4444 return (B_TRUE); 4445 } 4446 return (B_FALSE); 4447 } 4448 4449 /* 4450 * callback functions for set/get of properties 4451 */ 4452 static int 4453 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4454 uint_t pr_valsize, const void *pr_val) 4455 { 4456 nxge_t *nxgep = barg; 4457 p_nxge_param_t param_arr; 4458 p_nxge_stats_t statsp; 4459 int err = 0; 4460 uint8_t val; 4461 uint32_t cur_mtu, new_mtu, old_framesize; 4462 link_flowctrl_t fl; 4463 4464 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4465 param_arr = nxgep->param_arr; 4466 statsp = nxgep->statsp; 4467 mutex_enter(nxgep->genlock); 4468 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4469 nxge_param_locked(pr_num)) { 4470 /* 4471 * All adv_* parameters are locked (read-only) 4472 * while the device is in any sort of loopback mode. 4473 */ 4474 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4475 "==> nxge_m_setprop: loopback mode: read only")); 4476 mutex_exit(nxgep->genlock); 4477 return (EBUSY); 4478 } 4479 4480 val = *(uint8_t *)pr_val; 4481 switch (pr_num) { 4482 case MAC_PROP_EN_1000FDX_CAP: 4483 nxgep->param_en_1000fdx = val; 4484 param_arr[param_anar_1000fdx].value = val; 4485 4486 goto reprogram; 4487 4488 case MAC_PROP_EN_100FDX_CAP: 4489 nxgep->param_en_100fdx = val; 4490 param_arr[param_anar_100fdx].value = val; 4491 4492 goto reprogram; 4493 4494 case MAC_PROP_EN_10FDX_CAP: 4495 nxgep->param_en_10fdx = val; 4496 param_arr[param_anar_10fdx].value = val; 4497 4498 goto reprogram; 4499 4500 case MAC_PROP_EN_1000HDX_CAP: 4501 case MAC_PROP_EN_100HDX_CAP: 4502 case MAC_PROP_EN_10HDX_CAP: 4503 case MAC_PROP_ADV_1000FDX_CAP: 4504 case MAC_PROP_ADV_1000HDX_CAP: 4505 case MAC_PROP_ADV_100FDX_CAP: 4506 case MAC_PROP_ADV_100HDX_CAP: 4507 case MAC_PROP_ADV_10FDX_CAP: 4508 case MAC_PROP_ADV_10HDX_CAP: 4509 case MAC_PROP_STATUS: 4510 case MAC_PROP_SPEED: 4511 case MAC_PROP_DUPLEX: 4512 err = EINVAL; /* cannot set read-only properties */ 4513 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4514 "==> nxge_m_setprop: read only property %d", 4515 pr_num)); 4516 break; 4517 4518 case MAC_PROP_AUTONEG: 4519 param_arr[param_autoneg].value = val; 4520 4521 goto reprogram; 4522 4523 case MAC_PROP_MTU: 4524 cur_mtu = nxgep->mac.default_mtu; 4525 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4526 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4527 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4528 new_mtu, nxgep->mac.is_jumbo)); 4529 4530 if (new_mtu == cur_mtu) { 4531 err = 0; 4532 break; 4533 } 4534 4535 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4536 err = EBUSY; 4537 break; 4538 } 4539 4540 if ((new_mtu < NXGE_DEFAULT_MTU) || 4541 (new_mtu > NXGE_MAXIMUM_MTU)) { 4542 err = EINVAL; 4543 break; 4544 } 4545 4546 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4547 nxgep->mac.maxframesize = (uint16_t) 4548 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4549 if (nxge_mac_set_framesize(nxgep)) { 4550 nxgep->mac.maxframesize = 4551 (uint16_t)old_framesize; 4552 err = EINVAL; 4553 break; 4554 } 4555 4556 err = mac_maxsdu_update(nxgep->mach, new_mtu); 4557 if (err) 
{ 4558 nxgep->mac.maxframesize = 4559 (uint16_t)old_framesize; 4560 err = EINVAL; 4561 break; 4562 } 4563 4564 nxgep->mac.default_mtu = new_mtu; 4565 if (new_mtu > NXGE_DEFAULT_MTU) 4566 nxgep->mac.is_jumbo = B_TRUE; 4567 else 4568 nxgep->mac.is_jumbo = B_FALSE; 4569 4570 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4571 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4572 new_mtu, nxgep->mac.maxframesize)); 4573 break; 4574 4575 case MAC_PROP_FLOWCTRL: 4576 bcopy(pr_val, &fl, sizeof (fl)); 4577 switch (fl) { 4578 default: 4579 err = EINVAL; 4580 break; 4581 4582 case LINK_FLOWCTRL_NONE: 4583 param_arr[param_anar_pause].value = 0; 4584 break; 4585 4586 case LINK_FLOWCTRL_RX: 4587 param_arr[param_anar_pause].value = 1; 4588 break; 4589 4590 case LINK_FLOWCTRL_TX: 4591 case LINK_FLOWCTRL_BI: 4592 err = EINVAL; 4593 break; 4594 } 4595 4596 reprogram: 4597 if (err == 0) { 4598 if (!nxge_param_link_update(nxgep)) { 4599 err = EINVAL; 4600 } 4601 } 4602 break; 4603 case MAC_PROP_PRIVATE: 4604 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4605 "==> nxge_m_setprop: private property")); 4606 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 4607 pr_val); 4608 break; 4609 4610 default: 4611 err = ENOTSUP; 4612 break; 4613 } 4614 4615 mutex_exit(nxgep->genlock); 4616 4617 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4618 "<== nxge_m_setprop (return %d)", err)); 4619 return (err); 4620 } 4621 4622 static int 4623 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4624 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 4625 { 4626 nxge_t *nxgep = barg; 4627 p_nxge_param_t param_arr = nxgep->param_arr; 4628 p_nxge_stats_t statsp = nxgep->statsp; 4629 int err = 0; 4630 link_flowctrl_t fl; 4631 uint64_t tmp = 0; 4632 link_state_t ls; 4633 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4634 4635 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4636 "==> nxge_m_getprop: pr_num %d", pr_num)); 4637 4638 if (pr_valsize == 0) 4639 return (EINVAL); 4640 4641 *perm = MAC_PROP_PERM_RW; 4642 4643 if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) { 4644 err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4645 return (err); 4646 } 4647 4648 bzero(pr_val, pr_valsize); 4649 switch (pr_num) { 4650 case MAC_PROP_DUPLEX: 4651 *perm = MAC_PROP_PERM_READ; 4652 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4653 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4654 "==> nxge_m_getprop: duplex mode %d", 4655 *(uint8_t *)pr_val)); 4656 break; 4657 4658 case MAC_PROP_SPEED: 4659 if (pr_valsize < sizeof (uint64_t)) 4660 return (EINVAL); 4661 *perm = MAC_PROP_PERM_READ; 4662 tmp = statsp->mac_stats.link_speed * 1000000ull; 4663 bcopy(&tmp, pr_val, sizeof (tmp)); 4664 break; 4665 4666 case MAC_PROP_STATUS: 4667 if (pr_valsize < sizeof (link_state_t)) 4668 return (EINVAL); 4669 *perm = MAC_PROP_PERM_READ; 4670 if (!statsp->mac_stats.link_up) 4671 ls = LINK_STATE_DOWN; 4672 else 4673 ls = LINK_STATE_UP; 4674 bcopy(&ls, pr_val, sizeof (ls)); 4675 break; 4676 4677 case MAC_PROP_AUTONEG: 4678 *(uint8_t *)pr_val = 4679 param_arr[param_autoneg].value; 4680 break; 4681 4682 case MAC_PROP_FLOWCTRL: 4683 if (pr_valsize < sizeof (link_flowctrl_t)) 4684 return (EINVAL); 4685 4686 fl = LINK_FLOWCTRL_NONE; 4687 if (param_arr[param_anar_pause].value) { 4688 fl = LINK_FLOWCTRL_RX; 4689 } 4690 bcopy(&fl, pr_val, sizeof (fl)); 4691 break; 4692 4693 case MAC_PROP_ADV_1000FDX_CAP: 4694 *perm = MAC_PROP_PERM_READ; 4695 *(uint8_t *)pr_val = 4696 param_arr[param_anar_1000fdx].value; 4697 break; 4698 4699 case MAC_PROP_EN_1000FDX_CAP: 4700 *(uint8_t *)pr_val = 
nxgep->param_en_1000fdx; 4701 break; 4702 4703 case MAC_PROP_ADV_100FDX_CAP: 4704 *perm = MAC_PROP_PERM_READ; 4705 *(uint8_t *)pr_val = 4706 param_arr[param_anar_100fdx].value; 4707 break; 4708 4709 case MAC_PROP_EN_100FDX_CAP: 4710 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4711 break; 4712 4713 case MAC_PROP_ADV_10FDX_CAP: 4714 *perm = MAC_PROP_PERM_READ; 4715 *(uint8_t *)pr_val = 4716 param_arr[param_anar_10fdx].value; 4717 break; 4718 4719 case MAC_PROP_EN_10FDX_CAP: 4720 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4721 break; 4722 4723 case MAC_PROP_EN_1000HDX_CAP: 4724 case MAC_PROP_EN_100HDX_CAP: 4725 case MAC_PROP_EN_10HDX_CAP: 4726 case MAC_PROP_ADV_1000HDX_CAP: 4727 case MAC_PROP_ADV_100HDX_CAP: 4728 case MAC_PROP_ADV_10HDX_CAP: 4729 err = ENOTSUP; 4730 break; 4731 4732 case MAC_PROP_PRIVATE: 4733 err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4734 pr_valsize, pr_val, perm); 4735 break; 4736 4737 case MAC_PROP_MTU: { 4738 mac_propval_range_t range; 4739 4740 if (!(pr_flags & MAC_PROP_POSSIBLE)) 4741 return (ENOTSUP); 4742 if (pr_valsize < sizeof (mac_propval_range_t)) 4743 return (EINVAL); 4744 range.mpr_count = 1; 4745 range.mpr_type = MAC_PROPVAL_UINT32; 4746 range.range_uint32[0].mpur_min = 4747 range.range_uint32[0].mpur_max = NXGE_DEFAULT_MTU; 4748 if (nxgep->mac.is_jumbo) 4749 range.range_uint32[0].mpur_max = 4750 NXGE_MAXIMUM_MTU; 4751 bcopy(&range, pr_val, sizeof (range)); 4752 break; 4753 } 4754 default: 4755 err = EINVAL; 4756 break; 4757 } 4758 4759 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4760 4761 return (err); 4762 } 4763 4764 /* ARGSUSED */ 4765 static int 4766 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4767 const void *pr_val) 4768 { 4769 p_nxge_param_t param_arr = nxgep->param_arr; 4770 int err = 0; 4771 long result; 4772 4773 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4774 "==> nxge_set_priv_prop: name %s", pr_name)); 4775 4776 /* Blanking */ 4777 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4778 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4779 (char *)pr_val, 4780 (caddr_t)¶m_arr[param_rxdma_intr_time]); 4781 if (err) { 4782 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4783 "<== nxge_set_priv_prop: " 4784 "unable to set (%s)", pr_name)); 4785 err = EINVAL; 4786 } else { 4787 err = 0; 4788 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4789 "<== nxge_set_priv_prop: " 4790 "set (%s)", pr_name)); 4791 } 4792 4793 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4794 "<== nxge_set_priv_prop: name %s (value %d)", 4795 pr_name, result)); 4796 4797 return (err); 4798 } 4799 4800 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4801 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4802 (char *)pr_val, 4803 (caddr_t)¶m_arr[param_rxdma_intr_pkts]); 4804 if (err) { 4805 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4806 "<== nxge_set_priv_prop: " 4807 "unable to set (%s)", pr_name)); 4808 err = EINVAL; 4809 } else { 4810 err = 0; 4811 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4812 "<== nxge_set_priv_prop: " 4813 "set (%s)", pr_name)); 4814 } 4815 4816 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4817 "<== nxge_set_priv_prop: name %s (value %d)", 4818 pr_name, result)); 4819 4820 return (err); 4821 } 4822 4823 /* Classification */ 4824 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4825 if (pr_val == NULL) { 4826 err = EINVAL; 4827 return (err); 4828 } 4829 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4830 4831 err = nxge_param_set_ip_opt(nxgep, NULL, 4832 NULL, (char *)pr_val, 4833 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 4834 4835 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4836 "<== 
nxge_set_priv_prop: name %s (value 0x%x)", 4837 pr_name, result)); 4838 4839 return (err); 4840 } 4841 4842 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 4843 if (pr_val == NULL) { 4844 err = EINVAL; 4845 return (err); 4846 } 4847 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4848 4849 err = nxge_param_set_ip_opt(nxgep, NULL, 4850 NULL, (char *)pr_val, 4851 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 4852 4853 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4854 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4855 pr_name, result)); 4856 4857 return (err); 4858 } 4859 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 4860 if (pr_val == NULL) { 4861 err = EINVAL; 4862 return (err); 4863 } 4864 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4865 4866 err = nxge_param_set_ip_opt(nxgep, NULL, 4867 NULL, (char *)pr_val, 4868 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 4869 4870 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4871 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4872 pr_name, result)); 4873 4874 return (err); 4875 } 4876 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 4877 if (pr_val == NULL) { 4878 err = EINVAL; 4879 return (err); 4880 } 4881 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4882 4883 err = nxge_param_set_ip_opt(nxgep, NULL, 4884 NULL, (char *)pr_val, 4885 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 4886 4887 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4888 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4889 pr_name, result)); 4890 4891 return (err); 4892 } 4893 4894 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 4895 if (pr_val == NULL) { 4896 err = EINVAL; 4897 return (err); 4898 } 4899 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4900 4901 err = nxge_param_set_ip_opt(nxgep, NULL, 4902 NULL, (char *)pr_val, 4903 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 4904 4905 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4906 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4907 pr_name, result)); 4908 4909 return (err); 4910 } 4911 4912 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 4913 if (pr_val == NULL) { 4914 err = EINVAL; 4915 return (err); 4916 } 4917 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4918 4919 err = nxge_param_set_ip_opt(nxgep, NULL, 4920 NULL, (char *)pr_val, 4921 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 4922 4923 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4924 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4925 pr_name, result)); 4926 4927 return (err); 4928 } 4929 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 4930 if (pr_val == NULL) { 4931 err = EINVAL; 4932 return (err); 4933 } 4934 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4935 4936 err = nxge_param_set_ip_opt(nxgep, NULL, 4937 NULL, (char *)pr_val, 4938 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 4939 4940 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4941 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4942 pr_name, result)); 4943 4944 return (err); 4945 } 4946 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 4947 if (pr_val == NULL) { 4948 err = EINVAL; 4949 return (err); 4950 } 4951 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4952 4953 err = nxge_param_set_ip_opt(nxgep, NULL, 4954 NULL, (char *)pr_val, 4955 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 4956 4957 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4958 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4959 pr_name, result)); 4960 4961 return (err); 4962 } 4963 4964 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 4965 if (pr_val == NULL) { 4966 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4967 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 4968 
err = EINVAL; 4969 return (err); 4970 } 4971 4972 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4973 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4974 "<== nxge_set_priv_prop: name %s " 4975 "(lso %d pr_val %s value %d)", 4976 pr_name, nxgep->soft_lso_enable, pr_val, result)); 4977 4978 if (result > 1 || result < 0) { 4979 err = EINVAL; 4980 } else { 4981 if (nxgep->soft_lso_enable == (uint32_t)result) { 4982 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4983 "no change (%d %d)", 4984 nxgep->soft_lso_enable, result)); 4985 return (0); 4986 } 4987 } 4988 4989 nxgep->soft_lso_enable = (int)result; 4990 4991 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4992 "<== nxge_set_priv_prop: name %s (value %d)", 4993 pr_name, result)); 4994 4995 return (err); 4996 } 4997 /* 4998 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the 4999 * following code to be executed. 5000 */ 5001 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5002 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5003 (caddr_t)¶m_arr[param_anar_10gfdx]); 5004 return (err); 5005 } 5006 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5007 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5008 (caddr_t)¶m_arr[param_anar_pause]); 5009 return (err); 5010 } 5011 5012 return (EINVAL); 5013 } 5014 5015 static int 5016 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags, 5017 uint_t pr_valsize, void *pr_val, uint_t *perm) 5018 { 5019 p_nxge_param_t param_arr = nxgep->param_arr; 5020 char valstr[MAXNAMELEN]; 5021 int err = EINVAL; 5022 uint_t strsize; 5023 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 5024 5025 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5026 "==> nxge_get_priv_prop: property %s", pr_name)); 5027 5028 /* function number */ 5029 if (strcmp(pr_name, "_function_number") == 0) { 5030 if (is_default) 5031 return (ENOTSUP); 5032 *perm = MAC_PROP_PERM_READ; 5033 (void) snprintf(valstr, sizeof (valstr), "%d", 5034 nxgep->function_num); 5035 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5036 "==> nxge_get_priv_prop: name %s " 5037 "(value %d valstr %s)", 5038 pr_name, nxgep->function_num, valstr)); 5039 5040 err = 0; 5041 goto done; 5042 } 5043 5044 /* Neptune firmware version */ 5045 if (strcmp(pr_name, "_fw_version") == 0) { 5046 if (is_default) 5047 return (ENOTSUP); 5048 *perm = MAC_PROP_PERM_READ; 5049 (void) snprintf(valstr, sizeof (valstr), "%s", 5050 nxgep->vpd_info.ver); 5051 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5052 "==> nxge_get_priv_prop: name %s " 5053 "(value %d valstr %s)", 5054 pr_name, nxgep->vpd_info.ver, valstr)); 5055 5056 err = 0; 5057 goto done; 5058 } 5059 5060 /* port PHY mode */ 5061 if (strcmp(pr_name, "_port_mode") == 0) { 5062 if (is_default) 5063 return (ENOTSUP); 5064 *perm = MAC_PROP_PERM_READ; 5065 switch (nxgep->mac.portmode) { 5066 case PORT_1G_COPPER: 5067 (void) snprintf(valstr, sizeof (valstr), "1G copper %s", 5068 nxgep->hot_swappable_phy ? 5069 "[Hot Swappable]" : ""); 5070 break; 5071 case PORT_1G_FIBER: 5072 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s", 5073 nxgep->hot_swappable_phy ? 5074 "[hot swappable]" : ""); 5075 break; 5076 case PORT_10G_COPPER: 5077 (void) snprintf(valstr, sizeof (valstr), 5078 "10G copper %s", 5079 nxgep->hot_swappable_phy ? 5080 "[hot swappable]" : ""); 5081 break; 5082 case PORT_10G_FIBER: 5083 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s", 5084 nxgep->hot_swappable_phy ? 5085 "[hot swappable]" : ""); 5086 break; 5087 case PORT_10G_SERDES: 5088 (void) snprintf(valstr, sizeof (valstr), 5089 "10G serdes %s", nxgep->hot_swappable_phy ? 
5090 "[hot swappable]" : ""); 5091 break; 5092 case PORT_1G_SERDES: 5093 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s", 5094 nxgep->hot_swappable_phy ? 5095 "[hot swappable]" : ""); 5096 break; 5097 case PORT_1G_TN1010: 5098 (void) snprintf(valstr, sizeof (valstr), 5099 "1G TN1010 copper %s", nxgep->hot_swappable_phy ? 5100 "[hot swappable]" : ""); 5101 break; 5102 case PORT_10G_TN1010: 5103 (void) snprintf(valstr, sizeof (valstr), 5104 "10G TN1010 copper %s", nxgep->hot_swappable_phy ? 5105 "[hot swappable]" : ""); 5106 break; 5107 case PORT_1G_RGMII_FIBER: 5108 (void) snprintf(valstr, sizeof (valstr), 5109 "1G rgmii fiber %s", nxgep->hot_swappable_phy ? 5110 "[hot swappable]" : ""); 5111 break; 5112 case PORT_HSP_MODE: 5113 (void) snprintf(valstr, sizeof (valstr), 5114 "phy not present[hot swappable]"); 5115 break; 5116 default: 5117 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5118 nxgep->hot_swappable_phy ? 5119 "[hot swappable]" : ""); 5120 break; 5121 } 5122 5123 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5124 "==> nxge_get_priv_prop: name %s (value %s)", 5125 pr_name, valstr)); 5126 5127 err = 0; 5128 goto done; 5129 } 5130 5131 /* Hot swappable PHY */ 5132 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5133 if (is_default) 5134 return (ENOTSUP); 5135 *perm = MAC_PROP_PERM_READ; 5136 (void) snprintf(valstr, sizeof (valstr), "%s", 5137 nxgep->hot_swappable_phy ? 5138 "yes" : "no"); 5139 5140 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5141 "==> nxge_get_priv_prop: name %s " 5142 "(value %d valstr %s)", 5143 pr_name, nxgep->hot_swappable_phy, valstr)); 5144 5145 err = 0; 5146 goto done; 5147 } 5148 5149 5150 /* Receive Interrupt Blanking Parameters */ 5151 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5152 err = 0; 5153 if (is_default) { 5154 (void) snprintf(valstr, sizeof (valstr), 5155 "%d", RXDMA_RCR_TO_DEFAULT); 5156 goto done; 5157 } 5158 5159 (void) snprintf(valstr, sizeof (valstr), "%d", 5160 nxgep->intr_timeout); 5161 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5162 "==> nxge_get_priv_prop: name %s (value %d)", 5163 pr_name, 5164 (uint32_t)nxgep->intr_timeout)); 5165 goto done; 5166 } 5167 5168 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5169 err = 0; 5170 if (is_default) { 5171 (void) snprintf(valstr, sizeof (valstr), 5172 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5173 goto done; 5174 } 5175 (void) snprintf(valstr, sizeof (valstr), "%d", 5176 nxgep->intr_threshold); 5177 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5178 "==> nxge_get_priv_prop: name %s (value %d)", 5179 pr_name, (uint32_t)nxgep->intr_threshold)); 5180 5181 goto done; 5182 } 5183 5184 /* Classification and Load Distribution Configuration */ 5185 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5186 if (is_default) { 5187 (void) snprintf(valstr, sizeof (valstr), "%x", 5188 NXGE_CLASS_FLOW_GEN_SERVER); 5189 err = 0; 5190 goto done; 5191 } 5192 err = nxge_dld_get_ip_opt(nxgep, 5193 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5194 5195 (void) snprintf(valstr, sizeof (valstr), "%x", 5196 (int)param_arr[param_class_opt_ipv4_tcp].value); 5197 5198 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5199 "==> nxge_get_priv_prop: %s", valstr)); 5200 goto done; 5201 } 5202 5203 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5204 if (is_default) { 5205 (void) snprintf(valstr, sizeof (valstr), "%x", 5206 NXGE_CLASS_FLOW_GEN_SERVER); 5207 err = 0; 5208 goto done; 5209 } 5210 err = nxge_dld_get_ip_opt(nxgep, 5211 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5212 5213 (void) snprintf(valstr, sizeof (valstr), "%x", 5214 (int)param_arr[param_class_opt_ipv4_udp].value); 
5215 5216 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5217 "==> nxge_get_priv_prop: %s", valstr)); 5218 goto done; 5219 } 5220 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5221 if (is_default) { 5222 (void) snprintf(valstr, sizeof (valstr), "%x", 5223 NXGE_CLASS_FLOW_GEN_SERVER); 5224 err = 0; 5225 goto done; 5226 } 5227 err = nxge_dld_get_ip_opt(nxgep, 5228 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 5229 5230 (void) snprintf(valstr, sizeof (valstr), "%x", 5231 (int)param_arr[param_class_opt_ipv4_ah].value); 5232 5233 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5234 "==> nxge_get_priv_prop: %s", valstr)); 5235 goto done; 5236 } 5237 5238 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5239 if (is_default) { 5240 (void) snprintf(valstr, sizeof (valstr), "%x", 5241 NXGE_CLASS_FLOW_GEN_SERVER); 5242 err = 0; 5243 goto done; 5244 } 5245 err = nxge_dld_get_ip_opt(nxgep, 5246 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 5247 5248 (void) snprintf(valstr, sizeof (valstr), "%x", 5249 (int)param_arr[param_class_opt_ipv4_sctp].value); 5250 5251 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5252 "==> nxge_get_priv_prop: %s", valstr)); 5253 goto done; 5254 } 5255 5256 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5257 if (is_default) { 5258 (void) snprintf(valstr, sizeof (valstr), "%x", 5259 NXGE_CLASS_FLOW_GEN_SERVER); 5260 err = 0; 5261 goto done; 5262 } 5263 err = nxge_dld_get_ip_opt(nxgep, 5264 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 5265 5266 (void) snprintf(valstr, sizeof (valstr), "%x", 5267 (int)param_arr[param_class_opt_ipv6_tcp].value); 5268 5269 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5270 "==> nxge_get_priv_prop: %s", valstr)); 5271 goto done; 5272 } 5273 5274 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5275 if (is_default) { 5276 (void) snprintf(valstr, sizeof (valstr), "%x", 5277 NXGE_CLASS_FLOW_GEN_SERVER); 5278 err = 0; 5279 goto done; 5280 } 5281 err = nxge_dld_get_ip_opt(nxgep, 5282 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 5283 5284 (void) snprintf(valstr, sizeof (valstr), "%x", 5285 (int)param_arr[param_class_opt_ipv6_udp].value); 5286 5287 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5288 "==> nxge_get_priv_prop: %s", valstr)); 5289 goto done; 5290 } 5291 5292 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5293 if (is_default) { 5294 (void) snprintf(valstr, sizeof (valstr), "%x", 5295 NXGE_CLASS_FLOW_GEN_SERVER); 5296 err = 0; 5297 goto done; 5298 } 5299 err = nxge_dld_get_ip_opt(nxgep, 5300 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 5301 5302 (void) snprintf(valstr, sizeof (valstr), "%x", 5303 (int)param_arr[param_class_opt_ipv6_ah].value); 5304 5305 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5306 "==> nxge_get_priv_prop: %s", valstr)); 5307 goto done; 5308 } 5309 5310 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5311 if (is_default) { 5312 (void) snprintf(valstr, sizeof (valstr), "%x", 5313 NXGE_CLASS_FLOW_GEN_SERVER); 5314 err = 0; 5315 goto done; 5316 } 5317 err = nxge_dld_get_ip_opt(nxgep, 5318 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 5319 5320 (void) snprintf(valstr, sizeof (valstr), "%x", 5321 (int)param_arr[param_class_opt_ipv6_sctp].value); 5322 5323 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5324 "==> nxge_get_priv_prop: %s", valstr)); 5325 goto done; 5326 } 5327 5328 /* Software LSO */ 5329 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5330 if (is_default) { 5331 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5332 err = 0; 5333 goto done; 5334 } 5335 (void) snprintf(valstr, sizeof (valstr), 5336 "%d", nxgep->soft_lso_enable); 5337 err = 0; 5338 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5339 "==> 
nxge_get_priv_prop: name %s (value %d)", 5340 pr_name, nxgep->soft_lso_enable)); 5341 5342 goto done; 5343 } 5344 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5345 err = 0; 5346 if (is_default || 5347 nxgep->param_arr[param_anar_10gfdx].value != 0) { 5348 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5349 goto done; 5350 } else { 5351 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5352 goto done; 5353 } 5354 } 5355 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5356 err = 0; 5357 if (is_default || 5358 nxgep->param_arr[param_anar_pause].value != 0) { 5359 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5360 goto done; 5361 } else { 5362 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5363 goto done; 5364 } 5365 } 5366 5367 done: 5368 if (err == 0) { 5369 strsize = (uint_t)strlen(valstr); 5370 if (pr_valsize < strsize) { 5371 err = ENOBUFS; 5372 } else { 5373 (void) strlcpy(pr_val, valstr, pr_valsize); 5374 } 5375 } 5376 5377 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5378 "<== nxge_get_priv_prop: return %d", err)); 5379 return (err); 5380 } 5381 5382 /* 5383 * Module loading and removing entry points. 5384 */ 5385 5386 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach, 5387 nodev, NULL, D_MP, NULL, nxge_quiesce); 5388 5389 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5390 5391 /* 5392 * Module linkage information for the kernel. 5393 */ 5394 static struct modldrv nxge_modldrv = { 5395 &mod_driverops, 5396 NXGE_DESC_VER, 5397 &nxge_dev_ops 5398 }; 5399 5400 static struct modlinkage modlinkage = { 5401 MODREV_1, (void *) &nxge_modldrv, NULL 5402 }; 5403 5404 int 5405 _init(void) 5406 { 5407 int status; 5408 5409 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 5410 5411 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5412 5413 mac_init_ops(&nxge_dev_ops, "nxge"); 5414 5415 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5416 if (status != 0) { 5417 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5418 "failed to init device soft state")); 5419 goto _init_exit; 5420 } 5421 5422 status = mod_install(&modlinkage); 5423 if (status != 0) { 5424 ddi_soft_state_fini(&nxge_list); 5425 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5426 goto _init_exit; 5427 } 5428 5429 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5430 5431 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5432 return (status); 5433 5434 _init_exit: 5435 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status)); 5436 MUTEX_DESTROY(&nxgedebuglock); 5437 return (status); 5438 } 5439 5440 int 5441 _fini(void) 5442 { 5443 int status; 5444 5445 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5446 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5447 5448 if (nxge_mblks_pending) 5449 return (EBUSY); 5450 5451 status = mod_remove(&modlinkage); 5452 if (status != DDI_SUCCESS) { 5453 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5454 "Module removal failed 0x%08x", 5455 status)); 5456 goto _fini_exit; 5457 } 5458 5459 mac_fini_ops(&nxge_dev_ops); 5460 5461 ddi_soft_state_fini(&nxge_list); 5462 5463 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5464 5465 MUTEX_DESTROY(&nxge_common_lock); 5466 MUTEX_DESTROY(&nxgedebuglock); 5467 return (status); 5468 5469 _fini_exit: 5470 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status)); 5471 return (status); 5472 } 5473 5474 int 5475 _info(struct modinfo *modinfop) 5476 { 5477 int status; 5478 5479 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5480 status = mod_info(&modlinkage, 
modinfop);
	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));

	return (status);
}

/*ARGSUSED*/
static int
nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
{
	p_nxge_ring_handle_t	rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t		nxgep = rhp->nxgep;
	uint32_t		channel;
	p_tx_ring_t		ring;

	channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
	ring = nxgep->tx_rings->rings[channel];

	MUTEX_ENTER(&ring->lock);
	ring->tx_ring_handle = rhp->ring_handle;
	MUTEX_EXIT(&ring->lock);

	return (0);
}

static void
nxge_tx_ring_stop(mac_ring_driver_t rdriver)
{
	p_nxge_ring_handle_t	rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t		nxgep = rhp->nxgep;
	uint32_t		channel;
	p_tx_ring_t		ring;

	channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
	ring = nxgep->tx_rings->rings[channel];

	MUTEX_ENTER(&ring->lock);
	ring->tx_ring_handle = (mac_ring_handle_t)NULL;
	MUTEX_EXIT(&ring->lock);
}

static int
nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
{
	p_nxge_ring_handle_t	rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t		nxgep = rhp->nxgep;
	uint32_t		channel;
	p_rx_rcr_ring_t		ring;
	int			i;

	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
	ring = nxgep->rx_rcr_rings->rcr_rings[channel];

	MUTEX_ENTER(&ring->lock);

	if (nxgep->rx_channel_started[channel] == B_TRUE) {
		MUTEX_EXIT(&ring->lock);
		return (0);
	}

	/* set rcr_ring */
	for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
		if ((nxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
		    (nxgep->ldgvp->ldvp[i].channel == channel)) {
			ring->ldvp = &nxgep->ldgvp->ldvp[i];
			ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp;
		}
	}

	nxgep->rx_channel_started[channel] = B_TRUE;
	ring->rcr_mac_handle = rhp->ring_handle;
	ring->rcr_gen_num = mr_gen_num;
	MUTEX_EXIT(&ring->lock);

	return (0);
}

static void
nxge_rx_ring_stop(mac_ring_driver_t rdriver)
{
	p_nxge_ring_handle_t	rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t		nxgep = rhp->nxgep;
	uint32_t		channel;
	p_rx_rcr_ring_t		ring;

	channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
	ring = nxgep->rx_rcr_rings->rcr_rings[channel];

	MUTEX_ENTER(&ring->lock);
	nxgep->rx_channel_started[channel] = B_FALSE;
	ring->rcr_mac_handle = NULL;
	MUTEX_EXIT(&ring->lock);
}

/*
 * Callback function for MAC layer to register all rings.
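 *
 * (Descriptive note, added for clarity: the MAC layer invokes this
 * callback once per ring; the driver fills in the mac_ring_info_t with
 * its per-ring driver handle and the mri_start/mri_stop entry points
 * defined above, plus mri_tx for TX rings and mri_intr/mri_poll for
 * RX rings.)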
5576 */ 5577 static void 5578 nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index, 5579 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh) 5580 { 5581 p_nxge_t nxgep = (p_nxge_t)arg; 5582 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config; 5583 5584 NXGE_DEBUG_MSG((nxgep, TX_CTL, 5585 "==> nxge_fill_ring 0x%x index %d", rtype, index)); 5586 5587 switch (rtype) { 5588 case MAC_RING_TYPE_TX: { 5589 p_nxge_ring_handle_t rhandlep; 5590 5591 NXGE_DEBUG_MSG((nxgep, TX_CTL, 5592 "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d", 5593 rtype, index, p_cfgp->tdc.count)); 5594 5595 ASSERT((index >= 0) && (index < p_cfgp->tdc.count)); 5596 rhandlep = &nxgep->tx_ring_handles[index]; 5597 rhandlep->nxgep = nxgep; 5598 rhandlep->index = index; 5599 rhandlep->ring_handle = rh; 5600 5601 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5602 infop->mri_start = nxge_tx_ring_start; 5603 infop->mri_stop = nxge_tx_ring_stop; 5604 infop->mri_tx = nxge_tx_ring_send; 5605 5606 break; 5607 } 5608 case MAC_RING_TYPE_RX: { 5609 p_nxge_ring_handle_t rhandlep; 5610 int nxge_rindex; 5611 mac_intr_t nxge_mac_intr; 5612 5613 NXGE_DEBUG_MSG((nxgep, RX_CTL, 5614 "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d", 5615 rtype, index, p_cfgp->max_rdcs)); 5616 5617 /* 5618 * 'index' is the ring index within the group. 5619 * Find the ring index in the nxge instance. 5620 */ 5621 nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index); 5622 5623 ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs)); 5624 rhandlep = &nxgep->rx_ring_handles[nxge_rindex]; 5625 rhandlep->nxgep = nxgep; 5626 rhandlep->index = nxge_rindex; 5627 rhandlep->ring_handle = rh; 5628 5629 /* 5630 * Entrypoint to enable interrupt (disable poll) and 5631 * disable interrupt (enable poll). 5632 */ 5633 nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep; 5634 nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll; 5635 nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll; 5636 infop->mri_driver = (mac_ring_driver_t)rhandlep; 5637 infop->mri_start = nxge_rx_ring_start; 5638 infop->mri_stop = nxge_rx_ring_stop; 5639 infop->mri_intr = nxge_mac_intr; /* ??? */ 5640 infop->mri_poll = nxge_rx_poll; 5641 5642 break; 5643 } 5644 default: 5645 break; 5646 } 5647 5648 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x", 5649 rtype)); 5650 } 5651 5652 static void 5653 nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh, 5654 mac_ring_type_t type) 5655 { 5656 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh; 5657 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh; 5658 nxge_t *nxge; 5659 nxge_grp_t *grp; 5660 nxge_rdc_grp_t *rdc_grp; 5661 uint16_t channel; /* device-wise ring id */ 5662 int dev_gindex; 5663 int rv; 5664 5665 nxge = rgroup->nxgep; 5666 5667 switch (type) { 5668 case MAC_RING_TYPE_TX: 5669 /* 5670 * nxge_grp_dc_add takes a channel number which is a 5671 * "devise" ring ID. 5672 */ 5673 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index; 5674 5675 /* 5676 * Remove the ring from the default group 5677 */ 5678 if (rgroup->gindex != 0) { 5679 (void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 5680 } 5681 5682 /* 5683 * nxge->tx_set.group[] is an array of groups indexed by 5684 * a "port" group ID. 
		 */
		grp = nxge->tx_set.group[rgroup->gindex];
		rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
		if (rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_group_add_ring: nxge_grp_dc_add failed"));
		}
		break;

	case MAC_RING_TYPE_RX:
		/*
		 * nxge->rx_set.group[] is an array of groups indexed by
		 * a "port" group ID.
		 */
		grp = nxge->rx_set.group[rgroup->gindex];

		dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
		    rgroup->gindex;
		rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];

		/*
		 * nxge_grp_dc_add takes a channel number which is a
		 * "device" ring ID.
		 */
		channel = nxge->pt_config.hw_config.start_rdc + rhandle->index;
		rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel);
		if (rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_group_add_ring: nxge_grp_dc_add failed"));
		}

		rdc_grp->map |= (1 << channel);
		rdc_grp->max_rdcs++;

		(void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
		break;
	}
}

static void
nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
	mac_ring_type_t type)
{
	nxge_ring_group_t	*rgroup = (nxge_ring_group_t *)gh;
	nxge_ring_handle_t	*rhandle = (nxge_ring_handle_t *)rh;
	nxge_t			*nxge;
	uint16_t		channel;	/* device-wise ring id */
	nxge_rdc_grp_t		*rdc_grp;
	int			dev_gindex;

	nxge = rgroup->nxgep;

	switch (type) {
	case MAC_RING_TYPE_TX:
		dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid +
		    rgroup->gindex;
		channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
		nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

		/*
		 * Add the ring back to the default group
		 */
		if (rgroup->gindex != 0) {
			nxge_grp_t *grp;
			grp = nxge->tx_set.group[0];
			(void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
		}
		break;

	case MAC_RING_TYPE_RX:
		dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
		    rgroup->gindex;
		rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
		channel = rdc_grp->start_rdc + rhandle->index;
		nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);

		rdc_grp->map &= ~(1 << channel);
		rdc_grp->max_rdcs--;

		(void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
		break;
	}
}
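/*
 * Illustrative sketch (not part of the driver): how RDC group membership
 * is maintained by nxge_group_add_ring() and nxge_group_rem_ring() above.
 * Channel n is tracked by bit n of rdc_grp->map alongside the max_rdcs
 * count. The helper names and the guard macro are hypothetical, so this
 * block is never compiled.
 */
#ifdef NXGE_EXAMPLE_SKETCH
static void
nxge_example_grp_add_channel(nxge_rdc_grp_t *rdc_grp, uint16_t channel)
{
	rdc_grp->map |= (1 << channel);		/* mark channel present */
	rdc_grp->max_rdcs++;
}

static void
nxge_example_grp_rem_channel(nxge_rdc_grp_t *rdc_grp, uint16_t channel)
{
	rdc_grp->map &= ~(1 << channel);	/* mark channel absent */
	rdc_grp->max_rdcs--;
}
#endif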
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs(p_nxge_t nxgep)
{
	int		intr_types;
	int		type = 0;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));

	nxgep->nxge_intr_type.intr_registered = B_FALSE;
	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
	nxgep->nxge_intr_type.msi_intx_cnt = 0;
	nxgep->nxge_intr_type.intr_added = 0;
	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
	nxgep->nxge_intr_type.intr_type = 0;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	} else if (nxge_msi_enable) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
	    != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nxgep->nxge_intr_type.intr_types = intr_types;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * Solaris MSI-X is not supported yet; use MSI for now.
	 * nxge_msi_enable:
	 *	1 - MSI
	 *	2 - MSI-X
	 *	others - FIXED
	 */
	switch (nxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "use fixed (intx emulation) type %08x", type));
		break;

	case 2:
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
		break;

	case 1:
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
	}

	nxgep->nxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    nxgep->nxge_intr_type.niu_msi_enable) {
		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_add_intrs: "
			    " nxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "interrupts registered : type %d", type));
			nxgep->nxge_intr_type.intr_registered = B_TRUE;

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "\nAdded advanced nxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!nxgep->nxge_intr_type.intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
		    "failed to register interrupts"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
	return (status);
}

static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)
{
	int		intr_type;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intr_type = intrp->intr_type;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		return (nxge_add_intrs_adv_type(nxgep, intr_type));

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));

	default:
		return (NXGE_ERROR);
	}
}
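/*
 * Illustrative sketch (not part of the driver): nxge_add_intrs_adv_type()
 * below rounds the available MSI count down to a power of two with a
 * cascade of bit tests; the equivalent loop form is shown here for
 * clarity. For example, navail = 13 yields 8, and navail = 4 stays 4.
 * The helper name and the guard macro are hypothetical, so this block
 * is never compiled.
 */
#ifdef NXGE_EXAMPLE_SKETCH
static int
nxge_example_msi_floor_pow2(int navail)
{
	int n = 16;	/* largest count the cascade below considers */

	/* Walk down 16, 8, 4, 2 until a set bit is found; else 1. */
	while (n > 1 && (navail & n) != n)
		n >>= 1;
	return (n);
}
#endif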
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail, nrequest;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = nxge_create_msi_property(nxgep);
		if (nrequest < navail) {
			navail = nrequest;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI count must be a power of 2 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ?
DDI_INTR_ALLOC_STRICT : 5993 DDI_INTR_ALLOC_NORMAL); 5994 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 5995 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 5996 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 5997 navail, &nactual, behavior); 5998 if (ddi_status != DDI_SUCCESS || nactual == 0) { 5999 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6000 " ddi_intr_alloc() failed: %d", 6001 ddi_status)); 6002 kmem_free(intrp->htable, intrp->intr_size); 6003 return (NXGE_ERROR | NXGE_DDI_FAILED); 6004 } 6005 6006 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 6007 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 6008 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6009 " ddi_intr_get_pri() failed: %d", 6010 ddi_status)); 6011 /* Free already allocated interrupts */ 6012 for (y = 0; y < nactual; y++) { 6013 (void) ddi_intr_free(intrp->htable[y]); 6014 } 6015 6016 kmem_free(intrp->htable, intrp->intr_size); 6017 return (NXGE_ERROR | NXGE_DDI_FAILED); 6018 } 6019 6020 nrequired = 0; 6021 switch (nxgep->niu_type) { 6022 default: 6023 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 6024 break; 6025 6026 case N2_NIU: 6027 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 6028 break; 6029 } 6030 6031 if (status != NXGE_OK) { 6032 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6033 "nxge_add_intrs_adv_typ:nxge_ldgv_init " 6034 "failed: 0x%x", status)); 6035 /* Free already allocated interrupts */ 6036 for (y = 0; y < nactual; y++) { 6037 (void) ddi_intr_free(intrp->htable[y]); 6038 } 6039 6040 kmem_free(intrp->htable, intrp->intr_size); 6041 return (status); 6042 } 6043 6044 ldgp = nxgep->ldgvp->ldgp; 6045 for (x = 0; x < nrequired; x++, ldgp++) { 6046 ldgp->vector = (uint8_t)x; 6047 ldgp->intdata = SID_DATA(ldgp->func, x); 6048 arg1 = ldgp->ldvp; 6049 arg2 = nxgep; 6050 if (ldgp->nldvs == 1) { 6051 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 6052 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6053 "nxge_add_intrs_adv_type: " 6054 "arg1 0x%x arg2 0x%x: " 6055 "1-1 int handler (entry %d intdata 0x%x)\n", 6056 arg1, arg2, 6057 x, ldgp->intdata)); 6058 } else if (ldgp->nldvs > 1) { 6059 inthandler = (uint_t *)ldgp->sys_intr_handler; 6060 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6061 "nxge_add_intrs_adv_type: " 6062 "arg1 0x%x arg2 0x%x: " 6063 "nldevs %d int handler " 6064 "(entry %d intdata 0x%x)\n", 6065 arg1, arg2, 6066 ldgp->nldvs, x, ldgp->intdata)); 6067 } 6068 6069 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6070 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 6071 "htable 0x%llx", x, intrp->htable[x])); 6072 6073 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6074 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6075 != DDI_SUCCESS) { 6076 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6077 "==> nxge_add_intrs_adv_type: failed #%d " 6078 "status 0x%x", x, ddi_status)); 6079 for (y = 0; y < intrp->intr_added; y++) { 6080 (void) ddi_intr_remove_handler( 6081 intrp->htable[y]); 6082 } 6083 /* Free already allocated intr */ 6084 for (y = 0; y < nactual; y++) { 6085 (void) ddi_intr_free(intrp->htable[y]); 6086 } 6087 kmem_free(intrp->htable, intrp->intr_size); 6088 6089 (void) nxge_ldgv_uninit(nxgep); 6090 6091 return (NXGE_ERROR | NXGE_DDI_FAILED); 6092 } 6093 intrp->intr_added++; 6094 } 6095 6096 intrp->msi_intx_cnt = nactual; 6097 6098 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6099 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 6100 navail, nactual, 6101 intrp->msi_intx_cnt, 6102 intrp->intr_added)); 6103 6104 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6105 6106 (void) 
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t *dip = nxgep->dip;
	p_nxge_ldg_t ldgp;
	p_nxge_intr_t intrp;
	uint_t *inthandler;
	void *arg1, *arg2;
	int behavior;
	int nintrs, navail;
	int nactual, nrequired;
	int inum = 0;
	int x, y;
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		if (nxgep->niu_type != N2_NIU) {
			ldgp->intdata = SID_DATA(ldgp->func, x);
		}

		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "1-1 int handler(%d) ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "shared ldv %d int handler(%d) ldv %d ldg %d "
			    "arg1 0x%016llx arg2 0x%016llx\n",
			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		}

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type_fix: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated interrupts */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	status = nxge_intr_ldgv_init(nxgep);
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));

	return (status);
}
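
/*
 * nxge_remove_intrs() undoes nxge_add_intrs(): disable the vectors
 * (block disable if the capability allows it), remove the handlers,
 * free the vectors and the handle table, tear down the logical device
 * groups, and delete the "#msix-request" property created by
 * nxge_create_msi_property().
 */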
"1-1 int handler(%d) ldg %d ldv %d " 6220 "arg1 $%p arg2 $%p\n", 6221 x, ldgp->ldg, ldgp->ldvp->ldv, 6222 arg1, arg2)); 6223 } else if (ldgp->nldvs > 1) { 6224 inthandler = (uint_t *)ldgp->sys_intr_handler; 6225 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6226 "nxge_add_intrs_adv_type_fix: " 6227 "shared ldv %d int handler(%d) ldv %d ldg %d" 6228 "arg1 0x%016llx arg2 0x%016llx\n", 6229 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6230 arg1, arg2)); 6231 } 6232 6233 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6234 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6235 != DDI_SUCCESS) { 6236 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6237 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6238 "status 0x%x", x, ddi_status)); 6239 for (y = 0; y < intrp->intr_added; y++) { 6240 (void) ddi_intr_remove_handler( 6241 intrp->htable[y]); 6242 } 6243 for (y = 0; y < nactual; y++) { 6244 (void) ddi_intr_free(intrp->htable[y]); 6245 } 6246 /* Free already allocated intr */ 6247 kmem_free(intrp->htable, intrp->intr_size); 6248 6249 (void) nxge_ldgv_uninit(nxgep); 6250 6251 return (NXGE_ERROR | NXGE_DDI_FAILED); 6252 } 6253 intrp->intr_added++; 6254 } 6255 6256 intrp->msi_intx_cnt = nactual; 6257 6258 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6259 6260 status = nxge_intr_ldgv_init(nxgep); 6261 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6262 6263 return (status); 6264 } 6265 6266 static void 6267 nxge_remove_intrs(p_nxge_t nxgep) 6268 { 6269 int i, inum; 6270 p_nxge_intr_t intrp; 6271 6272 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6273 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6274 if (!intrp->intr_registered) { 6275 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6276 "<== nxge_remove_intrs: interrupts not registered")); 6277 return; 6278 } 6279 6280 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 6281 6282 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6283 (void) ddi_intr_block_disable(intrp->htable, 6284 intrp->intr_added); 6285 } else { 6286 for (i = 0; i < intrp->intr_added; i++) { 6287 (void) ddi_intr_disable(intrp->htable[i]); 6288 } 6289 } 6290 6291 for (inum = 0; inum < intrp->intr_added; inum++) { 6292 if (intrp->htable[inum]) { 6293 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6294 } 6295 } 6296 6297 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6298 if (intrp->htable[inum]) { 6299 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6300 "nxge_remove_intrs: ddi_intr_free inum %d " 6301 "msi_intx_cnt %d intr_added %d", 6302 inum, 6303 intrp->msi_intx_cnt, 6304 intrp->intr_added)); 6305 6306 (void) ddi_intr_free(intrp->htable[inum]); 6307 } 6308 } 6309 6310 kmem_free(intrp->htable, intrp->intr_size); 6311 intrp->intr_registered = B_FALSE; 6312 intrp->intr_enabled = B_FALSE; 6313 intrp->msi_intx_cnt = 0; 6314 intrp->intr_added = 0; 6315 6316 (void) nxge_ldgv_uninit(nxgep); 6317 6318 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6319 "#msix-request"); 6320 6321 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6322 } 6323 6324 /*ARGSUSED*/ 6325 static void 6326 nxge_intrs_enable(p_nxge_t nxgep) 6327 { 6328 p_nxge_intr_t intrp; 6329 int i; 6330 int status; 6331 6332 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6333 6334 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6335 6336 if (!intrp->intr_registered) { 6337 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6338 "interrupts are not registered")); 6339 return; 6340 } 6341 6342 if (intrp->intr_enabled) { 6343 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6344 "<== 
/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
	p_nxge_intr_t intrp;
	int i;
	int status;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
		if (status == DDI_SUCCESS) {
			intrp->intr_enabled = B_TRUE;
		}
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t intrp;
	int i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}
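
/*
 * nxge_mac_register() registers this instance with the GLDv3 framework.
 * In an LDOMs guest the port's real unicast address is not available at
 * this point, so a zero-filled buffer is allocated and a placeholder
 * all-ones source address is registered (the guest's address is
 * established separately through the hybrid I/O support); service
 * domains register ouraddr directly and also advertise MAC_VIRT_HIO.
 */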
nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t *macp;
	int status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	if (!isLDOMguest(nxgep)) {
		macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	} else {
		macp->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
		macp->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
		(void) memset(macp->m_src_addr, 0xff, MAXMACADDRLEN);
	}
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
	    NXGE_EHEADER_VLAN_CRC;
	macp->m_max_sdu = nxgep->mac.default_mtu;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = nxge_priv_props;
	macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS;
	if (isLDOMguest(nxgep)) {
		macp->m_v12n = MAC_VIRT_LEVEL1 | MAC_VIRT_SERIALIZE;
	} else {
		macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1 |
		    MAC_VIRT_SERIALIZE;
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_mac_register: instance %d "
	    "max_sdu %d margin %d maxframe %d (header %d)",
	    nxgep->instance,
	    macp->m_max_sdu, macp->m_margin,
	    nxgep->mac.maxframesize,
	    NXGE_EHEADER_VLAN_CRC));

	status = mac_register(macp, &nxgep->mach);
	if (isLDOMguest(nxgep)) {
		KMEM_FREE(macp->m_src_addr, MAXMACADDRLEN);
		KMEM_FREE(macp->m_dst_addr, MAXMACADDRLEN);
	}
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}

void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t size;
	mblk_t *nmp;
	uint8_t blk_id;
	uint8_t chan;
	uint32_t err_id;
	err_inject_t *eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;
	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
	switch (blk_id) {
	case MAC_BLK_ID:
		break;
	case TXMAC_BLK_ID:
		break;
	case RXMAC_BLK_ID:
		break;
	case MIF_BLK_ID:
		break;
	case IPP_BLK_ID:
		nxge_ipp_inject_err(nxgep, err_id);
		break;
	case TXC_BLK_ID:
		nxge_txc_inject_err(nxgep, err_id);
		break;
	case TXDMA_BLK_ID:
		nxge_txdma_inject_err(nxgep, err_id, chan);
		break;
	case RXDMA_BLK_ID:
		nxge_rxdma_inject_err(nxgep, err_id, chan);
		break;
	case ZCP_BLK_ID:
		nxge_zcp_inject_err(nxgep, err_id);
		break;
	case ESPC_BLK_ID:
		break;
	case FFLP_BLK_ID:
		break;
	case PHY_BLK_ID:
		break;
	case ETHER_SERDES_BLK_ID:
		break;
	case PCIE_SERDES_BLK_ID:
		break;
	case VIR_BLK_ID:
		break;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

	miocack(wq, mp, (int)size, 0);
}
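
/*
 * nxge_init_common_dev() attaches this function (port) to the per-chip
 * nxge_hw_list entry shared by all functions of the same Neptune/NIU
 * device, creating the entry and its shared locks on first attach.
 */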
static int
nxge_init_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t hw_p;
	dev_info_t *p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

	p_dip = nxgep->p_dip;
	MUTEX_ENTER(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_dev: func # %d",
	    nxgep->function_num));
	/*
	 * Loop through the existing per-Neptune hardware list.
	 */
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device: func # %d "
		    "hw_p $%p parent dip $%p",
		    nxgep->function_num,
		    hw_p,
		    p_dip));
		if (hw_p->parent_devp == p_dip) {
			nxgep->nxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->nxge_p[nxgep->function_num] = nxgep;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_init_common_device: func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {

		char **prop_val;
		uint_t prop_len;
		int i;

		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device: func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num,
		    p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;
		if (nxgep->niu_type == N2_NIU) {
			hw_p->niu_type = N2_NIU;
			hw_p->platform_type = P_NEPTUNE_NIU;
		} else {
			hw_p->niu_type = NIU_TYPE_NONE;
			hw_p->platform_type = P_NEPTUNE_NONE;
		}

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;

		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
			for (i = 0; i < prop_len; i++) {
				if (strcmp((caddr_t)prop_val[i],
				    NXGE_ROCK_COMPATIBLE) == 0) {
					hw_p->platform_type = P_NEPTUNE_ROCK;
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "ROCK hw_p->platform_type %d",
					    hw_p->platform_type));
					break;
				}
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "nxge_init_common_dev: read compatible"
				    " property[%d] val[%s]",
				    i, (caddr_t)prop_val[i]));
			}
			/* Only free the property if the lookup succeeded. */
			ddi_prop_free(prop_val);
		}

		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
	}

	MUTEX_EXIT(&nxge_common_lock);

	nxgep->platform_type = hw_p->platform_type;
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d",
	    nxgep->platform_type));
	if (nxgep->niu_type != N2_NIU) {
		nxgep->niu_type = hw_p->niu_type;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}
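
/*
 * nxge_uninit_common_dev() is the detach-side counterpart of
 * nxge_init_common_dev(): it drops this function from the shared
 * hardware entry and, when the last function detaches, destroys the
 * shared locks and unlinks the entry from nxge_hw_list.
 */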
static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t hw_p, h_hw_p;
	p_nxge_dma_pt_cfg_t p_dma_cfgp;
	p_nxge_hw_pt_cfg_t p_cfgp;
	dev_info_t *p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device: func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));

			/*
			 * Release the RDC table, a shared resource
			 * of the nxge hardware. The RDC table was
			 * assigned to this instance of nxge in
			 * nxge_use_cfg_dma_config().
			 */
			if (!isLDOMguest(nxgep)) {
				p_dma_cfgp =
				    (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
				p_cfgp =
				    (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
				(void) nxge_fzc_rdc_tbl_unbind(nxgep,
				    p_cfgp->def_mac_rxdma_grpid);

				/* Cleanup any outstanding groups. */
				nxge_grp_cleanup(nxgep);
			}

			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num,
				    hw_p,
				    p_dip,
				    hw_p->ndevs));

				nxge_hio_uninit(nxgep);

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				nxgep->nxge_hw_p = NULL;
				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}

/*
 * Determines the number of ports from the niu_type or the platform type.
 * Returns the number of ports, or zero on failure.
 */
int
nxge_get_nports(p_nxge_t nxgep)
{
	int nports = 0;

	switch (nxgep->niu_type) {
	case N2_NIU:
	case NEPTUNE_2_10GF:
		nports = 2;
		break;
	case NEPTUNE_4_1GC:
	case NEPTUNE_2_10GF_2_1GC:
	case NEPTUNE_1_10GF_3_1GC:
	case NEPTUNE_1_1GC_1_10GF_2_1GC:
	case NEPTUNE_2_10GF_2_1GRF:
		nports = 4;
		break;
	default:
		switch (nxgep->platform_type) {
		case P_NEPTUNE_NIU:
		case P_NEPTUNE_ATLAS_2PORT:
			nports = 2;
			break;
		case P_NEPTUNE_ATLAS_4PORT:
		case P_NEPTUNE_MARAMBA_P0:
		case P_NEPTUNE_MARAMBA_P1:
		case P_NEPTUNE_ROCK:
		case P_NEPTUNE_ALONSO:
			nports = 4;
			break;
		default:
			break;
		}
		break;
	}

	return (nports);
}

/*
 * The following two functions support the PSARC/2007/453 MSI-X
 * interrupt limit override.
 */
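
/*
 * The MSI-X request counts consulted below come from the global
 * tunables nxge_msix_10g_intrs and nxge_msix_1g_intrs. As an
 * illustration only (not a requirement), an administrator could cap
 * the per-port 10G request at 4 vectors with an /etc/system entry and
 * a reboot:
 *
 *	set nxge:nxge_msix_10g_intrs = 4
 */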
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
	int nmsi;
	extern int ncpus;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));

	switch (nxgep->mac.portmode) {
	case PORT_10G_COPPER:
	case PORT_10G_FIBER:
	case PORT_10G_TN1010:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		/*
		 * At most 8 MSI-X interrupts are requested. If the
		 * number of CPUs is less than 8, the request is
		 * reduced to the number of CPUs (default behavior).
		 */
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (10G): nxge_msix_10g_intrs %d",
		    nxge_msix_10g_intrs));
		if ((nxge_msix_10g_intrs == 0) ||
		    (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_10G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): reset to 8"));
		} else {
			nmsi = nxge_msix_10g_intrs;
		}

		/*
		 * If the default number of interrupts (8) is requested,
		 * it is still capped by the number of CPUs.
		 */
		if ((nmsi == NXGE_MSIX_REQUEST_10G) &&
		    (ncpus < nmsi)) {
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): reset to "
			    "ncpus %d", ncpus));
			nmsi = ncpus;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;

	default:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d",
		    nxge_msix_1g_intrs));
		if ((nxge_msix_1g_intrs == 0) ||
		    (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_1G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (1G): reset to 2"));
		} else {
			nmsi = nxge_msix_1g_intrs;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
	return (nmsi);
}

/* ARGSUSED */
static int
nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
    void *pr_val)
{
	int err = 0;
	link_flowctrl_t fl;

	switch (pr_num) {
	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (link_flowctrl_t))
			return (EINVAL);
		fl = LINK_FLOWCTRL_RX;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	default:
		err = ENOTSUP;
		break;
	}
	return (err);
}


/*
 * The following is a software workaround for a Neptune hardware
 * interrupt bug: the hardware may generate spurious interrupts after
 * an interrupt handler has been removed.
 */
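
/*
 * The port-logic register at PCI config offset 0x98 carries the reset
 * controls used below: bit 29 resets the PIM, bit 30 the GLU, and
 * bit 31 the NIU core. NXGE_PCI_RESET_ALL is therefore 0xe0000000; it
 * is OR-ed into the current register value so the other bits are
 * preserved.
 */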
#define	NXGE_PCI_PORT_LOGIC_OFFSET	0x98
#define	NXGE_PIM_RESET			(1ULL << 29)
#define	NXGE_GLU_RESET			(1ULL << 30)
#define	NXGE_NIU_RESET			(1ULL << 31)
#define	NXGE_PCI_RESET_ALL		(NXGE_PIM_RESET |	\
					NXGE_GLU_RESET |	\
					NXGE_NIU_RESET)

#define	NXGE_WAIT_QUITE_TIME		200000
#define	NXGE_WAIT_QUITE_RETRY		40
#define	NXGE_PCI_RESET_WAIT		1000000 /* one second */

static void
nxge_niu_peu_reset(p_nxge_t nxgep)
{
	uint32_t rvalue;
	p_nxge_hw_list_t hw_p;
	p_nxge_t fnxgep;
	int i, j;

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: NULL hardware pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
	    hw_p->flags, nxgep->nxge_link_poll_timerid,
	    nxgep->nxge_timerid));

	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
	/*
	 * Make sure the other instances on the same hardware have
	 * stopped issuing PIOs and are in a quiescent state.
	 */
	for (i = 0; i < NXGE_MAX_PORTS; i++) {
		fnxgep = hw_p->nxge_p[i];
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: checking entry %d "
		    "nxgep $%p", i, fnxgep));
#ifdef NXGE_DEBUG
		if (fnxgep) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: entry %d (function %d) "
			    "link timer id %d hw timer id %d",
			    i, fnxgep->function_num,
			    fnxgep->nxge_link_poll_timerid,
			    fnxgep->nxge_timerid));
		}
#endif
		if (fnxgep && fnxgep != nxgep &&
		    (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: checking $%p "
			    "(function %d) timer ids",
			    fnxgep, fnxgep->function_num));
			for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
				NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
				    "==> nxge_niu_peu_reset: waiting"));
				NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
				if (!fnxgep->nxge_timerid &&
				    !fnxgep->nxge_link_poll_timerid) {
					break;
				}
			}
			NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
			if (fnxgep->nxge_timerid ||
			    fnxgep->nxge_link_poll_timerid) {
				MUTEX_EXIT(&hw_p->nxge_cfg_lock);
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "<== nxge_niu_peu_reset: cannot reset "
				    "hardware (devices are still in use)"));
				return;
			}
		}
	}

	if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
		hw_p->flags |= COMMON_RESET_NIU_PCI;
		rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: read offset 0x%x (%d) "
		    "(data 0x%x)",
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    rvalue));

		rvalue |= NXGE_PCI_RESET_ALL;
		pci_config_put32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
		    rvalue));

		NXGE_DELAY(NXGE_PCI_RESET_WAIT);
	}

	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
}
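
/*
 * nxge_set_pci_replay_timeout() programs the PCI-E replay timer: the
 * tunable nxge_replay_timeout is shifted into the timer field with
 * PCI_REPLAY_TIMEOUT_SHIFT and OR-ed into the current config-space
 * value. Because the new bits are OR-ed in rather than the field being
 * masked and rewritten, any bits already set in the field remain set;
 * this assumes the field's power-on contents do not conflict with the
 * value being set.
 */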
static void
nxge_set_pci_replay_timeout(p_nxge_t nxgep)
{
	p_dev_regs_t dev_regs;
	uint32_t value;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));

	if (!nxge_set_replay_timer) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: will not change "
		    "the timeout"));
		return;
	}

	dev_regs = nxgep->dev_regs;

	/* Check the handles before the debug message dereferences them. */
	if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
		    "no PCI handle",
		    dev_regs));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
	    dev_regs, dev_regs->nxge_pciregh));

	value = (pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
	    (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
	    "(timeout value to set 0x%x at offset 0x%x) value 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));

	pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
	    value);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET)));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on
 * failure. DDI_FAILURE indicates an error condition and should almost
 * never happen.
 */
static int
nxge_quiesce(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);

	if (nxgep == NULL)
		return (DDI_FAILURE);

	/* Turn off debugging */
	nxge_debug_level = NO_DEBUG;
	nxgep->nxge_debug_level = NO_DEBUG;
	npi_debug_level = NO_DEBUG;

	/*
	 * Stop the link monitor only when link checking is
	 * interrupt based.
	 */
	if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	}

	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	return (DDI_SUCCESS);
}